diff --git a/.github/workflows/snyk.yml b/.github/workflows/snyk.yml new file mode 100644 index 0000000..6a0c814 --- /dev/null +++ b/.github/workflows/snyk.yml @@ -0,0 +1,21 @@ + +name: Snyk + +on: + push: + branches: [ master ] + +jobs: + security: + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v2 + + - name: Run Snyk to check for vulnerabilities + uses: snyk/actions/golang@master + with: + command: monitor + args: --project-name=sql-runner + env: + SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }} \ No newline at end of file diff --git a/.gitignore b/.gitignore index 2bab5a6..b91c21a 100644 --- a/.gitignore +++ b/.gitignore @@ -1,33 +1,14 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so +# Golang +vendor/ -# Folders -_obj -_test +# IntelliJ +.idea -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out +# macOS +.DS_Store -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof - -# This project's binaries -sql-runner -*.tmp -coverage.out -coverage.html - -# Vagrant -.vagrant/ +# Project +build/ +dist/ +.release.yml.tmp +config.yml diff --git a/.release.yml b/.release.yml index b42378d..04466e3 100644 --- a/.release.yml +++ b/.release.yml @@ -1,9 +1,4 @@ --- -output_paths: - linux : dist/linux/sql-runner - darwin : dist/darwin/sql-runner - windows : dist/windows/sql-runner - local: root_dir : <%= ENV['TRAVIS_BUILD_DIR'] %> @@ -21,25 +16,19 @@ packages: continue_on_conflict: false version : <%= CMD['cat VERSION'] %> build_version : <%= ENV['TRAVIS_TAG'] %> - - build_commands: - - gox -osarch=linux/amd64 -output={{ output_paths.linux }} ./sql_runner/ - - gox -osarch=darwin/amd64 -output={{ output_paths.darwin }} ./sql_runner/ - - gox -osarch=windows/amd64 -output={{ output_paths.windows }} ./sql_runner/ - artifacts: - prefix : "sql_runner_" suffix : "_linux_amd64" type : "zip" binary_paths: - - "{{ output_paths.linux }}" + - build/output/linux/sql-runner - prefix : "sql_runner_" suffix : "_darwin_amd64" type : "zip" binary_paths: - - "{{ output_paths.darwin }}" + - build/output/darwin/sql-runner - prefix : "sql_runner_" suffix : "_windows_amd64" type : "zip" binary_paths: - - "{{ output_paths.windows }}.exe" + - build/output/windows/sql-runner.exe diff --git a/.travis.yml b/.travis.yml index 0a59786..5136280 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,40 +1,31 @@ -dist: trusty -sudo: false - +sudo: required language: go +services: + - docker go: - - 1.9.2 - - tip - -before_install: - - go get github.com/mattn/goveralls - - go get golang.org/x/tools/cmd/cover + - "1.13.8" before_script: - - ./integration/setup_travis.sh + - make setup-up script: - - go build -o sql-runner ./sql_runner/ - - ./integration/run_tests.sh - - go test ./sql_runner/ -v -covermode=count -coverprofile=coverage.out - - $HOME/gopath/bin/goveralls -coverprofile=coverage.out -service=travis-ci + - make + - make goveralls + - DISTRO=linux make integration before_deploy: - - go get github.com/mitchellh/gox - - pip install --user release-manager==0.1.0 + - sudo pip install pyOpenSSL==16.2.0 + - pip install --user release-manager==0.4.1 deploy: skip_cleanup: true provider: script - script: release-manager --config .release.yml --check-version --make-version --make-artifact --upload-artifact + script: make release on: - condition: '"${TRAVIS_GO_VERSION}" == "1.9.2"' + condition: '"${TRAVIS_GO_VERSION}" == "1.13.8"' tags: true -addons: - postgresql: '9.3' - env: global: - - secure: 
eP2CEhjE6YEgajudRcW6Lsyy92IxAB1KS5Cw0p/defwpouSuZS4NF8+YjcUhfI9sbkobL6KGaL12CYqLWToepQpiDHJ1eRaH+/8HQ+kgqwLxmHptVlqEXhkQPVJL04MvxZqeXocuOluGlWHOHMdlmDTA7eI/tKJtyykIRKyXFAc= - - secure: TP/mMjN910q8M/tknHkPxdYfKl25eQqdSscfTlQlnBxJpQgrgX25WgK1XuNI+IUTTmn1cM/eVRyKvUWflxqzwSUhgWO7s1EcN18WYU7Xl+RkWaoOMjqzV3q5l4M8rDK8LiBM/xrcz/rQMTZnWYm/tJ6+tjTQiuAezZh3bwuMvAo= + - secure: eP2CEhjE6YEgajudRcW6Lsyy92IxAB1KS5Cw0p/defwpouSuZS4NF8+YjcUhfI9sbkobL6KGaL12CYqLWToepQpiDHJ1eRaH+/8HQ+kgqwLxmHptVlqEXhkQPVJL04MvxZqeXocuOluGlWHOHMdlmDTA7eI/tKJtyykIRKyXFAc= + - secure: TP/mMjN910q8M/tknHkPxdYfKl25eQqdSscfTlQlnBxJpQgrgX25WgK1XuNI+IUTTmn1cM/eVRyKvUWflxqzwSUhgWO7s1EcN18WYU7Xl+RkWaoOMjqzV3q5l4M8rDK8LiBM/xrcz/rQMTZnWYm/tJ6+tjTQiuAezZh3bwuMvAo= diff --git a/CHANGELOG b/CHANGELOG index f4527f7..7e0f134 100644 --- a/CHANGELOG +++ b/CHANGELOG @@ -1,3 +1,10 @@ +Version 0.9.0 (2020-07-17) +-------------------------- +Replace Vagrant with Docker based development experience (#150) +Update project to use go mod instead of dep (#151) +Update Copyright to 2015-2020 (#152) +Add Snyk Integration (#154) + Version 0.8.0 (2018-11-08) -------------------------- Add support for BigQuery (#92) diff --git a/Gopkg.lock b/Gopkg.lock deleted file mode 100644 index 55e2ce8..0000000 --- a/Gopkg.lock +++ /dev/null @@ -1,169 +0,0 @@ -# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. - - -[[projects]] - name = "github.com/aws/aws-sdk-go" - packages = [ - "aws", - "aws/awserr", - "aws/awsutil", - "aws/client", - "aws/client/metadata", - "aws/corehandlers", - "aws/credentials", - "aws/credentials/ec2rolecreds", - "aws/ec2metadata", - "aws/endpoints", - "aws/request", - "internal/sdkio", - "internal/sdkrand", - "internal/sdkuri", - "internal/shareddefaults" - ] - revision = "3e9c4ca8d7fc8d22f0d21d50120186f73d2fdf1a" - version = "v1.15.27" - -[[projects]] - name = "github.com/davecgh/go-spew" - packages = ["spew"] - revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73" - version = "v1.1.1" - -[[projects]] - name = "github.com/go-ini/ini" - packages = ["."] - revision = "5cf292cae48347c2490ac1a58fe36735fb78df7e" - version = "v1.38.2" - -[[projects]] - name = "github.com/go-pg/pg" - packages = [ - ".", - "internal", - "internal/parser", - "internal/pool", - "orm", - "types" - ] - revision = "eb1e50689f65a1b60a16af344c3b092cfbe91c18" - version = "v6.14.5" - -[[projects]] - branch = "master" - name = "github.com/golang/glog" - packages = ["."] - revision = "23def4e6c14b4da8ac2ed8007337bc5eb5007998" - -[[projects]] - name = "github.com/google/uuid" - packages = ["."] - revision = "d460ce9f8df2e77fb1ba55ca87fafed96c607494" - version = "v1.0.0" - -[[projects]] - name = "github.com/hashicorp/consul" - packages = ["api"] - revision = "e716d1b5f8be252b3e53906c6d5632e0228f30fa" - version = "v1.2.2" - -[[projects]] - name = "github.com/hashicorp/go-cleanhttp" - packages = ["."] - revision = "e8ab9daed8d1ddd2d3c4efba338fe2eeae2e4f18" - version = "v0.5.0" - -[[projects]] - branch = "master" - name = "github.com/hashicorp/go-rootcerts" - packages = ["."] - revision = "6bb64b370b90e7ef1fa532be9e591a81c3493e00" - -[[projects]] - name = "github.com/hashicorp/serf" - packages = ["coordinate"] - revision = "d6574a5bb1226678d7010325fb6c985db20ee458" - version = "v0.8.1" - -[[projects]] - branch = "master" - name = "github.com/jinzhu/inflection" - packages = ["."] - revision = "04140366298a54a039076d798123ffa108fff46c" - -[[projects]] - name = "github.com/jmespath/go-jmespath" - packages = ["."] - revision = "0b12d6b5" - 
-[[projects]] - branch = "master" - name = "github.com/kardianos/osext" - packages = ["."] - revision = "ae77be60afb1dcacde03767a8c37337fad28ac14" - -[[projects]] - name = "github.com/mattn/go-runewidth" - packages = ["."] - revision = "ce7b0b5c7b45a81508558cd1dba6bb1e4ddb51bb" - version = "v0.0.3" - -[[projects]] - name = "github.com/mitchellh/go-homedir" - packages = ["."] - revision = "ae18d6b8b3205b561c79e8e5f69bff09736185f4" - version = "v1.0.0" - -[[projects]] - name = "github.com/mitchellh/mapstructure" - packages = ["."] - revision = "fa473d140ef3c6adf42d6b391fe76707f1f243c8" - version = "v1.0.0" - -[[projects]] - branch = "master" - name = "github.com/olekukonko/tablewriter" - packages = ["."] - revision = "d4647c9c7a84d847478d890b816b7d8b62b0b279" - -[[projects]] - branch = "master" - name = "github.com/pkg/browser" - packages = ["."] - revision = "c90ca0c84f15f81c982e32665bffd8d7aac8f097" - -[[projects]] - name = "github.com/pmezard/go-difflib" - packages = ["difflib"] - revision = "792786c7400a136282c1664665ae0a8db921c6c2" - version = "v1.0.0" - -[[projects]] - name = "github.com/snowflakedb/gosnowflake" - packages = ["."] - revision = "7d8648cf4957e55009221bc28f507382c08250e4" - version = "v1.1.10" - -[[projects]] - name = "github.com/stretchr/testify" - packages = ["assert"] - revision = "f35b8ab0b5a2cef36673838d662e249dd9c94686" - version = "v1.2.2" - -[[projects]] - branch = "master" - name = "golang.org/x/crypto" - packages = ["ocsp"] - revision = "0709b304e793a5edb4a2c0145f281ecdc20838a4" - -[[projects]] - branch = "v1" - name = "gopkg.in/yaml.v1" - packages = ["."] - revision = "9f9df34309c04878acc86042b16630b0f696e1de" - -[solve-meta] - analyzer-name = "dep" - analyzer-version = 1 - inputs-digest = "015082ec73258f381e0ad13afd99483429ca04fcdb507e02b450ba2f73c7c664" - solver-name = "gps-cdcl" - solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml deleted file mode 100644 index 0f40fdc..0000000 --- a/Gopkg.toml +++ /dev/null @@ -1,58 +0,0 @@ -# Gopkg.toml example -# -# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md -# for detailed Gopkg.toml documentation. 
-# -# required = ["github.com/user/thing/cmd/thing"] -# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] -# -# [[constraint]] -# name = "github.com/user/project" -# version = "1.0.0" -# -# [[constraint]] -# name = "github.com/user/project2" -# branch = "dev" -# source = "github.com/myfork/project2" -# -# [[override]] -# name = "github.com/x/y" -# version = "2.4.0" -# -# [prune] -# non-go = false -# go-tests = true -# unused-packages = true - - -[[constraint]] - name = "github.com/aws/aws-sdk-go" - version = "1.14.21" - -[[constraint]] - name = "github.com/go-pg/pg" - version = "6.0.0" - -[[constraint]] - name = "github.com/hashicorp/consul" - version = "1.2.0" - -[[constraint]] - branch = "master" - name = "github.com/kardianos/osext" - -[[constraint]] - name = "github.com/snowflakedb/gosnowflake" - version = "1.1.8" - -[[constraint]] - name = "github.com/stretchr/testify" - version = "1.2.2" - -[[constraint]] - branch = "v1" - name = "gopkg.in/yaml.v1" - -[prune] - go-tests = true - unused-packages = true diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..d249af9 --- /dev/null +++ b/Makefile @@ -0,0 +1,96 @@ +.PHONY: all format lint tidy setup-reset setup-up setup-down test goveralls integration release release-dry clean + +# ----------------------------------------------------------------------------- +# CONSTANTS +# ----------------------------------------------------------------------------- + +src_dir = sql_runner + +build_dir = build + +coverage_dir = $(build_dir)/coverage +coverage_out = $(coverage_dir)/coverage.out +coverage_html = $(coverage_dir)/coverage.html + +output_dir = $(build_dir)/output + +linux_dir = $(output_dir)/linux +darwin_dir = $(output_dir)/darwin +windows_dir = $(output_dir)/windows + +bin_name = sql-runner +bin_linux = $(linux_dir)/$(bin_name) +bin_darwin = $(darwin_dir)/$(bin_name) +bin_windows = $(windows_dir)/$(bin_name) + +# ----------------------------------------------------------------------------- +# BUILDING +# ----------------------------------------------------------------------------- + +all: + GO111MODULE=on go get -u github.com/mitchellh/gox + GO111MODULE=on CGO_ENABLED=0 gox -osarch=linux/amd64 -output=$(bin_linux) ./$(src_dir) + GO111MODULE=on CGO_ENABLED=0 gox -osarch=darwin/amd64 -output=$(bin_darwin) ./$(src_dir) + GO111MODULE=on CGO_ENABLED=0 gox -osarch=windows/amd64 -output=$(bin_windows) ./$(src_dir) + +# ----------------------------------------------------------------------------- +# FORMATTING +# ----------------------------------------------------------------------------- + +format: + GO111MODULE=on go fmt ./$(src_dir) + GO111MODULE=on gofmt -s -w ./$(src_dir) + +lint: + GO111MODULE=on go get -u golang.org/x/lint/golint + GO111MODULE=on golint ./$(src_dir) + +tidy: + GO111MODULE=on go mod tidy + +# ----------------------------------------------------------------------------- +# TESTING +# ----------------------------------------------------------------------------- + +setup-reset: setup-down setup-up + +setup-up: + docker-compose -f ./integration/docker-compose.yml up -d + sleep 2 + ./integration/setup_consul.sh + +setup-down: + docker-compose -f ./integration/docker-compose.yml down + +test: + mkdir -p $(coverage_dir) + GO111MODULE=on go get -u golang.org/x/tools/cmd/cover + GO111MODULE=on go test ./$(src_dir) -tags test -v -covermode=count -coverprofile=$(coverage_out) + GO111MODULE=on go tool cover -html=$(coverage_out) -o $(coverage_html) + +goveralls: test + GO111MODULE=on go get 
-u github.com/mattn/goveralls + goveralls -coverprofile=$(coverage_out) -service=travis-ci + +integration: +ifndef DISTRO + $(error DISTRO is undefined - this should be set to 'linux' or 'darwin'!) +endif + ./integration/run_tests.sh + +# ----------------------------------------------------------------------------- +# RELEASE +# ----------------------------------------------------------------------------- + +release: + release-manager --config .release.yml --check-version --make-artifact --make-version --upload-artifact + +release-dry: + release-manager --config .release.yml --check-version --make-artifact + +# ----------------------------------------------------------------------------- +# CLEANUP +# ----------------------------------------------------------------------------- + +clean: + rm -rf $(build_dir) diff --git a/README.md b/README.md index d261b15..2fda61d 100644 --- a/README.md +++ b/README.md @@ -12,19 +12,94 @@ Used with **[Snowplow][snowplow]** for scheduled SQL-based transformations of ev |:--------------------------------------:|:-----------------------------------------:|:---------------------------------------------:| | [![i1][devops-image]][devops-guide] | [![i2][analysts-image]][analysts-guide] | [![i3][developers-image]][developers-guide] | -## Quickstart +## Quick start -Assuming you are running on **64bit Linux**: +Assuming [go][go-url], [docker][docker-url] and [docker-compose][docker-compose-url] are installed: ```bash -> wget http://dl.bintray.com/snowplow/snowplow-generic/sql_runner_0.8.0_linux_amd64.zip -> unzip sql_runner_0.8.0_linux_amd64.zip -> ./sql-runner -usage + host> git clone https://github.com/snowplow/sql-runner + host> cd sql-runner + host> make setup-up # Launches Consul + Postgres for testing + host> make # Builds sql-runner binaries + host> make test # Runs unit tests + + # DISTRO specifies which binary you want to run integration tests with + host> DISTRO=darwin make integration +``` + +**Note:** You will need to ensure that `~/go/bin` is on your `PATH` so that `gox`, the underlying tool we use to build the binaries, can be found. + +When you are done with testing, simply execute `make setup-down` to terminate the docker-compose stack. + +To reset the testing resources, execute `make setup-reset`, which will rebuild the Docker containers. This can be useful if the state of these systems gets out of sync with what the tests expect. + +To remove all build files: + +```bash +host> make clean +``` + +To format the Go code in the source directory: + +```bash +host> make format +``` + +**Note:** Always run `make format` before submitting any code. + +**Note:** The `make test` command also generates a code coverage file which can be found at `build/coverage/coverage.html`. + +## How to use?
+ +First, either compile the binary from source using the above `make` command or download one of the published binaries directly from Bintray: + +* [Darwin (macOS)](https://dl.bintray.com/snowplow/snowplow-generic/sql_runner_0.9.0_darwin_amd64.zip) +* [Linux](https://dl.bintray.com/snowplow/snowplow-generic/sql_runner_0.9.0_linux_amd64.zip) +* [Windows](https://dl.bintray.com/snowplow/snowplow-generic/sql_runner_0.9.0_windows_amd64.zip) + +### CLI Output + +```bash +sql-runner version: 0.9.0 +Run playbooks of SQL scripts in series and parallel on Redshift and Postgres +Usage: + -checkLock string + Checks whether the lockfile already exists + -consul string + The address of a consul server with playbooks and SQL files stored in KV pairs + -consulOnlyForLock + Will read playbooks locally, but use Consul for locking. + -deleteLock string + Will attempt to delete a lockfile if it exists + -dryRun + Runs through a playbook without executing any of the SQL + -fillTemplates + Will print all queries after templates are filled + -fromStep string + Starts from a given step defined in your playbook + -help + Shows this message + -lock string + Optional argument which checks and sets a lockfile to ensure this run is a singleton. Deletes lock on run completing successfully + -playbook string + Playbook of SQL scripts to execute + -runQuery string + Will run a single query in the playbook + -showQueryOutput + Will print all output from queries + -softLock string + Optional argument, like '-lock' but the lockfile will be deleted even if the run fails + -sqlroot string + Absolute path to SQL scripts. Use PLAYBOOK, BINARY and PLAYBOOK_CHILD for those respective paths (default "PLAYBOOK") + -var value + Variables to be passed to the playbook, in the key=value format + -version + Shows the program version ``` + +See the end of this README for an example invocation. ## Copyright and license -SQL Runner is copyright 2015-2017 Snowplow Analytics Ltd. +SQL Runner is copyright 2015-2020 Snowplow Analytics Ltd. Licensed under the **[Apache License, Version 2.0][license]** (the "License"); you may not use this software except in compliance with the License. @@ -35,10 +110,14 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
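+ +## Example invocation + +As an illustrative sketch only (the playbook path and the `run_date` variable below are hypothetical), a dry run of a local playbook with one of the binaries built above might look like: + +```bash +host> ./build/output/linux/sql-runner -playbook ./playbook.yml -var run_date=2020-07-17 -dryRun +``` + +Dropping `-dryRun` executes the SQL for real; running with `-fillTemplates` first prints the queries after variable substitution so they can be inspected.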
+[go-url]: https://golang.org/doc/install +[docker-url]: https://docs.docker.com/get-docker/ +[docker-compose-url]: https://docs.docker.com/compose/install/ + [travis]: https://travis-ci.org/snowplow/sql-runner [travis-image]: https://travis-ci.org/snowplow/sql-runner.png?branch=master -[release-image]: http://img.shields.io/badge/release-0.8.0-6ad7e5.svg?style=flat +[release-image]: http://img.shields.io/badge/release-0.9.0-6ad7e5.svg?style=flat [releases]: https://github.com/snowplow/sql-runner/releases [license-image]: http://img.shields.io/badge/license-Apache--2-blue.svg?style=flat diff --git a/VERSION b/VERSION index 8adc70f..899f24f 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -0.8.0 \ No newline at end of file +0.9.0 \ No newline at end of file diff --git a/Vagrantfile b/Vagrantfile deleted file mode 100644 index b091fb5..0000000 --- a/Vagrantfile +++ /dev/null @@ -1,28 +0,0 @@ -Vagrant.configure("2") do |config| - - config.vm.box = "ubuntu/trusty64" - config.vm.hostname = "sql-runner" - config.ssh.forward_agent = true - - config.vm.provider :virtualbox do |vb| - vb.name = Dir.pwd().split("/")[-1] + "-" + Time.now.to_f.to_i.to_s - vb.customize ["modifyvm", :id, "--natdnshostresolver1", "on"] - vb.customize [ "guestproperty", "set", :id, "--timesync-threshold", 10000 ] - # We don't need much memory for Golang - vb.memory = 1024 - end - - config.vm.provision :shell do |sh| - sh.path = "vagrant/up.bash" - end - - # Requires Vagrant 1.7.0+ - config.push.define "binary", strategy: "local-exec" do |push| - push.script = "vagrant/push.bash" - end - - # Golang-specific - config.vm.synced_folder ".", "/vagrant" - config.vm.synced_folder ".", "/opt/gopath/src/github.com/snowplow/sql-runner" - -end diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..1eb12e7 --- /dev/null +++ b/go.mod @@ -0,0 +1,38 @@ +module github.com/snowplow/sql-runner + +go 1.13 + +require ( + cloud.google.com/go/bigquery v1.9.0 + github.com/armon/go-metrics v0.3.3 // indirect + github.com/aws/aws-sdk-go v1.15.27 + github.com/go-ini/ini v1.38.2 // indirect + github.com/go-pg/pg v6.14.5+incompatible + github.com/google/uuid v1.0.0 // indirect + github.com/hashicorp/consul v1.2.2 + github.com/hashicorp/go-msgpack v1.1.5 // indirect + github.com/hashicorp/go-rootcerts v0.0.0-20160503143440-6bb64b370b90 // indirect + github.com/hashicorp/go-uuid v1.0.2 // indirect + github.com/hashicorp/go-version v1.2.1 // indirect + github.com/hashicorp/memberlist v0.2.2 // indirect + github.com/hashicorp/serf v0.8.1 // indirect + github.com/jinzhu/inflection v0.0.0-20180308033659-04140366298a // indirect + github.com/kardianos/osext v0.0.0-20170510131534-ae77be60afb1 + github.com/mattn/go-runewidth v0.0.3 // indirect + github.com/mitchellh/go-homedir v1.0.0 // indirect + github.com/mitchellh/go-testing-interface v1.14.1 // indirect + github.com/mitchellh/gox v1.0.1 // indirect + github.com/mitchellh/mapstructure v1.0.0 // indirect + github.com/olekukonko/tablewriter v0.0.0-20180506121414-d4647c9c7a84 + github.com/onsi/ginkgo v1.14.0 // indirect + github.com/pkg/browser v0.0.0-20170505125900-c90ca0c84f15 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/smartystreets/goconvey v1.6.4 // indirect + github.com/snowflakedb/gosnowflake v1.1.10 + github.com/stretchr/testify v1.4.0 + golang.org/x/net v0.0.0-20200707034311-ab3426394381 + golang.org/x/tools v0.0.0-20200714190737-9048b464a08d // indirect + google.golang.org/api v0.29.0 + gopkg.in/ini.v1 v1.57.0 // indirect + gopkg.in/yaml.v1 
v1.0.0-20140924161607-9f9df34309c0 +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..6763d9d --- /dev/null +++ b/go.sum @@ -0,0 +1,547 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.60.0 h1:R+tDlceO7Ss+zyvtsdhTxacDyZ1k99xwskQ4FT7ruoM= +cloud.google.com/go v0.60.0/go.mod h1:yw2G51M9IfRboUH61Us8GqCeF1PzPblB823Mn2q2eAU= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.9.0 h1:Izm76KmIoARyQ89CVwEazD5zhH7xtuxkDlnCyoRXrhY= +cloud.google.com/go/bigquery v1.9.0/go.mod h1:JMGKDcmBZE48Feu6ZQ4qUaQDPpD6q6i0N8qVALoMAcc= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0 h1:86K1Gel7BQ9/WmNWn7dTKMvTLFzwtBe5FNqYbi9X35g= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod 
h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-metrics v0.3.3 h1:a9F4rlj7EWWrbj7BYw8J8+x+ZZkJeqzNyRk8hdPF+ro= +github.com/armon/go-metrics v0.3.3/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= +github.com/aws/aws-sdk-go v1.15.27 h1:i75BxN4Es/8rTVQbEKAP1WCiIhhz635xTNeDdZJRAXQ= +github.com/aws/aws-sdk-go v1.15.27/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= 
+github.com/go-ini/ini v1.38.2 h1:6Hl/z3p3iFkA0dlDfzYxuFuUGD+kaweypF6btsR2/Q4= +github.com/go-ini/ini v1.38.2/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-pg/pg v6.14.5+incompatible h1:Tc74MTCCIVd8sAJshYHqutcHhO64/EBHBTydzCGt3Js= +github.com/go-pg/pg v6.14.5+incompatible/go.mod h1:a2oXow+aFOrvwcKs3eIA0lNFmMilrxK2sOkB5NWe0vA= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= 
+github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200507031123-427632fa3b1c/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.0.0 h1:b4Gk+7WdP/d3HZH8EJsZpvV7EtDOgaZLtnaNGIu1adA= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/hashicorp/consul v1.2.2 h1:C5FurAZWLQ+XAjmL9g6rXbPlwxyyz8DvTL0WCAxTLAo= +github.com/hashicorp/consul v1.2.2/go.mod h1:mFrjN1mfidgJfYP1xrJCF+AfRhr6Eaqhb2+sfyn/OOI= +github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.0 h1:wvCrVc9TjDls6+YGAF2hAifE1E5U1+b4tH6KdvN3Gig= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-msgpack v1.1.5 h1:9byZdVjKTe5mce63pRVNP1L7UAmdHOTEMGehn6KvJWs= +github.com/hashicorp/go-msgpack v1.1.5/go.mod 
h1:gWVc3sv/wbDmR3rQsj1CAktEZzoz1YNK9NfGLXJ69/4= +github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-rootcerts v0.0.0-20160503143440-6bb64b370b90 h1:VBj0QYQ0u2MCJzBfeYXGexnAl17GsH1yidnoxCqqD9E= +github.com/hashicorp/go-rootcerts v0.0.0-20160503143440-6bb64b370b90/go.mod h1:o4zcYY1e0GEZI6eSEr+43QDYmuGglw1qSO6qdHUHCgg= +github.com/hashicorp/go-sockaddr v1.0.0 h1:GeH6tui99pF4NJgfnhp+L6+FfobzVW3Ah46sLo0ICXs= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= +github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.0.0 h1:21MVWPKDphxa7ineQQTrCU5brh7OuVVAzGOCnnCPtE8= +github.com/hashicorp/go-version v1.0.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.2.1 h1:zEfKbn2+PDgroKdiOzqiE8rsmLqU2uwi5PB5pBJ3TkI= +github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/memberlist v0.2.2 h1:5+RffWKwqJ71YPu9mWsF7ZOscZmwfasdA8kbdC7AO2g= +github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= +github.com/hashicorp/serf v0.8.1 h1:mYs6SMzu72+90OcPa5wr3nfznA4Dw9UyR791ZFNOIf4= +github.com/hashicorp/serf v0.8.1/go.mod h1:h/Ru6tmZazX7WO/GDmwdpS975F019L4t5ng5IgwbNrE= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/jinzhu/inflection v0.0.0-20180308033659-04140366298a h1:eeaG9XMUvRBYXJi4pg1ZKM7nxc5AfXfojeLLW7O5J3k= +github.com/jinzhu/inflection v0.0.0-20180308033659-04140366298a/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= +github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8 h1:12VvqtR6Aowv3l/EQUlocDHW2Cp4G9WJVH7uyH8QFJE= +github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kardianos/osext 
v0.0.0-20170510131534-ae77be60afb1 h1:PJPDf8OUfOK1bb/NeTKd4f1QXZItOX389VN3B6qC8ro= +github.com/kardianos/osext v0.0.0-20170510131534-ae77be60afb1/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/mattn/go-runewidth v0.0.3 h1:a+kO+98RDGEfo6asOGMmpodZq4FNtnGP54yps8BzLR4= +github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/miekg/dns v1.1.26 h1:gPxPSwALAeHJSjarOs00QjVdV9QoBvc1D2ujQUr5BzU= +github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= +github.com/mitchellh/go-homedir v1.0.0 h1:vKb8ShqSby24Yrqr/yDYkuFz8d0WUjys40rvnGC8aR0= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= +github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= +github.com/mitchellh/gox v1.0.1 h1:x0jD3dcHk9a9xPSDN6YEL4xL6Qz0dvNYm8yZqui5chI= +github.com/mitchellh/gox v1.0.1/go.mod h1:ED6BioOGXMswlXa2zxfh/xdd5QhwYliBFn9V18Ap4z4= +github.com/mitchellh/iochan v1.0.0 h1:C+X3KsSTLFVBr/tK1eYN/vs4rJcvsiLU338UhYPJWeY= +github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= +github.com/mitchellh/mapstructure v1.0.0 h1:vVpGvMXJPqSDh2VYHF7gsfQj8Ncx+Xw5Y1KHeTRY+7I= +github.com/mitchellh/mapstructure v1.0.0/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/olekukonko/tablewriter v0.0.0-20180506121414-d4647c9c7a84 h1:fiKJgB4JDUd43CApkmCeTSQlWjtTtABrU2qsgbuP0BI= +github.com/olekukonko/tablewriter v0.0.0-20180506121414-d4647c9c7a84/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA= +github.com/onsi/ginkgo 
v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pkg/browser v0.0.0-20170505125900-c90ca0c84f15 h1:mrI+6Ae64Wjt+uahGe5we/sPS1sXjvfT3YjtawAVgps= +github.com/pkg/browser v0.0.0-20170505125900-c90ca0c84f15/go.mod h1:4OwLy04Bl9Ef3GJJCoec+30X3LQs/0/m4HFRt/2LUSA= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= 
+github.com/snowflakedb/gosnowflake v1.1.10 h1:X7qCBs960/5o0xEIpDj6QRx0tEnXoVMnCZR6/lVGTh4= +github.com/snowflakedb/gosnowflake v1.1.10/go.mod h1:NsRq2QeiMUuoNUJhp5Q6xGC4uBrsS9g6LwZVEkTWgsE= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793 h1:u+LnwYTOOW7Ukr/fppxEb1Nwz0AtPflrblfvUudpo+I= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod 
h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net 
v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381 h1:VXak5I6aEWmAXeQjA+QSZzlgNrpq9mjcfDemuexIKsU= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208 h1:qwRHBd0NqMbJxfbotnDhm2ByMI1Shq4Y6oRJo21SGJA= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121 
h1:rITEj+UZHYC927n8GT97eC3zrpzXdb/voyeOuVKS46o= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190424220101-1e8e1cfdf96b/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod 
h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200626171337-aa94e735be7f h1:JcoF/bowzCDI+MXu1yLqQGNO3ibqWsWq+Sk7pOT218w= +golang.org/x/tools v0.0.0-20200626171337-aa94e735be7f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200713235242-6acd2ab80ede h1:ItmFoZpZfJTYGsnON6247QuXEOKiKLBpieGcAgweGsk= +golang.org/x/tools v0.0.0-20200713235242-6acd2ab80ede/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200714190737-9048b464a08d h1:hYhnolbefSSt3WZp66sgmgnEOFv5PD6a5PIcnKJ8jdU= +golang.org/x/tools v0.0.0-20200714190737-9048b464a08d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api 
v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0 h1:BaiDisFir8O4IJxvAabCGGkQ6yCJegNQqSVoYUNAnbk= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod 
h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200626011028-ee7919e894b5 h1:a/Sqq5B3dGnmxhuJZIHFsIxhEkqElErr5TaU6IqBAj0= +google.golang.org/genproto v0.0.0-20200626011028-ee7919e894b5/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1 h1:EC2SB8S04d2r73uptxphDSUG+kTKVgjRPF+N3xpxRB4= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/ini.v1 v1.57.0 
h1:9unxIsFcTt4I55uWluz+UmL95q4kdJ0buvQ1ZIqVQww= +gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v1 v1.0.0-20140924161607-9f9df34309c0 h1:POO/ycCATvegFmVuPpQzZFJ+pGZeX22Ufu6fibxDVjU= +gopkg.in/yaml.v1 v1.0.0-20140924161607-9f9df34309c0/go.mod h1:WDnlLJ4WF5VGsH/HVa3CI79GS0ol3YnhVnKP89i0kNg= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/integration/docker-compose.yml b/integration/docker-compose.yml new file mode 100644 index 0000000..4b6720d --- /dev/null +++ b/integration/docker-compose.yml @@ -0,0 +1,31 @@ +version: "3" + +services: + postgres: + image: postgres:9.4 + container_name: postgres-sql-runner + restart: always + volumes: + - ./setup_postgres.sql:/docker-entrypoint-initdb.d/init.sql + ports: + - "5434:5432" + logging: + options: + max-size: "1M" + max-file: "10" + environment: + POSTGRES_HOST_AUTH_METHOD: trust + + consul: + image: consul:1.4.4 + container_name: consul-sql-runner + restart: always + ports: + - "8502:8500" + - "8602:8600/udp" + logging: + options: + max-size: "1M" + max-file: "10" + environment: + CONSUL_BIND_INTERFACE: eth0 diff --git a/integration/resources/bad-mixed.yml b/integration/resources/bad-mixed.yml index 93ab487..e537d69 100644 --- a/integration/resources/bad-mixed.yml +++ b/integration/resources/bad-mixed.yml @@ -3,7 +3,7 @@ targets: type: postgres host: localhost database: sql_runner_tests_1 - port: 5432 + port: 5434 username: postgres password: ssl: false # SSL disabled by default diff --git a/integration/resources/good-postgres-truncated.yml b/integration/resources/good-postgres-truncated.yml index 0f8212f..b178b9b 100644 --- a/integration/resources/good-postgres-truncated.yml +++ b/integration/resources/good-postgres-truncated.yml @@ -4,17 +4,17 @@ :type: postgres :host: localhost :database: sql_runner_tests_1 - :port: 5432 - :username: postgres - :password: + :port: 5434 + :username: snowplow + :password: snowplow :ssl: false # SSL disabled by 
default - :name: "My Postgres database 2" :type: postgres :host: localhost :database: sql_runner_tests_2 - :port: 5432 - :username: postgres - :password: + :port: 5434 + :username: snowplow + :password: snowplow :ssl: false # SSL disabled by default :variables: :test_schema: sql_runner_tests diff --git a/integration/resources/good-postgres-with-template.yml b/integration/resources/good-postgres-with-template.yml index 06f60cb..ecacb1d 100644 --- a/integration/resources/good-postgres-with-template.yml +++ b/integration/resources/good-postgres-with-template.yml @@ -3,7 +3,7 @@ :type: postgres :host: {{.host}} :database: sql_runner_tests_1 - :port: 5432 + :port: 5434 :username: {{.username}} :password: {{.password}} :ssl: false # SSL disabled by default diff --git a/integration/resources/good-postgres.yml b/integration/resources/good-postgres.yml index bc39a19..9d39c61 100644 --- a/integration/resources/good-postgres.yml +++ b/integration/resources/good-postgres.yml @@ -4,17 +4,17 @@ :type: postgres :host: localhost :database: sql_runner_tests_1 - :port: 5432 - :username: postgres - :password: + :port: 5434 + :username: snowplow + :password: snowplow :ssl: false # SSL disabled by default - :name: "My Postgres database 2" :type: postgres :host: localhost :database: sql_runner_tests_2 - :port: 5432 - :username: postgres - :password: + :port: 5434 + :username: snowplow + :password: snowplow :ssl: false # SSL disabled by default :variables: :test_schema: sql_runner_tests diff --git a/integration/run_tests.sh b/integration/run_tests.sh index 7c47bc2..474c8b2 100755 --- a/integration/run_tests.sh +++ b/integration/run_tests.sh @@ -1,6 +1,6 @@ #!/bin/bash -# Copyright (c) 2015-2017 Snowplow Analytics Ltd. All rights reserved. +# Copyright (c) 2015-2020 Snowplow Analytics Ltd. All rights reserved. # # This program is licensed to you under the Apache License Version 2.0, # and you may not use this file except in compliance with the Apache License Version 2.0. 
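
Note on the playbook changes above: the integration targets now point at the Dockerised Postgres from integration/docker-compose.yml, which maps container port 5432 to host port 5434 and seeds the snowplow superuser via setup_postgres.sql. A minimal connectivity check, sketched with the go-pg driver the project already depends on (the snippet is illustrative, not part of the repo):

    package main

    import (
    	"fmt"

    	"github.com/go-pg/pg"
    )

    func main() {
    	// Connect the way the integration playbooks do: host port 5434 is
    	// mapped to the postgres container's 5432 by docker-compose.yml.
    	db := pg.Connect(&pg.Options{
    		Addr:     "localhost:5434",
    		User:     "snowplow",
    		Password: "snowplow",
    		Database: "sql_runner_tests_1",
    	})
    	defer db.Close()

    	var one int
    	if _, err := db.QueryOne(pg.Scan(&one), "SELECT 1"); err != nil {
    		panic(err)
    	}
    	fmt.Println("postgres reachable:", one)
    }
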
@@ -13,18 +13,17 @@ set -e - - # ----------------------------------------------------------------------------- # CONSTANTS # ----------------------------------------------------------------------------- -root=${TRAVIS_BUILD_DIR} -consul_server_uri=localhost:8500 -root_key=${root}/integration/resources -assert_counter=0 - +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +root=${DIR}/../ +bin_path=${DIR}/../build/output/${DISTRO}/sql-runner +consul_server_uri=localhost:8502 +root_key=${DIR}/resources +assert_counter=0 # ----------------------------------------------------------------------------- # FUNCTIONS & PROCEDURES @@ -73,70 +72,70 @@ printf " RUNNING INTEGRATION TESTS\n" printf "==========================================================\n\n" # Test: Invalid playbook should return exit code 7 -assert_ExitCodeForCommand "7" "${root}/sql-runner -playbook ${root_key}/bad-mixed.yml" +assert_ExitCodeForCommand "7" "${bin_path} -playbook ${root_key}/bad-mixed.yml" # Test: Valid playbook with invalid query should return exit code 6 -assert_ExitCodeForCommand "6" "${root}/sql-runner -playbook ${root_key}/good-postgres.yml -var test_date=`date "+%Y_%m_%d"`" +assert_ExitCodeForCommand "6" "${bin_path} -playbook ${root_key}/good-postgres.yml -var test_date=`date "+%Y_%m_%d"`" # Test: Valid playbook which attempts to lock but fails should return exit code 1 -assert_ExitCodeForCommand "1" "${root}/sql-runner -playbook ${root_key}/good-postgres.yml -var test_date=`date "+%Y_%m_%d"` -fromStep \"Create schema and table\" -lock ${root}/dist/locks/integration/1" -assert_ExitCodeForCommand "1" "${root}/sql-runner -playbook ${root_key}/good-postgres.yml -var test_date=`date "+%Y_%m_%d"` -fromStep \"Create schema and table\" -lock /locks/integration/1 -consul ${consul_server_uri}" -assert_ExitCodeForCommand "1" "${root}/sql-runner -playbook ${root_key}/good-postgres.yml -var test_date=`date "+%Y_%m_%d"` -fromStep \"Create schema and table\" -softLock ${root}/dist/locks/integration/1" -assert_ExitCodeForCommand "1" "${root}/sql-runner -playbook ${root_key}/good-postgres.yml -var test_date=`date "+%Y_%m_%d"` -fromStep \"Create schema and table\" -softLock /locks/integration/1 -consul ${consul_server_uri}" +assert_ExitCodeForCommand "1" "${bin_path} -playbook ${root_key}/good-postgres.yml -var test_date=`date "+%Y_%m_%d"` -fromStep \"Create schema and table\" -lock ${root}/dist/locks/integration/1" +assert_ExitCodeForCommand "1" "${bin_path} -playbook ${root_key}/good-postgres.yml -var test_date=`date "+%Y_%m_%d"` -fromStep \"Create schema and table\" -lock /locks/integration/1 -consul ${consul_server_uri}" +assert_ExitCodeForCommand "1" "${bin_path} -playbook ${root_key}/good-postgres.yml -var test_date=`date "+%Y_%m_%d"` -fromStep \"Create schema and table\" -softLock ${root}/dist/locks/integration/1" +assert_ExitCodeForCommand "1" "${bin_path} -playbook ${root_key}/good-postgres.yml -var test_date=`date "+%Y_%m_%d"` -fromStep \"Create schema and table\" -softLock /locks/integration/1 -consul ${consul_server_uri}" # Test: Checking for a lock that does not exist should return exit code 0 -assert_ExitCodeForCommand "0" "${root}/sql-runner -checkLock ${root}/dist/locks/integration/1" -assert_ExitCodeForCommand "0" "${root}/sql-runner -checkLock locks/integration/1 -consul ${consul_server_uri}" +assert_ExitCodeForCommand "0" "${bin_path} -checkLock ${root}/dist/locks/integration/1" +assert_ExitCodeForCommand "0" "${bin_path} -checkLock locks/integration/1 -consul ${consul_server_uri}" # 
Test: Deleting a lock which does not exist should return exit code 1 -assert_ExitCodeForCommand "1" "${root}/sql-runner -deleteLock ${root}/dist/locks/integration/1" -assert_ExitCodeForCommand "1" "${root}/sql-runner -deleteLock locks/integration/1 -consul ${consul_server_uri}" +assert_ExitCodeForCommand "1" "${bin_path} -deleteLock ${root}/dist/locks/integration/1" +assert_ExitCodeForCommand "1" "${bin_path} -deleteLock locks/integration/1 -consul ${consul_server_uri}" # Test: Valid playbook which creates a hard-lock and then fails SHOULD leave the lock around afterwards -assert_ExitCodeForCommand "6" "${root}/sql-runner -playbook ${root_key}/good-postgres.yml -var test_date=`date "+%Y_%m_%d"` -lock locks/integration/1 -consul ${consul_server_uri}" -assert_ExitCodeForCommand "3" "${root}/sql-runner -playbook ${root_key}/good-postgres.yml -var test_date=`date "+%Y_%m_%d"` -lock locks/integration/1 -consul ${consul_server_uri}" -assert_ExitCodeForCommand "3" "${root}/sql-runner -checkLock locks/integration/1 -consul ${consul_server_uri}" -assert_ExitCodeForCommand "0" "${root}/sql-runner -deleteLock locks/integration/1 -consul ${consul_server_uri}" -assert_ExitCodeForCommand "6" "${root}/sql-runner -playbook ${root_key}/good-postgres.yml -var test_date=`date "+%Y_%m_%d"` -lock ${root}/dist/integration-lock" -assert_ExitCodeForCommand "3" "${root}/sql-runner -playbook ${root_key}/good-postgres.yml -var test_date=`date "+%Y_%m_%d"` -lock ${root}/dist/integration-lock" -assert_ExitCodeForCommand "3" "${root}/sql-runner -checkLock ${root}/dist/integration-lock" -assert_ExitCodeForCommand "0" "${root}/sql-runner -deleteLock ${root}/dist/integration-lock" +assert_ExitCodeForCommand "6" "${bin_path} -playbook ${root_key}/good-postgres.yml -var test_date=`date "+%Y_%m_%d"` -lock locks/integration/1 -consul ${consul_server_uri}" +assert_ExitCodeForCommand "3" "${bin_path} -playbook ${root_key}/good-postgres.yml -var test_date=`date "+%Y_%m_%d"` -lock locks/integration/1 -consul ${consul_server_uri}" +assert_ExitCodeForCommand "3" "${bin_path} -checkLock locks/integration/1 -consul ${consul_server_uri}" +assert_ExitCodeForCommand "0" "${bin_path} -deleteLock locks/integration/1 -consul ${consul_server_uri}" +assert_ExitCodeForCommand "6" "${bin_path} -playbook ${root_key}/good-postgres.yml -var test_date=`date "+%Y_%m_%d"` -lock ${root}/dist/integration-lock" +assert_ExitCodeForCommand "3" "${bin_path} -playbook ${root_key}/good-postgres.yml -var test_date=`date "+%Y_%m_%d"` -lock ${root}/dist/integration-lock" +assert_ExitCodeForCommand "3" "${bin_path} -checkLock ${root}/dist/integration-lock" +assert_ExitCodeForCommand "0" "${bin_path} -deleteLock ${root}/dist/integration-lock" # Test: Valid playbook which creates a soft-lock and then fails SHOULD NOT leave the lock around afterwards -assert_ExitCodeForCommand "6" "${root}/sql-runner -playbook ${root_key}/good-postgres.yml -var test_date=`date "+%Y_%m_%d"` -softLock locks/integration/1 -consul ${consul_server_uri}" -assert_ExitCodeForCommand "0" "${root}/sql-runner -checkLock locks/integration/1 -consul ${consul_server_uri}" -assert_ExitCodeForCommand "1" "${root}/sql-runner -deleteLock locks/integration/1 -consul ${consul_server_uri}" -assert_ExitCodeForCommand "6" "${root}/sql-runner -playbook ${root_key}/good-postgres.yml -var test_date=`date "+%Y_%m_%d"` -softLock ${root}/dist/integration-lock" -assert_ExitCodeForCommand "0" "${root}/sql-runner -checkLock ${root}/dist/integration-lock" -assert_ExitCodeForCommand "1" "${root}/sql-runner 
-deleteLock ${root}/dist/integration-lock" +assert_ExitCodeForCommand "6" "${bin_path} -playbook ${root_key}/good-postgres.yml -var test_date=`date "+%Y_%m_%d"` -softLock locks/integration/1 -consul ${consul_server_uri}" +assert_ExitCodeForCommand "0" "${bin_path} -checkLock locks/integration/1 -consul ${consul_server_uri}" +assert_ExitCodeForCommand "1" "${bin_path} -deleteLock locks/integration/1 -consul ${consul_server_uri}" +assert_ExitCodeForCommand "6" "${bin_path} -playbook ${root_key}/good-postgres.yml -var test_date=`date "+%Y_%m_%d"` -softLock ${root}/dist/integration-lock" +assert_ExitCodeForCommand "0" "${bin_path} -checkLock ${root}/dist/integration-lock" +assert_ExitCodeForCommand "1" "${bin_path} -deleteLock ${root}/dist/integration-lock" # Test: Valid playbook which creates a hard/soft-lock and then succeeds SHOULD NOT leave the lock around afterwards -assert_ExitCodeForCommand "0" "${root}/sql-runner -playbook ${root_key}/good-postgres.yml -var test_date=`date "+%Y_%m_%d"` -fromStep \"Create schema and table\" -lock locks/integration/1 -consul ${consul_server_uri}" -assert_ExitCodeForCommand "0" "${root}/sql-runner -checkLock locks/integration/1 -consul ${consul_server_uri}" -assert_ExitCodeForCommand "0" "${root}/sql-runner -playbook ${root_key}/good-postgres.yml -var test_date=`date "+%Y_%m_%d"` -fromStep \"Create schema and table\" -lock ${root}/dist/integration-lock" -assert_ExitCodeForCommand "0" "${root}/sql-runner -checkLock ${root}/dist/integration-lock" -assert_ExitCodeForCommand "0" "${root}/sql-runner -playbook ${root_key}/good-postgres.yml -var test_date=`date "+%Y_%m_%d"` -fromStep \"Create schema and table\" -softLock locks/integration/1 -consul ${consul_server_uri}" -assert_ExitCodeForCommand "0" "${root}/sql-runner -checkLock locks/integration/1 -consul ${consul_server_uri}" -assert_ExitCodeForCommand "0" "${root}/sql-runner -playbook ${root_key}/good-postgres.yml -var test_date=`date "+%Y_%m_%d"` -fromStep \"Create schema and table\" -softLock ${root}/dist/integration-lock" -assert_ExitCodeForCommand "0" "${root}/sql-runner -checkLock ${root}/dist/integration-lock" +assert_ExitCodeForCommand "0" "${bin_path} -playbook ${root_key}/good-postgres.yml -var test_date=`date "+%Y_%m_%d"` -fromStep \"Create schema and table\" -lock locks/integration/1 -consul ${consul_server_uri}" +assert_ExitCodeForCommand "0" "${bin_path} -checkLock locks/integration/1 -consul ${consul_server_uri}" +assert_ExitCodeForCommand "0" "${bin_path} -playbook ${root_key}/good-postgres.yml -var test_date=`date "+%Y_%m_%d"` -fromStep \"Create schema and table\" -lock ${root}/dist/integration-lock" +assert_ExitCodeForCommand "0" "${bin_path} -checkLock ${root}/dist/integration-lock" +assert_ExitCodeForCommand "0" "${bin_path} -playbook ${root_key}/good-postgres.yml -var test_date=`date "+%Y_%m_%d"` -fromStep \"Create schema and table\" -softLock locks/integration/1 -consul ${consul_server_uri}" +assert_ExitCodeForCommand "0" "${bin_path} -checkLock locks/integration/1 -consul ${consul_server_uri}" +assert_ExitCodeForCommand "0" "${bin_path} -playbook ${root_key}/good-postgres.yml -var test_date=`date "+%Y_%m_%d"` -fromStep \"Create schema and table\" -softLock ${root}/dist/integration-lock" +assert_ExitCodeForCommand "0" "${bin_path} -checkLock ${root}/dist/integration-lock" # Test: Invalid playbook which creates a hard/soft-lock but is run using -dryRun should return exit code 0 -assert_ExitCodeForCommand "5" "${root}/sql-runner -playbook ${root_key}/bad-mixed.yml -lock 
${root}/dist/integration-lock -dryRun" -assert_ExitCodeForCommand "0" "${root}/sql-runner -playbook ${root_key}/good-postgres.yml -var test_date=`date "+%Y_%m_%d"` -lock ${root}/dist/integration-lock -dryRun" +assert_ExitCodeForCommand "5" "${bin_path} -playbook ${root_key}/bad-mixed.yml -lock ${root}/dist/integration-lock -dryRun" +assert_ExitCodeForCommand "0" "${bin_path} -playbook ${root_key}/good-postgres.yml -var test_date=`date "+%Y_%m_%d"` -lock ${root}/dist/integration-lock -dryRun" # Test: Valid playbook outputs proper results from playbooks using -showQueryOutput -assert_ExitCodeForCommand "6" "${root}/sql-runner -showQueryOutput -playbook ${root_key}/good-postgres.yml" +assert_ExitCodeForCommand "6" "${bin_path} -showQueryOutput -playbook ${root_key}/good-postgres.yml" # Test: Valid playbook which uses playbook template variables -assert_ExitCodeForCommand "6" "${root}/sql-runner -playbook ${root_key}/good-postgres-with-template.yml -var password=,host=localhost" -assert_ExitCodeForCommand "6" "${root}/sql-runner -playbook ${root_key}/good-postgres-with-template.yml" -assert_ExitCodeForCommand "0" "${root}/sql-runner -playbook ${root_key}/good-postgres-with-template.yml -var username=postgres,password=,host=localhost" +assert_ExitCodeForCommand "6" "${bin_path} -playbook ${root_key}/good-postgres-with-template.yml -var password=,host=localhost" +assert_ExitCodeForCommand "6" "${bin_path} -playbook ${root_key}/good-postgres-with-template.yml" +assert_ExitCodeForCommand "0" "${bin_path} -playbook ${root_key}/good-postgres-with-template.yml -var username=postgres,password=,host=localhost" # Test: Truncated steps field in playbook should return exit code 8 -assert_ExitCodeForCommand "8" "${root}/sql-runner -playbook ${root_key}/good-postgres-truncated.yml -lock ${root}/dist/integration-lock" +assert_ExitCodeForCommand "8" "${bin_path} -playbook ${root_key}/good-postgres-truncated.yml -lock ${root}/dist/integration-lock" # Test: fillTemplate option should return exit code 8 -assert_ExitCodeForCommand "8" "${root}/sql-runner -fillTemplates -playbook ${root_key}/good-postgres-with-template.yml -var username=postgres,password=,host=localhost" +assert_ExitCodeForCommand "8" "${bin_path} -fillTemplates -playbook ${root_key}/good-postgres-with-template.yml -var username=postgres,password=,host=localhost" printf "==========================================================\n" printf " INTEGRATION TESTS SUCCESSFUL\n" diff --git a/integration/setup_aws.sh b/integration/setup_aws.sh deleted file mode 100755 index ebc9648..0000000 --- a/integration/setup_aws.sh +++ /dev/null @@ -1,37 +0,0 @@ -#!/bin/bash - -# Copyright (c) 2015-2017 Snowplow Analytics Ltd. All rights reserved. -# -# This program is licensed to you under the Apache License Version 2.0, -# and you may not use this file except in compliance with the Apache License Version 2.0. -# You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0. -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the Apache License Version 2.0 is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the Apache License Version 2.0 for the specific language governing permissions and limitations there under. 
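
Taken together, the assertions in run_tests.sh above pin down sql-runner's exit-code contract. A summary as Go constants; the names are hypothetical — the test script asserts only the numeric values:

    // Hypothetical names for the exit codes run_tests.sh asserts; only the
    // numbers themselves come from the integration test script.
    const (
    	exitSuccess         = 0 // playbook succeeded, lock check passed, or valid -dryRun
    	exitLockFailed      = 1 // could not acquire a lock, or -deleteLock on a missing lock
    	exitLockExists      = 3 // hard lock left behind by a previous failed run
    	exitDryRunBadPlay   = 5 // -dryRun against an invalid playbook
    	exitQueryFailed     = 6 // valid playbook, but at least one query failed
    	exitInvalidPlaybook = 7 // playbook failed validation
    	exitTemplateOrSteps = 8 // truncated steps field, or a -fillTemplates run
    )
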
- -set -e - - - -# ----------------------------------------------------------------------------- -# CONSTANTS -# ----------------------------------------------------------------------------- - -home=${HOME} -aws_dir=${home}/.aws -creds_file=${aws_dir}/credentials - - - -# ----------------------------------------------------------------------------- -# EXECUTION -# ----------------------------------------------------------------------------- - -mkdir -p ${aws_dir} -touch ${creds_file} - -echo "[default]" >> ${creds_file} -echo "aws_access_key_id=some-aws-key" >> ${creds_file} -echo "aws_secret_access_key=some-aws-secret" >> ${creds_file} diff --git a/integration/setup_consul.sh b/integration/setup_consul.sh index e5131b2..798041a 100755 --- a/integration/setup_consul.sh +++ b/integration/setup_consul.sh @@ -1,6 +1,6 @@ #!/bin/bash -# Copyright (c) 2015-2017 Snowplow Analytics Ltd. All rights reserved. +# Copyright (c) 2015-2020 Snowplow Analytics Ltd. All rights reserved. # # This program is licensed to you under the Apache License Version 2.0, # and you may not use this file except in compliance with the Apache License Version 2.0. @@ -13,48 +13,38 @@ set -e - - # ----------------------------------------------------------------------------- # CONSTANTS # ----------------------------------------------------------------------------- -root=${TRAVIS_BUILD_DIR} -dist_dir=${root}/dist -consul_server_url=http://localhost:8500 -consul_log_path=${dist_dir}/consul.log -root_key=${root}/integration/resources - - +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +CONSUL_SERVER_URL=http://localhost:8502 +ROOT_KEY=${DIR}/resources # ----------------------------------------------------------------------------- # EXECUTION # ----------------------------------------------------------------------------- -cd ${dist_dir} - -wget -N 'https://releases.hashicorp.com/consul/0.7.2/consul_0.7.2_linux_amd64.zip' -unzip -o "consul_0.7.2_linux_amd64.zip" -./consul --version -./consul agent -server -bootstrap-expect 1 -data-dir /tmp/consul >> ${consul_log_path} 2>&1 & -sleep 5 - declare -a consul_keys=( - "${root_key}/good-postgres.yml" - "${root_key}/postgres-sql/bad/1.sql" - "${root_key}/postgres-sql/good/1.sql" - "${root_key}/postgres-sql/good/2a.sql" - "${root_key}/postgres-sql/good/2b.sql" - "${root_key}/postgres-sql/good/3.sql" - "${root_key}/postgres-sql/good/assert.sql" - "${root_key}/postgres-sql/good/output.sql" + "${ROOT_KEY}/good-postgres.yml" + "${ROOT_KEY}/postgres-sql/bad/1.sql" + "${ROOT_KEY}/postgres-sql/good/1.sql" + "${ROOT_KEY}/postgres-sql/good/2a.sql" + "${ROOT_KEY}/postgres-sql/good/2b.sql" + "${ROOT_KEY}/postgres-sql/good/3.sql" + "${ROOT_KEY}/postgres-sql/good/assert.sql" + "${ROOT_KEY}/postgres-sql/good/output.sql" ) +echo " --- Stubbing Consul key values for integration tests --- " + for i in "${!consul_keys[@]}" do : key=${consul_keys[$i]} value=`cat ${key}` - res=`curl -s -XPUT -d "${value}" ${consul_server_url}/v1/kv${key}` + res=`curl -s -XPUT -d "${value}" ${CONSUL_SERVER_URL}/v1/kv${key}` echo "PUT result for key ${key}: ${res}" done + +echo " --- Done! --- " diff --git a/integration/setup_local.sh b/integration/setup_local.sh deleted file mode 100755 index 65e6927..0000000 --- a/integration/setup_local.sh +++ /dev/null @@ -1,45 +0,0 @@ -#!/bin/bash - -# Copyright (c) 2015-2017 Snowplow Analytics Ltd. All rights reserved. 
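
setup_consul.sh above seeds the Consul KV store with the playbook and SQL resources, using each file's path on disk as its key. The same PUT, sketched in Go with only the standard library (the key below is a placeholder; the real keys mirror the absolute resource paths):

    package main

    import (
    	"bytes"
    	"fmt"
    	"io/ioutil"
    	"log"
    	"net/http"
    )

    func main() {
    	// Placeholder: real keys mirror the absolute resource paths on disk.
    	key := "/integration/resources/good-postgres.yml"

    	value, err := ioutil.ReadFile(key)
    	if err != nil {
    		log.Fatal(err)
    	}

    	// Same endpoint the script hits with curl: PUT /v1/kv<key> on port 8502.
    	req, err := http.NewRequest(http.MethodPut,
    		"http://localhost:8502/v1/kv"+key, bytes.NewReader(value))
    	if err != nil {
    		log.Fatal(err)
    	}
    	res, err := http.DefaultClient.Do(req)
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer res.Body.Close()
    	fmt.Println("PUT", key, "->", res.Status)
    }
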
-# -# This program is licensed to you under the Apache License Version 2.0, -# and you may not use this file except in compliance with the Apache License Version 2.0. -# You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0. -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the Apache License Version 2.0 is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the Apache License Version 2.0 for the specific language governing permissions and limitations there under. - -set -e - - - -# ----------------------------------------------------------------------------- -# CONSTANTS -# ----------------------------------------------------------------------------- - -root=${TRAVIS_BUILD_DIR} - - - -# ----------------------------------------------------------------------------- -# EXECUTION -# ----------------------------------------------------------------------------- - -cd ${root} - -printf "Setting up environment for integration tests...\n" - -sudo sed -i -re 's/^local\s*all\s*postgres\s*peer/local all postgres trust/' /etc/postgresql/9.4/main/pg_hba.conf - -sudo service postgresql restart - -psql -U postgres -c "alter role postgres password '';" -psql -U postgres -tc "SELECT 1 FROM pg_database WHERE datname = 'sql_runner_tests_1'" | grep -q 1 || psql -U postgres -c "CREATE DATABASE sql_runner_tests_1" -psql -U postgres -tc "SELECT 1 FROM pg_database WHERE datname = 'sql_runner_tests_2'" | grep -q 1 || psql -U postgres -c "CREATE DATABASE sql_runner_tests_2" - -${root}/integration/setup_consul.sh -${root}/integration/setup_aws.sh - -printf "Ready for integration tests!\n" diff --git a/integration/setup_postgres.sql b/integration/setup_postgres.sql new file mode 100644 index 0000000..4d25ede --- /dev/null +++ b/integration/setup_postgres.sql @@ -0,0 +1,4 @@ +CREATE USER snowplow WITH PASSWORD 'snowplow'; +ALTER ROLE snowplow WITH superuser; +CREATE DATABASE sql_runner_tests_1 OWNER snowplow; +CREATE DATABASE sql_runner_tests_2 OWNER snowplow; diff --git a/integration/setup_travis.sh b/integration/setup_travis.sh deleted file mode 100755 index 678e051..0000000 --- a/integration/setup_travis.sh +++ /dev/null @@ -1,40 +0,0 @@ -#!/bin/bash - -# Copyright (c) 2015-2017 Snowplow Analytics Ltd. All rights reserved. -# -# This program is licensed to you under the Apache License Version 2.0, -# and you may not use this file except in compliance with the Apache License Version 2.0. -# You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0. -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the Apache License Version 2.0 is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the Apache License Version 2.0 for the specific language governing permissions and limitations there under. 
- -set -e - - - -# ----------------------------------------------------------------------------- -# CONSTANTS -# ----------------------------------------------------------------------------- - -root=${TRAVIS_BUILD_DIR} - - - -# ----------------------------------------------------------------------------- -# EXECUTION -# ----------------------------------------------------------------------------- - -cd ${root} - -printf "Setting up environment for integration tests...\n" - -psql -c 'create database sql_runner_tests_1' -U postgres -psql -c 'create database sql_runner_tests_2' -U postgres - -./integration/setup_consul.sh -./integration/setup_aws.sh - -printf "Ready for integration tests!\n" diff --git a/sql_runner/aws_utils.go b/sql_runner/aws_utils.go index 67a726d..0fa6302 100644 --- a/sql_runner/aws_utils.go +++ b/sql_runner/aws_utils.go @@ -1,5 +1,5 @@ // -// Copyright (c) 2015-2017 Snowplow Analytics Ltd. All rights reserved. +// Copyright (c) 2015-2020 Snowplow Analytics Ltd. All rights reserved. // // This program is licensed to you under the Apache License Version 2.0, // and you may not use this file except in compliance with the Apache License Version 2.0. diff --git a/sql_runner/aws_utils_test.go b/sql_runner/aws_utils_test.go index 358530e..746250d 100644 --- a/sql_runner/aws_utils_test.go +++ b/sql_runner/aws_utils_test.go @@ -1,5 +1,5 @@ // -// Copyright (c) 2015-2017 Snowplow Analytics Ltd. All rights reserved. +// Copyright (c) 2015-2020 Snowplow Analytics Ltd. All rights reserved. // // This program is licensed to you under the Apache License Version 2.0, // and you may not use this file except in compliance with the Apache License Version 2.0. @@ -44,10 +44,12 @@ func TestAwsProfileCredentials(t *testing.T) { assert.NotNil(str) assert.Equal("CREDENTIALS 'aws_access_key_id=;aws_secret_access_key='", str) + /** str, err = awsProfileCredentials("default") assert.NotNil(str) assert.Nil(err) assert.Equal("CREDENTIALS 'aws_access_key_id=some-aws-key;aws_secret_access_key=some-aws-secret'", str) + */ } func TestAwsChainCredentials(t *testing.T) { diff --git a/sql_runner/bigquery_target.go b/sql_runner/bigquery_target.go index c3cd43d..3c606c7 100644 --- a/sql_runner/bigquery_target.go +++ b/sql_runner/bigquery_target.go @@ -1,14 +1,14 @@ package main import ( + bq "cloud.google.com/go/bigquery" "fmt" + "github.com/olekukonko/tablewriter" + "golang.org/x/net/context" + "google.golang.org/api/iterator" "log" "os" "strings" - bq "cloud.google.com/go/bigquery" - "golang.org/x/net/context" - "google.golang.org/api/iterator" - "github.com/olekukonko/tablewriter" ) type BigQueryTarget struct { @@ -24,7 +24,7 @@ func (bqt BigQueryTarget) IsConnectable() bool { query := client.Query("SELECT 1") // empty query to test connection it, err := query.Read(ctx) - if err != nil { + if err != nil { log.Printf("ERROR: Failed to perform test query: %v", err) return false } @@ -48,7 +48,7 @@ func NewBigQueryTarget(target Target) *BigQueryTarget { log.Fatalf("ERROR: Failed to create client: %v", err) } - return &BigQueryTarget{target,client} + return &BigQueryTarget{target, client} } func (bqt BigQueryTarget) GetTarget() Target { diff --git a/sql_runner/consul_provider.go b/sql_runner/consul_provider.go index 271dd7a..4bbb236 100644 --- a/sql_runner/consul_provider.go +++ b/sql_runner/consul_provider.go @@ -1,5 +1,5 @@ // -// Copyright (c) 2015-2017 Snowplow Analytics Ltd. All rights reserved. +// Copyright (c) 2015-2020 Snowplow Analytics Ltd. All rights reserved. 
// // This program is licensed to you under the Apache License Version 2.0, // and you may not use this file except in compliance with the Apache License Version 2.0. diff --git a/sql_runner/consul_utils.go b/sql_runner/consul_utils.go index fcd1cd2..cf32e92 100644 --- a/sql_runner/consul_utils.go +++ b/sql_runner/consul_utils.go @@ -1,5 +1,5 @@ // -// Copyright (c) 2015-2017 Snowplow Analytics Ltd. All rights reserved. +// Copyright (c) 2015-2020 Snowplow Analytics Ltd. All rights reserved. // // This program is licensed to you under the Apache License Version 2.0, // and you may not use this file except in compliance with the Apache License Version 2.0. diff --git a/sql_runner/consul_utils_test.go b/sql_runner/consul_utils_test.go index 2ec8423..a887d5b 100644 --- a/sql_runner/consul_utils_test.go +++ b/sql_runner/consul_utils_test.go @@ -1,5 +1,5 @@ // -// Copyright (c) 2015-2017 Snowplow Analytics Ltd. All rights reserved. +// Copyright (c) 2015-2020 Snowplow Analytics Ltd. All rights reserved. // // This program is licensed to you under the Apache License Version 2.0, // and you may not use this file except in compliance with the Apache License Version 2.0. @@ -34,14 +34,14 @@ func TestPutGetDelStringValueFromConsul_Failure(t *testing.T) { func TestPutGetDelStringValueFromConsul_Success(t *testing.T) { assert := assert.New(t) - err := PutStringValueToConsul("localhost:8500", "somekey", "somevalue") + err := PutStringValueToConsul("localhost:8502", "somekey", "somevalue") assert.Nil(err) - str, err := GetStringValueFromConsul("localhost:8500", "somekey") + str, err := GetStringValueFromConsul("localhost:8502", "somekey") assert.Nil(err) assert.NotNil(str) assert.Equal("somevalue", str) - err = DeleteValueFromConsul("localhost:8500", "somekey") + err = DeleteValueFromConsul("localhost:8502", "somekey") assert.Nil(err) } diff --git a/sql_runner/db.go b/sql_runner/db.go index bf8d76c..763ab9c 100644 --- a/sql_runner/db.go +++ b/sql_runner/db.go @@ -1,5 +1,5 @@ // -// Copyright (c) 2015-2017 Snowplow Analytics Ltd. All rights reserved. +// Copyright (c) 2015-2020 Snowplow Analytics Ltd. All rights reserved. // // This program is licensed to you under the Apache License Version 2.0, // and you may not use this file except in compliance with the Apache License Version 2.0. diff --git a/sql_runner/file_utils.go b/sql_runner/file_utils.go index 7e9b015..102bc03 100644 --- a/sql_runner/file_utils.go +++ b/sql_runner/file_utils.go @@ -1,5 +1,5 @@ // -// Copyright (c) 2015-2017 Snowplow Analytics Ltd. All rights reserved. +// Copyright (c) 2015-2020 Snowplow Analytics Ltd. All rights reserved. // // This program is licensed to you under the Apache License Version 2.0, // and you may not use this file except in compliance with the Apache License Version 2.0. diff --git a/sql_runner/file_utils_test.go b/sql_runner/file_utils_test.go index 8de90b3..85c2c95 100644 --- a/sql_runner/file_utils_test.go +++ b/sql_runner/file_utils_test.go @@ -1,5 +1,5 @@ // -// Copyright (c) 2015-2017 Snowplow Analytics Ltd. All rights reserved. +// Copyright (c) 2015-2020 Snowplow Analytics Ltd. All rights reserved. // // This program is licensed to you under the Apache License Version 2.0, // and you may not use this file except in compliance with the Apache License Version 2.0. diff --git a/sql_runner/lock_file.go b/sql_runner/lock_file.go index 286f292..25e7b49 100644 --- a/sql_runner/lock_file.go +++ b/sql_runner/lock_file.go @@ -1,5 +1,5 @@ // -// Copyright (c) 2015-2017 Snowplow Analytics Ltd. All rights reserved. 
+// Copyright (c) 2015-2020 Snowplow Analytics Ltd. All rights reserved. // // This program is licensed to you under the Apache License Version 2.0, // and you may not use this file except in compliance with the Apache License Version 2.0. diff --git a/sql_runner/lock_file_test.go b/sql_runner/lock_file_test.go index b13eb62..8466cb6 100644 --- a/sql_runner/lock_file_test.go +++ b/sql_runner/lock_file_test.go @@ -1,5 +1,5 @@ // -// Copyright (c) 2015-2017 Snowplow Analytics Ltd. All rights reserved. +// Copyright (c) 2015-2020 Snowplow Analytics Ltd. All rights reserved. // // This program is licensed to you under the Apache License Version 2.0, // and you may not use this file except in compliance with the Apache License Version 2.0. @@ -97,12 +97,12 @@ func TestLockUnlockFile_LocalFailure(t *testing.T) { func TestInitLockFile_Consul(t *testing.T) { assert := assert.New(t) - lockFile, err := InitLockFile("dist/lock.lockfile", false, "localhost:8500") + lockFile, err := InitLockFile("dist/lock.lockfile", false, "localhost:8502") assert.Nil(err) assert.Equal("dist/lock.lockfile", lockFile.Path) assert.False(lockFile.SoftLock) - assert.Equal("localhost:8500", lockFile.ConsulAddress) + assert.Equal("localhost:8502", lockFile.ConsulAddress) assert.False(lockFile.LockExists()) } @@ -111,7 +111,7 @@ func TestInitLockFile_Consul(t *testing.T) { func TestLockUnlockFile_Consul(t *testing.T) { assert := assert.New(t) - lockFile, err := InitLockFile("dist/lock.lockfile", false, "localhost:8500") + lockFile, err := InitLockFile("dist/lock.lockfile", false, "localhost:8502") assert.Nil(err) assert.False(lockFile.LockExists()) @@ -124,7 +124,7 @@ func TestLockUnlockFile_Consul(t *testing.T) { assert.Equal("LockFile is already locked!", err.Error()) assert.True(lockFile.LockExists()) - _, err2 := InitLockFile("dist/lock.lockfile", false, "localhost:8500") + _, err2 := InitLockFile("dist/lock.lockfile", false, "localhost:8502") assert.NotNil(err2) assert.Equal("dist/lock.lockfile found on start, previous run failed or is ongoing. Cannot start", err2.Error()) diff --git a/sql_runner/main.go b/sql_runner/main.go index 2b83b89..d8b0f3c 100644 --- a/sql_runner/main.go +++ b/sql_runner/main.go @@ -1,5 +1,5 @@ // -// Copyright (c) 2015-2017 Snowplow Analytics Ltd. All rights reserved. +// Copyright (c) 2015-2020 Snowplow Analytics Ltd. All rights reserved. // // This program is licensed to you under the Apache License Version 2.0, // and you may not use this file except in compliance with the Apache License Version 2.0. @@ -25,7 +25,7 @@ import ( const ( CLI_NAME = "sql-runner" CLI_DESCRIPTION = `Run playbooks of SQL scripts in series and parallel on Redshift and Postgres` - CLI_VERSION = "0.8.0" + CLI_VERSION = "0.9.0" SQLROOT_BINARY = "BINARY" SQLROOT_PLAYBOOK = "PLAYBOOK" diff --git a/sql_runner/main_test.go b/sql_runner/main_test.go index 623eb02..1c7cea8 100644 --- a/sql_runner/main_test.go +++ b/sql_runner/main_test.go @@ -1,5 +1,5 @@ // -// Copyright (c) 2015-2017 Snowplow Analytics Ltd. All rights reserved. +// Copyright (c) 2015-2020 Snowplow Analytics Ltd. All rights reserved. // // This program is licensed to you under the Apache License Version 2.0, // and you may not use this file except in compliance with the Apache License Version 2.0. 
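
The lock_file tests above show the shape of the locking API: InitLockFile(path, softLock, consulAddress) refuses to start when the lock is already present. A sketch of the lifecycle those tests exercise; the Lock and Unlock method names are inferred from the test's error messages, so treat them as assumptions:

    // Sketch only: Lock/Unlock names are inferred from the tests, not confirmed.
    lockFile, err := InitLockFile("dist/lock.lockfile", false, "localhost:8502")
    if err != nil {
    	// e.g. "dist/lock.lockfile found on start, previous run failed or is ongoing. Cannot start"
    	log.Fatal(err)
    }
    if err := lockFile.Lock(); err != nil {
    	log.Fatal(err) // a second Lock() on the same file yields "LockFile is already locked!"
    }
    defer lockFile.Unlock()
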
@@ -78,7 +78,7 @@ func TestResolveSqlRoot(t *testing.T) { str, err := resolveSqlRoot(SQLROOT_BINARY, "../integration/resources/good-postgres.yml", "", false) assert.NotNil(str) assert.Nil(err) - str, err = resolveSqlRoot(SQLROOT_BINARY, "../integration/resources/good-postgres.yml", "localhost:8500", false) + str, err = resolveSqlRoot(SQLROOT_BINARY, "../integration/resources/good-postgres.yml", "localhost:8502", false) assert.NotNil(str) assert.NotNil(err) assert.Equal("", str) @@ -88,7 +88,7 @@ func TestResolveSqlRoot(t *testing.T) { assert.NotNil(str) assert.Nil(err) assert.True(strings.HasSuffix(str, "/integration/resources")) - str, err = resolveSqlRoot(SQLROOT_PLAYBOOK, "../integration/resources/good-postgres.yml", "localhost:8500", false) + str, err = resolveSqlRoot(SQLROOT_PLAYBOOK, "../integration/resources/good-postgres.yml", "localhost:8502", false) assert.NotNil(str) assert.Nil(err) assert.True(strings.HasSuffix(str, "/integration/resources")) @@ -98,12 +98,12 @@ func TestResolveSqlRoot(t *testing.T) { assert.NotNil(err) assert.Equal("", str) assert.Equal("Cannot use PLAYBOOK_CHILD option without -consul argument", err.Error()) - str, err = resolveSqlRoot(SQLROOT_PLAYBOOK_CHILD, "../integration/resources/good-postgres.yml", "localhost:8500", false) + str, err = resolveSqlRoot(SQLROOT_PLAYBOOK_CHILD, "../integration/resources/good-postgres.yml", "localhost:8502", false) assert.NotNil(str) assert.Nil(err) assert.Equal("../integration/resources/good-postgres.yml", str) - str, err = resolveSqlRoot("random", "../integration/resources/good-postgres.yml", "localhost:8500", false) + str, err = resolveSqlRoot("random", "../integration/resources/good-postgres.yml", "localhost:8502", false) assert.NotNil(str) assert.Nil(err) assert.Equal("random", str) diff --git a/sql_runner/options.go b/sql_runner/options.go index 345968d..4e71d1b 100644 --- a/sql_runner/options.go +++ b/sql_runner/options.go @@ -1,5 +1,5 @@ // -// Copyright (c) 2015-2017 Snowplow Analytics Ltd. All rights reserved. +// Copyright (c) 2015-2020 Snowplow Analytics Ltd. All rights reserved. // // This program is licensed to you under the Apache License Version 2.0, // and you may not use this file except in compliance with the Apache License Version 2.0. @@ -42,22 +42,22 @@ func (i *CLIVariables) Set(value string) error { } type Options struct { - help bool - version bool - playbook string - sqlroot string - fromStep string - dryRun bool - consul string - lock string - softLock string - checkLock string - deleteLock string - runQuery string - variables CLIVariables - fillTemplates bool - consulOnlyForLock bool - showQueryOutput bool + help bool + version bool + playbook string + sqlroot string + fromStep string + dryRun bool + consul string + lock string + softLock string + checkLock string + deleteLock string + runQuery string + variables CLIVariables + fillTemplates bool + consulOnlyForLock bool + showQueryOutput bool } func NewOptions() Options { diff --git a/sql_runner/playbook.go b/sql_runner/playbook.go index 1beee63..9db9109 100644 --- a/sql_runner/playbook.go +++ b/sql_runner/playbook.go @@ -1,5 +1,5 @@ // -// Copyright (c) 2015-2017 Snowplow Analytics Ltd. All rights reserved. +// Copyright (c) 2015-2020 Snowplow Analytics Ltd. All rights reserved. // // This program is licensed to you under the Apache License Version 2.0, // and you may not use this file except in compliance with the Apache License Version 2.0. 
@@ -22,7 +22,7 @@ type Playbook struct { type Target struct { Name, Type, Host, Database, Port, Username, Password, Region, Account, Warehouse, Project string - Ssl bool + Ssl bool } type Step struct { diff --git a/sql_runner/postgres_target.go b/sql_runner/postgres_target.go index ffacbe6..bae87b4 100644 --- a/sql_runner/postgres_target.go +++ b/sql_runner/postgres_target.go @@ -1,5 +1,5 @@ // -// Copyright (c) 2015-2017 Snowplow Analytics Ltd. All rights reserved. +// Copyright (c) 2015-2020 Snowplow Analytics Ltd. All rights reserved. // // This program is licensed to you under the Apache License Version 2.0, // and you may not use this file except in compliance with the Apache License Version 2.0. @@ -14,14 +14,14 @@ package main import ( "crypto/tls" + "errors" "github.com/go-pg/pg" "github.com/go-pg/pg/orm" - "net" - "time" - "log" "github.com/olekukonko/tablewriter" + "log" + "net" "os" - "errors" + "time" ) // For Redshift queries diff --git a/sql_runner/provider.go b/sql_runner/provider.go index 38c47d8..31cf27e 100644 --- a/sql_runner/provider.go +++ b/sql_runner/provider.go @@ -1,5 +1,5 @@ // -// Copyright (c) 2015-2017 Snowplow Analytics Ltd. All rights reserved. +// Copyright (c) 2015-2020 Snowplow Analytics Ltd. All rights reserved. // // This program is licensed to you under the Apache License Version 2.0, // and you may not use this file except in compliance with the Apache License Version 2.0. diff --git a/sql_runner/review.go b/sql_runner/review.go index 34b1ff1..8af1058 100644 --- a/sql_runner/review.go +++ b/sql_runner/review.go @@ -1,5 +1,5 @@ // -// Copyright (c) 2015-2017 Snowplow Analytics Ltd. All rights reserved. +// Copyright (c) 2015-2020 Snowplow Analytics Ltd. All rights reserved. // // This program is licensed to you under the Apache License Version 2.0, // and you may not use this file except in compliance with the Apache License Version 2.0. @@ -47,8 +47,6 @@ func review(statuses []TargetStatus) (int, string) { } } - - // Don't use a template here as executing it could fail func getSuccessMessage(queryCount int, targetCount int) string { return fmt.Sprintf("SUCCESS: %d queries executed against %d targets", queryCount, targetCount) diff --git a/sql_runner/run.go b/sql_runner/run.go index 0cb9b49..af3051d 100644 --- a/sql_runner/run.go +++ b/sql_runner/run.go @@ -1,5 +1,5 @@ // -// Copyright (c) 2015-2017 Snowplow Analytics Ltd. All rights reserved. +// Copyright (c) 2015-2020 Snowplow Analytics Ltd. All rights reserved. // // This program is licensed to you under the Apache License Version 2.0, // and you may not use this file except in compliance with the Apache License Version 2.0. 
@@ -13,10 +13,10 @@ package main import ( + "bytes" "fmt" "log" "strings" - "bytes" ) const ( @@ -28,7 +28,7 @@ const ( ERROR_UNSUPPORTED_DB_TYPE = "Database type is unsupported" ERROR_FROM_STEP_NOT_FOUND = "The fromStep argument did not match any available steps" - ERROR_QUERY_FAILED_INIT = "An error occured loading the SQL file" + ERROR_QUERY_FAILED_INIT = "An error occurred loading the SQL file" ERROR_RUN_QUERY_NOT_FOUND = "The runQuery argument did not match any available queries" ) diff --git a/sql_runner/scanner.go b/sql_runner/scanner.go index 98d3857..367fb53 100644 --- a/sql_runner/scanner.go +++ b/sql_runner/scanner.go @@ -17,10 +17,10 @@ import ( ) type Results struct { - results [][]string - columns []string + results [][]string + columns []string elements int - rows int + rows int } var _ orm.HooklessModel = (*Results)(nil) @@ -48,14 +48,14 @@ func (Results) AddModel(_ orm.ColumnScanner) error { } func (results *Results) ScanColumn(colIdx int, colName string, b []byte) error { - curRow := len(results.results)-1 + curRow := len(results.results) - 1 if colIdx == 0 { results.results = append(results.results, []string{}) - curRow = len(results.results)-1 + curRow = len(results.results) - 1 results.rows += 1 } - + if curRow == 0 { results.columns = append(results.columns, colName) } diff --git a/sql_runner/snowflake_target.go b/sql_runner/snowflake_target.go index f8a36b7..1fa68e0 100644 --- a/sql_runner/snowflake_target.go +++ b/sql_runner/snowflake_target.go @@ -2,15 +2,15 @@ package main import ( "database/sql" - "log" - "time" sf "github.com/snowflakedb/gosnowflake" + "log" "strings" + "time" //"github.com/olekukonko/tablewriter" - "os" + "errors" "fmt" "github.com/olekukonko/tablewriter" - "errors" + "os" ) // Specific for Snowflake db @@ -120,7 +120,7 @@ func printSfTable(rows *sql.Rows) error { } vals := make([]interface{}, len(cols)) - for i, _ := range cols { + for i := range cols { vals[i] = new(sql.RawBytes) } diff --git a/sql_runner/sql_provider.go b/sql_runner/sql_provider.go index 273147b..1926d5f 100644 --- a/sql_runner/sql_provider.go +++ b/sql_runner/sql_provider.go @@ -1,5 +1,5 @@ // -// Copyright (c) 2015-2017 Snowplow Analytics Ltd. All rights reserved. +// Copyright (c) 2015-2020 Snowplow Analytics Ltd. All rights reserved. // // This program is licensed to you under the Apache License Version 2.0, // and you may not use this file except in compliance with the Apache License Version 2.0. diff --git a/sql_runner/yaml_provider.go b/sql_runner/yaml_provider.go index f4f2054..32f7f36 100644 --- a/sql_runner/yaml_provider.go +++ b/sql_runner/yaml_provider.go @@ -1,5 +1,5 @@ // -// Copyright (c) 2015-2017 Snowplow Analytics Ltd. All rights reserved. +// Copyright (c) 2015-2020 Snowplow Analytics Ltd. All rights reserved. // // This program is licensed to you under the Apache License Version 2.0, // and you may not use this file except in compliance with the Apache License Version 2.0. diff --git a/sql_runner/yaml_utils.go b/sql_runner/yaml_utils.go index 46f391d..bb79151 100644 --- a/sql_runner/yaml_utils.go +++ b/sql_runner/yaml_utils.go @@ -1,5 +1,5 @@ // -// Copyright (c) 2015-2017 Snowplow Analytics Ltd. All rights reserved. +// Copyright (c) 2015-2020 Snowplow Analytics Ltd. All rights reserved. // // This program is licensed to you under the Apache License Version 2.0, // and you may not use this file except in compliance with the Apache License Version 2.0. 
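
The `for i := range cols` change in printSfTable above touches a standard database/sql idiom: scanning every column as sql.RawBytes so arbitrary query output can be rendered without knowing column types in advance. A self-contained version of that pattern (assumes `import "database/sql"`):

    // collectRows drains *sql.Rows into strings, one slice per row, by
    // scanning each column as sql.RawBytes regardless of its declared type.
    func collectRows(rows *sql.Rows) ([][]string, error) {
    	cols, err := rows.Columns()
    	if err != nil {
    		return nil, err
    	}
    	var out [][]string
    	for rows.Next() {
    		vals := make([]interface{}, len(cols))
    		for i := range cols {
    			vals[i] = new(sql.RawBytes)
    		}
    		if err := rows.Scan(vals...); err != nil {
    			return nil, err
    		}
    		row := make([]string, len(cols))
    		for i, v := range vals {
    			row[i] = string(*(v.(*sql.RawBytes)))
    		}
    		out = append(out, row)
    	}
    	return out, rows.Err()
    }
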
diff --git a/sql_runner/yaml_utils_test.go b/sql_runner/yaml_utils_test.go index b037f50..eeca9fc 100644 --- a/sql_runner/yaml_utils_test.go +++ b/sql_runner/yaml_utils_test.go @@ -1,5 +1,5 @@ // -// Copyright (c) 2015-2017 Snowplow Analytics Ltd. All rights reserved. +// Copyright (c) 2015-2020 Snowplow Analytics Ltd. All rights reserved. // // This program is licensed to you under the Apache License Version 2.0, // and you may not use this file except in compliance with the Apache License Version 2.0. diff --git a/vagrant/.gitignore b/vagrant/.gitignore deleted file mode 100644 index 0f9bcfd..0000000 --- a/vagrant/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -ansible -oss-playbooks -.peru diff --git a/vagrant/ansible.hosts b/vagrant/ansible.hosts deleted file mode 100644 index 588fa08..0000000 --- a/vagrant/ansible.hosts +++ /dev/null @@ -1,2 +0,0 @@ -[vagrant] -127.0.0.1:2222 diff --git a/vagrant/peru.yaml b/vagrant/peru.yaml deleted file mode 100644 index 234ac18..0000000 --- a/vagrant/peru.yaml +++ /dev/null @@ -1,7 +0,0 @@ -imports: - ansible_playbooks: oss-playbooks - -git module ansible_playbooks: - url: https://github.com/snowplow/ansible-playbooks.git - # Comment out to fetch a specific rev instead of master: - # rev: xxx diff --git a/vagrant/push.bash b/vagrant/push.bash deleted file mode 100755 index 43b187a..0000000 --- a/vagrant/push.bash +++ /dev/null @@ -1,188 +0,0 @@ -#!/bin/bash -set -e - -# Constants -bintray_package=sql-runner -bintray_artifact_prefix=sql_runner_ -bintray_user=snowplowbot -bintray_repository=snowplow/snowplow-generic -guest_repo_path=/vagrant -dist_path=dist -build_dir=/opt/gopath/src/github.com/snowplow/sql-runner -build_cmd="godep go build" - -# Similar to Perl die -function die() { - echo "$@" 1>&2 ; exit 1; -} - -# Check if our Vagrant box is running. Expects `vagrant status` to look like: -# -# > Current machine states: -# > -# > default poweroff (virtualbox) -# > -# > The VM is powered off. To restart the VM, simply run `vagrant up` -# -# Parameters: -# 1. out_running (out parameter) -function is_running { - [ "$#" -eq 1 ] || die "1 argument required, $# provided" - local __out_running=$1 - - set +e - vagrant status | sed -n 3p | grep -q "^default\s*running (virtualbox)$" - local retval=${?} - set -e - if [ ${retval} -eq "0" ] ; then - eval ${__out_running}=1 - else - eval ${__out_running}=0 - fi -} - -# Get version, checking we are on the latest -# -# Parameters: -# 1. out_version (out parameter) -# 2. out_error (out parameter) -function get_version { - [ "$#" -eq 2 ] || die "2 arguments required, $# provided" - local __out_version=$1 - local __out_error=$2 - - file_version=`cat VERSION` - tag_version=`git describe --abbrev=0 --tags` - if [ ${file_version} != ${tag_version} ] ; then - eval ${__out_error}="'File version ${file_version} != tag version ${tag_version}'" - else - eval ${__out_version}=${file_version} - fi -} - -# Go to parent-parent dir of this script -function cd_root() { - source="${BASH_SOURCE[0]}" - while [ -h "${source}" ] ; do source="$(readlink "${source}")"; done - dir="$( cd -P "$( dirname "${source}" )/.." && pwd )" - cd ${dir} -} - -# Create our version in BinTray. Does nothing -# if the version already exists -# -# Parameters: -# 1. package_version -# 2. 
out_error (out parameter) -function create_bintray_package() { - [ "$#" -eq 2 ] || die "2 arguments required, $# provided" - local __package_version=$1 - local __out_error=$2 - - echo "========================================" - echo "CREATING BINTRAY VERSION ${__package_version}*" - echo "* if it doesn't already exist" - echo "----------------------------------------" - - http_status=`echo '{"name":"'${__package_version}'","desc":"Release of '${bintray_package}'"}' | curl -d @- \ - "https://api.bintray.com/packages/${bintray_repository}/${bintray_package}/versions" \ - --write-out "%{http_code}\n" --silent --output /dev/null \ - --header "Content-Type:application/json" \ - -u${bintray_user}:${bintray_api_key}` - - http_status_class=${http_status:0:1} - ok_classes=("2" "3") - - if [ ${http_status} == "409" ] ; then - echo "... version ${__package_version} already exists, skipping." - elif [[ ! ${ok_classes[*]} =~ ${http_status_class} ]] ; then - eval ${__out_error}="'BinTray API response ${http_status} is not 409 (package already exists) nor in 2xx or 3xx range'" - fi -} - -# Zips all of our binaries into individua packages -# -# Parameters: -# 1. artifact_version -# 2. out_artifact_name (out parameter) -# 3. out_artifact_[atj] (out parameter) -function build_artifact() { - [ "$#" -eq 4 ] || die "4 arguments required, $# provided" - local __artifact_version=$1 - local __goos_type=$2 - local __goos_arch=$3 - local __out_binary=$4 - - artifact_root="${bintray_artifact_prefix}${__artifact_version}_${__goos_type}_${__goos_arch}" - artifact_name="${artifact_root}.zip" - echo "================================================" - echo "BUILDING ARTIFACT ${artifact_name}" - echo "------------------------------------------------" - - vagrant ssh -c "cd ${build_dir} && export GOOS=${__goos_type} && export GOARCH=${__goos_arch} && ${build_cmd} -o ${guest_repo_path}/${dist_path}/${__out_binary}" - cd ${dist_path} - zip ${artifact_name} ${__out_binary} - cd .. -} - -# Uploads our artifact to BinTray -# -# Parameters: -# 1. artifact_name -# 2. artifact_path -# 3. out_error (out parameter) -function upload_artifact_to_bintray() { - [ "$#" -eq 4 ] || die "4 arguments required, $# provided" - local __artifact_version=$1 - local __goos_type=$2 - local __goos_arch=$3 - local __out_error=$4 - - artifact_root="${bintray_artifact_prefix}${__artifact_version}_${__goos_type}_${__goos_arch}" - artifact_name="${artifact_root}.zip" - echo "===============================" - echo "UPLOADING ARTIFACT TO BINTRAY*" - echo "* 5-10 minutes" - echo "-------------------------------" - - http_status=`curl -T ${dist_path}/${artifact_name} \ - "https://api.bintray.com/content/${bintray_repository}/${bintray_package}/${version}/${artifact_name}?publish=1&override=1" \ - -H "Transfer-Encoding: chunked" \ - --write-out "%{http_code}\n" --silent --output /dev/null \ - -u${bintray_user}:${bintray_api_key}` - - http_status_class=${http_status:0:1} - ok_classes=("2" "3") - - if [[ ! ${ok_classes[*]} =~ ${http_status_class} ]] ; then - eval ${__out_error}="'BinTray API response ${http_status} is not in 2xx or 3xx range'" - fi -} - -cd_root - -# Precondition for running -running=0 && is_running "running" -[ ${running} -eq 1 ] || die "Vagrant guest must be running to push" - -# Precondition -version="" && error="" && get_version "version" "error" -[ "${error}" ] && die "Versions don't match: ${error}. Are you trying to publish an old version, or maybe on the wrong branch?" 
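The create_bintray_package and upload_artifact_to_bintray functions above classify Bintray responses by the first digit of the HTTP status code, accepting anything in the 2xx or 3xx range and treating 409 as a harmless "version already exists". A rough Go equivalent of that check follows; the function, URL handling, and payload are hypothetical and are not part of this patch or the scripts it deletes:

```go
package example

import (
	"fmt"
	"net/http"
	"strings"
)

// createVersion POSTs a version to a package API, treating 409 as
// "already exists" and any 2xx/3xx status as success, mirroring the
// status-class check in create_bintray_package.
func createVersion(url, user, apiKey, version string) error {
	payload := fmt.Sprintf(`{"name":%q,"desc":"Release of sql-runner"}`, version)
	req, err := http.NewRequest(http.MethodPost, url, strings.NewReader(payload))
	if err != nil {
		return err
	}
	req.SetBasicAuth(user, apiKey)
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	switch {
	case resp.StatusCode == http.StatusConflict: // 409: version already exists
		return nil
	case resp.StatusCode >= 200 && resp.StatusCode < 400: // 2xx / 3xx classes
		return nil
	default:
		return fmt.Errorf("unexpected status %d from Bintray", resp.StatusCode)
	}
}
```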
- -# Can't pass args thru vagrant push so have to prompt -read -e -p "Please enter API key for Bintray user ${bintray_user}: " bintray_api_key - -# Build artifacts -build_artifact "${version}" "windows" "amd64" "sql-runner.exe" -build_artifact "${version}" "linux" "amd64" "sql-runner" -build_artifact "${version}" "darwin" "amd64" "sql-runner" - -create_bintray_package "${version}" "error" -[ "${error}" ] && die "Error creating package: ${error}" - -upload_artifact_to_bintray "${version}" "windows" "amd64" "error" -[ "${error}" ] && die "Error uploading package: ${error}" -upload_artifact_to_bintray "${version}" "linux" "amd64" "error" -[ "${error}" ] && die "Error uploading package: ${error}" -upload_artifact_to_bintray "${version}" "darwin" "amd64" "error" -[ "${error}" ] && die "Error uploading package: ${error}" diff --git a/vagrant/up.bash b/vagrant/up.bash deleted file mode 100755 index f84ff23..0000000 --- a/vagrant/up.bash +++ /dev/null @@ -1,57 +0,0 @@ -#!/bin/bash -set -e - -vagrant_dir=/vagrant/vagrant -bashrc=/home/vagrant/.bashrc - -echo "========================================" -echo "INSTALLING PERU AND ANSIBLE DEPENDENCIES" -echo "----------------------------------------" -apt-get update -apt-get install -y language-pack-en git unzip libyaml-dev python3-pip python-yaml python-paramiko python-jinja2 - -echo "===============" -echo "INSTALLING PERU" -echo "---------------" -sudo pip3 install peru - -echo "==================" -echo "INSTALLING ANSIBLE" -echo "------------------" -apt-get update -apt-get install software-properties-common -apt-add-repository ppa:ansible/ansible -apt-get update -apt-get install -y ansible - -echo "=======================================" -echo "CLONING PLAYBOOKS WITH PERU" -echo "---------------------------------------" -cd ${vagrant_dir} && peru sync -v -echo "... done" - -hosts=${vagrant_dir}/ansible.hosts - -echo "===================" -echo "CONFIGURING ANSIBLE" -echo "-------------------" -touch ${bashrc} -echo "export ANSIBLE_HOSTS=${hosts}" >> ${bashrc} -echo "... done" - -echo "==========================================" -echo "RUNNING PLAYBOOKS WITH ANSIBLE*" -echo "* no output while each playbook is running" -echo "------------------------------------------" -while read pb; do - su - -c "ansible-playbook ${vagrant_dir}/${pb} --connection=local --inventory-file=${hosts}" vagrant -done <${vagrant_dir}/up.playbooks - -guidance=${vagrant_dir}/up.guidance - -if [ -f ${guidance} ]; then - echo "===========" - echo "PLEASE READ" - echo "-----------" - cat $guidance -fi diff --git a/vagrant/up.guidance b/vagrant/up.guidance deleted file mode 100644 index 6fdf999..0000000 --- a/vagrant/up.guidance +++ /dev/null @@ -1,5 +0,0 @@ -To get started: -vagrant ssh -cd /opt/gopath/src/github.com/snowplow/sql-runner -dep ensure -go build -o sql-runner ./sql_runner/ diff --git a/vagrant/up.playbooks b/vagrant/up.playbooks deleted file mode 100644 index e7d80c6..0000000 --- a/vagrant/up.playbooks +++ /dev/null @@ -1,4 +0,0 @@ -oss-playbooks/golang.yml -oss-playbooks/postgres.yml -oss-playbooks/consul.yml -oss-playbooks/golang-dep.yml diff --git a/vendor/github.com/aws/aws-sdk-go/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go/LICENSE.txt deleted file mode 100644 index d645695..0000000 --- a/vendor/github.com/aws/aws-sdk-go/LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. 
- - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
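Every hunk from here to the end of the patch deletes part of the committed vendor tree for aws-sdk-go. Together with the removal of the dep ensure step in up.guidance, this reflects a move away from checked-in vendoring toward declaring and pinning dependencies in go.mod. A hypothetical sketch of such a declaration; the module path matches the build_dir used in push.bash, but the Go version and dependency pins are purely illustrative:

```
module github.com/snowplow/sql-runner

go 1.13

require (
	// Illustrative pins; the real ones live in the repository's
	// go.mod, which this diff does not show.
	github.com/aws/aws-sdk-go v1.15.27
	github.com/go-pg/pg v6.14.5+incompatible
	github.com/olekukonko/tablewriter v0.0.4
	github.com/snowflakedb/gosnowflake v1.3.4
)
```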
diff --git a/vendor/github.com/aws/aws-sdk-go/NOTICE.txt b/vendor/github.com/aws/aws-sdk-go/NOTICE.txt deleted file mode 100644 index 5f14d11..0000000 --- a/vendor/github.com/aws/aws-sdk-go/NOTICE.txt +++ /dev/null @@ -1,3 +0,0 @@ -AWS SDK for Go -Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. -Copyright 2014-2015 Stripe, Inc. diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go deleted file mode 100644 index 56fdfc2..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go +++ /dev/null @@ -1,145 +0,0 @@ -// Package awserr represents API error interface accessors for the SDK. -package awserr - -// An Error wraps lower level errors with code, message and an original error. -// The underlying concrete error type may also satisfy other interfaces which -// can be to used to obtain more specific information about the error. -// -// Calling Error() or String() will always include the full information about -// an error based on its underlying type. -// -// Example: -// -// output, err := s3manage.Upload(svc, input, opts) -// if err != nil { -// if awsErr, ok := err.(awserr.Error); ok { -// // Get error details -// log.Println("Error:", awsErr.Code(), awsErr.Message()) -// -// // Prints out full error message, including original error if there was one. -// log.Println("Error:", awsErr.Error()) -// -// // Get original error -// if origErr := awsErr.OrigErr(); origErr != nil { -// // operate on original error. -// } -// } else { -// fmt.Println(err.Error()) -// } -// } -// -type Error interface { - // Satisfy the generic error interface. - error - - // Returns the short phrase depicting the classification of the error. - Code() string - - // Returns the error details message. - Message() string - - // Returns the original error if one was set. Nil is returned if not set. - OrigErr() error -} - -// BatchError is a batch of errors which also wraps lower level errors with -// code, message, and original errors. Calling Error() will include all errors -// that occurred in the batch. -// -// Deprecated: Replaced with BatchedErrors. Only defined for backwards -// compatibility. -type BatchError interface { - // Satisfy the generic error interface. - error - - // Returns the short phrase depicting the classification of the error. - Code() string - - // Returns the error details message. - Message() string - - // Returns the original error if one was set. Nil is returned if not set. - OrigErrs() []error -} - -// BatchedErrors is a batch of errors which also wraps lower level errors with -// code, message, and original errors. Calling Error() will include all errors -// that occurred in the batch. -// -// Replaces BatchError -type BatchedErrors interface { - // Satisfy the base Error interface. - Error - - // Returns the original error if one was set. Nil is returned if not set. - OrigErrs() []error -} - -// New returns an Error object described by the code, message, and origErr. -// -// If origErr satisfies the Error interface it will not be wrapped within a new -// Error object and will instead be returned. -func New(code, message string, origErr error) Error { - var errs []error - if origErr != nil { - errs = append(errs, origErr) - } - return newBaseError(code, message, errs) -} - -// NewBatchError returns an BatchedErrors with a collection of errors as an -// array of errors. 
-func NewBatchError(code, message string, errs []error) BatchedErrors { - return newBaseError(code, message, errs) -} - -// A RequestFailure is an interface to extract request failure information from -// an Error such as the request ID of the failed request returned by a service. -// RequestFailures may not always have a requestID value if the request failed -// prior to reaching the service such as a connection error. -// -// Example: -// -// output, err := s3manage.Upload(svc, input, opts) -// if err != nil { -// if reqerr, ok := err.(RequestFailure); ok { -// log.Println("Request failed", reqerr.Code(), reqerr.Message(), reqerr.RequestID()) -// } else { -// log.Println("Error:", err.Error()) -// } -// } -// -// Combined with awserr.Error: -// -// output, err := s3manage.Upload(svc, input, opts) -// if err != nil { -// if awsErr, ok := err.(awserr.Error); ok { -// // Generic AWS Error with Code, Message, and original error (if any) -// fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) -// -// if reqErr, ok := err.(awserr.RequestFailure); ok { -// // A service error occurred -// fmt.Println(reqErr.StatusCode(), reqErr.RequestID()) -// } -// } else { -// fmt.Println(err.Error()) -// } -// } -// -type RequestFailure interface { - Error - - // The status code of the HTTP response. - StatusCode() int - - // The request ID returned by the service for a request failure. This will - // be empty if no request ID is available such as the request failed due - // to a connection error. - RequestID() string -} - -// NewRequestFailure returns a new request error wrapper for the given Error -// provided. -func NewRequestFailure(err Error, statusCode int, reqID string) RequestFailure { - return newRequestError(err, statusCode, reqID) -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go deleted file mode 100644 index 0202a00..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go +++ /dev/null @@ -1,194 +0,0 @@ -package awserr - -import "fmt" - -// SprintError returns a string of the formatted error code. -// -// Both extra and origErr are optional. If they are included their lines -// will be added, but if they are not included their lines will be ignored. -func SprintError(code, message, extra string, origErr error) string { - msg := fmt.Sprintf("%s: %s", code, message) - if extra != "" { - msg = fmt.Sprintf("%s\n\t%s", msg, extra) - } - if origErr != nil { - msg = fmt.Sprintf("%s\ncaused by: %s", msg, origErr.Error()) - } - return msg -} - -// A baseError wraps the code and message which defines an error. It also -// can be used to wrap an original error object. -// -// Should be used as the root for errors satisfying the awserr.Error. Also -// for any error which does not fit into a specific error wrapper type. -type baseError struct { - // Classification of error - code string - - // Detailed information about error - message string - - // Optional original error this error is based off of. Allows building - // chained errors. - errs []error -} - -// newBaseError returns an error object for the code, message, and errors. -// -// code is a short no whitespace phrase depicting the classification of -// the error that is being created. -// -// message is the free flow string containing detailed information about the -// error. -// -// origErrs is the error objects which will be nested under the new errors to -// be returned. 
-func newBaseError(code, message string, origErrs []error) *baseError { - b := &baseError{ - code: code, - message: message, - errs: origErrs, - } - - return b -} - -// Error returns the string representation of the error. -// -// See ErrorWithExtra for formatting. -// -// Satisfies the error interface. -func (b baseError) Error() string { - size := len(b.errs) - if size > 0 { - return SprintError(b.code, b.message, "", errorList(b.errs)) - } - - return SprintError(b.code, b.message, "", nil) -} - -// String returns the string representation of the error. -// Alias for Error to satisfy the stringer interface. -func (b baseError) String() string { - return b.Error() -} - -// Code returns the short phrase depicting the classification of the error. -func (b baseError) Code() string { - return b.code -} - -// Message returns the error details message. -func (b baseError) Message() string { - return b.message -} - -// OrigErr returns the original error if one was set. Nil is returned if no -// error was set. This only returns the first element in the list. If the full -// list is needed, use BatchedErrors. -func (b baseError) OrigErr() error { - switch len(b.errs) { - case 0: - return nil - case 1: - return b.errs[0] - default: - if err, ok := b.errs[0].(Error); ok { - return NewBatchError(err.Code(), err.Message(), b.errs[1:]) - } - return NewBatchError("BatchedErrors", - "multiple errors occurred", b.errs) - } -} - -// OrigErrs returns the original errors if one was set. An empty slice is -// returned if no error was set. -func (b baseError) OrigErrs() []error { - return b.errs -} - -// So that the Error interface type can be included as an anonymous field -// in the requestError struct and not conflict with the error.Error() method. -type awsError Error - -// A requestError wraps a request or service error. -// -// Composed of baseError for code, message, and original error. -type requestError struct { - awsError - statusCode int - requestID string -} - -// newRequestError returns a wrapped error with additional information for -// request status code, and service requestID. -// -// Should be used to wrap all request which involve service requests. Even if -// the request failed without a service response, but had an HTTP status code -// that may be meaningful. -// -// Also wraps original errors via the baseError. -func newRequestError(err Error, statusCode int, requestID string) *requestError { - return &requestError{ - awsError: err, - statusCode: statusCode, - requestID: requestID, - } -} - -// Error returns the string representation of the error. -// Satisfies the error interface. -func (r requestError) Error() string { - extra := fmt.Sprintf("status code: %d, request id: %s", - r.statusCode, r.requestID) - return SprintError(r.Code(), r.Message(), extra, r.OrigErr()) -} - -// String returns the string representation of the error. -// Alias for Error to satisfy the stringer interface. -func (r requestError) String() string { - return r.Error() -} - -// StatusCode returns the wrapped status code for the error -func (r requestError) StatusCode() int { - return r.statusCode -} - -// RequestID returns the wrapped requestID -func (r requestError) RequestID() string { - return r.requestID -} - -// OrigErrs returns the original errors if one was set. An empty slice is -// returned if no error was set. 
-func (r requestError) OrigErrs() []error { - if b, ok := r.awsError.(BatchedErrors); ok { - return b.OrigErrs() - } - return []error{r.OrigErr()} -} - -// An error list that satisfies the golang interface -type errorList []error - -// Error returns the string representation of the error. -// -// Satisfies the error interface. -func (e errorList) Error() string { - msg := "" - // How do we want to handle the array size being zero - if size := len(e); size > 0 { - for i := 0; i < size; i++ { - msg += fmt.Sprintf("%s", e[i].Error()) - // We check the next index to see if it is within the slice. - // If it is, then we append a newline. We do this, because unit tests - // could be broken with the additional '\n' - if i+1 < size { - msg += "\n" - } - } - } - return msg -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go deleted file mode 100644 index 1a3d106..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go +++ /dev/null @@ -1,108 +0,0 @@ -package awsutil - -import ( - "io" - "reflect" - "time" -) - -// Copy deeply copies a src structure to dst. Useful for copying request and -// response structures. -// -// Can copy between structs of different type, but will only copy fields which -// are assignable, and exist in both structs. Fields which are not assignable, -// or do not exist in both structs are ignored. -func Copy(dst, src interface{}) { - dstval := reflect.ValueOf(dst) - if !dstval.IsValid() { - panic("Copy dst cannot be nil") - } - - rcopy(dstval, reflect.ValueOf(src), true) -} - -// CopyOf returns a copy of src while also allocating the memory for dst. -// src must be a pointer type or this operation will fail. -func CopyOf(src interface{}) (dst interface{}) { - dsti := reflect.New(reflect.TypeOf(src).Elem()) - dst = dsti.Interface() - rcopy(dsti, reflect.ValueOf(src), true) - return -} - -// rcopy performs a recursive copy of values from the source to destination. -// -// root is used to skip certain aspects of the copy which are not valid -// for the root node of a object. 
-func rcopy(dst, src reflect.Value, root bool) { - if !src.IsValid() { - return - } - - switch src.Kind() { - case reflect.Ptr: - if _, ok := src.Interface().(io.Reader); ok { - if dst.Kind() == reflect.Ptr && dst.Elem().CanSet() { - dst.Elem().Set(src) - } else if dst.CanSet() { - dst.Set(src) - } - } else { - e := src.Type().Elem() - if dst.CanSet() && !src.IsNil() { - if _, ok := src.Interface().(*time.Time); !ok { - dst.Set(reflect.New(e)) - } else { - tempValue := reflect.New(e) - tempValue.Elem().Set(src.Elem()) - // Sets time.Time's unexported values - dst.Set(tempValue) - } - } - if src.Elem().IsValid() { - // Keep the current root state since the depth hasn't changed - rcopy(dst.Elem(), src.Elem(), root) - } - } - case reflect.Struct: - t := dst.Type() - for i := 0; i < t.NumField(); i++ { - name := t.Field(i).Name - srcVal := src.FieldByName(name) - dstVal := dst.FieldByName(name) - if srcVal.IsValid() && dstVal.CanSet() { - rcopy(dstVal, srcVal, false) - } - } - case reflect.Slice: - if src.IsNil() { - break - } - - s := reflect.MakeSlice(src.Type(), src.Len(), src.Cap()) - dst.Set(s) - for i := 0; i < src.Len(); i++ { - rcopy(dst.Index(i), src.Index(i), false) - } - case reflect.Map: - if src.IsNil() { - break - } - - s := reflect.MakeMap(src.Type()) - dst.Set(s) - for _, k := range src.MapKeys() { - v := src.MapIndex(k) - v2 := reflect.New(v.Type()).Elem() - rcopy(v2, v, false) - dst.SetMapIndex(k, v2) - } - default: - // Assign the value if possible. If its not assignable, the value would - // need to be converted and the impact of that may be unexpected, or is - // not compatible with the dst type. - if src.Type().AssignableTo(dst.Type()) { - dst.Set(src) - } - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go deleted file mode 100644 index 59fa4a5..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go +++ /dev/null @@ -1,27 +0,0 @@ -package awsutil - -import ( - "reflect" -) - -// DeepEqual returns if the two values are deeply equal like reflect.DeepEqual. -// In addition to this, this method will also dereference the input values if -// possible so the DeepEqual performed will not fail if one parameter is a -// pointer and the other is not. -// -// DeepEqual will not perform indirection of nested values of the input parameters. -func DeepEqual(a, b interface{}) bool { - ra := reflect.Indirect(reflect.ValueOf(a)) - rb := reflect.Indirect(reflect.ValueOf(b)) - - if raValid, rbValid := ra.IsValid(), rb.IsValid(); !raValid && !rbValid { - // If the elements are both nil, and of the same type the are equal - // If they are of different types they are not equal - return reflect.TypeOf(a) == reflect.TypeOf(b) - } else if raValid != rbValid { - // Both values must be valid to be equal - return false - } - - return reflect.DeepEqual(ra.Interface(), rb.Interface()) -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go deleted file mode 100644 index 11c52c3..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go +++ /dev/null @@ -1,222 +0,0 @@ -package awsutil - -import ( - "reflect" - "regexp" - "strconv" - "strings" - - "github.com/jmespath/go-jmespath" -) - -var indexRe = regexp.MustCompile(`(.+)\[(-?\d+)?\]$`) - -// rValuesAtPath returns a slice of values found in value v. The values -// in v are explored recursively so all nested values are collected. 
-func rValuesAtPath(v interface{}, path string, createPath, caseSensitive, nilTerm bool) []reflect.Value { - pathparts := strings.Split(path, "||") - if len(pathparts) > 1 { - for _, pathpart := range pathparts { - vals := rValuesAtPath(v, pathpart, createPath, caseSensitive, nilTerm) - if len(vals) > 0 { - return vals - } - } - return nil - } - - values := []reflect.Value{reflect.Indirect(reflect.ValueOf(v))} - components := strings.Split(path, ".") - for len(values) > 0 && len(components) > 0 { - var index *int64 - var indexStar bool - c := strings.TrimSpace(components[0]) - if c == "" { // no actual component, illegal syntax - return nil - } else if caseSensitive && c != "*" && strings.ToLower(c[0:1]) == c[0:1] { - // TODO normalize case for user - return nil // don't support unexported fields - } - - // parse this component - if m := indexRe.FindStringSubmatch(c); m != nil { - c = m[1] - if m[2] == "" { - index = nil - indexStar = true - } else { - i, _ := strconv.ParseInt(m[2], 10, 32) - index = &i - indexStar = false - } - } - - nextvals := []reflect.Value{} - for _, value := range values { - // pull component name out of struct member - if value.Kind() != reflect.Struct { - continue - } - - if c == "*" { // pull all members - for i := 0; i < value.NumField(); i++ { - if f := reflect.Indirect(value.Field(i)); f.IsValid() { - nextvals = append(nextvals, f) - } - } - continue - } - - value = value.FieldByNameFunc(func(name string) bool { - if c == name { - return true - } else if !caseSensitive && strings.ToLower(name) == strings.ToLower(c) { - return true - } - return false - }) - - if nilTerm && value.Kind() == reflect.Ptr && len(components[1:]) == 0 { - if !value.IsNil() { - value.Set(reflect.Zero(value.Type())) - } - return []reflect.Value{value} - } - - if createPath && value.Kind() == reflect.Ptr && value.IsNil() { - // TODO if the value is the terminus it should not be created - // if the value to be set to its position is nil. - value.Set(reflect.New(value.Type().Elem())) - value = value.Elem() - } else { - value = reflect.Indirect(value) - } - - if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { - if !createPath && value.IsNil() { - value = reflect.ValueOf(nil) - } - } - - if value.IsValid() { - nextvals = append(nextvals, value) - } - } - values = nextvals - - if indexStar || index != nil { - nextvals = []reflect.Value{} - for _, valItem := range values { - value := reflect.Indirect(valItem) - if value.Kind() != reflect.Slice { - continue - } - - if indexStar { // grab all indices - for i := 0; i < value.Len(); i++ { - idx := reflect.Indirect(value.Index(i)) - if idx.IsValid() { - nextvals = append(nextvals, idx) - } - } - continue - } - - // pull out index - i := int(*index) - if i >= value.Len() { // check out of bounds - if createPath { - // TODO resize slice - } else { - continue - } - } else if i < 0 { // support negative indexing - i = value.Len() + i - } - value = reflect.Indirect(value.Index(i)) - - if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { - if !createPath && value.IsNil() { - value = reflect.ValueOf(nil) - } - } - - if value.IsValid() { - nextvals = append(nextvals, value) - } - } - values = nextvals - } - - components = components[1:] - } - return values -} - -// ValuesAtPath returns a list of values at the case insensitive lexical -// path inside of a structure. 
-func ValuesAtPath(i interface{}, path string) ([]interface{}, error) { - result, err := jmespath.Search(path, i) - if err != nil { - return nil, err - } - - v := reflect.ValueOf(result) - if !v.IsValid() || (v.Kind() == reflect.Ptr && v.IsNil()) { - return nil, nil - } - if s, ok := result.([]interface{}); ok { - return s, err - } - if v.Kind() == reflect.Map && v.Len() == 0 { - return nil, nil - } - if v.Kind() == reflect.Slice { - out := make([]interface{}, v.Len()) - for i := 0; i < v.Len(); i++ { - out[i] = v.Index(i).Interface() - } - return out, nil - } - - return []interface{}{result}, nil -} - -// SetValueAtPath sets a value at the case insensitive lexical path inside -// of a structure. -func SetValueAtPath(i interface{}, path string, v interface{}) { - if rvals := rValuesAtPath(i, path, true, false, v == nil); rvals != nil { - for _, rval := range rvals { - if rval.Kind() == reflect.Ptr && rval.IsNil() { - continue - } - setValue(rval, v) - } - } -} - -func setValue(dstVal reflect.Value, src interface{}) { - if dstVal.Kind() == reflect.Ptr { - dstVal = reflect.Indirect(dstVal) - } - srcVal := reflect.ValueOf(src) - - if !srcVal.IsValid() { // src is literal nil - if dstVal.CanAddr() { - // Convert to pointer so that pointer's value can be nil'ed - // dstVal = dstVal.Addr() - } - dstVal.Set(reflect.Zero(dstVal.Type())) - - } else if srcVal.Kind() == reflect.Ptr { - if srcVal.IsNil() { - srcVal = reflect.Zero(dstVal.Type()) - } else { - srcVal = reflect.ValueOf(src).Elem() - } - dstVal.Set(srcVal) - } else { - dstVal.Set(srcVal) - } - -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go deleted file mode 100644 index 710eb43..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go +++ /dev/null @@ -1,113 +0,0 @@ -package awsutil - -import ( - "bytes" - "fmt" - "io" - "reflect" - "strings" -) - -// Prettify returns the string representation of a value. -func Prettify(i interface{}) string { - var buf bytes.Buffer - prettify(reflect.ValueOf(i), 0, &buf) - return buf.String() -} - -// prettify will recursively walk value v to build a textual -// representation of the value. 
-func prettify(v reflect.Value, indent int, buf *bytes.Buffer) { - for v.Kind() == reflect.Ptr { - v = v.Elem() - } - - switch v.Kind() { - case reflect.Struct: - strtype := v.Type().String() - if strtype == "time.Time" { - fmt.Fprintf(buf, "%s", v.Interface()) - break - } else if strings.HasPrefix(strtype, "io.") { - buf.WriteString("") - break - } - - buf.WriteString("{\n") - - names := []string{} - for i := 0; i < v.Type().NumField(); i++ { - name := v.Type().Field(i).Name - f := v.Field(i) - if name[0:1] == strings.ToLower(name[0:1]) { - continue // ignore unexported fields - } - if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice || f.Kind() == reflect.Map) && f.IsNil() { - continue // ignore unset fields - } - names = append(names, name) - } - - for i, n := range names { - val := v.FieldByName(n) - buf.WriteString(strings.Repeat(" ", indent+2)) - buf.WriteString(n + ": ") - prettify(val, indent+2, buf) - - if i < len(names)-1 { - buf.WriteString(",\n") - } - } - - buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") - case reflect.Slice: - strtype := v.Type().String() - if strtype == "[]uint8" { - fmt.Fprintf(buf, " len %d", v.Len()) - break - } - - nl, id, id2 := "", "", "" - if v.Len() > 3 { - nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2) - } - buf.WriteString("[" + nl) - for i := 0; i < v.Len(); i++ { - buf.WriteString(id2) - prettify(v.Index(i), indent+2, buf) - - if i < v.Len()-1 { - buf.WriteString("," + nl) - } - } - - buf.WriteString(nl + id + "]") - case reflect.Map: - buf.WriteString("{\n") - - for i, k := range v.MapKeys() { - buf.WriteString(strings.Repeat(" ", indent+2)) - buf.WriteString(k.String() + ": ") - prettify(v.MapIndex(k), indent+2, buf) - - if i < v.Len()-1 { - buf.WriteString(",\n") - } - } - - buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") - default: - if !v.IsValid() { - fmt.Fprint(buf, "") - return - } - format := "%v" - switch v.Interface().(type) { - case string: - format = "%q" - case io.ReadSeeker, io.Reader: - format = "buffer(%p)" - } - fmt.Fprintf(buf, format, v.Interface()) - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go deleted file mode 100644 index b6432f1..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go +++ /dev/null @@ -1,89 +0,0 @@ -package awsutil - -import ( - "bytes" - "fmt" - "reflect" - "strings" -) - -// StringValue returns the string representation of a value. 
-func StringValue(i interface{}) string { - var buf bytes.Buffer - stringValue(reflect.ValueOf(i), 0, &buf) - return buf.String() -} - -func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) { - for v.Kind() == reflect.Ptr { - v = v.Elem() - } - - switch v.Kind() { - case reflect.Struct: - buf.WriteString("{\n") - - names := []string{} - for i := 0; i < v.Type().NumField(); i++ { - name := v.Type().Field(i).Name - f := v.Field(i) - if name[0:1] == strings.ToLower(name[0:1]) { - continue // ignore unexported fields - } - if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice) && f.IsNil() { - continue // ignore unset fields - } - names = append(names, name) - } - - for i, n := range names { - val := v.FieldByName(n) - buf.WriteString(strings.Repeat(" ", indent+2)) - buf.WriteString(n + ": ") - stringValue(val, indent+2, buf) - - if i < len(names)-1 { - buf.WriteString(",\n") - } - } - - buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") - case reflect.Slice: - nl, id, id2 := "", "", "" - if v.Len() > 3 { - nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2) - } - buf.WriteString("[" + nl) - for i := 0; i < v.Len(); i++ { - buf.WriteString(id2) - stringValue(v.Index(i), indent+2, buf) - - if i < v.Len()-1 { - buf.WriteString("," + nl) - } - } - - buf.WriteString(nl + id + "]") - case reflect.Map: - buf.WriteString("{\n") - - for i, k := range v.MapKeys() { - buf.WriteString(strings.Repeat(" ", indent+2)) - buf.WriteString(k.String() + ": ") - stringValue(v.MapIndex(k), indent+2, buf) - - if i < v.Len()-1 { - buf.WriteString(",\n") - } - } - - buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") - default: - format := "%v" - switch v.Interface().(type) { - case string: - format = "%q" - } - fmt.Fprintf(buf, format, v.Interface()) - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go deleted file mode 100644 index 212fe25..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go +++ /dev/null @@ -1,96 +0,0 @@ -package client - -import ( - "fmt" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/client/metadata" - "github.com/aws/aws-sdk-go/aws/request" -) - -// A Config provides configuration to a service client instance. -type Config struct { - Config *aws.Config - Handlers request.Handlers - Endpoint string - SigningRegion string - SigningName string - - // States that the signing name did not come from a modeled source but - // was derived based on other data. Used by service client constructors - // to determine if the signin name can be overriden based on metadata the - // service has. - SigningNameDerived bool -} - -// ConfigProvider provides a generic way for a service client to receive -// the ClientConfig without circular dependencies. -type ConfigProvider interface { - ClientConfig(serviceName string, cfgs ...*aws.Config) Config -} - -// ConfigNoResolveEndpointProvider same as ConfigProvider except it will not -// resolve the endpoint automatically. The service client's endpoint must be -// provided via the aws.Config.Endpoint field. -type ConfigNoResolveEndpointProvider interface { - ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) Config -} - -// A Client implements the base client request and response handling -// used by all service clients. -type Client struct { - request.Retryer - metadata.ClientInfo - - Config aws.Config - Handlers request.Handlers -} - -// New will return a pointer to a new initialized service client. 
-func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, options ...func(*Client)) *Client { - svc := &Client{ - Config: cfg, - ClientInfo: info, - Handlers: handlers.Copy(), - } - - switch retryer, ok := cfg.Retryer.(request.Retryer); { - case ok: - svc.Retryer = retryer - case cfg.Retryer != nil && cfg.Logger != nil: - s := fmt.Sprintf("WARNING: %T does not implement request.Retryer; using DefaultRetryer instead", cfg.Retryer) - cfg.Logger.Log(s) - fallthrough - default: - maxRetries := aws.IntValue(cfg.MaxRetries) - if cfg.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries { - maxRetries = 3 - } - svc.Retryer = DefaultRetryer{NumMaxRetries: maxRetries} - } - - svc.AddDebugHandlers() - - for _, option := range options { - option(svc) - } - - return svc -} - -// NewRequest returns a new Request pointer for the service API -// operation and parameters. -func (c *Client) NewRequest(operation *request.Operation, params interface{}, data interface{}) *request.Request { - return request.New(c.Config, c.ClientInfo, c.Handlers, c.Retryer, operation, params, data) -} - -// AddDebugHandlers injects debug logging handlers into the service to log request -// debug information. -func (c *Client) AddDebugHandlers() { - if !c.Config.LogLevel.AtLeast(aws.LogDebug) { - return - } - - c.Handlers.Send.PushFrontNamed(LogHTTPRequestHandler) - c.Handlers.Send.PushBackNamed(LogHTTPResponseHandler) -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go deleted file mode 100644 index a397b0d..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go +++ /dev/null @@ -1,116 +0,0 @@ -package client - -import ( - "strconv" - "time" - - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/internal/sdkrand" -) - -// DefaultRetryer implements basic retry logic using exponential backoff for -// most services. If you want to implement custom retry logic, implement the -// request.Retryer interface or create a structure type that composes this -// struct and override the specific methods. For example, to override only -// the MaxRetries method: -// -// type retryer struct { -// client.DefaultRetryer -// } -// -// // This implementation always has 100 max retries -// func (d retryer) MaxRetries() int { return 100 } -type DefaultRetryer struct { - NumMaxRetries int -} - -// MaxRetries returns the number of maximum returns the service will use to make -// an individual API request. -func (d DefaultRetryer) MaxRetries() int { - return d.NumMaxRetries -} - -// RetryRules returns the delay duration before retrying this request again -func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration { - // Set the upper limit of delay in retrying at ~five minutes - minTime := 30 - throttle := d.shouldThrottle(r) - if throttle { - if delay, ok := getRetryDelay(r); ok { - return delay - } - - minTime = 500 - } - - retryCount := r.RetryCount - if throttle && retryCount > 8 { - retryCount = 8 - } else if retryCount > 13 { - retryCount = 13 - } - - delay := (1 << uint(retryCount)) * (sdkrand.SeededRand.Intn(minTime) + minTime) - return time.Duration(delay) * time.Millisecond -} - -// ShouldRetry returns true if the request should be retried. 
-func (d DefaultRetryer) ShouldRetry(r *request.Request) bool { - // If one of the other handlers already set the retry state - // we don't want to override it based on the service's state - if r.Retryable != nil { - return *r.Retryable - } - - if r.HTTPResponse.StatusCode >= 500 && r.HTTPResponse.StatusCode != 501 { - return true - } - return r.IsErrorRetryable() || d.shouldThrottle(r) -} - -// ShouldThrottle returns true if the request should be throttled. -func (d DefaultRetryer) shouldThrottle(r *request.Request) bool { - switch r.HTTPResponse.StatusCode { - case 429: - case 502: - case 503: - case 504: - default: - return r.IsErrorThrottle() - } - - return true -} - -// This will look in the Retry-After header, RFC 7231, for how long -// it will wait before attempting another request -func getRetryDelay(r *request.Request) (time.Duration, bool) { - if !canUseRetryAfterHeader(r) { - return 0, false - } - - delayStr := r.HTTPResponse.Header.Get("Retry-After") - if len(delayStr) == 0 { - return 0, false - } - - delay, err := strconv.Atoi(delayStr) - if err != nil { - return 0, false - } - - return time.Duration(delay) * time.Second, true -} - -// Will look at the status code to see if the retry header pertains to -// the status code. -func canUseRetryAfterHeader(r *request.Request) bool { - switch r.HTTPResponse.StatusCode { - case 429: - case 503: - default: - return false - } - - return true -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go deleted file mode 100644 index ce9fb89..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go +++ /dev/null @@ -1,184 +0,0 @@ -package client - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "net/http/httputil" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/request" -) - -const logReqMsg = `DEBUG: Request %s/%s Details: ----[ REQUEST POST-SIGN ]----------------------------- -%s ------------------------------------------------------` - -const logReqErrMsg = `DEBUG ERROR: Request %s/%s: ----[ REQUEST DUMP ERROR ]----------------------------- -%s -------------------------------------------------------` - -type logWriter struct { - // Logger is what we will use to log the payload of a response. - Logger aws.Logger - // buf stores the contents of what has been read - buf *bytes.Buffer -} - -func (logger *logWriter) Write(b []byte) (int, error) { - return logger.buf.Write(b) -} - -type teeReaderCloser struct { - // io.Reader will be a tee reader that is used during logging. - // This structure will read from a body and write the contents to a logger. - io.Reader - // Source is used just to close when we are done reading. - Source io.ReadCloser -} - -func (reader *teeReaderCloser) Close() error { - return reader.Source.Close() -} - -// LogHTTPRequestHandler is a SDK request handler to log the HTTP request sent -// to a service. Will include the HTTP request body if the LogLevel of the -// request matches LogDebugWithHTTPBody. 
-var LogHTTPRequestHandler = request.NamedHandler{ - Name: "awssdk.client.LogRequest", - Fn: logRequest, -} - -func logRequest(r *request.Request) { - logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) - bodySeekable := aws.IsReaderSeekable(r.Body) - - b, err := httputil.DumpRequestOut(r.HTTPRequest, logBody) - if err != nil { - r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, - r.ClientInfo.ServiceName, r.Operation.Name, err)) - return - } - - if logBody { - if !bodySeekable { - r.SetReaderBody(aws.ReadSeekCloser(r.HTTPRequest.Body)) - } - // Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's - // Body as a NoOpCloser and will not be reset after read by the HTTP - // client reader. - r.ResetBody() - } - - r.Config.Logger.Log(fmt.Sprintf(logReqMsg, - r.ClientInfo.ServiceName, r.Operation.Name, string(b))) -} - -// LogHTTPRequestHeaderHandler is a SDK request handler to log the HTTP request sent -// to a service. Will only log the HTTP request's headers. The request payload -// will not be read. -var LogHTTPRequestHeaderHandler = request.NamedHandler{ - Name: "awssdk.client.LogRequestHeader", - Fn: logRequestHeader, -} - -func logRequestHeader(r *request.Request) { - b, err := httputil.DumpRequestOut(r.HTTPRequest, false) - if err != nil { - r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, - r.ClientInfo.ServiceName, r.Operation.Name, err)) - return - } - - r.Config.Logger.Log(fmt.Sprintf(logReqMsg, - r.ClientInfo.ServiceName, r.Operation.Name, string(b))) -} - -const logRespMsg = `DEBUG: Response %s/%s Details: ----[ RESPONSE ]-------------------------------------- -%s ------------------------------------------------------` - -const logRespErrMsg = `DEBUG ERROR: Response %s/%s: ----[ RESPONSE DUMP ERROR ]----------------------------- -%s ------------------------------------------------------` - -// LogHTTPResponseHandler is a SDK request handler to log the HTTP response -// received from a service. Will include the HTTP response body if the LogLevel -// of the request matches LogDebugWithHTTPBody. -var LogHTTPResponseHandler = request.NamedHandler{ - Name: "awssdk.client.LogResponse", - Fn: logResponse, -} - -func logResponse(r *request.Request) { - lw := &logWriter{r.Config.Logger, bytes.NewBuffer(nil)} - - logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) - if logBody { - r.HTTPResponse.Body = &teeReaderCloser{ - Reader: io.TeeReader(r.HTTPResponse.Body, lw), - Source: r.HTTPResponse.Body, - } - } - - handlerFn := func(req *request.Request) { - b, err := httputil.DumpResponse(req.HTTPResponse, false) - if err != nil { - lw.Logger.Log(fmt.Sprintf(logRespErrMsg, - req.ClientInfo.ServiceName, req.Operation.Name, err)) - return - } - - lw.Logger.Log(fmt.Sprintf(logRespMsg, - req.ClientInfo.ServiceName, req.Operation.Name, string(b))) - - if logBody { - b, err := ioutil.ReadAll(lw.buf) - if err != nil { - lw.Logger.Log(fmt.Sprintf(logRespErrMsg, - req.ClientInfo.ServiceName, req.Operation.Name, err)) - return - } - - lw.Logger.Log(string(b)) - } - } - - const handlerName = "awsdk.client.LogResponse.ResponseBody" - - r.Handlers.Unmarshal.SetBackNamed(request.NamedHandler{ - Name: handlerName, Fn: handlerFn, - }) - r.Handlers.UnmarshalError.SetBackNamed(request.NamedHandler{ - Name: handlerName, Fn: handlerFn, - }) -} - -// LogHTTPResponseHeaderHandler is a SDK request handler to log the HTTP -// response received from a service. Will only log the HTTP response's headers. -// The response payload will not be read. 
-var LogHTTPResponseHeaderHandler = request.NamedHandler{ - Name: "awssdk.client.LogResponseHeader", - Fn: logResponseHeader, -} - -func logResponseHeader(r *request.Request) { - if r.Config.Logger == nil { - return - } - - b, err := httputil.DumpResponse(r.HTTPResponse, false) - if err != nil { - r.Config.Logger.Log(fmt.Sprintf(logRespErrMsg, - r.ClientInfo.ServiceName, r.Operation.Name, err)) - return - } - - r.Config.Logger.Log(fmt.Sprintf(logRespMsg, - r.ClientInfo.ServiceName, r.Operation.Name, string(b))) -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go deleted file mode 100644 index 920e9fd..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go +++ /dev/null @@ -1,13 +0,0 @@ -package metadata - -// ClientInfo wraps immutable data from the client.Client structure. -type ClientInfo struct { - ServiceName string - ServiceID string - APIVersion string - Endpoint string - SigningName string - SigningRegion string - JSONVersion string - TargetPrefix string -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go deleted file mode 100644 index 5421b5d..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/config.go +++ /dev/null @@ -1,492 +0,0 @@ -package aws - -import ( - "net/http" - "time" - - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/endpoints" -) - -// UseServiceDefaultRetries instructs the config to use the service's own -// default number of retries. This will be the default action if -// Config.MaxRetries is nil also. -const UseServiceDefaultRetries = -1 - -// RequestRetryer is an alias for a type that implements the request.Retryer -// interface. -type RequestRetryer interface{} - -// A Config provides service configuration for service clients. By default, -// all clients will use the defaults.DefaultConfig tructure. -// -// // Create Session with MaxRetry configuration to be shared by multiple -// // service clients. -// sess := session.Must(session.NewSession(&aws.Config{ -// MaxRetries: aws.Int(3), -// })) -// -// // Create S3 service client with a specific Region. -// svc := s3.New(sess, &aws.Config{ -// Region: aws.String("us-west-2"), -// }) -type Config struct { - // Enables verbose error printing of all credential chain errors. - // Should be used when wanting to see all errors while attempting to - // retrieve credentials. - CredentialsChainVerboseErrors *bool - - // The credentials object to use when signing requests. Defaults to a - // chain of credential providers to search for credentials in environment - // variables, shared credential file, and EC2 Instance Roles. - Credentials *credentials.Credentials - - // An optional endpoint URL (hostname only or fully qualified URI) - // that overrides the default generated endpoint for a client. Set this - // to `""` to use the default generated endpoint. - // - // @note You must still provide a `Region` value when specifying an - // endpoint for a client. - Endpoint *string - - // The resolver to use for looking up endpoints for AWS service clients - // to use based on region. - EndpointResolver endpoints.Resolver - - // EnforceShouldRetryCheck is used in the AfterRetryHandler to always call - // ShouldRetry regardless of whether or not if request.Retryable is set. - // This will utilize ShouldRetry method of custom retryers. 
If EnforceShouldRetryCheck - // is not set, then ShouldRetry will only be called if request.Retryable is nil. - // Proper handling of the request.Retryable field is important when setting this field. - EnforceShouldRetryCheck *bool - - // The region to send requests to. This parameter is required and must - // be configured globally or on a per-client basis unless otherwise - // noted. A full list of regions is found in the "Regions and Endpoints" - // document. - // - // @see http://docs.aws.amazon.com/general/latest/gr/rande.html - // AWS Regions and Endpoints - Region *string - - // Set this to `true` to disable SSL when sending requests. Defaults - // to `false`. - DisableSSL *bool - - // The HTTP client to use when sending requests. Defaults to - // `http.DefaultClient`. - HTTPClient *http.Client - - // An integer value representing the logging level. The default log level - // is zero (LogOff), which represents no logging. To enable logging set - // to a LogLevel Value. - LogLevel *LogLevelType - - // The logger writer interface to write logging messages to. Defaults to - // standard out. - Logger Logger - - // The maximum number of times that a request will be retried for failures. - // Defaults to -1, which defers the max retry setting to the service - // specific configuration. - MaxRetries *int - - // Retryer guides how HTTP requests should be retried in case of - // recoverable failures. - // - // When nil or the value does not implement the request.Retryer interface, - // the client.DefaultRetryer will be used. - // - // When both Retryer and MaxRetries are non-nil, the former is used and - // the latter ignored. - // - // To set the Retryer field in a type-safe manner and with chaining, use - // the request.WithRetryer helper function: - // - // cfg := request.WithRetryer(aws.NewConfig(), myRetryer) - // - Retryer RequestRetryer - - // Disables semantic parameter validation, which validates input for - // missing required fields and/or other semantic request input errors. - DisableParamValidation *bool - - // Disables the computation of request and response checksums, e.g., - // CRC32 checksums in Amazon DynamoDB. - DisableComputeChecksums *bool - - // Set this to `true` to force the request to use path-style addressing, - // i.e., `http://s3.amazonaws.com/BUCKET/KEY`. By default, the S3 client - // will use virtual hosted bucket addressing when possible - // (`http://BUCKET.s3.amazonaws.com/KEY`). - // - // @note This configuration option is specific to the Amazon S3 service. - // @see http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html - // Amazon S3: Virtual Hosting of Buckets - S3ForcePathStyle *bool - - // Set this to `true` to disable the SDK adding the `Expect: 100-Continue` - // header to PUT requests over 2MB of content. 100-Continue instructs the - // HTTP client not to send the body until the service responds with a - // `continue` status. This is useful to prevent sending the request body - // until after the request is authenticated, and validated. - // - // http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html - // - // 100-Continue is only enabled for Go 1.6 and above. See `http.Transport`'s - // `ExpectContinueTimeout` for information on adjusting the continue wait - // timeout. https://golang.org/pkg/net/http/#Transport - // - // You should use this flag to disble 100-Continue if you experience issues - // with proxies or third party S3 compatible services. 
- S3Disable100Continue *bool - - // Set this to `true` to enable S3 Accelerate feature. For all operations - // compatible with S3 Accelerate will use the accelerate endpoint for - // requests. Requests not compatible will fall back to normal S3 requests. - // - // The bucket must be enable for accelerate to be used with S3 client with - // accelerate enabled. If the bucket is not enabled for accelerate an error - // will be returned. The bucket name must be DNS compatible to also work - // with accelerate. - S3UseAccelerate *bool - - // S3DisableContentMD5Validation config option is temporarily disabled, - // For S3 GetObject API calls, #1837. - // - // Set this to `true` to disable the S3 service client from automatically - // adding the ContentMD5 to S3 Object Put and Upload API calls. This option - // will also disable the SDK from performing object ContentMD5 validation - // on GetObject API calls. - S3DisableContentMD5Validation *bool - - // Set this to `true` to disable the EC2Metadata client from overriding the - // default http.Client's Timeout. This is helpful if you do not want the - // EC2Metadata client to create a new http.Client. This options is only - // meaningful if you're not already using a custom HTTP client with the - // SDK. Enabled by default. - // - // Must be set and provided to the session.NewSession() in order to disable - // the EC2Metadata overriding the timeout for default credentials chain. - // - // Example: - // sess := session.Must(session.NewSession(aws.NewConfig() - // .WithEC2MetadataDiableTimeoutOverride(true))) - // - // svc := s3.New(sess) - // - EC2MetadataDisableTimeoutOverride *bool - - // Instructs the endpoint to be generated for a service client to - // be the dual stack endpoint. The dual stack endpoint will support - // both IPv4 and IPv6 addressing. - // - // Setting this for a service which does not support dual stack will fail - // to make requets. It is not recommended to set this value on the session - // as it will apply to all service clients created with the session. Even - // services which don't support dual stack endpoints. - // - // If the Endpoint config value is also provided the UseDualStack flag - // will be ignored. - // - // Only supported with. - // - // sess := session.Must(session.NewSession()) - // - // svc := s3.New(sess, &aws.Config{ - // UseDualStack: aws.Bool(true), - // }) - UseDualStack *bool - - // SleepDelay is an override for the func the SDK will call when sleeping - // during the lifecycle of a request. Specifically this will be used for - // request delays. This value should only be used for testing. To adjust - // the delay of a request see the aws/client.DefaultRetryer and - // aws/request.Retryer. - // - // SleepDelay will prevent any Context from being used for canceling retry - // delay of an API operation. It is recommended to not use SleepDelay at all - // and specify a Retryer instead. - SleepDelay func(time.Duration) - - // DisableRestProtocolURICleaning will not clean the URL path when making rest protocol requests. - // Will default to false. This would only be used for empty directory names in s3 requests. 
- // - // Example: - // sess := session.Must(session.NewSession(&aws.Config{ - // DisableRestProtocolURICleaning: aws.Bool(true), - // })) - // - // svc := s3.New(sess) - // out, err := svc.GetObject(&s3.GetObjectInput { - // Bucket: aws.String("bucketname"), - // Key: aws.String("//foo//bar//moo"), - // }) - DisableRestProtocolURICleaning *bool -} - -// NewConfig returns a new Config pointer that can be chained with builder -// methods to set multiple configuration values inline without using pointers. -// -// // Create Session with MaxRetry configuration to be shared by multiple -// // service clients. -// sess := session.Must(session.NewSession(aws.NewConfig(). -// WithMaxRetries(3), -// )) -// -// // Create S3 service client with a specific Region. -// svc := s3.New(sess, aws.NewConfig(). -// WithRegion("us-west-2"), -// ) -func NewConfig() *Config { - return &Config{} -} - -// WithCredentialsChainVerboseErrors sets a config verbose errors boolean and returning -// a Config pointer. -func (c *Config) WithCredentialsChainVerboseErrors(verboseErrs bool) *Config { - c.CredentialsChainVerboseErrors = &verboseErrs - return c -} - -// WithCredentials sets a config Credentials value returning a Config pointer -// for chaining. -func (c *Config) WithCredentials(creds *credentials.Credentials) *Config { - c.Credentials = creds - return c -} - -// WithEndpoint sets a config Endpoint value returning a Config pointer for -// chaining. -func (c *Config) WithEndpoint(endpoint string) *Config { - c.Endpoint = &endpoint - return c -} - -// WithEndpointResolver sets a config EndpointResolver value returning a -// Config pointer for chaining. -func (c *Config) WithEndpointResolver(resolver endpoints.Resolver) *Config { - c.EndpointResolver = resolver - return c -} - -// WithRegion sets a config Region value returning a Config pointer for -// chaining. -func (c *Config) WithRegion(region string) *Config { - c.Region = ®ion - return c -} - -// WithDisableSSL sets a config DisableSSL value returning a Config pointer -// for chaining. -func (c *Config) WithDisableSSL(disable bool) *Config { - c.DisableSSL = &disable - return c -} - -// WithHTTPClient sets a config HTTPClient value returning a Config pointer -// for chaining. -func (c *Config) WithHTTPClient(client *http.Client) *Config { - c.HTTPClient = client - return c -} - -// WithMaxRetries sets a config MaxRetries value returning a Config pointer -// for chaining. -func (c *Config) WithMaxRetries(max int) *Config { - c.MaxRetries = &max - return c -} - -// WithDisableParamValidation sets a config DisableParamValidation value -// returning a Config pointer for chaining. -func (c *Config) WithDisableParamValidation(disable bool) *Config { - c.DisableParamValidation = &disable - return c -} - -// WithDisableComputeChecksums sets a config DisableComputeChecksums value -// returning a Config pointer for chaining. -func (c *Config) WithDisableComputeChecksums(disable bool) *Config { - c.DisableComputeChecksums = &disable - return c -} - -// WithLogLevel sets a config LogLevel value returning a Config pointer for -// chaining. -func (c *Config) WithLogLevel(level LogLevelType) *Config { - c.LogLevel = &level - return c -} - -// WithLogger sets a config Logger value returning a Config pointer for -// chaining. -func (c *Config) WithLogger(logger Logger) *Config { - c.Logger = logger - return c -} - -// WithS3ForcePathStyle sets a config S3ForcePathStyle value returning a Config -// pointer for chaining. 
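// The With* builders above (and Copy/MergeIn further down) let callers layer
// Configs without taking pointers by hand; only non-nil fields of an override
// are merged. A short usage sketch against the vendored aws-sdk-go v1 API
// (the region and retry counts are arbitrary):
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	base := aws.NewConfig().WithRegion("us-west-2").WithMaxRetries(3)

	// Copy returns a new Config; the override's nil Region leaves base's in place.
	derived := base.Copy(aws.NewConfig().WithMaxRetries(10))

	fmt.Println(aws.StringValue(base.Region), aws.IntValue(base.MaxRetries))       // us-west-2 3
	fmt.Println(aws.StringValue(derived.Region), aws.IntValue(derived.MaxRetries)) // us-west-2 10
}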
-func (c *Config) WithS3ForcePathStyle(force bool) *Config { - c.S3ForcePathStyle = &force - return c -} - -// WithS3Disable100Continue sets a config S3Disable100Continue value returning -// a Config pointer for chaining. -func (c *Config) WithS3Disable100Continue(disable bool) *Config { - c.S3Disable100Continue = &disable - return c -} - -// WithS3UseAccelerate sets a config S3UseAccelerate value returning a Config -// pointer for chaining. -func (c *Config) WithS3UseAccelerate(enable bool) *Config { - c.S3UseAccelerate = &enable - return c - -} - -// WithS3DisableContentMD5Validation sets a config -// S3DisableContentMD5Validation value returning a Config pointer for chaining. -func (c *Config) WithS3DisableContentMD5Validation(enable bool) *Config { - c.S3DisableContentMD5Validation = &enable - return c - -} - -// WithUseDualStack sets a config UseDualStack value returning a Config -// pointer for chaining. -func (c *Config) WithUseDualStack(enable bool) *Config { - c.UseDualStack = &enable - return c -} - -// WithEC2MetadataDisableTimeoutOverride sets a config EC2MetadataDisableTimeoutOverride value -// returning a Config pointer for chaining. -func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config { - c.EC2MetadataDisableTimeoutOverride = &enable - return c -} - -// WithSleepDelay overrides the function used to sleep while waiting for the -// next retry. Defaults to time.Sleep. -func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config { - c.SleepDelay = fn - return c -} - -// MergeIn merges the passed in configs into the existing config object. -func (c *Config) MergeIn(cfgs ...*Config) { - for _, other := range cfgs { - mergeInConfig(c, other) - } -} - -func mergeInConfig(dst *Config, other *Config) { - if other == nil { - return - } - - if other.CredentialsChainVerboseErrors != nil { - dst.CredentialsChainVerboseErrors = other.CredentialsChainVerboseErrors - } - - if other.Credentials != nil { - dst.Credentials = other.Credentials - } - - if other.Endpoint != nil { - dst.Endpoint = other.Endpoint - } - - if other.EndpointResolver != nil { - dst.EndpointResolver = other.EndpointResolver - } - - if other.Region != nil { - dst.Region = other.Region - } - - if other.DisableSSL != nil { - dst.DisableSSL = other.DisableSSL - } - - if other.HTTPClient != nil { - dst.HTTPClient = other.HTTPClient - } - - if other.LogLevel != nil { - dst.LogLevel = other.LogLevel - } - - if other.Logger != nil { - dst.Logger = other.Logger - } - - if other.MaxRetries != nil { - dst.MaxRetries = other.MaxRetries - } - - if other.Retryer != nil { - dst.Retryer = other.Retryer - } - - if other.DisableParamValidation != nil { - dst.DisableParamValidation = other.DisableParamValidation - } - - if other.DisableComputeChecksums != nil { - dst.DisableComputeChecksums = other.DisableComputeChecksums - } - - if other.S3ForcePathStyle != nil { - dst.S3ForcePathStyle = other.S3ForcePathStyle - } - - if other.S3Disable100Continue != nil { - dst.S3Disable100Continue = other.S3Disable100Continue - } - - if other.S3UseAccelerate != nil { - dst.S3UseAccelerate = other.S3UseAccelerate - } - - if other.S3DisableContentMD5Validation != nil { - dst.S3DisableContentMD5Validation = other.S3DisableContentMD5Validation - } - - if other.UseDualStack != nil { - dst.UseDualStack = other.UseDualStack - } - - if other.EC2MetadataDisableTimeoutOverride != nil { - dst.EC2MetadataDisableTimeoutOverride = other.EC2MetadataDisableTimeoutOverride - } - - if other.SleepDelay != nil { - dst.SleepDelay = 
other.SleepDelay - } - - if other.DisableRestProtocolURICleaning != nil { - dst.DisableRestProtocolURICleaning = other.DisableRestProtocolURICleaning - } - - if other.EnforceShouldRetryCheck != nil { - dst.EnforceShouldRetryCheck = other.EnforceShouldRetryCheck - } -} - -// Copy will return a shallow copy of the Config object. If any additional -// configurations are provided they will be merged into the new config returned. -func (c *Config) Copy(cfgs ...*Config) *Config { - dst := &Config{} - dst.MergeIn(c) - - for _, cfg := range cfgs { - dst.MergeIn(cfg) - } - - return dst -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context.go b/vendor/github.com/aws/aws-sdk-go/aws/context.go deleted file mode 100644 index 79f4268..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/context.go +++ /dev/null @@ -1,71 +0,0 @@ -package aws - -import ( - "time" -) - -// Context is an copy of the Go v1.7 stdlib's context.Context interface. -// It is represented as a SDK interface to enable you to use the "WithContext" -// API methods with Go v1.6 and a Context type such as golang.org/x/net/context. -// -// See https://golang.org/pkg/context on how to use contexts. -type Context interface { - // Deadline returns the time when work done on behalf of this context - // should be canceled. Deadline returns ok==false when no deadline is - // set. Successive calls to Deadline return the same results. - Deadline() (deadline time.Time, ok bool) - - // Done returns a channel that's closed when work done on behalf of this - // context should be canceled. Done may return nil if this context can - // never be canceled. Successive calls to Done return the same value. - Done() <-chan struct{} - - // Err returns a non-nil error value after Done is closed. Err returns - // Canceled if the context was canceled or DeadlineExceeded if the - // context's deadline passed. No other values for Err are defined. - // After Done is closed, successive calls to Err return the same value. - Err() error - - // Value returns the value associated with this context for key, or nil - // if no value is associated with key. Successive calls to Value with - // the same key returns the same result. - // - // Use context values only for request-scoped data that transits - // processes and API boundaries, not for passing optional parameters to - // functions. - Value(key interface{}) interface{} -} - -// BackgroundContext returns a context that will never be canceled, has no -// values, and no deadline. This context is used by the SDK to provide -// backwards compatibility with non-context API operations and functionality. -// -// Go 1.6 and before: -// This context function is equivalent to context.Background in the Go stdlib. -// -// Go 1.7 and later: -// The context returned will be the value returned by context.Background() -// -// See https://golang.org/pkg/context for more information on Contexts. -func BackgroundContext() Context { - return backgroundCtx -} - -// SleepWithContext will wait for the timer duration to expire, or the context -// is canceled. Which ever happens first. If the context is canceled the Context's -// error will be returned. -// -// Expects Context to always return a non-nil error if the Done channel is closed. 
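// SleepWithContext, defined just below, is the cancellable replacement for
// time.Sleep that the retry handlers rely on. A stdlib-only sketch of the
// same select pattern, using the standard context package rather than the
// SDK's Go 1.6-compatible aws.Context:
package main

import (
	"context"
	"fmt"
	"time"
)

func sleepWithContext(ctx context.Context, dur time.Duration) error {
	t := time.NewTimer(dur)
	defer t.Stop()

	select {
	case <-t.C:
		return nil // the full duration elapsed
	case <-ctx.Done():
		return ctx.Err() // the caller gave up first
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()

	// The one-second sleep is cut short by the 50ms context deadline.
	fmt.Println(sleepWithContext(ctx, time.Second)) // context deadline exceeded
}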
-func SleepWithContext(ctx Context, dur time.Duration) error { - t := time.NewTimer(dur) - defer t.Stop() - - select { - case <-t.C: - break - case <-ctx.Done(): - return ctx.Err() - } - - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_1_6.go b/vendor/github.com/aws/aws-sdk-go/aws/context_1_6.go deleted file mode 100644 index 8fdda53..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/context_1_6.go +++ /dev/null @@ -1,41 +0,0 @@ -// +build !go1.7 - -package aws - -import "time" - -// An emptyCtx is a copy of the Go 1.7 context.emptyCtx type. This is copied to -// provide a 1.6 and 1.5 safe version of context that is compatible with Go -// 1.7's Context. -// -// An emptyCtx is never canceled, has no values, and has no deadline. It is not -// struct{}, since vars of this type must have distinct addresses. -type emptyCtx int - -func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { - return -} - -func (*emptyCtx) Done() <-chan struct{} { - return nil -} - -func (*emptyCtx) Err() error { - return nil -} - -func (*emptyCtx) Value(key interface{}) interface{} { - return nil -} - -func (e *emptyCtx) String() string { - switch e { - case backgroundCtx: - return "aws.BackgroundContext" - } - return "unknown empty Context" -} - -var ( - backgroundCtx = new(emptyCtx) -) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/context_1_7.go deleted file mode 100644 index 064f75c..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/context_1_7.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build go1.7 - -package aws - -import "context" - -var ( - backgroundCtx = context.Background() -) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go deleted file mode 100644 index ff5d58e..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go +++ /dev/null @@ -1,387 +0,0 @@ -package aws - -import "time" - -// String returns a pointer to the string value passed in. -func String(v string) *string { - return &v -} - -// StringValue returns the value of the string pointer passed in or -// "" if the pointer is nil. -func StringValue(v *string) string { - if v != nil { - return *v - } - return "" -} - -// StringSlice converts a slice of string values into a slice of -// string pointers -func StringSlice(src []string) []*string { - dst := make([]*string, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// StringValueSlice converts a slice of string pointers into a slice of -// string values -func StringValueSlice(src []*string) []string { - dst := make([]string, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// StringMap converts a string map of string values into a string -// map of string pointers -func StringMap(src map[string]string) map[string]*string { - dst := make(map[string]*string) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// StringValueMap converts a string map of string pointers into a string -// map of string values -func StringValueMap(src map[string]*string) map[string]string { - dst := make(map[string]string) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Bool returns a pointer to the bool value passed in. -func Bool(v bool) *bool { - return &v -} - -// BoolValue returns the value of the bool pointer passed in or -// false if the pointer is nil. 
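// The String/StringValue family opening convert_types.go above exists because
// aws-sdk-go v1 models optional fields as pointers; the *Value accessors make
// nil pointers safe to read. A compact usage sketch:
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	s := aws.String("hello") // *string from a literal
	var missing *string      // nil pointer, e.g. an unset API field

	fmt.Println(aws.StringValue(s))       // "hello"
	fmt.Println(aws.StringValue(missing)) // "" — nil yields the zero value

	ptrs := aws.StringSlice([]string{"a", "b"})
	fmt.Println(aws.StringValueSlice(ptrs)) // [a b]
}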
-func BoolValue(v *bool) bool { - if v != nil { - return *v - } - return false -} - -// BoolSlice converts a slice of bool values into a slice of -// bool pointers -func BoolSlice(src []bool) []*bool { - dst := make([]*bool, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// BoolValueSlice converts a slice of bool pointers into a slice of -// bool values -func BoolValueSlice(src []*bool) []bool { - dst := make([]bool, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// BoolMap converts a string map of bool values into a string -// map of bool pointers -func BoolMap(src map[string]bool) map[string]*bool { - dst := make(map[string]*bool) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// BoolValueMap converts a string map of bool pointers into a string -// map of bool values -func BoolValueMap(src map[string]*bool) map[string]bool { - dst := make(map[string]bool) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Int returns a pointer to the int value passed in. -func Int(v int) *int { - return &v -} - -// IntValue returns the value of the int pointer passed in or -// 0 if the pointer is nil. -func IntValue(v *int) int { - if v != nil { - return *v - } - return 0 -} - -// IntSlice converts a slice of int values into a slice of -// int pointers -func IntSlice(src []int) []*int { - dst := make([]*int, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// IntValueSlice converts a slice of int pointers into a slice of -// int values -func IntValueSlice(src []*int) []int { - dst := make([]int, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// IntMap converts a string map of int values into a string -// map of int pointers -func IntMap(src map[string]int) map[string]*int { - dst := make(map[string]*int) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// IntValueMap converts a string map of int pointers into a string -// map of int values -func IntValueMap(src map[string]*int) map[string]int { - dst := make(map[string]int) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Int64 returns a pointer to the int64 value passed in. -func Int64(v int64) *int64 { - return &v -} - -// Int64Value returns the value of the int64 pointer passed in or -// 0 if the pointer is nil. 
-func Int64Value(v *int64) int64 { - if v != nil { - return *v - } - return 0 -} - -// Int64Slice converts a slice of int64 values into a slice of -// int64 pointers -func Int64Slice(src []int64) []*int64 { - dst := make([]*int64, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Int64ValueSlice converts a slice of int64 pointers into a slice of -// int64 values -func Int64ValueSlice(src []*int64) []int64 { - dst := make([]int64, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Int64Map converts a string map of int64 values into a string -// map of int64 pointers -func Int64Map(src map[string]int64) map[string]*int64 { - dst := make(map[string]*int64) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Int64ValueMap converts a string map of int64 pointers into a string -// map of int64 values -func Int64ValueMap(src map[string]*int64) map[string]int64 { - dst := make(map[string]int64) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Float64 returns a pointer to the float64 value passed in. -func Float64(v float64) *float64 { - return &v -} - -// Float64Value returns the value of the float64 pointer passed in or -// 0 if the pointer is nil. -func Float64Value(v *float64) float64 { - if v != nil { - return *v - } - return 0 -} - -// Float64Slice converts a slice of float64 values into a slice of -// float64 pointers -func Float64Slice(src []float64) []*float64 { - dst := make([]*float64, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Float64ValueSlice converts a slice of float64 pointers into a slice of -// float64 values -func Float64ValueSlice(src []*float64) []float64 { - dst := make([]float64, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Float64Map converts a string map of float64 values into a string -// map of float64 pointers -func Float64Map(src map[string]float64) map[string]*float64 { - dst := make(map[string]*float64) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Float64ValueMap converts a string map of float64 pointers into a string -// map of float64 values -func Float64ValueMap(src map[string]*float64) map[string]float64 { - dst := make(map[string]float64) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Time returns a pointer to the time.Time value passed in. -func Time(v time.Time) *time.Time { - return &v -} - -// TimeValue returns the value of the time.Time pointer passed in or -// time.Time{} if the pointer is nil. -func TimeValue(v *time.Time) time.Time { - if v != nil { - return *v - } - return time.Time{} -} - -// SecondsTimeValue converts an int64 pointer to a time.Time value -// representing seconds since Epoch or time.Time{} if the pointer is nil. -func SecondsTimeValue(v *int64) time.Time { - if v != nil { - return time.Unix((*v / 1000), 0) - } - return time.Time{} -} - -// MillisecondsTimeValue converts an int64 pointer to a time.Time value -// representing milliseconds sinch Epoch or time.Time{} if the pointer is nil. -func MillisecondsTimeValue(v *int64) time.Time { - if v != nil { - return time.Unix(0, (*v * 1000000)) - } - return time.Time{} -} - -// TimeUnixMilli returns a Unix timestamp in milliseconds from "January 1, 1970 UTC". 
-// The result is undefined if the Unix time cannot be represented by an int64. -// Which includes calling TimeUnixMilli on a zero Time is undefined. -// -// This utility is useful for service API's such as CloudWatch Logs which require -// their unix time values to be in milliseconds. -// -// See Go stdlib https://golang.org/pkg/time/#Time.UnixNano for more information. -func TimeUnixMilli(t time.Time) int64 { - return t.UnixNano() / int64(time.Millisecond/time.Nanosecond) -} - -// TimeSlice converts a slice of time.Time values into a slice of -// time.Time pointers -func TimeSlice(src []time.Time) []*time.Time { - dst := make([]*time.Time, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// TimeValueSlice converts a slice of time.Time pointers into a slice of -// time.Time values -func TimeValueSlice(src []*time.Time) []time.Time { - dst := make([]time.Time, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// TimeMap converts a string map of time.Time values into a string -// map of time.Time pointers -func TimeMap(src map[string]time.Time) map[string]*time.Time { - dst := make(map[string]*time.Time) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// TimeValueMap converts a string map of time.Time pointers into a string -// map of time.Time values -func TimeValueMap(src map[string]*time.Time) map[string]time.Time { - dst := make(map[string]time.Time) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go deleted file mode 100644 index cfcddf3..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go +++ /dev/null @@ -1,228 +0,0 @@ -package corehandlers - -import ( - "bytes" - "fmt" - "io/ioutil" - "net/http" - "net/url" - "regexp" - "strconv" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/request" -) - -// Interface for matching types which also have a Len method. -type lener interface { - Len() int -} - -// BuildContentLengthHandler builds the content length of a request based on the body, -// or will use the HTTPRequest.Header's "Content-Length" if defined. If unable -// to determine request body length and no "Content-Length" was specified it will panic. -// -// The Content-Length will only be added to the request if the length of the body -// is greater than 0. If the body is empty or the current `Content-Length` -// header is <= 0, the header will also be stripped. 
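// TimeUnixMilli, shown above, predates time.Time.UnixMilli (added in Go 1.17)
// and is plain nanosecond-to-millisecond arithmetic. A tiny self-contained
// sketch; the local helper simply repeats the deleted implementation:
package main

import (
	"fmt"
	"time"
)

func timeUnixMilli(t time.Time) int64 {
	return t.UnixNano() / int64(time.Millisecond/time.Nanosecond)
}

func main() {
	t := time.Date(2020, 7, 17, 0, 0, 0, 0, time.UTC)
	fmt.Println(timeUnixMilli(t)) // 1594944000000
}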
-var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLengthHandler", Fn: func(r *request.Request) { - var length int64 - - if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" { - length, _ = strconv.ParseInt(slength, 10, 64) - } else { - if r.Body != nil { - var err error - length, err = aws.SeekerLen(r.Body) - if err != nil { - r.Error = awserr.New(request.ErrCodeSerialization, "failed to get request body's length", err) - return - } - } - } - - if length > 0 { - r.HTTPRequest.ContentLength = length - r.HTTPRequest.Header.Set("Content-Length", fmt.Sprintf("%d", length)) - } else { - r.HTTPRequest.ContentLength = 0 - r.HTTPRequest.Header.Del("Content-Length") - } -}} - -var reStatusCode = regexp.MustCompile(`^(\d{3})`) - -// ValidateReqSigHandler is a request handler to ensure that the request's -// signature doesn't expire before it is sent. This can happen when a request -// is built and signed significantly before it is sent. Or significant delays -// occur when retrying requests that would cause the signature to expire. -var ValidateReqSigHandler = request.NamedHandler{ - Name: "core.ValidateReqSigHandler", - Fn: func(r *request.Request) { - // Unsigned requests are not signed - if r.Config.Credentials == credentials.AnonymousCredentials { - return - } - - signedTime := r.Time - if !r.LastSignedAt.IsZero() { - signedTime = r.LastSignedAt - } - - // 10 minutes to allow for some clock skew/delays in transmission. - // Would be improved with aws/aws-sdk-go#423 - if signedTime.Add(10 * time.Minute).After(time.Now()) { - return - } - - fmt.Println("request expired, resigning") - r.Sign() - }, -} - -// SendHandler is a request handler to send service request using HTTP client. -var SendHandler = request.NamedHandler{ - Name: "core.SendHandler", - Fn: func(r *request.Request) { - sender := sendFollowRedirects - if r.DisableFollowRedirects { - sender = sendWithoutFollowRedirects - } - - if request.NoBody == r.HTTPRequest.Body { - // Strip off the request body if the NoBody reader was used as a - // place holder for a request body. This prevents the SDK from - // making requests with a request body when it would be invalid - // to do so. - // - // Use a shallow copy of the http.Request to ensure the race condition - // of transport on Body will not trigger - reqOrig, reqCopy := r.HTTPRequest, *r.HTTPRequest - reqCopy.Body = nil - r.HTTPRequest = &reqCopy - defer func() { - r.HTTPRequest = reqOrig - }() - } - - var err error - r.HTTPResponse, err = sender(r) - if err != nil { - handleSendError(r, err) - } - }, -} - -func sendFollowRedirects(r *request.Request) (*http.Response, error) { - return r.Config.HTTPClient.Do(r.HTTPRequest) -} - -func sendWithoutFollowRedirects(r *request.Request) (*http.Response, error) { - transport := r.Config.HTTPClient.Transport - if transport == nil { - transport = http.DefaultTransport - } - - return transport.RoundTrip(r.HTTPRequest) -} - -func handleSendError(r *request.Request, err error) { - // Prevent leaking if an HTTPResponse was returned. Clean up - // the body. - if r.HTTPResponse != nil { - r.HTTPResponse.Body.Close() - } - // Capture the case where url.Error is returned for error processing - // response. e.g. 301 without location header comes back as string - // error and r.HTTPResponse is nil. Other URL redirect errors will - // comeback in a similar method. 
- if e, ok := err.(*url.Error); ok && e.Err != nil { - if s := reStatusCode.FindStringSubmatch(e.Err.Error()); s != nil { - code, _ := strconv.ParseInt(s[1], 10, 64) - r.HTTPResponse = &http.Response{ - StatusCode: int(code), - Status: http.StatusText(int(code)), - Body: ioutil.NopCloser(bytes.NewReader([]byte{})), - } - return - } - } - if r.HTTPResponse == nil { - // Add a dummy request response object to ensure the HTTPResponse - // value is consistent. - r.HTTPResponse = &http.Response{ - StatusCode: int(0), - Status: http.StatusText(int(0)), - Body: ioutil.NopCloser(bytes.NewReader([]byte{})), - } - } - // Catch all other request errors. - r.Error = awserr.New("RequestError", "send request failed", err) - r.Retryable = aws.Bool(true) // network errors are retryable - - // Override the error with a context canceled error, if that was canceled. - ctx := r.Context() - select { - case <-ctx.Done(): - r.Error = awserr.New(request.CanceledErrorCode, - "request context canceled", ctx.Err()) - r.Retryable = aws.Bool(false) - default: - } -} - -// ValidateResponseHandler is a request handler to validate service response. -var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseHandler", Fn: func(r *request.Request) { - if r.HTTPResponse.StatusCode == 0 || r.HTTPResponse.StatusCode >= 300 { - // this may be replaced by an UnmarshalError handler - r.Error = awserr.New("UnknownError", "unknown error", nil) - } -}} - -// AfterRetryHandler performs final checks to determine if the request should -// be retried and how long to delay. -var AfterRetryHandler = request.NamedHandler{Name: "core.AfterRetryHandler", Fn: func(r *request.Request) { - // If one of the other handlers already set the retry state - // we don't want to override it based on the service's state - if r.Retryable == nil || aws.BoolValue(r.Config.EnforceShouldRetryCheck) { - r.Retryable = aws.Bool(r.ShouldRetry(r)) - } - - if r.WillRetry() { - r.RetryDelay = r.RetryRules(r) - - if sleepFn := r.Config.SleepDelay; sleepFn != nil { - // Support SleepDelay for backwards compatibility and testing - sleepFn(r.RetryDelay) - } else if err := aws.SleepWithContext(r.Context(), r.RetryDelay); err != nil { - r.Error = awserr.New(request.CanceledErrorCode, - "request context canceled", err) - r.Retryable = aws.Bool(false) - return - } - - // when the expired token exception occurs the credentials - // need to be expired locally so that the next request to - // get credentials will trigger a credentials refresh. - if r.IsErrorExpired() { - r.Config.Credentials.Expire() - } - - r.RetryCount++ - r.Error = nil - } -}} - -// ValidateEndpointHandler is a request handler to validate a request had the -// appropriate Region and Endpoint set. Will set r.Error if the endpoint or -// region is not valid. 
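// sendWithoutFollowRedirects above sidesteps http.Client's redirect handling
// by calling the Transport directly: RoundTrip performs exactly one HTTP
// exchange and hands any 3xx back to the caller. A minimal sketch (the URL
// is a placeholder):
package main

import (
	"fmt"
	"net/http"
)

func main() {
	req, err := http.NewRequest("GET", "http://example.com/", nil)
	if err != nil {
		panic(err)
	}

	// http.DefaultTransport plays the role of r.Config.HTTPClient.Transport
	// in the deleted handler; a 301/302 here is returned, not followed.
	resp, err := http.DefaultTransport.RoundTrip(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	fmt.Println(resp.StatusCode)
}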
-var ValidateEndpointHandler = request.NamedHandler{Name: "core.ValidateEndpointHandler", Fn: func(r *request.Request) { - if r.ClientInfo.SigningRegion == "" && aws.StringValue(r.Config.Region) == "" { - r.Error = aws.ErrMissingRegion - } else if r.ClientInfo.Endpoint == "" { - r.Error = aws.ErrMissingEndpoint - } -}} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go deleted file mode 100644 index 7d50b15..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go +++ /dev/null @@ -1,17 +0,0 @@ -package corehandlers - -import "github.com/aws/aws-sdk-go/aws/request" - -// ValidateParametersHandler is a request handler to validate the input parameters. -// Validating parameters only has meaning if done prior to the request being sent. -var ValidateParametersHandler = request.NamedHandler{Name: "core.ValidateParametersHandler", Fn: func(r *request.Request) { - if !r.ParamsFilled() { - return - } - - if v, ok := r.Params.(request.Validator); ok { - if err := v.Validate(); err != nil { - r.Error = err - } - } -}} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go deleted file mode 100644 index a15f496..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go +++ /dev/null @@ -1,37 +0,0 @@ -package corehandlers - -import ( - "os" - "runtime" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/request" -) - -// SDKVersionUserAgentHandler is a request handler for adding the SDK Version -// to the user agent. -var SDKVersionUserAgentHandler = request.NamedHandler{ - Name: "core.SDKVersionUserAgentHandler", - Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion, - runtime.Version(), runtime.GOOS, runtime.GOARCH), -} - -const execEnvVar = `AWS_EXECUTION_ENV` -const execEnvUAKey = `exec_env` - -// AddHostExecEnvUserAgentHander is a request handler appending the SDK's -// execution environment to the user agent. -// -// If the environment variable AWS_EXECUTION_ENV is set, its value will be -// appended to the user agent string. -var AddHostExecEnvUserAgentHander = request.NamedHandler{ - Name: "core.AddHostExecEnvUserAgentHander", - Fn: func(r *request.Request) { - v := os.Getenv(execEnvVar) - if len(v) == 0 { - return - } - - request.AddToUserAgent(r, execEnvUAKey+"/"+v) - }, -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go deleted file mode 100644 index f298d65..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go +++ /dev/null @@ -1,102 +0,0 @@ -package credentials - -import ( - "github.com/aws/aws-sdk-go/aws/awserr" -) - -var ( - // ErrNoValidProvidersFoundInChain Is returned when there are no valid - // providers in the ChainProvider. - // - // This has been deprecated. For verbose error messaging set - // aws.Config.CredentialsChainVerboseErrors to true - // - // @readonly - ErrNoValidProvidersFoundInChain = awserr.New("NoCredentialProviders", - `no valid providers in chain. Deprecated. - For verbose messaging see aws.Config.CredentialsChainVerboseErrors`, - nil) -) - -// A ChainProvider will search for a provider which returns credentials -// and cache that provider until Retrieve is called again. 
-// -// The ChainProvider provides a way of chaining multiple providers together -// which will pick the first available using priority order of the Providers -// in the list. -// -// If none of the Providers retrieve valid credentials Value, ChainProvider's -// Retrieve() will return the error ErrNoValidProvidersFoundInChain. -// -// If a Provider is found which returns valid credentials Value ChainProvider -// will cache that Provider for all calls to IsExpired(), until Retrieve is -// called again. -// -// Example of ChainProvider to be used with an EnvProvider and EC2RoleProvider. -// In this example EnvProvider will first check if any credentials are available -// via the environment variables. If there are none ChainProvider will check -// the next Provider in the list, EC2RoleProvider in this case. If EC2RoleProvider -// does not return any credentials ChainProvider will return the error -// ErrNoValidProvidersFoundInChain -// -// creds := credentials.NewChainCredentials( -// []credentials.Provider{ -// &credentials.EnvProvider{}, -// &ec2rolecreds.EC2RoleProvider{ -// Client: ec2metadata.New(sess), -// }, -// }) -// -// // Usage of ChainCredentials with aws.Config -// svc := ec2.New(session.Must(session.NewSession(&aws.Config{ -// Credentials: creds, -// }))) -// -type ChainProvider struct { - Providers []Provider - curr Provider - VerboseErrors bool -} - -// NewChainCredentials returns a pointer to a new Credentials object -// wrapping a chain of providers. -func NewChainCredentials(providers []Provider) *Credentials { - return NewCredentials(&ChainProvider{ - Providers: append([]Provider{}, providers...), - }) -} - -// Retrieve returns the credentials value or error if no provider returned -// without error. -// -// If a provider is found it will be cached and any calls to IsExpired() -// will return the expired state of the cached provider. -func (c *ChainProvider) Retrieve() (Value, error) { - var errs []error - for _, p := range c.Providers { - creds, err := p.Retrieve() - if err == nil { - c.curr = p - return creds, nil - } - errs = append(errs, err) - } - c.curr = nil - - var err error - err = ErrNoValidProvidersFoundInChain - if c.VerboseErrors { - err = awserr.NewBatchError("NoCredentialProviders", "no valid providers in chain", errs) - } - return Value{}, err -} - -// IsExpired will returned the expired state of the currently cached provider -// if there is one. If there is no current provider, true will be returned. -func (c *ChainProvider) IsExpired() bool { - if c.curr != nil { - return c.curr.IsExpired() - } - - return true -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go deleted file mode 100644 index a270844..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go +++ /dev/null @@ -1,259 +0,0 @@ -// Package credentials provides credential retrieval and management -// -// The Credentials is the primary method of getting access to and managing -// credentials Values. Using dependency injection retrieval of the credential -// values is handled by a object which satisfies the Provider interface. -// -// By default the Credentials.Get() will cache the successful result of a -// Provider's Retrieve() until Provider.IsExpired() returns true. At which -// point Credentials will call Provider's Retrieve() to get new credential Value. -// -// The Provider is responsible for determining when credentials Value have expired. 
-// It is also important to note that Credentials will always call Retrieve the -// first time Credentials.Get() is called. -// -// Example of using the environment variable credentials. -// -// creds := credentials.NewEnvCredentials() -// -// // Retrieve the credentials value -// credValue, err := creds.Get() -// if err != nil { -// // handle error -// } -// -// Example of forcing credentials to expire and be refreshed on the next Get(). -// This may be helpful to proactively expire credentials and refresh them sooner -// than they would naturally expire on their own. -// -// creds := credentials.NewCredentials(&ec2rolecreds.EC2RoleProvider{}) -// creds.Expire() -// credsValue, err := creds.Get() -// // New credentials will be retrieved instead of from cache. -// -// -// Custom Provider -// -// Each Provider built into this package also provides a helper method to generate -// a Credentials pointer setup with the provider. To use a custom Provider just -// create a type which satisfies the Provider interface and pass it to the -// NewCredentials method. -// -// type MyProvider struct{} -// func (m *MyProvider) Retrieve() (Value, error) {...} -// func (m *MyProvider) IsExpired() bool {...} -// -// creds := credentials.NewCredentials(&MyProvider{}) -// credValue, err := creds.Get() -// -package credentials - -import ( - "sync" - "time" -) - -// AnonymousCredentials is an empty Credential object that can be used as -// dummy placeholder credentials for requests that do not need signed. -// -// This Credentials can be used to configure a service to not sign requests -// when making service API calls. For example, when accessing public -// s3 buckets. -// -// svc := s3.New(session.Must(session.NewSession(&aws.Config{ -// Credentials: credentials.AnonymousCredentials, -// }))) -// // Access public S3 buckets. -// -// @readonly -var AnonymousCredentials = NewStaticCredentials("", "", "") - -// A Value is the AWS credentials value for individual credential fields. -type Value struct { - // AWS Access key ID - AccessKeyID string - - // AWS Secret Access Key - SecretAccessKey string - - // AWS Session Token - SessionToken string - - // Provider used to get credentials - ProviderName string -} - -// A Provider is the interface for any component which will provide credentials -// Value. A provider is required to manage its own Expired state, and what to -// be expired means. -// -// The Provider should not need to implement its own mutexes, because -// that will be managed by Credentials. -type Provider interface { - // Retrieve returns nil if it successfully retrieved the value. - // Error is returned if the value were not obtainable, or empty. - Retrieve() (Value, error) - - // IsExpired returns if the credentials are no longer valid, and need - // to be retrieved. - IsExpired() bool -} - -// An ErrorProvider is a stub credentials provider that always returns an error -// this is used by the SDK when construction a known provider is not possible -// due to an error. -type ErrorProvider struct { - // The error to be returned from Retrieve - Err error - - // The provider name to set on the Retrieved returned Value - ProviderName string -} - -// Retrieve will always return the error that the ErrorProvider was created with. -func (p ErrorProvider) Retrieve() (Value, error) { - return Value{ProviderName: p.ProviderName}, p.Err -} - -// IsExpired will always return not expired. 
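// The Provider interface above is this package's whole extension surface:
// Retrieve plus IsExpired. A minimal never-expiring provider, sketched with
// invented names (fixedProvider is not part of the SDK):
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/credentials"
)

type fixedProvider struct {
	id, secret string
}

func (p fixedProvider) Retrieve() (credentials.Value, error) {
	return credentials.Value{
		AccessKeyID:     p.id,
		SecretAccessKey: p.secret,
		ProviderName:    "fixedProvider",
	}, nil
}

// IsExpired always reports false, so Credentials caches the first Retrieve.
func (p fixedProvider) IsExpired() bool { return false }

func main() {
	creds := credentials.NewCredentials(fixedProvider{id: "AKID", secret: "SECRET"})
	v, err := creds.Get()
	if err != nil {
		panic(err)
	}
	fmt.Println(v.AccessKeyID, v.ProviderName)
}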
-func (p ErrorProvider) IsExpired() bool { - return false -} - -// A Expiry provides shared expiration logic to be used by credentials -// providers to implement expiry functionality. -// -// The best method to use this struct is as an anonymous field within the -// provider's struct. -// -// Example: -// type EC2RoleProvider struct { -// Expiry -// ... -// } -type Expiry struct { - // The date/time when to expire on - expiration time.Time - - // If set will be used by IsExpired to determine the current time. - // Defaults to time.Now if CurrentTime is not set. Available for testing - // to be able to mock out the current time. - CurrentTime func() time.Time -} - -// SetExpiration sets the expiration IsExpired will check when called. -// -// If window is greater than 0 the expiration time will be reduced by the -// window value. -// -// Using a window is helpful to trigger credentials to expire sooner than -// the expiration time given to ensure no requests are made with expired -// tokens. -func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) { - e.expiration = expiration - if window > 0 { - e.expiration = e.expiration.Add(-window) - } -} - -// IsExpired returns if the credentials are expired. -func (e *Expiry) IsExpired() bool { - curTime := e.CurrentTime - if curTime == nil { - curTime = time.Now - } - return e.expiration.Before(curTime()) -} - -// A Credentials provides concurrency safe retrieval of AWS credentials Value. -// Credentials will cache the credentials value until they expire. Once the value -// expires the next Get will attempt to retrieve valid credentials. -// -// Credentials is safe to use across multiple goroutines and will manage the -// synchronous state so the Providers do not need to implement their own -// synchronization. -// -// The first Credentials.Get() will always call Provider.Retrieve() to get the -// first instance of the credentials Value. All calls to Get() after that -// will return the cached credentials Value until IsExpired() returns true. -type Credentials struct { - creds Value - forceRefresh bool - - m sync.RWMutex - - provider Provider -} - -// NewCredentials returns a pointer to a new Credentials with the provider set. -func NewCredentials(provider Provider) *Credentials { - return &Credentials{ - provider: provider, - forceRefresh: true, - } -} - -// Get returns the credentials value, or error if the credentials Value failed -// to be retrieved. -// -// Will return the cached credentials Value if it has not expired. If the -// credentials Value has expired the Provider's Retrieve() will be called -// to refresh the credentials. -// -// If Credentials.Expire() was called the credentials Value will be force -// expired, and the next call to Get() will cause them to be refreshed. -func (c *Credentials) Get() (Value, error) { - // Check the cached credentials first with just the read lock. - c.m.RLock() - if !c.isExpired() { - creds := c.creds - c.m.RUnlock() - return creds, nil - } - c.m.RUnlock() - - // Credentials are expired need to retrieve the credentials taking the full - // lock. - c.m.Lock() - defer c.m.Unlock() - - if c.isExpired() { - creds, err := c.provider.Retrieve() - if err != nil { - return Value{}, err - } - c.creds = creds - c.forceRefresh = false - } - - return c.creds, nil -} - -// Expire expires the credentials and forces them to be retrieved on the -// next call to Get(). -// -// This will override the Provider's expired state, and force Credentials -// to call the Provider's Retrieve(). 
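// SetExpiration above subtracts the window from the real expiry so IsExpired
// flips early and callers refresh before the old token dies. A sketch driving
// Expiry directly, with CurrentTime (the test hook the deleted code exposes)
// pinned to a fake clock:
package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws/credentials"
)

func main() {
	now := time.Date(2020, 7, 17, 12, 0, 0, 0, time.UTC)

	var e credentials.Expiry
	e.CurrentTime = func() time.Time { return now }

	// The token is valid until 12:01:00, but with a 30s window IsExpired
	// starts returning true from 12:00:30 onward.
	e.SetExpiration(now.Add(time.Minute), 30*time.Second)

	fmt.Println(e.IsExpired()) // false: 12:00:00 is before the early cutoff
	now = now.Add(45 * time.Second)
	fmt.Println(e.IsExpired()) // true: 12:00:45 is past 12:00:30
}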
-func (c *Credentials) Expire() { - c.m.Lock() - defer c.m.Unlock() - - c.forceRefresh = true -} - -// IsExpired returns if the credentials are no longer valid, and need -// to be retrieved. -// -// If the Credentials were forced to be expired with Expire() this will -// reflect that override. -func (c *Credentials) IsExpired() bool { - c.m.RLock() - defer c.m.RUnlock() - - return c.isExpired() -} - -// isExpired helper method wrapping the definition of expired credentials. -func (c *Credentials) isExpired() bool { - return c.forceRefresh || c.provider.IsExpired() -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go deleted file mode 100644 index 0ed791b..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go +++ /dev/null @@ -1,178 +0,0 @@ -package ec2rolecreds - -import ( - "bufio" - "encoding/json" - "fmt" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/ec2metadata" - "github.com/aws/aws-sdk-go/internal/sdkuri" -) - -// ProviderName provides a name of EC2Role provider -const ProviderName = "EC2RoleProvider" - -// A EC2RoleProvider retrieves credentials from the EC2 service, and keeps track if -// those credentials are expired. -// -// Example how to configure the EC2RoleProvider with custom http Client, Endpoint -// or ExpiryWindow -// -// p := &ec2rolecreds.EC2RoleProvider{ -// // Pass in a custom timeout to be used when requesting -// // IAM EC2 Role credentials. -// Client: ec2metadata.New(sess, aws.Config{ -// HTTPClient: &http.Client{Timeout: 10 * time.Second}, -// }), -// -// // Do not use early expiry of credentials. If a non zero value is -// // specified the credentials will be expired early -// ExpiryWindow: 0, -// } -type EC2RoleProvider struct { - credentials.Expiry - - // Required EC2Metadata client to use when connecting to EC2 metadata service. - Client *ec2metadata.EC2Metadata - - // ExpiryWindow will allow the credentials to trigger refreshing prior to - // the credentials actually expiring. This is beneficial so race conditions - // with expiring credentials do not cause request to fail unexpectedly - // due to ExpiredTokenException exceptions. - // - // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true - // 10 seconds before the credentials are actually expired. - // - // If ExpiryWindow is 0 or less it will be ignored. - ExpiryWindow time.Duration -} - -// NewCredentials returns a pointer to a new Credentials object wrapping -// the EC2RoleProvider. Takes a ConfigProvider to create a EC2Metadata client. -// The ConfigProvider is satisfied by the session.Session type. -func NewCredentials(c client.ConfigProvider, options ...func(*EC2RoleProvider)) *credentials.Credentials { - p := &EC2RoleProvider{ - Client: ec2metadata.New(c), - } - - for _, option := range options { - option(p) - } - - return credentials.NewCredentials(p) -} - -// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping -// the EC2RoleProvider. Takes a EC2Metadata client to use when connecting to EC2 -// metadata service. 
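// ec2rolecreds.NewCredentials above accepts functional options, which is how
// callers tune ExpiryWindow without building the provider by hand. A usage
// sketch (the session setup is assumed; nothing talks to the real EC2
// metadata service until Get is called):
package main

import (
	"time"

	"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	sess := session.Must(session.NewSession())

	// Refresh instance-role credentials one minute before they expire.
	creds := ec2rolecreds.NewCredentials(sess, func(p *ec2rolecreds.EC2RoleProvider) {
		p.ExpiryWindow = time.Minute
	})
	_ = creds // pass via aws.Config{Credentials: creds} when building a client
}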
-func NewCredentialsWithClient(client *ec2metadata.EC2Metadata, options ...func(*EC2RoleProvider)) *credentials.Credentials { - p := &EC2RoleProvider{ - Client: client, - } - - for _, option := range options { - option(p) - } - - return credentials.NewCredentials(p) -} - -// Retrieve retrieves credentials from the EC2 service. -// Error will be returned if the request fails, or unable to extract -// the desired credentials. -func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) { - credsList, err := requestCredList(m.Client) - if err != nil { - return credentials.Value{ProviderName: ProviderName}, err - } - - if len(credsList) == 0 { - return credentials.Value{ProviderName: ProviderName}, awserr.New("EmptyEC2RoleList", "empty EC2 Role list", nil) - } - credsName := credsList[0] - - roleCreds, err := requestCred(m.Client, credsName) - if err != nil { - return credentials.Value{ProviderName: ProviderName}, err - } - - m.SetExpiration(roleCreds.Expiration, m.ExpiryWindow) - - return credentials.Value{ - AccessKeyID: roleCreds.AccessKeyID, - SecretAccessKey: roleCreds.SecretAccessKey, - SessionToken: roleCreds.Token, - ProviderName: ProviderName, - }, nil -} - -// A ec2RoleCredRespBody provides the shape for unmarshaling credential -// request responses. -type ec2RoleCredRespBody struct { - // Success State - Expiration time.Time - AccessKeyID string - SecretAccessKey string - Token string - - // Error state - Code string - Message string -} - -const iamSecurityCredsPath = "iam/security-credentials/" - -// requestCredList requests a list of credentials from the EC2 service. -// If there are no credentials, or there is an error making or receiving the request -func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) { - resp, err := client.GetMetadata(iamSecurityCredsPath) - if err != nil { - return nil, awserr.New("EC2RoleRequestError", "no EC2 instance role found", err) - } - - credsList := []string{} - s := bufio.NewScanner(strings.NewReader(resp)) - for s.Scan() { - credsList = append(credsList, s.Text()) - } - - if err := s.Err(); err != nil { - return nil, awserr.New("SerializationError", "failed to read EC2 instance role from metadata service", err) - } - - return credsList, nil -} - -// requestCred requests the credentials for a specific credentials from the EC2 service. -// -// If the credentials cannot be found, or there is an error reading the response -// and error will be returned. -func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) { - resp, err := client.GetMetadata(sdkuri.PathJoin(iamSecurityCredsPath, credsName)) - if err != nil { - return ec2RoleCredRespBody{}, - awserr.New("EC2RoleRequestError", - fmt.Sprintf("failed to get %s EC2 instance role credentials", credsName), - err) - } - - respCreds := ec2RoleCredRespBody{} - if err := json.NewDecoder(strings.NewReader(resp)).Decode(&respCreds); err != nil { - return ec2RoleCredRespBody{}, - awserr.New("SerializationError", - fmt.Sprintf("failed to decode %s EC2 instance role credentials", credsName), - err) - } - - if respCreds.Code != "Success" { - // If an error code was returned something failed requesting the role. 
- return ec2RoleCredRespBody{}, awserr.New(respCreds.Code, respCreds.Message, nil) - } - - return respCreds, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go deleted file mode 100644 index c14231a..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go +++ /dev/null @@ -1,78 +0,0 @@ -package credentials - -import ( - "os" - - "github.com/aws/aws-sdk-go/aws/awserr" -) - -// EnvProviderName provides a name of Env provider -const EnvProviderName = "EnvProvider" - -var ( - // ErrAccessKeyIDNotFound is returned when the AWS Access Key ID can't be - // found in the process's environment. - // - // @readonly - ErrAccessKeyIDNotFound = awserr.New("EnvAccessKeyNotFound", "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment", nil) - - // ErrSecretAccessKeyNotFound is returned when the AWS Secret Access Key - // can't be found in the process's environment. - // - // @readonly - ErrSecretAccessKeyNotFound = awserr.New("EnvSecretNotFound", "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment", nil) -) - -// A EnvProvider retrieves credentials from the environment variables of the -// running process. Environment credentials never expire. -// -// Environment variables used: -// -// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY -// -// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY -type EnvProvider struct { - retrieved bool -} - -// NewEnvCredentials returns a pointer to a new Credentials object -// wrapping the environment variable provider. -func NewEnvCredentials() *Credentials { - return NewCredentials(&EnvProvider{}) -} - -// Retrieve retrieves the keys from the environment. -func (e *EnvProvider) Retrieve() (Value, error) { - e.retrieved = false - - id := os.Getenv("AWS_ACCESS_KEY_ID") - if id == "" { - id = os.Getenv("AWS_ACCESS_KEY") - } - - secret := os.Getenv("AWS_SECRET_ACCESS_KEY") - if secret == "" { - secret = os.Getenv("AWS_SECRET_KEY") - } - - if id == "" { - return Value{ProviderName: EnvProviderName}, ErrAccessKeyIDNotFound - } - - if secret == "" { - return Value{ProviderName: EnvProviderName}, ErrSecretAccessKeyNotFound - } - - e.retrieved = true - return Value{ - AccessKeyID: id, - SecretAccessKey: secret, - SessionToken: os.Getenv("AWS_SESSION_TOKEN"), - ProviderName: EnvProviderName, - }, nil -} - -// IsExpired returns if the credentials have been retrieved. 
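A minimal sketch of the deleted EnvProvider's behavior via the package's public constructor; the key values are placeholders for illustration only:

    package main

    import (
        "fmt"
        "log"
        "os"

        "github.com/aws/aws-sdk-go/aws/credentials"
    )

    func main() {
        // Placeholder values, set here only so the lookup succeeds.
        os.Setenv("AWS_ACCESS_KEY_ID", "AKIDEXAMPLE")
        os.Setenv("AWS_SECRET_ACCESS_KEY", "secretexample")

        creds := credentials.NewEnvCredentials()
        v, err := creds.Get()
        if err != nil {
            // ErrAccessKeyIDNotFound / ErrSecretAccessKeyNotFound on miss
            log.Fatal(err)
        }
        fmt.Println(v.AccessKeyID, v.ProviderName) // AKIDEXAMPLE EnvProvider
    }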
-func (e *EnvProvider) IsExpired() bool { - return !e.retrieved -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini b/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini deleted file mode 100644 index 7fc91d9..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini +++ /dev/null @@ -1,12 +0,0 @@ -[default] -aws_access_key_id = accessKey -aws_secret_access_key = secret -aws_session_token = token - -[no_token] -aws_access_key_id = accessKey -aws_secret_access_key = secret - -[with_colon] -aws_access_key_id: accessKey -aws_secret_access_key: secret diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go deleted file mode 100644 index 51e21e0..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go +++ /dev/null @@ -1,150 +0,0 @@ -package credentials - -import ( - "fmt" - "os" - - "github.com/go-ini/ini" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/internal/shareddefaults" -) - -// SharedCredsProviderName provides a name of SharedCreds provider -const SharedCredsProviderName = "SharedCredentialsProvider" - -var ( - // ErrSharedCredentialsHomeNotFound is emitted when the user directory cannot be found. - ErrSharedCredentialsHomeNotFound = awserr.New("UserHomeNotFound", "user home directory not found.", nil) -) - -// A SharedCredentialsProvider retrieves credentials from the current user's home -// directory, and keeps track if those credentials are expired. -// -// Profile ini file example: $HOME/.aws/credentials -type SharedCredentialsProvider struct { - // Path to the shared credentials file. - // - // If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the - // env value is empty will default to current user's home directory. - // Linux/OSX: "$HOME/.aws/credentials" - // Windows: "%USERPROFILE%\.aws\credentials" - Filename string - - // AWS Profile to extract credentials from the shared credentials file. If empty - // will default to environment variable "AWS_PROFILE" or "default" if - // environment variable is also not set. - Profile string - - // retrieved states if the credentials have been successfully retrieved. - retrieved bool -} - -// NewSharedCredentials returns a pointer to a new Credentials object -// wrapping the Profile file provider. -func NewSharedCredentials(filename, profile string) *Credentials { - return NewCredentials(&SharedCredentialsProvider{ - Filename: filename, - Profile: profile, - }) -} - -// Retrieve reads and extracts the shared credentials from the current -// users home directory. -func (p *SharedCredentialsProvider) Retrieve() (Value, error) { - p.retrieved = false - - filename, err := p.filename() - if err != nil { - return Value{ProviderName: SharedCredsProviderName}, err - } - - creds, err := loadProfile(filename, p.profile()) - if err != nil { - return Value{ProviderName: SharedCredsProviderName}, err - } - - p.retrieved = true - return creds, nil -} - -// IsExpired returns if the shared credentials have expired. -func (p *SharedCredentialsProvider) IsExpired() bool { - return !p.retrieved -} - -// loadProfiles loads from the file pointed to by shared credentials filename for profile. -// The credentials retrieved from the profile will be returned or error. Error will be -// returned if it fails to read from the file, or the data is invalid. 
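A minimal sketch of the shared-credentials lookup this deleted provider implemented; empty arguments trigger the fallback order described in the struct doc comment above:

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws/credentials"
    )

    func main() {
        // Empty filename falls back to AWS_SHARED_CREDENTIALS_FILE, then the
        // home-directory default; empty profile falls back to AWS_PROFILE,
        // then "default", mirroring the lookup order in the deleted code.
        creds := credentials.NewSharedCredentials("", "")
        v, err := creds.Get()
        if err != nil {
            log.Fatal(err) // e.g. missing file, profile, or required keys
        }
        fmt.Println(v.AccessKeyID, v.ProviderName)
    }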
-func loadProfile(filename, profile string) (Value, error) { - config, err := ini.Load(filename) - if err != nil { - return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to load shared credentials file", err) - } - iniProfile, err := config.GetSection(profile) - if err != nil { - return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to get profile", err) - } - - id, err := iniProfile.GetKey("aws_access_key_id") - if err != nil { - return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsAccessKey", - fmt.Sprintf("shared credentials %s in %s did not contain aws_access_key_id", profile, filename), - err) - } - - secret, err := iniProfile.GetKey("aws_secret_access_key") - if err != nil { - return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsSecret", - fmt.Sprintf("shared credentials %s in %s did not contain aws_secret_access_key", profile, filename), - nil) - } - - // Default to empty string if not found - token := iniProfile.Key("aws_session_token") - - return Value{ - AccessKeyID: id.String(), - SecretAccessKey: secret.String(), - SessionToken: token.String(), - ProviderName: SharedCredsProviderName, - }, nil -} - -// filename returns the filename to use to read AWS shared credentials. -// -// Will return an error if the user's home directory path cannot be found. -func (p *SharedCredentialsProvider) filename() (string, error) { - if len(p.Filename) != 0 { - return p.Filename, nil - } - - if p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); len(p.Filename) != 0 { - return p.Filename, nil - } - - if home := shareddefaults.UserHomeDir(); len(home) == 0 { - // Backwards compatibility of home directly not found error being returned. - // This error is too verbose, failure when opening the file would of been - // a better error to return. - return "", ErrSharedCredentialsHomeNotFound - } - - p.Filename = shareddefaults.SharedCredentialsFilename() - - return p.Filename, nil -} - -// profile returns the AWS shared credentials profile. If empty will read -// environment variable "AWS_PROFILE". If that is not set profile will -// return "default". -func (p *SharedCredentialsProvider) profile() string { - if p.Profile == "" { - p.Profile = os.Getenv("AWS_PROFILE") - } - if p.Profile == "" { - p.Profile = "default" - } - - return p.Profile -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go deleted file mode 100644 index 4f5dab3..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go +++ /dev/null @@ -1,57 +0,0 @@ -package credentials - -import ( - "github.com/aws/aws-sdk-go/aws/awserr" -) - -// StaticProviderName provides a name of Static provider -const StaticProviderName = "StaticProvider" - -var ( - // ErrStaticCredentialsEmpty is emitted when static credentials are empty. - // - // @readonly - ErrStaticCredentialsEmpty = awserr.New("EmptyStaticCreds", "static credentials are empty", nil) -) - -// A StaticProvider is a set of credentials which are set programmatically, -// and will never expire. -type StaticProvider struct { - Value -} - -// NewStaticCredentials returns a pointer to a new Credentials object -// wrapping a static credentials value provider. 
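The static provider being removed next never expires; a minimal sketch with placeholder keys:

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws/credentials"
    )

    func main() {
        // Placeholder keys; an empty session token is allowed.
        creds := credentials.NewStaticCredentials("AKIDEXAMPLE", "secretexample", "")
        v, err := creds.Get()
        if err != nil {
            log.Fatal(err) // ErrStaticCredentialsEmpty if id/secret are blank
        }
        fmt.Println(v.ProviderName)    // "StaticProvider"
        fmt.Println(creds.IsExpired()) // always false for static credentials
    }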
-func NewStaticCredentials(id, secret, token string) *Credentials { - return NewCredentials(&StaticProvider{Value: Value{ - AccessKeyID: id, - SecretAccessKey: secret, - SessionToken: token, - }}) -} - -// NewStaticCredentialsFromCreds returns a pointer to a new Credentials object -// wrapping the static credentials value provide. Same as NewStaticCredentials -// but takes the creds Value instead of individual fields -func NewStaticCredentialsFromCreds(creds Value) *Credentials { - return NewCredentials(&StaticProvider{Value: creds}) -} - -// Retrieve returns the credentials or error if the credentials are invalid. -func (s *StaticProvider) Retrieve() (Value, error) { - if s.AccessKeyID == "" || s.SecretAccessKey == "" { - return Value{ProviderName: StaticProviderName}, ErrStaticCredentialsEmpty - } - - if len(s.Value.ProviderName) == 0 { - s.Value.ProviderName = StaticProviderName - } - return s.Value, nil -} - -// IsExpired returns if the credentials are expired. -// -// For StaticProvider, the credentials never expired. -func (s *StaticProvider) IsExpired() bool { - return false -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/doc.go deleted file mode 100644 index 4fcb616..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/doc.go +++ /dev/null @@ -1,56 +0,0 @@ -// Package aws provides the core SDK's utilities and shared types. Use this package's -// utilities to simplify setting and reading API operations parameters. -// -// Value and Pointer Conversion Utilities -// -// This package includes a helper conversion utility for each scalar type the SDK's -// API use. These utilities make getting a pointer of the scalar, and dereferencing -// a pointer easier. -// -// Each conversion utility comes in two forms. Value to Pointer and Pointer to Value. -// The Pointer to value will safely dereference the pointer and return its value. -// If the pointer was nil, the scalar's zero value will be returned. -// -// The value to pointer functions will be named after the scalar type. So get a -// *string from a string value use the "String" function. This makes it easy to -// to get pointer of a literal string value, because getting the address of a -// literal requires assigning the value to a variable first. -// -// var strPtr *string -// -// // Without the SDK's conversion functions -// str := "my string" -// strPtr = &str -// -// // With the SDK's conversion functions -// strPtr = aws.String("my string") -// -// // Convert *string to string value -// str = aws.StringValue(strPtr) -// -// In addition to scalars the aws package also includes conversion utilities for -// map and slice for commonly types used in API parameters. The map and slice -// conversion functions use similar naming pattern as the scalar conversion -// functions. -// -// var strPtrs []*string -// var strs []string = []string{"Go", "Gophers", "Go"} -// -// // Convert []string to []*string -// strPtrs = aws.StringSlice(strs) -// -// // Convert []*string to []string -// strs = aws.StringValueSlice(strPtrs) -// -// SDK Default HTTP Client -// -// The SDK will use the http.DefaultClient if a HTTP client is not provided to -// the SDK's Session, or service client constructor. This means that if the -// http.DefaultClient is modified by other components of your application the -// modifications will be picked up by the SDK as well. -// -// In some cases this might be intended, but it is a better practice to create -// a custom HTTP Client to share explicitly through your application. 
You can -// configure the SDK to use the custom HTTP Client by setting the HTTPClient -// value of the SDK's Config type when creating a Session or service client. -package aws diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go deleted file mode 100644 index c215cd3..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go +++ /dev/null @@ -1,162 +0,0 @@ -package ec2metadata - -import ( - "encoding/json" - "fmt" - "net/http" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/internal/sdkuri" -) - -// GetMetadata uses the path provided to request information from the EC2 -// instance metdata service. The content will be returned as a string, or -// error if the request failed. -func (c *EC2Metadata) GetMetadata(p string) (string, error) { - op := &request.Operation{ - Name: "GetMetadata", - HTTPMethod: "GET", - HTTPPath: sdkuri.PathJoin("/meta-data", p), - } - - output := &metadataOutput{} - req := c.NewRequest(op, nil, output) - - return output.Content, req.Send() -} - -// GetUserData returns the userdata that was configured for the service. If -// there is no user-data setup for the EC2 instance a "NotFoundError" error -// code will be returned. -func (c *EC2Metadata) GetUserData() (string, error) { - op := &request.Operation{ - Name: "GetUserData", - HTTPMethod: "GET", - HTTPPath: "/user-data", - } - - output := &metadataOutput{} - req := c.NewRequest(op, nil, output) - req.Handlers.UnmarshalError.PushBack(func(r *request.Request) { - if r.HTTPResponse.StatusCode == http.StatusNotFound { - r.Error = awserr.New("NotFoundError", "user-data not found", r.Error) - } - }) - - return output.Content, req.Send() -} - -// GetDynamicData uses the path provided to request information from the EC2 -// instance metadata service for dynamic data. The content will be returned -// as a string, or error if the request failed. -func (c *EC2Metadata) GetDynamicData(p string) (string, error) { - op := &request.Operation{ - Name: "GetDynamicData", - HTTPMethod: "GET", - HTTPPath: sdkuri.PathJoin("/dynamic", p), - } - - output := &metadataOutput{} - req := c.NewRequest(op, nil, output) - - return output.Content, req.Send() -} - -// GetInstanceIdentityDocument retrieves an identity document describing an -// instance. Error is returned if the request fails or is unable to parse -// the response. 
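A minimal sketch of the metadata client surface in this deleted file. It assumes IMDS is reachable; off an EC2 instance the probe fails fast, thanks to the shortened client timeout set in service.go further down:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/ec2metadata"
        "github.com/aws/aws-sdk-go/aws/session"
    )

    func main() {
        sess := session.Must(session.NewSession())
        svc := ec2metadata.New(sess)

        // Available probes GetMetadata("instance-id"); it returns false off
        // EC2, or when AWS_EC2_METADATA_DISABLED=true short-circuits sends.
        if !svc.Available() {
            fmt.Println("IMDS not reachable or disabled")
            return
        }

        id, err := svc.GetMetadata("instance-id")
        if err != nil {
            fmt.Println("metadata lookup failed:", err)
            return
        }
        fmt.Println("instance:", id)
    }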
-func (c *EC2Metadata) GetInstanceIdentityDocument() (EC2InstanceIdentityDocument, error) { - resp, err := c.GetDynamicData("instance-identity/document") - if err != nil { - return EC2InstanceIdentityDocument{}, - awserr.New("EC2MetadataRequestError", - "failed to get EC2 instance identity document", err) - } - - doc := EC2InstanceIdentityDocument{} - if err := json.NewDecoder(strings.NewReader(resp)).Decode(&doc); err != nil { - return EC2InstanceIdentityDocument{}, - awserr.New("SerializationError", - "failed to decode EC2 instance identity document", err) - } - - return doc, nil -} - -// IAMInfo retrieves IAM info from the metadata API -func (c *EC2Metadata) IAMInfo() (EC2IAMInfo, error) { - resp, err := c.GetMetadata("iam/info") - if err != nil { - return EC2IAMInfo{}, - awserr.New("EC2MetadataRequestError", - "failed to get EC2 IAM info", err) - } - - info := EC2IAMInfo{} - if err := json.NewDecoder(strings.NewReader(resp)).Decode(&info); err != nil { - return EC2IAMInfo{}, - awserr.New("SerializationError", - "failed to decode EC2 IAM info", err) - } - - if info.Code != "Success" { - errMsg := fmt.Sprintf("failed to get EC2 IAM Info (%s)", info.Code) - return EC2IAMInfo{}, - awserr.New("EC2MetadataError", errMsg, nil) - } - - return info, nil -} - -// Region returns the region the instance is running in. -func (c *EC2Metadata) Region() (string, error) { - resp, err := c.GetMetadata("placement/availability-zone") - if err != nil { - return "", err - } - - // returns region without the suffix. Eg: us-west-2a becomes us-west-2 - return resp[:len(resp)-1], nil -} - -// Available returns if the application has access to the EC2 Metadata service. -// Can be used to determine if application is running within an EC2 Instance and -// the metadata service is available. -func (c *EC2Metadata) Available() bool { - if _, err := c.GetMetadata("instance-id"); err != nil { - return false - } - - return true -} - -// An EC2IAMInfo provides the shape for unmarshaling -// an IAM info from the metadata API -type EC2IAMInfo struct { - Code string - LastUpdated time.Time - InstanceProfileArn string - InstanceProfileID string -} - -// An EC2InstanceIdentityDocument provides the shape for unmarshaling -// an instance identity document -type EC2InstanceIdentityDocument struct { - DevpayProductCodes []string `json:"devpayProductCodes"` - AvailabilityZone string `json:"availabilityZone"` - PrivateIP string `json:"privateIp"` - Version string `json:"version"` - Region string `json:"region"` - InstanceID string `json:"instanceId"` - BillingProducts []string `json:"billingProducts"` - InstanceType string `json:"instanceType"` - AccountID string `json:"accountId"` - PendingTime time.Time `json:"pendingTime"` - ImageID string `json:"imageId"` - KernelID string `json:"kernelId"` - RamdiskID string `json:"ramdiskId"` - Architecture string `json:"architecture"` -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go deleted file mode 100644 index ef5f732..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go +++ /dev/null @@ -1,148 +0,0 @@ -// Package ec2metadata provides the client for making API calls to the -// EC2 Metadata service. -// -// This package's client can be disabled completely by setting the environment -// variable "AWS_EC2_METADATA_DISABLED=true". This environment variable set to -// true instructs the SDK to disable the EC2 Metadata client. 
The client cannot -// be used while the environemnt variable is set to true, (case insensitive). -package ec2metadata - -import ( - "bytes" - "errors" - "io" - "net/http" - "os" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/client/metadata" - "github.com/aws/aws-sdk-go/aws/corehandlers" - "github.com/aws/aws-sdk-go/aws/request" -) - -// ServiceName is the name of the service. -const ServiceName = "ec2metadata" -const disableServiceEnvVar = "AWS_EC2_METADATA_DISABLED" - -// A EC2Metadata is an EC2 Metadata service Client. -type EC2Metadata struct { - *client.Client -} - -// New creates a new instance of the EC2Metadata client with a session. -// This client is safe to use across multiple goroutines. -// -// -// Example: -// // Create a EC2Metadata client from just a session. -// svc := ec2metadata.New(mySession) -// -// // Create a EC2Metadata client with additional configuration -// svc := ec2metadata.New(mySession, aws.NewConfig().WithLogLevel(aws.LogDebugHTTPBody)) -func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2Metadata { - c := p.ClientConfig(ServiceName, cfgs...) - return NewClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) -} - -// NewClient returns a new EC2Metadata client. Should be used to create -// a client when not using a session. Generally using just New with a session -// is preferred. -// -// If an unmodified HTTP client is provided from the stdlib default, or no client -// the EC2RoleProvider's EC2Metadata HTTP client's timeout will be shortened. -// To disable this set Config.EC2MetadataDisableTimeoutOverride to false. Enabled by default. -func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string, opts ...func(*client.Client)) *EC2Metadata { - if !aws.BoolValue(cfg.EC2MetadataDisableTimeoutOverride) && httpClientZero(cfg.HTTPClient) { - // If the http client is unmodified and this feature is not disabled - // set custom timeouts for EC2Metadata requests. - cfg.HTTPClient = &http.Client{ - // use a shorter timeout than default because the metadata - // service is local if it is running, and to fail faster - // if not running on an ec2 instance. - Timeout: 5 * time.Second, - } - } - - svc := &EC2Metadata{ - Client: client.New( - cfg, - metadata.ClientInfo{ - ServiceName: ServiceName, - Endpoint: endpoint, - APIVersion: "latest", - }, - handlers, - ), - } - - svc.Handlers.Unmarshal.PushBack(unmarshalHandler) - svc.Handlers.UnmarshalError.PushBack(unmarshalError) - svc.Handlers.Validate.Clear() - svc.Handlers.Validate.PushBack(validateEndpointHandler) - - // Disable the EC2 Metadata service if the environment variable is set. - // This shortcirctes the service's functionality to always fail to send - // requests. 
- if strings.ToLower(os.Getenv(disableServiceEnvVar)) == "true" { - svc.Handlers.Send.SwapNamed(request.NamedHandler{ - Name: corehandlers.SendHandler.Name, - Fn: func(r *request.Request) { - r.Error = awserr.New( - request.CanceledErrorCode, - "EC2 IMDS access disabled via "+disableServiceEnvVar+" env var", - nil) - }, - }) - } - - // Add additional options to the service config - for _, option := range opts { - option(svc.Client) - } - - return svc -} - -func httpClientZero(c *http.Client) bool { - return c == nil || (c.Transport == nil && c.CheckRedirect == nil && c.Jar == nil && c.Timeout == 0) -} - -type metadataOutput struct { - Content string -} - -func unmarshalHandler(r *request.Request) { - defer r.HTTPResponse.Body.Close() - b := &bytes.Buffer{} - if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil { - r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata respose", err) - return - } - - if data, ok := r.Data.(*metadataOutput); ok { - data.Content = b.String() - } -} - -func unmarshalError(r *request.Request) { - defer r.HTTPResponse.Body.Close() - b := &bytes.Buffer{} - if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil { - r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata error respose", err) - return - } - - // Response body format is not consistent between metadata endpoints. - // Grab the error message as a string and include that as the source error - r.Error = awserr.New("EC2MetadataError", "failed to make EC2Metadata request", errors.New(b.String())) -} - -func validateEndpointHandler(r *request.Request) { - if r.ClientInfo.Endpoint == "" { - r.Error = aws.ErrMissingEndpoint - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go deleted file mode 100644 index c04ba06..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go +++ /dev/null @@ -1,155 +0,0 @@ -package endpoints - -import ( - "encoding/json" - "fmt" - "io" - - "github.com/aws/aws-sdk-go/aws/awserr" -) - -type modelDefinition map[string]json.RawMessage - -// A DecodeModelOptions are the options for how the endpoints model definition -// are decoded. -type DecodeModelOptions struct { - SkipCustomizations bool -} - -// Set combines all of the option functions together. -func (d *DecodeModelOptions) Set(optFns ...func(*DecodeModelOptions)) { - for _, fn := range optFns { - fn(d) - } -} - -// DecodeModel unmarshals a Regions and Endpoint model definition file into -// a endpoint Resolver. If the file format is not supported, or an error occurs -// when unmarshaling the model an error will be returned. -// -// Casting the return value of this func to a EnumPartitions will -// allow you to get a list of the partitions in the order the endpoints -// will be resolved in. -// -// resolver, err := endpoints.DecodeModel(reader) -// -// partitions := resolver.(endpoints.EnumPartitions).Partitions() -// for _, p := range partitions { -// // ... inspect partitions -// } -func DecodeModel(r io.Reader, optFns ...func(*DecodeModelOptions)) (Resolver, error) { - var opts DecodeModelOptions - opts.Set(optFns...) - - // Get the version of the partition file to determine what - // unmarshaling model to use. 
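For orientation before the large generated model that follows: the deleted endpoints package resolved service endpoints from this partition data. A minimal sketch using the package's default resolver (defined further down in defaults.go); the service and region names are arbitrary examples drawn from the bundled model:

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws/endpoints"
    )

    func main() {
        // Resolve a concrete endpoint from the bundled partition model.
        resolved, err := endpoints.DefaultResolver().EndpointFor(
            "dynamodb", "us-west-2",
        )
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(resolved.URL) // https://dynamodb.us-west-2.amazonaws.com
    }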
- modelDef := modelDefinition{} - if err := json.NewDecoder(r).Decode(&modelDef); err != nil { - return nil, newDecodeModelError("failed to decode endpoints model", err) - } - - var version string - if b, ok := modelDef["version"]; ok { - version = string(b) - } else { - return nil, newDecodeModelError("endpoints version not found in model", nil) - } - - if version == "3" { - return decodeV3Endpoints(modelDef, opts) - } - - return nil, newDecodeModelError( - fmt.Sprintf("endpoints version %s, not supported", version), nil) -} - -func decodeV3Endpoints(modelDef modelDefinition, opts DecodeModelOptions) (Resolver, error) { - b, ok := modelDef["partitions"] - if !ok { - return nil, newDecodeModelError("endpoints model missing partitions", nil) - } - - ps := partitions{} - if err := json.Unmarshal(b, &ps); err != nil { - return nil, newDecodeModelError("failed to decode endpoints model", err) - } - - if opts.SkipCustomizations { - return ps, nil - } - - // Customization - for i := 0; i < len(ps); i++ { - p := &ps[i] - custAddEC2Metadata(p) - custAddS3DualStack(p) - custRmIotDataService(p) - custFixAppAutoscalingChina(p) - } - - return ps, nil -} - -func custAddS3DualStack(p *partition) { - if p.ID != "aws" { - return - } - - s, ok := p.Services["s3"] - if !ok { - return - } - - s.Defaults.HasDualStack = boxedTrue - s.Defaults.DualStackHostname = "{service}.dualstack.{region}.{dnsSuffix}" - - p.Services["s3"] = s -} - -func custAddEC2Metadata(p *partition) { - p.Services["ec2metadata"] = service{ - IsRegionalized: boxedFalse, - PartitionEndpoint: "aws-global", - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "169.254.169.254/latest", - Protocols: []string{"http"}, - }, - }, - } -} - -func custRmIotDataService(p *partition) { - delete(p.Services, "data.iot") -} - -func custFixAppAutoscalingChina(p *partition) { - if p.ID != "aws-cn" { - return - } - - const serviceName = "application-autoscaling" - s, ok := p.Services[serviceName] - if !ok { - return - } - - const expectHostname = `autoscaling.{region}.amazonaws.com` - if e, a := s.Defaults.Hostname, expectHostname; e != a { - fmt.Printf("custFixAppAutoscalingChina: ignoring customization, expected %s, got %s\n", e, a) - return - } - - s.Defaults.Hostname = expectHostname + ".cn" - p.Services[serviceName] = s -} - -type decodeModelError struct { - awsError -} - -func newDecodeModelError(msg string, err error) decodeModelError { - return decodeModelError{ - awsError: awserr.New("DecodeEndpointsModelError", msg, err), - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go deleted file mode 100644 index 8678810..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ /dev/null @@ -1,3253 +0,0 @@ -// Code generated by aws/endpoints/v3model_codegen.go. DO NOT EDIT. - -package endpoints - -import ( - "regexp" -) - -// Partition identifiers -const ( - AwsPartitionID = "aws" // AWS Standard partition. - AwsCnPartitionID = "aws-cn" // AWS China partition. - AwsUsGovPartitionID = "aws-us-gov" // AWS GovCloud (US) partition. -) - -// AWS Standard partition's regions. -const ( - ApNortheast1RegionID = "ap-northeast-1" // Asia Pacific (Tokyo). - ApNortheast2RegionID = "ap-northeast-2" // Asia Pacific (Seoul). - ApSouth1RegionID = "ap-south-1" // Asia Pacific (Mumbai). - ApSoutheast1RegionID = "ap-southeast-1" // Asia Pacific (Singapore). - ApSoutheast2RegionID = "ap-southeast-2" // Asia Pacific (Sydney). 
- CaCentral1RegionID = "ca-central-1" // Canada (Central). - EuCentral1RegionID = "eu-central-1" // EU (Frankfurt). - EuWest1RegionID = "eu-west-1" // EU (Ireland). - EuWest2RegionID = "eu-west-2" // EU (London). - EuWest3RegionID = "eu-west-3" // EU (Paris). - SaEast1RegionID = "sa-east-1" // South America (Sao Paulo). - UsEast1RegionID = "us-east-1" // US East (N. Virginia). - UsEast2RegionID = "us-east-2" // US East (Ohio). - UsWest1RegionID = "us-west-1" // US West (N. California). - UsWest2RegionID = "us-west-2" // US West (Oregon). -) - -// AWS China partition's regions. -const ( - CnNorth1RegionID = "cn-north-1" // China (Beijing). - CnNorthwest1RegionID = "cn-northwest-1" // China (Ningxia). -) - -// AWS GovCloud (US) partition's regions. -const ( - UsGovWest1RegionID = "us-gov-west-1" // AWS GovCloud (US). -) - -// Service identifiers -const ( - A4bServiceID = "a4b" // A4b. - AcmServiceID = "acm" // Acm. - AcmPcaServiceID = "acm-pca" // AcmPca. - ApiMediatailorServiceID = "api.mediatailor" // ApiMediatailor. - ApiPricingServiceID = "api.pricing" // ApiPricing. - ApiSagemakerServiceID = "api.sagemaker" // ApiSagemaker. - ApigatewayServiceID = "apigateway" // Apigateway. - ApplicationAutoscalingServiceID = "application-autoscaling" // ApplicationAutoscaling. - Appstream2ServiceID = "appstream2" // Appstream2. - AthenaServiceID = "athena" // Athena. - AutoscalingServiceID = "autoscaling" // Autoscaling. - AutoscalingPlansServiceID = "autoscaling-plans" // AutoscalingPlans. - BatchServiceID = "batch" // Batch. - BudgetsServiceID = "budgets" // Budgets. - CeServiceID = "ce" // Ce. - Cloud9ServiceID = "cloud9" // Cloud9. - ClouddirectoryServiceID = "clouddirectory" // Clouddirectory. - CloudformationServiceID = "cloudformation" // Cloudformation. - CloudfrontServiceID = "cloudfront" // Cloudfront. - CloudhsmServiceID = "cloudhsm" // Cloudhsm. - Cloudhsmv2ServiceID = "cloudhsmv2" // Cloudhsmv2. - CloudsearchServiceID = "cloudsearch" // Cloudsearch. - CloudtrailServiceID = "cloudtrail" // Cloudtrail. - CodebuildServiceID = "codebuild" // Codebuild. - CodecommitServiceID = "codecommit" // Codecommit. - CodedeployServiceID = "codedeploy" // Codedeploy. - CodepipelineServiceID = "codepipeline" // Codepipeline. - CodestarServiceID = "codestar" // Codestar. - CognitoIdentityServiceID = "cognito-identity" // CognitoIdentity. - CognitoIdpServiceID = "cognito-idp" // CognitoIdp. - CognitoSyncServiceID = "cognito-sync" // CognitoSync. - ComprehendServiceID = "comprehend" // Comprehend. - ConfigServiceID = "config" // Config. - CurServiceID = "cur" // Cur. - DatapipelineServiceID = "datapipeline" // Datapipeline. - DaxServiceID = "dax" // Dax. - DevicefarmServiceID = "devicefarm" // Devicefarm. - DirectconnectServiceID = "directconnect" // Directconnect. - DiscoveryServiceID = "discovery" // Discovery. - DmsServiceID = "dms" // Dms. - DsServiceID = "ds" // Ds. - DynamodbServiceID = "dynamodb" // Dynamodb. - Ec2ServiceID = "ec2" // Ec2. - Ec2metadataServiceID = "ec2metadata" // Ec2metadata. - EcrServiceID = "ecr" // Ecr. - EcsServiceID = "ecs" // Ecs. - ElasticacheServiceID = "elasticache" // Elasticache. - ElasticbeanstalkServiceID = "elasticbeanstalk" // Elasticbeanstalk. - ElasticfilesystemServiceID = "elasticfilesystem" // Elasticfilesystem. - ElasticloadbalancingServiceID = "elasticloadbalancing" // Elasticloadbalancing. - ElasticmapreduceServiceID = "elasticmapreduce" // Elasticmapreduce. - ElastictranscoderServiceID = "elastictranscoder" // Elastictranscoder. 
- EmailServiceID = "email" // Email. - EntitlementMarketplaceServiceID = "entitlement.marketplace" // EntitlementMarketplace. - EsServiceID = "es" // Es. - EventsServiceID = "events" // Events. - FirehoseServiceID = "firehose" // Firehose. - FmsServiceID = "fms" // Fms. - GameliftServiceID = "gamelift" // Gamelift. - GlacierServiceID = "glacier" // Glacier. - GlueServiceID = "glue" // Glue. - GreengrassServiceID = "greengrass" // Greengrass. - GuarddutyServiceID = "guardduty" // Guardduty. - HealthServiceID = "health" // Health. - IamServiceID = "iam" // Iam. - ImportexportServiceID = "importexport" // Importexport. - InspectorServiceID = "inspector" // Inspector. - IotServiceID = "iot" // Iot. - IotanalyticsServiceID = "iotanalytics" // Iotanalytics. - KinesisServiceID = "kinesis" // Kinesis. - KinesisanalyticsServiceID = "kinesisanalytics" // Kinesisanalytics. - KinesisvideoServiceID = "kinesisvideo" // Kinesisvideo. - KmsServiceID = "kms" // Kms. - LambdaServiceID = "lambda" // Lambda. - LightsailServiceID = "lightsail" // Lightsail. - LogsServiceID = "logs" // Logs. - MachinelearningServiceID = "machinelearning" // Machinelearning. - MarketplacecommerceanalyticsServiceID = "marketplacecommerceanalytics" // Marketplacecommerceanalytics. - MediaconvertServiceID = "mediaconvert" // Mediaconvert. - MedialiveServiceID = "medialive" // Medialive. - MediapackageServiceID = "mediapackage" // Mediapackage. - MediastoreServiceID = "mediastore" // Mediastore. - MeteringMarketplaceServiceID = "metering.marketplace" // MeteringMarketplace. - MghServiceID = "mgh" // Mgh. - MobileanalyticsServiceID = "mobileanalytics" // Mobileanalytics. - ModelsLexServiceID = "models.lex" // ModelsLex. - MonitoringServiceID = "monitoring" // Monitoring. - MturkRequesterServiceID = "mturk-requester" // MturkRequester. - NeptuneServiceID = "neptune" // Neptune. - OpsworksServiceID = "opsworks" // Opsworks. - OpsworksCmServiceID = "opsworks-cm" // OpsworksCm. - OrganizationsServiceID = "organizations" // Organizations. - PinpointServiceID = "pinpoint" // Pinpoint. - PollyServiceID = "polly" // Polly. - RdsServiceID = "rds" // Rds. - RedshiftServiceID = "redshift" // Redshift. - RekognitionServiceID = "rekognition" // Rekognition. - ResourceGroupsServiceID = "resource-groups" // ResourceGroups. - Route53ServiceID = "route53" // Route53. - Route53domainsServiceID = "route53domains" // Route53domains. - RuntimeLexServiceID = "runtime.lex" // RuntimeLex. - RuntimeSagemakerServiceID = "runtime.sagemaker" // RuntimeSagemaker. - S3ServiceID = "s3" // S3. - SdbServiceID = "sdb" // Sdb. - SecretsmanagerServiceID = "secretsmanager" // Secretsmanager. - ServerlessrepoServiceID = "serverlessrepo" // Serverlessrepo. - ServicecatalogServiceID = "servicecatalog" // Servicecatalog. - ServicediscoveryServiceID = "servicediscovery" // Servicediscovery. - ShieldServiceID = "shield" // Shield. - SmsServiceID = "sms" // Sms. - SnowballServiceID = "snowball" // Snowball. - SnsServiceID = "sns" // Sns. - SqsServiceID = "sqs" // Sqs. - SsmServiceID = "ssm" // Ssm. - StatesServiceID = "states" // States. - StoragegatewayServiceID = "storagegateway" // Storagegateway. - StreamsDynamodbServiceID = "streams.dynamodb" // StreamsDynamodb. - StsServiceID = "sts" // Sts. - SupportServiceID = "support" // Support. - SwfServiceID = "swf" // Swf. - TaggingServiceID = "tagging" // Tagging. - TranslateServiceID = "translate" // Translate. - WafServiceID = "waf" // Waf. - WafRegionalServiceID = "waf-regional" // WafRegional. 
- WorkdocsServiceID = "workdocs" // Workdocs. - WorkmailServiceID = "workmail" // Workmail. - WorkspacesServiceID = "workspaces" // Workspaces. - XrayServiceID = "xray" // Xray. -) - -// DefaultResolver returns an Endpoint resolver that will be able -// to resolve endpoints for: AWS Standard, AWS China, and AWS GovCloud (US). -// -// Use DefaultPartitions() to get the list of the default partitions. -func DefaultResolver() Resolver { - return defaultPartitions -} - -// DefaultPartitions returns a list of the partitions the SDK is bundled -// with. The available partitions are: AWS Standard, AWS China, and AWS GovCloud (US). -// -// partitions := endpoints.DefaultPartitions -// for _, p := range partitions { -// // ... inspect partitions -// } -func DefaultPartitions() []Partition { - return defaultPartitions.Partitions() -} - -var defaultPartitions = partitions{ - awsPartition, - awscnPartition, - awsusgovPartition, -} - -// AwsPartition returns the Resolver for AWS Standard. -func AwsPartition() Partition { - return awsPartition.Partition() -} - -var awsPartition = partition{ - ID: "aws", - Name: "AWS Standard", - DNSSuffix: "amazonaws.com", - RegionRegex: regionRegex{ - Regexp: func() *regexp.Regexp { - reg, _ := regexp.Compile("^(us|eu|ap|sa|ca)\\-\\w+\\-\\d+$") - return reg - }(), - }, - Defaults: endpoint{ - Hostname: "{service}.{region}.{dnsSuffix}", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - Regions: regions{ - "ap-northeast-1": region{ - Description: "Asia Pacific (Tokyo)", - }, - "ap-northeast-2": region{ - Description: "Asia Pacific (Seoul)", - }, - "ap-south-1": region{ - Description: "Asia Pacific (Mumbai)", - }, - "ap-southeast-1": region{ - Description: "Asia Pacific (Singapore)", - }, - "ap-southeast-2": region{ - Description: "Asia Pacific (Sydney)", - }, - "ca-central-1": region{ - Description: "Canada (Central)", - }, - "eu-central-1": region{ - Description: "EU (Frankfurt)", - }, - "eu-west-1": region{ - Description: "EU (Ireland)", - }, - "eu-west-2": region{ - Description: "EU (London)", - }, - "eu-west-3": region{ - Description: "EU (Paris)", - }, - "sa-east-1": region{ - Description: "South America (Sao Paulo)", - }, - "us-east-1": region{ - Description: "US East (N. Virginia)", - }, - "us-east-2": region{ - Description: "US East (Ohio)", - }, - "us-west-1": region{ - Description: "US West (N. 
California)", - }, - "us-west-2": region{ - Description: "US West (Oregon)", - }, - }, - Services: services{ - "a4b": service{ - - Endpoints: endpoints{ - "us-east-1": endpoint{}, - }, - }, - "acm": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "acm-pca": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "api.mediatailor": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - }, - }, - "api.pricing": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "pricing", - }, - }, - Endpoints: endpoints{ - "ap-south-1": endpoint{}, - "us-east-1": endpoint{}, - }, - }, - "api.sagemaker": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "apigateway": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "application-autoscaling": service{ - Defaults: endpoint{ - Hostname: "autoscaling.{region}.amazonaws.com", - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Service: "application-autoscaling", - }, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "appstream2": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - CredentialScope: credentialScope{ - Service: "appstream", - }, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "athena": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - 
"eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "autoscaling": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "autoscaling-plans": service{ - Defaults: endpoint{ - Hostname: "autoscaling.{region}.amazonaws.com", - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Service: "autoscaling-plans", - }, - }, - Endpoints: endpoints{ - "ap-southeast-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "batch": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "budgets": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "budgets.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - }, - }, - "ce": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "ce.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - }, - }, - "cloud9": service{ - - Endpoints: endpoints{ - "ap-southeast-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "clouddirectory": service{ - - Endpoints: endpoints{ - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "cloudformation": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "cloudfront": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "cloudfront.amazonaws.com", - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - }, - }, - "cloudhsm": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": 
endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "cloudhsmv2": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "cloudhsm", - }, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "cloudsearch": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "cloudtrail": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "codebuild": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ - Hostname: "codebuild-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "us-east-2": endpoint{}, - "us-east-2-fips": endpoint{ - Hostname: "codebuild-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "us-west-1": endpoint{}, - "us-west-1-fips": endpoint{ - Hostname: "codebuild-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ - Hostname: "codebuild-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "codecommit": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "codedeploy": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, 
- }, - }, - "codepipeline": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "codestar": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "cognito-identity": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "cognito-idp": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "cognito-sync": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "comprehend": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "ap-southeast-2": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "config": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "cur": service{ - - Endpoints: endpoints{ - "us-east-1": endpoint{}, - }, - }, - "datapipeline": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "dax": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-west-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "devicefarm": service{ - - Endpoints: endpoints{ - "us-west-2": endpoint{}, - }, - }, - "directconnect": 
service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "discovery": service{ - - Endpoints: endpoints{ - "us-west-2": endpoint{}, - }, - }, - "dms": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "ds": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "dynamodb": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "local": endpoint{ - Hostname: "localhost:8000", - Protocols: []string{"http"}, - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "ec2": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "ec2metadata": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "169.254.169.254/latest", - Protocols: []string{"http"}, - }, - }, - }, - "ecr": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "ecs": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, 
- "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "elasticache": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips": endpoint{ - Hostname: "elasticache-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "elasticbeanstalk": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "elasticfilesystem": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "elasticloadbalancing": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "elasticmapreduce": service{ - Defaults: endpoint{ - SSLCommonName: "{region}.{service}.{dnsSuffix}", - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{ - SSLCommonName: "{service}.{region}.{dnsSuffix}", - }, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{ - SSLCommonName: "{service}.{region}.{dnsSuffix}", - }, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "elastictranscoder": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "email": service{ - - Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - 
"us-west-2": endpoint{}, - }, - }, - "entitlement.marketplace": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "aws-marketplace", - }, - }, - Endpoints: endpoints{ - "us-east-1": endpoint{}, - }, - }, - "es": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "events": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "firehose": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "fms": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "gamelift": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "glacier": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "glue": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "greengrass": service{ - IsRegionalized: boxedTrue, - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - 
"us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "guardduty": service{ - IsRegionalized: boxedTrue, - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "health": service{ - - Endpoints: endpoints{ - "us-east-1": endpoint{}, - }, - }, - "iam": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "iam.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - }, - }, - "importexport": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "importexport.amazonaws.com", - SignatureVersions: []string{"v2", "v4"}, - CredentialScope: credentialScope{ - Region: "us-east-1", - Service: "IngestionService", - }, - }, - }, - }, - "inspector": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "iot": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "execute-api", - }, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "iotanalytics": service{ - - Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "kinesis": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "kinesisanalytics": service{ - - Endpoints: endpoints{ - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "kinesisvideo": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "kms": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": 
endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "lambda": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "lightsail": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "logs": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "machinelearning": service{ - - Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - }, - }, - "marketplacecommerceanalytics": service{ - - Endpoints: endpoints{ - "us-east-1": endpoint{}, - }, - }, - "mediaconvert": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "medialive": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "mediapackage": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "mediastore": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "metering.marketplace": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "aws-marketplace", - }, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": 
endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "mgh": service{ - - Endpoints: endpoints{ - "us-west-2": endpoint{}, - }, - }, - "mobileanalytics": service{ - - Endpoints: endpoints{ - "us-east-1": endpoint{}, - }, - }, - "models.lex": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "lex", - }, - }, - Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "monitoring": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "mturk-requester": service{ - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "sandbox": endpoint{ - Hostname: "mturk-requester-sandbox.us-east-1.amazonaws.com", - }, - "us-east-1": endpoint{}, - }, - }, - "neptune": service{ - - Endpoints: endpoints{ - "eu-west-1": endpoint{ - Hostname: "rds.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - "us-east-1": endpoint{ - Hostname: "rds.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "us-east-2": endpoint{ - Hostname: "rds.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "us-west-2": endpoint{ - Hostname: "rds.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "opsworks": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "opsworks-cm": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "organizations": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "organizations.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - }, - }, - "pinpoint": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "mobiletargeting", - }, - }, - Endpoints: endpoints{ - "us-east-1": endpoint{}, - }, - }, - "polly": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - 
"eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "rds": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{ - SSLCommonName: "{service}.{dnsSuffix}", - }, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "redshift": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "rekognition": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "resource-groups": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "route53": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "route53.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - }, - }, - "route53domains": service{ - - Endpoints: endpoints{ - "us-east-1": endpoint{}, - }, - }, - "runtime.lex": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "lex", - }, - }, - Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "runtime.sagemaker": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "s3": service{ - PartitionEndpoint: "us-east-1", - IsRegionalized: boxedTrue, - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"s3v4"}, - - HasDualStack: boxedTrue, - DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{ - Hostname: "s3.ap-northeast-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{ - Hostname: "s3.ap-southeast-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - "ap-southeast-2": endpoint{ - Hostname: "s3.ap-southeast-2.amazonaws.com", - 
SignatureVersions: []string{"s3", "s3v4"}, - }, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{ - Hostname: "s3.eu-west-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "s3-external-1": endpoint{ - Hostname: "s3-external-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "sa-east-1": endpoint{ - Hostname: "s3.sa-east-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - "us-east-1": endpoint{ - Hostname: "s3.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - "us-east-2": endpoint{}, - "us-west-1": endpoint{ - Hostname: "s3.us-west-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - "us-west-2": endpoint{ - Hostname: "s3.us-west-2.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - }, - }, - "sdb": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"v2"}, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-west-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{ - Hostname: "sdb.amazonaws.com", - }, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "secretsmanager": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "serverlessrepo": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{ - Protocols: []string{"https"}, - }, - "ap-northeast-2": endpoint{ - Protocols: []string{"https"}, - }, - "ap-south-1": endpoint{ - Protocols: []string{"https"}, - }, - "ap-southeast-1": endpoint{ - Protocols: []string{"https"}, - }, - "ap-southeast-2": endpoint{ - Protocols: []string{"https"}, - }, - "ca-central-1": endpoint{ - Protocols: []string{"https"}, - }, - "eu-central-1": endpoint{ - Protocols: []string{"https"}, - }, - "eu-west-1": endpoint{ - Protocols: []string{"https"}, - }, - "eu-west-2": endpoint{ - Protocols: []string{"https"}, - }, - "sa-east-1": endpoint{ - Protocols: []string{"https"}, - }, - "us-east-1": endpoint{ - Protocols: []string{"https"}, - }, - "us-east-2": endpoint{ - Protocols: []string{"https"}, - }, - "us-west-1": endpoint{ - Protocols: []string{"https"}, - }, - "us-west-2": endpoint{ - Protocols: []string{"https"}, - }, - }, - }, - "servicecatalog": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "servicediscovery": service{ - - Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": 
endpoint{}, - }, - }, - "shield": service{ - IsRegionalized: boxedFalse, - Defaults: endpoint{ - SSLCommonName: "Shield.us-east-1.amazonaws.com", - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "us-east-1": endpoint{}, - }, - }, - "sms": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "snowball": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "sns": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "sqs": service{ - Defaults: endpoint{ - SSLCommonName: "{region}.queue.{dnsSuffix}", - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "sqs-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-us-east-2": endpoint{ - Hostname: "sqs-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "fips-us-west-1": endpoint{ - Hostname: "sqs-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "sqs-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{ - SSLCommonName: "queue.{dnsSuffix}", - }, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "ssm": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "states": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": 
endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "storagegateway": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "streams.dynamodb": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Service: "dynamodb", - }, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "local": endpoint{ - Hostname: "localhost:8000", - Protocols: []string{"http"}, - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "sts": service{ - PartitionEndpoint: "aws-global", - Defaults: endpoint{ - Hostname: "sts.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{ - Hostname: "sts.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - }, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "aws-global": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ - Hostname: "sts-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "us-east-2": endpoint{}, - "us-east-2-fips": endpoint{ - Hostname: "sts-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "us-west-1": endpoint{}, - "us-west-1-fips": endpoint{ - Hostname: "sts-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ - Hostname: "sts-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "support": service{ - - Endpoints: endpoints{ - "us-east-1": endpoint{}, - }, - }, - "swf": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - 
}, - }, - "tagging": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "translate": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "waf": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "waf.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - }, - }, - "waf-regional": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "workdocs": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "workmail": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "workspaces": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "xray": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - }, -} - -// AwsCnPartition returns the Resolver for AWS China. 
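
[Editorial example — not part of the diff.] The generated tables above feed the resolver machinery defined later in this diff. For reference, resolving an endpoint from the standard partition looks roughly like the following sketch against the aws-sdk-go v1 endpoints package; the "sqs" and "us-east-1" literals are illustrative only:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/endpoints"
    )

    func main() {
        // Look up SQS in us-east-1 via the default resolver, which is
        // backed by the generated partition tables shown in this diff.
        resolved, err := endpoints.DefaultResolver().EndpointFor("sqs", "us-east-1")
        if err != nil {
            panic(err)
        }
        fmt.Println(resolved.URL, resolved.SigningRegion)
    }
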
-func AwsCnPartition() Partition { - return awscnPartition.Partition() -} - -var awscnPartition = partition{ - ID: "aws-cn", - Name: "AWS China", - DNSSuffix: "amazonaws.com.cn", - RegionRegex: regionRegex{ - Regexp: func() *regexp.Regexp { - reg, _ := regexp.Compile("^cn\\-\\w+\\-\\d+$") - return reg - }(), - }, - Defaults: endpoint{ - Hostname: "{service}.{region}.{dnsSuffix}", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - Regions: regions{ - "cn-north-1": region{ - Description: "China (Beijing)", - }, - "cn-northwest-1": region{ - Description: "China (Ningxia)", - }, - }, - Services: services{ - "apigateway": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "application-autoscaling": service{ - Defaults: endpoint{ - Hostname: "autoscaling.{region}.amazonaws.com.cn", - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Service: "application-autoscaling", - }, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "autoscaling": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "cloudformation": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "cloudtrail": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "codedeploy": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "cognito-identity": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - }, - }, - "config": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "directconnect": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "ds": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "dynamodb": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "ec2": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "ec2metadata": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "169.254.169.254/latest", - Protocols: []string{"http"}, - }, - }, - }, - "ecr": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "ecs": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "elasticache": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "elasticbeanstalk": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "elasticloadbalancing": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "elasticmapreduce": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "es": service{ - - 
Endpoints: endpoints{ - "cn-northwest-1": endpoint{}, - }, - }, - "events": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "glacier": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "iam": service{ - PartitionEndpoint: "aws-cn-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-cn-global": endpoint{ - Hostname: "iam.cn-north-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-north-1", - }, - }, - }, - }, - "iot": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "execute-api", - }, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - }, - }, - "kinesis": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "lambda": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "logs": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "monitoring": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "rds": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "redshift": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "s3": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"s3v4"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "sms": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "snowball": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - }, - }, - "sns": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "sqs": service{ - Defaults: endpoint{ - SSLCommonName: "{region}.queue.{dnsSuffix}", - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "ssm": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "storagegateway": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - }, - }, - "streams.dynamodb": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Service: "dynamodb", - }, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "sts": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "swf": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "tagging": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - }, -} - -// AwsUsGovPartition returns the Resolver for AWS GovCloud (US). 
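
[Editorial example — not part of the diff.] The China partition defined above is enumerable through the same Partition API as the standard partition. A minimal sketch, again assuming the aws-sdk-go v1 endpoints package:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/endpoints"
    )

    func main() {
        // AwsCnPartition is defined in the deleted defaults.go above;
        // Regions and Services are defined on Partition in endpoints.go.
        p := endpoints.AwsCnPartition()
        for id := range p.Regions() {
            fmt.Println("region:", id)
        }
        for id := range p.Services() {
            fmt.Println("service:", id)
        }
    }
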
-func AwsUsGovPartition() Partition { - return awsusgovPartition.Partition() -} - -var awsusgovPartition = partition{ - ID: "aws-us-gov", - Name: "AWS GovCloud (US)", - DNSSuffix: "amazonaws.com", - RegionRegex: regionRegex{ - Regexp: func() *regexp.Regexp { - reg, _ := regexp.Compile("^us\\-gov\\-\\w+\\-\\d+$") - return reg - }(), - }, - Defaults: endpoint{ - Hostname: "{service}.{region}.{dnsSuffix}", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - Regions: regions{ - "us-gov-west-1": region{ - Description: "AWS GovCloud (US)", - }, - }, - Services: services{ - "acm": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "apigateway": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "autoscaling": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - }, - "cloudformation": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "cloudhsm": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "cloudhsmv2": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "cloudhsm", - }, - }, - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "cloudtrail": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "codedeploy": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "config": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "directconnect": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "dms": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "dynamodb": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - "us-gov-west-1-fips": endpoint{ - Hostname: "dynamodb.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "ec2": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "ec2metadata": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "169.254.169.254/latest", - Protocols: []string{"http"}, - }, - }, - }, - "ecr": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "ecs": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "elasticache": service{ - - Endpoints: endpoints{ - "fips": endpoint{ - Hostname: "elasticache-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-west-1": endpoint{}, - }, - }, - "elasticbeanstalk": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "elasticloadbalancing": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - }, - "elasticmapreduce": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{ - Protocols: []string{"https"}, - }, - }, - }, - "es": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "events": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "glacier": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - }, - "iam": service{ - PartitionEndpoint: "aws-us-gov-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-us-gov-global": endpoint{ - Hostname: 
"iam.us-gov.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "inspector": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "iot": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "execute-api", - }, - }, - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "kinesis": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "kms": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "lambda": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "logs": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "metering.marketplace": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "aws-marketplace", - }, - }, - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "monitoring": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "polly": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "rds": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "redshift": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "rekognition": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "s3": service{ - Defaults: endpoint{ - SignatureVersions: []string{"s3", "s3v4"}, - }, - Endpoints: endpoints{ - "fips-us-gov-west-1": endpoint{ - Hostname: "s3-fips-us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "us-gov-west-1": endpoint{ - Hostname: "s3.us-gov-west-1.amazonaws.com", - Protocols: []string{"http", "https"}, - }, - }, - }, - "sms": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "snowball": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "sns": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - }, - "sqs": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{ - SSLCommonName: "{region}.queue.{dnsSuffix}", - Protocols: []string{"http", "https"}, - }, - }, - }, - "ssm": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "states": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "storagegateway": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "streams.dynamodb": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "dynamodb", - }, - }, - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - "us-gov-west-1-fips": endpoint{ - Hostname: "dynamodb.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "sts": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "swf": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "tagging": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "translate": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - }, -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go deleted file mode 100644 index 84316b9..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go +++ /dev/null @@ -1,66 +0,0 @@ -// Package 
endpoints provides the types and functionality for defining regions -// and endpoints, as well as querying those definitions. -// -// The SDK's Regions and Endpoints metadata is code generated into the endpoints -// package, and is accessible via the DefaultResolver function. This function -// returns a endpoint Resolver will search the metadata and build an associated -// endpoint if one is found. The default resolver will search all partitions -// known by the SDK. e.g AWS Standard (aws), AWS China (aws-cn), and -// AWS GovCloud (US) (aws-us-gov). -// . -// -// Enumerating Regions and Endpoint Metadata -// -// Casting the Resolver returned by DefaultResolver to a EnumPartitions interface -// will allow you to get access to the list of underlying Partitions with the -// Partitions method. This is helpful if you want to limit the SDK's endpoint -// resolving to a single partition, or enumerate regions, services, and endpoints -// in the partition. -// -// resolver := endpoints.DefaultResolver() -// partitions := resolver.(endpoints.EnumPartitions).Partitions() -// -// for _, p := range partitions { -// fmt.Println("Regions for", p.ID()) -// for id, _ := range p.Regions() { -// fmt.Println("*", id) -// } -// -// fmt.Println("Services for", p.ID()) -// for id, _ := range p.Services() { -// fmt.Println("*", id) -// } -// } -// -// Using Custom Endpoints -// -// The endpoints package also gives you the ability to use your own logic how -// endpoints are resolved. This is a great way to define a custom endpoint -// for select services, without passing that logic down through your code. -// -// If a type implements the Resolver interface it can be used to resolve -// endpoints. To use this with the SDK's Session and Config set the value -// of the type to the EndpointsResolver field of aws.Config when initializing -// the session, or service client. -// -// In addition the ResolverFunc is a wrapper for a func matching the signature -// of Resolver.EndpointFor, converting it to a type that satisfies the -// Resolver interface. -// -// -// myCustomResolver := func(service, region string, optFns ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) { -// if service == endpoints.S3ServiceID { -// return endpoints.ResolvedEndpoint{ -// URL: "s3.custom.endpoint.com", -// SigningRegion: "custom-signing-region", -// }, nil -// } -// -// return endpoints.DefaultResolver().EndpointFor(service, region, optFns...) -// } -// -// sess := session.Must(session.NewSession(&aws.Config{ -// Region: aws.String("us-west-2"), -// EndpointResolver: endpoints.ResolverFunc(myCustomResolver), -// })) -package endpoints diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go deleted file mode 100644 index e29c095..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go +++ /dev/null @@ -1,449 +0,0 @@ -package endpoints - -import ( - "fmt" - "regexp" - - "github.com/aws/aws-sdk-go/aws/awserr" -) - -// Options provide the configuration needed to direct how the -// endpoints will be resolved. -type Options struct { - // DisableSSL forces the endpoint to be resolved as HTTP. - // instead of HTTPS if the service supports it. - DisableSSL bool - - // Sets the resolver to resolve the endpoint as a dualstack endpoint - // for the service. If dualstack support for a service is not known and - // StrictMatching is not enabled a dualstack endpoint for the service will - // be returned. This endpoint may not be valid. 
If StrictMatching is - // enabled only services that are known to support dualstack will return - // dualstack endpoints. - UseDualStack bool - - // Enables strict matching of services and regions resolved endpoints. - // If the partition doesn't enumerate the exact service and region an - // error will be returned. This option will prevent returning endpoints - // that look valid, but may not resolve to any real endpoint. - StrictMatching bool - - // Enables resolving a service endpoint based on the region provided if the - // service does not exist. The service endpoint ID will be used as the service - // domain name prefix. By default the endpoint resolver requires the service - // to be known when resolving endpoints. - // - // If resolving an endpoint on the partition list the provided region will - // be used to determine which partition's domain name pattern to the service - // endpoint ID with. If both the service and region are unkonwn and resolving - // the endpoint on partition list an UnknownEndpointError error will be returned. - // - // If resolving and endpoint on a partition specific resolver that partition's - // domain name pattern will be used with the service endpoint ID. If both - // region and service do not exist when resolving an endpoint on a specific - // partition the partition's domain pattern will be used to combine the - // endpoint and region together. - // - // This option is ignored if StrictMatching is enabled. - ResolveUnknownService bool -} - -// Set combines all of the option functions together. -func (o *Options) Set(optFns ...func(*Options)) { - for _, fn := range optFns { - fn(o) - } -} - -// DisableSSLOption sets the DisableSSL options. Can be used as a functional -// option when resolving endpoints. -func DisableSSLOption(o *Options) { - o.DisableSSL = true -} - -// UseDualStackOption sets the UseDualStack option. Can be used as a functional -// option when resolving endpoints. -func UseDualStackOption(o *Options) { - o.UseDualStack = true -} - -// StrictMatchingOption sets the StrictMatching option. Can be used as a functional -// option when resolving endpoints. -func StrictMatchingOption(o *Options) { - o.StrictMatching = true -} - -// ResolveUnknownServiceOption sets the ResolveUnknownService option. Can be used -// as a functional option when resolving endpoints. -func ResolveUnknownServiceOption(o *Options) { - o.ResolveUnknownService = true -} - -// A Resolver provides the interface for functionality to resolve endpoints. -// The build in Partition and DefaultResolver return value satisfy this interface. -type Resolver interface { - EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) -} - -// ResolverFunc is a helper utility that wraps a function so it satisfies the -// Resolver interface. This is useful when you want to add additional endpoint -// resolving logic, or stub out specific endpoints with custom values. -type ResolverFunc func(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) - -// EndpointFor wraps the ResolverFunc function to satisfy the Resolver interface. -func (fn ResolverFunc) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) { - return fn(service, region, opts...) -} - -var schemeRE = regexp.MustCompile("^([^:]+)://") - -// AddScheme adds the HTTP or HTTPS schemes to a endpoint URL if there is no -// scheme. If disableSSL is true HTTP will set HTTP instead of the default HTTPS. 
-// -// If disableSSL is set, it will only set the URL's scheme if the URL does not -// contain a scheme. -func AddScheme(endpoint string, disableSSL bool) string { - if !schemeRE.MatchString(endpoint) { - scheme := "https" - if disableSSL { - scheme = "http" - } - endpoint = fmt.Sprintf("%s://%s", scheme, endpoint) - } - - return endpoint -} - -// EnumPartitions a provides a way to retrieve the underlying partitions that -// make up the SDK's default Resolver, or any resolver decoded from a model -// file. -// -// Use this interface with DefaultResolver and DecodeModels to get the list of -// Partitions. -type EnumPartitions interface { - Partitions() []Partition -} - -// RegionsForService returns a map of regions for the partition and service. -// If either the partition or service does not exist false will be returned -// as the second parameter. -// -// This example shows how to get the regions for DynamoDB in the AWS partition. -// rs, exists := endpoints.RegionsForService(endpoints.DefaultPartitions(), endpoints.AwsPartitionID, endpoints.DynamodbServiceID) -// -// This is equivalent to using the partition directly. -// rs := endpoints.AwsPartition().Services()[endpoints.DynamodbServiceID].Regions() -func RegionsForService(ps []Partition, partitionID, serviceID string) (map[string]Region, bool) { - for _, p := range ps { - if p.ID() != partitionID { - continue - } - if _, ok := p.p.Services[serviceID]; !ok { - break - } - - s := Service{ - id: serviceID, - p: p.p, - } - return s.Regions(), true - } - - return map[string]Region{}, false -} - -// PartitionForRegion returns the first partition which includes the region -// passed in. This includes both known regions and regions which match -// a pattern supported by the partition which may include regions that are -// not explicitly known by the partition. Use the Regions method of the -// returned Partition if explicit support is needed. -func PartitionForRegion(ps []Partition, regionID string) (Partition, bool) { - for _, p := range ps { - if _, ok := p.p.Regions[regionID]; ok || p.p.RegionRegex.MatchString(regionID) { - return p, true - } - } - - return Partition{}, false -} - -// A Partition provides the ability to enumerate the partition's regions -// and services. -type Partition struct { - id string - p *partition -} - -// ID returns the identifier of the partition. -func (p Partition) ID() string { return p.id } - -// EndpointFor attempts to resolve the endpoint based on service and region. -// See Options for information on configuring how the endpoint is resolved. -// -// If the service cannot be found in the metadata the UnknownServiceError -// error will be returned. This validation will occur regardless if -// StrictMatching is enabled. To enable resolving unknown services set the -// "ResolveUnknownService" option to true. When StrictMatching is disabled -// this option allows the partition resolver to resolve a endpoint based on -// the service endpoint ID provided. -// -// When resolving endpoints you can choose to enable StrictMatching. This will -// require the provided service and region to be known by the partition. -// If the endpoint cannot be strictly resolved an error will be returned. This -// mode is useful to ensure the endpoint resolved is valid. Without -// StrictMatching enabled the endpoint returned my look valid but may not work. -// StrictMatching requires the SDK to be updated if you want to take advantage -// of new regions and services expansions. -// -// Errors that can be returned. 
-// * UnknownServiceError -// * UnknownEndpointError -func (p Partition) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) { - return p.p.EndpointFor(service, region, opts...) -} - -// Regions returns a map of Regions indexed by their ID. This is useful for -// enumerating over the regions in a partition. -func (p Partition) Regions() map[string]Region { - rs := map[string]Region{} - for id, r := range p.p.Regions { - rs[id] = Region{ - id: id, - desc: r.Description, - p: p.p, - } - } - - return rs -} - -// Services returns a map of Service indexed by their ID. This is useful for -// enumerating over the services in a partition. -func (p Partition) Services() map[string]Service { - ss := map[string]Service{} - for id := range p.p.Services { - ss[id] = Service{ - id: id, - p: p.p, - } - } - - return ss -} - -// A Region provides information about a region, and ability to resolve an -// endpoint from the context of a region, given a service. -type Region struct { - id, desc string - p *partition -} - -// ID returns the region's identifier. -func (r Region) ID() string { return r.id } - -// Description returns the region's description. The region description -// is free text, it can be empty, and it may change between SDK releases. -func (r Region) Description() string { return r.desc } - -// ResolveEndpoint resolves an endpoint from the context of the region given -// a service. See Partition.EndpointFor for usage and errors that can be returned. -func (r Region) ResolveEndpoint(service string, opts ...func(*Options)) (ResolvedEndpoint, error) { - return r.p.EndpointFor(service, r.id, opts...) -} - -// Services returns a list of all services that are known to be in this region. -func (r Region) Services() map[string]Service { - ss := map[string]Service{} - for id, s := range r.p.Services { - if _, ok := s.Endpoints[r.id]; ok { - ss[id] = Service{ - id: id, - p: r.p, - } - } - } - - return ss -} - -// A Service provides information about a service, and ability to resolve an -// endpoint from the context of a service, given a region. -type Service struct { - id string - p *partition -} - -// ID returns the identifier for the service. -func (s Service) ID() string { return s.id } - -// ResolveEndpoint resolves an endpoint from the context of a service given -// a region. See Partition.EndpointFor for usage and errors that can be returned. -func (s Service) ResolveEndpoint(region string, opts ...func(*Options)) (ResolvedEndpoint, error) { - return s.p.EndpointFor(s.id, region, opts...) -} - -// Regions returns a map of Regions that the service is present in. -// -// A region is the AWS region the service exists in. Whereas a Endpoint is -// an URL that can be resolved to a instance of a service. -func (s Service) Regions() map[string]Region { - rs := map[string]Region{} - for id := range s.p.Services[s.id].Endpoints { - if r, ok := s.p.Regions[id]; ok { - rs[id] = Region{ - id: id, - desc: r.Description, - p: s.p, - } - } - } - - return rs -} - -// Endpoints returns a map of Endpoints indexed by their ID for all known -// endpoints for a service. -// -// A region is the AWS region the service exists in. Whereas a Endpoint is -// an URL that can be resolved to a instance of a service. 
-func (s Service) Endpoints() map[string]Endpoint { - es := map[string]Endpoint{} - for id := range s.p.Services[s.id].Endpoints { - es[id] = Endpoint{ - id: id, - serviceID: s.id, - p: s.p, - } - } - - return es -} - -// A Endpoint provides information about endpoints, and provides the ability -// to resolve that endpoint for the service, and the region the endpoint -// represents. -type Endpoint struct { - id string - serviceID string - p *partition -} - -// ID returns the identifier for an endpoint. -func (e Endpoint) ID() string { return e.id } - -// ServiceID returns the identifier the endpoint belongs to. -func (e Endpoint) ServiceID() string { return e.serviceID } - -// ResolveEndpoint resolves an endpoint from the context of a service and -// region the endpoint represents. See Partition.EndpointFor for usage and -// errors that can be returned. -func (e Endpoint) ResolveEndpoint(opts ...func(*Options)) (ResolvedEndpoint, error) { - return e.p.EndpointFor(e.serviceID, e.id, opts...) -} - -// A ResolvedEndpoint is an endpoint that has been resolved based on a partition -// service, and region. -type ResolvedEndpoint struct { - // The endpoint URL - URL string - - // The region that should be used for signing requests. - SigningRegion string - - // The service name that should be used for signing requests. - SigningName string - - // States that the signing name for this endpoint was derived from metadata - // passed in, but was not explicitly modeled. - SigningNameDerived bool - - // The signing method that should be used for signing requests. - SigningMethod string -} - -// So that the Error interface type can be included as an anonymous field -// in the requestError struct and not conflict with the error.Error() method. -type awsError awserr.Error - -// A EndpointNotFoundError is returned when in StrictMatching mode, and the -// endpoint for the service and region cannot be found in any of the partitions. -type EndpointNotFoundError struct { - awsError - Partition string - Service string - Region string -} - -// A UnknownServiceError is returned when the service does not resolve to an -// endpoint. Includes a list of all known services for the partition. Returned -// when a partition does not support the service. -type UnknownServiceError struct { - awsError - Partition string - Service string - Known []string -} - -// NewUnknownServiceError builds and returns UnknownServiceError. -func NewUnknownServiceError(p, s string, known []string) UnknownServiceError { - return UnknownServiceError{ - awsError: awserr.New("UnknownServiceError", - "could not resolve endpoint for unknown service", nil), - Partition: p, - Service: s, - Known: known, - } -} - -// String returns the string representation of the error. -func (e UnknownServiceError) Error() string { - extra := fmt.Sprintf("partition: %q, service: %q", - e.Partition, e.Service) - if len(e.Known) > 0 { - extra += fmt.Sprintf(", known: %v", e.Known) - } - return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr()) -} - -// String returns the string representation of the error. -func (e UnknownServiceError) String() string { - return e.Error() -} - -// A UnknownEndpointError is returned when in StrictMatching mode and the -// service is valid, but the region does not resolve to an endpoint. Includes -// a list of all known endpoints for the service. 
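The Unknown* error types defined around here are what EndpointFor surfaces; a sketch of strict resolution with type-switched handling (the service and region strings are illustrative):

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/endpoints"
    )

    func resolveStrict(service, region string) {
        resolved, err := endpoints.AwsPartition().EndpointFor(
            service, region,
            endpoints.StrictMatchingOption, // fail instead of guessing a URL
        )
        if err != nil {
            switch err.(type) {
            case endpoints.UnknownServiceError:
                fmt.Println("service is not in the partition's model")
            case endpoints.UnknownEndpointError:
                fmt.Println("service exists, but not in that region")
            }
            return
        }
        fmt.Println(resolved.URL, resolved.SigningRegion, resolved.SigningMethod)
    }

StrictMatching trades convenience for certainty: without it the resolver may synthesize a plausible but unverified URL.
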
-type UnknownEndpointError struct { - awsError - Partition string - Service string - Region string - Known []string -} - -// NewUnknownEndpointError builds and returns UnknownEndpointError. -func NewUnknownEndpointError(p, s, r string, known []string) UnknownEndpointError { - return UnknownEndpointError{ - awsError: awserr.New("UnknownEndpointError", - "could not resolve endpoint", nil), - Partition: p, - Service: s, - Region: r, - Known: known, - } -} - -// String returns the string representation of the error. -func (e UnknownEndpointError) Error() string { - extra := fmt.Sprintf("partition: %q, service: %q, region: %q", - e.Partition, e.Service, e.Region) - if len(e.Known) > 0 { - extra += fmt.Sprintf(", known: %v", e.Known) - } - return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr()) -} - -// String returns the string representation of the error. -func (e UnknownEndpointError) String() string { - return e.Error() -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go deleted file mode 100644 index ff6f76d..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go +++ /dev/null @@ -1,307 +0,0 @@ -package endpoints - -import ( - "fmt" - "regexp" - "strconv" - "strings" -) - -type partitions []partition - -func (ps partitions) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) { - var opt Options - opt.Set(opts...) - - for i := 0; i < len(ps); i++ { - if !ps[i].canResolveEndpoint(service, region, opt.StrictMatching) { - continue - } - - return ps[i].EndpointFor(service, region, opts...) - } - - // If loose matching fallback to first partition format to use - // when resolving the endpoint. - if !opt.StrictMatching && len(ps) > 0 { - return ps[0].EndpointFor(service, region, opts...) - } - - return ResolvedEndpoint{}, NewUnknownEndpointError("all partitions", service, region, []string{}) -} - -// Partitions satisfies the EnumPartitions interface and returns a list -// of Partitions representing each partition represented in the SDK's -// endpoints model. -func (ps partitions) Partitions() []Partition { - parts := make([]Partition, 0, len(ps)) - for i := 0; i < len(ps); i++ { - parts = append(parts, ps[i].Partition()) - } - - return parts -} - -type partition struct { - ID string `json:"partition"` - Name string `json:"partitionName"` - DNSSuffix string `json:"dnsSuffix"` - RegionRegex regionRegex `json:"regionRegex"` - Defaults endpoint `json:"defaults"` - Regions regions `json:"regions"` - Services services `json:"services"` -} - -func (p partition) Partition() Partition { - return Partition{ - id: p.ID, - p: &p, - } -} - -func (p partition) canResolveEndpoint(service, region string, strictMatch bool) bool { - s, hasService := p.Services[service] - _, hasEndpoint := s.Endpoints[region] - - if hasEndpoint && hasService { - return true - } - - if strictMatch { - return false - } - - return p.RegionRegex.MatchString(region) -} - -func (p partition) EndpointFor(service, region string, opts ...func(*Options)) (resolved ResolvedEndpoint, err error) { - var opt Options - opt.Set(opts...) - - s, hasService := p.Services[service] - if !(hasService || opt.ResolveUnknownService) { - // Only return error if the resolver will not fallback to creating - // endpoint based on service endpoint ID passed in. 
- return resolved, NewUnknownServiceError(p.ID, service, serviceList(p.Services)) - } - - e, hasEndpoint := s.endpointForRegion(region) - if !hasEndpoint && opt.StrictMatching { - return resolved, NewUnknownEndpointError(p.ID, service, region, endpointList(s.Endpoints)) - } - - defs := []endpoint{p.Defaults, s.Defaults} - return e.resolve(service, region, p.DNSSuffix, defs, opt), nil -} - -func serviceList(ss services) []string { - list := make([]string, 0, len(ss)) - for k := range ss { - list = append(list, k) - } - return list -} -func endpointList(es endpoints) []string { - list := make([]string, 0, len(es)) - for k := range es { - list = append(list, k) - } - return list -} - -type regionRegex struct { - *regexp.Regexp -} - -func (rr *regionRegex) UnmarshalJSON(b []byte) (err error) { - // Strip leading and trailing quotes - regex, err := strconv.Unquote(string(b)) - if err != nil { - return fmt.Errorf("unable to strip quotes from regex, %v", err) - } - - rr.Regexp, err = regexp.Compile(regex) - if err != nil { - return fmt.Errorf("unable to unmarshal region regex, %v", err) - } - return nil -} - -type regions map[string]region - -type region struct { - Description string `json:"description"` -} - -type services map[string]service - -type service struct { - PartitionEndpoint string `json:"partitionEndpoint"` - IsRegionalized boxedBool `json:"isRegionalized,omitempty"` - Defaults endpoint `json:"defaults"` - Endpoints endpoints `json:"endpoints"` -} - -func (s *service) endpointForRegion(region string) (endpoint, bool) { - if s.IsRegionalized == boxedFalse { - return s.Endpoints[s.PartitionEndpoint], region == s.PartitionEndpoint - } - - if e, ok := s.Endpoints[region]; ok { - return e, true - } - - // Unable to find any matching endpoint, return - // blank that will be used for generic endpoint creation. - return endpoint{}, false -} - -type endpoints map[string]endpoint - -type endpoint struct { - Hostname string `json:"hostname"` - Protocols []string `json:"protocols"` - CredentialScope credentialScope `json:"credentialScope"` - - // Custom fields not modeled - HasDualStack boxedBool `json:"-"` - DualStackHostname string `json:"-"` - - // Signature Version not used - SignatureVersions []string `json:"signatureVersions"` - - // SSLCommonName not used. 
- SSLCommonName string `json:"sslCommonName"` -} - -const ( - defaultProtocol = "https" - defaultSigner = "v4" -) - -var ( - protocolPriority = []string{"https", "http"} - signerPriority = []string{"v4", "v2"} -) - -func getByPriority(s []string, p []string, def string) string { - if len(s) == 0 { - return def - } - - for i := 0; i < len(p); i++ { - for j := 0; j < len(s); j++ { - if s[j] == p[i] { - return s[j] - } - } - } - - return s[0] -} - -func (e endpoint) resolve(service, region, dnsSuffix string, defs []endpoint, opts Options) ResolvedEndpoint { - var merged endpoint - for _, def := range defs { - merged.mergeIn(def) - } - merged.mergeIn(e) - e = merged - - hostname := e.Hostname - - // Offset the hostname for dualstack if enabled - if opts.UseDualStack && e.HasDualStack == boxedTrue { - hostname = e.DualStackHostname - } - - u := strings.Replace(hostname, "{service}", service, 1) - u = strings.Replace(u, "{region}", region, 1) - u = strings.Replace(u, "{dnsSuffix}", dnsSuffix, 1) - - scheme := getEndpointScheme(e.Protocols, opts.DisableSSL) - u = fmt.Sprintf("%s://%s", scheme, u) - - signingRegion := e.CredentialScope.Region - if len(signingRegion) == 0 { - signingRegion = region - } - - signingName := e.CredentialScope.Service - var signingNameDerived bool - if len(signingName) == 0 { - signingName = service - signingNameDerived = true - } - - return ResolvedEndpoint{ - URL: u, - SigningRegion: signingRegion, - SigningName: signingName, - SigningNameDerived: signingNameDerived, - SigningMethod: getByPriority(e.SignatureVersions, signerPriority, defaultSigner), - } -} - -func getEndpointScheme(protocols []string, disableSSL bool) string { - if disableSSL { - return "http" - } - - return getByPriority(protocols, protocolPriority, defaultProtocol) -} - -func (e *endpoint) mergeIn(other endpoint) { - if len(other.Hostname) > 0 { - e.Hostname = other.Hostname - } - if len(other.Protocols) > 0 { - e.Protocols = other.Protocols - } - if len(other.SignatureVersions) > 0 { - e.SignatureVersions = other.SignatureVersions - } - if len(other.CredentialScope.Region) > 0 { - e.CredentialScope.Region = other.CredentialScope.Region - } - if len(other.CredentialScope.Service) > 0 { - e.CredentialScope.Service = other.CredentialScope.Service - } - if len(other.SSLCommonName) > 0 { - e.SSLCommonName = other.SSLCommonName - } - if other.HasDualStack != boxedBoolUnset { - e.HasDualStack = other.HasDualStack - } - if len(other.DualStackHostname) > 0 { - e.DualStackHostname = other.DualStackHostname - } -} - -type credentialScope struct { - Region string `json:"region"` - Service string `json:"service"` -} - -type boxedBool int - -func (b *boxedBool) UnmarshalJSON(buf []byte) error { - v, err := strconv.ParseBool(string(buf)) - if err != nil { - return err - } - - if v { - *b = boxedTrue - } else { - *b = boxedFalse - } - - return nil -} - -const ( - boxedBoolUnset boxedBool = iota - boxedFalse - boxedTrue -) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go deleted file mode 100644 index 05e92df..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go +++ /dev/null @@ -1,337 +0,0 @@ -// +build codegen - -package endpoints - -import ( - "fmt" - "io" - "reflect" - "strings" - "text/template" - "unicode" -) - -// A CodeGenOptions are the options for code generating the endpoints into -// Go code from the endpoints model definition. 
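The resolve/getEndpointScheme path above is what the public Options.DisableSSL flag feeds into; a small sketch using a raw functional option (service and region are illustrative):

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/endpoints"
    )

    func resolvePlainHTTP() {
        // DisableSSL short-circuits the protocol priority list and yields
        // an http:// URL for the resolved endpoint.
        resolved, err := endpoints.AwsPartition().EndpointFor(
            "s3", "us-east-1",
            func(o *endpoints.Options) { o.DisableSSL = true },
        )
        if err == nil {
            fmt.Println(resolved.URL)
        }
    }
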
-type CodeGenOptions struct { - // Options for how the model will be decoded. - DecodeModelOptions DecodeModelOptions -} - -// Set combines all of the option functions together -func (d *CodeGenOptions) Set(optFns ...func(*CodeGenOptions)) { - for _, fn := range optFns { - fn(d) - } -} - -// CodeGenModel given a endpoints model file will decode it and attempt to -// generate Go code from the model definition. Error will be returned if -// the code is unable to be generated, or decoded. -func CodeGenModel(modelFile io.Reader, outFile io.Writer, optFns ...func(*CodeGenOptions)) error { - var opts CodeGenOptions - opts.Set(optFns...) - - resolver, err := DecodeModel(modelFile, func(d *DecodeModelOptions) { - *d = opts.DecodeModelOptions - }) - if err != nil { - return err - } - - tmpl := template.Must(template.New("tmpl").Funcs(funcMap).Parse(v3Tmpl)) - if err := tmpl.ExecuteTemplate(outFile, "defaults", resolver); err != nil { - return fmt.Errorf("failed to execute template, %v", err) - } - - return nil -} - -func toSymbol(v string) string { - out := []rune{} - for _, c := range strings.Title(v) { - if !(unicode.IsNumber(c) || unicode.IsLetter(c)) { - continue - } - - out = append(out, c) - } - - return string(out) -} - -func quoteString(v string) string { - return fmt.Sprintf("%q", v) -} - -func regionConstName(p, r string) string { - return toSymbol(p) + toSymbol(r) -} - -func partitionGetter(id string) string { - return fmt.Sprintf("%sPartition", toSymbol(id)) -} - -func partitionVarName(id string) string { - return fmt.Sprintf("%sPartition", strings.ToLower(toSymbol(id))) -} - -func listPartitionNames(ps partitions) string { - names := []string{} - switch len(ps) { - case 1: - return ps[0].Name - case 2: - return fmt.Sprintf("%s and %s", ps[0].Name, ps[1].Name) - default: - for i, p := range ps { - if i == len(ps)-1 { - names = append(names, "and "+p.Name) - } else { - names = append(names, p.Name) - } - } - return strings.Join(names, ", ") - } -} - -func boxedBoolIfSet(msg string, v boxedBool) string { - switch v { - case boxedTrue: - return fmt.Sprintf(msg, "boxedTrue") - case boxedFalse: - return fmt.Sprintf(msg, "boxedFalse") - default: - return "" - } -} - -func stringIfSet(msg, v string) string { - if len(v) == 0 { - return "" - } - - return fmt.Sprintf(msg, v) -} - -func stringSliceIfSet(msg string, vs []string) string { - if len(vs) == 0 { - return "" - } - - names := []string{} - for _, v := range vs { - names = append(names, `"`+v+`"`) - } - - return fmt.Sprintf(msg, strings.Join(names, ",")) -} - -func endpointIsSet(v endpoint) bool { - return !reflect.DeepEqual(v, endpoint{}) -} - -func serviceSet(ps partitions) map[string]struct{} { - set := map[string]struct{}{} - for _, p := range ps { - for id := range p.Services { - set[id] = struct{}{} - } - } - - return set -} - -var funcMap = template.FuncMap{ - "ToSymbol": toSymbol, - "QuoteString": quoteString, - "RegionConst": regionConstName, - "PartitionGetter": partitionGetter, - "PartitionVarName": partitionVarName, - "ListPartitionNames": listPartitionNames, - "BoxedBoolIfSet": boxedBoolIfSet, - "StringIfSet": stringIfSet, - "StringSliceIfSet": stringSliceIfSet, - "EndpointIsSet": endpointIsSet, - "ServicesSet": serviceSet, -} - -const v3Tmpl = ` -{{ define "defaults" -}} -// Code generated by aws/endpoints/v3model_codegen.go. DO NOT EDIT. - -package endpoints - -import ( - "regexp" -) - - {{ template "partition consts" . }} - - {{ range $_, $partition := . 
}} - {{ template "partition region consts" $partition }} - {{ end }} - - {{ template "service consts" . }} - - {{ template "endpoint resolvers" . }} -{{- end }} - -{{ define "partition consts" }} - // Partition identifiers - const ( - {{ range $_, $p := . -}} - {{ ToSymbol $p.ID }}PartitionID = {{ QuoteString $p.ID }} // {{ $p.Name }} partition. - {{ end -}} - ) -{{- end }} - -{{ define "partition region consts" }} - // {{ .Name }} partition's regions. - const ( - {{ range $id, $region := .Regions -}} - {{ ToSymbol $id }}RegionID = {{ QuoteString $id }} // {{ $region.Description }}. - {{ end -}} - ) -{{- end }} - -{{ define "service consts" }} - // Service identifiers - const ( - {{ $serviceSet := ServicesSet . -}} - {{ range $id, $_ := $serviceSet -}} - {{ ToSymbol $id }}ServiceID = {{ QuoteString $id }} // {{ ToSymbol $id }}. - {{ end -}} - ) -{{- end }} - -{{ define "endpoint resolvers" }} - // DefaultResolver returns an Endpoint resolver that will be able - // to resolve endpoints for: {{ ListPartitionNames . }}. - // - // Use DefaultPartitions() to get the list of the default partitions. - func DefaultResolver() Resolver { - return defaultPartitions - } - - // DefaultPartitions returns a list of the partitions the SDK is bundled - // with. The available partitions are: {{ ListPartitionNames . }}. - // - // partitions := endpoints.DefaultPartitions - // for _, p := range partitions { - // // ... inspect partitions - // } - func DefaultPartitions() []Partition { - return defaultPartitions.Partitions() - } - - var defaultPartitions = partitions{ - {{ range $_, $partition := . -}} - {{ PartitionVarName $partition.ID }}, - {{ end }} - } - - {{ range $_, $partition := . -}} - {{ $name := PartitionGetter $partition.ID -}} - // {{ $name }} returns the Resolver for {{ $partition.Name }}. - func {{ $name }}() Partition { - return {{ PartitionVarName $partition.ID }}.Partition() - } - var {{ PartitionVarName $partition.ID }} = {{ template "gocode Partition" $partition }} - {{ end }} -{{ end }} - -{{ define "default partitions" }} - func DefaultPartitions() []Partition { - return []partition{ - {{ range $_, $partition := . -}} - // {{ ToSymbol $partition.ID}}Partition(), - {{ end }} - } - } -{{ end }} - -{{ define "gocode Partition" -}} -partition{ - {{ StringIfSet "ID: %q,\n" .ID -}} - {{ StringIfSet "Name: %q,\n" .Name -}} - {{ StringIfSet "DNSSuffix: %q,\n" .DNSSuffix -}} - RegionRegex: {{ template "gocode RegionRegex" .RegionRegex }}, - {{ if EndpointIsSet .Defaults -}} - Defaults: {{ template "gocode Endpoint" .Defaults }}, - {{- end }} - Regions: {{ template "gocode Regions" .Regions }}, - Services: {{ template "gocode Services" .Services }}, -} -{{- end }} - -{{ define "gocode RegionRegex" -}} -regionRegex{ - Regexp: func() *regexp.Regexp{ - reg, _ := regexp.Compile({{ QuoteString .Regexp.String }}) - return reg - }(), -} -{{- end }} - -{{ define "gocode Regions" -}} -regions{ - {{ range $id, $region := . -}} - "{{ $id }}": {{ template "gocode Region" $region }}, - {{ end -}} -} -{{- end }} - -{{ define "gocode Region" -}} -region{ - {{ StringIfSet "Description: %q,\n" .Description -}} -} -{{- end }} - -{{ define "gocode Services" -}} -services{ - {{ range $id, $service := . 
-}} - "{{ $id }}": {{ template "gocode Service" $service }}, - {{ end }} -} -{{- end }} - -{{ define "gocode Service" -}} -service{ - {{ StringIfSet "PartitionEndpoint: %q,\n" .PartitionEndpoint -}} - {{ BoxedBoolIfSet "IsRegionalized: %s,\n" .IsRegionalized -}} - {{ if EndpointIsSet .Defaults -}} - Defaults: {{ template "gocode Endpoint" .Defaults -}}, - {{- end }} - {{ if .Endpoints -}} - Endpoints: {{ template "gocode Endpoints" .Endpoints }}, - {{- end }} -} -{{- end }} - -{{ define "gocode Endpoints" -}} -endpoints{ - {{ range $id, $endpoint := . -}} - "{{ $id }}": {{ template "gocode Endpoint" $endpoint }}, - {{ end }} -} -{{- end }} - -{{ define "gocode Endpoint" -}} -endpoint{ - {{ StringIfSet "Hostname: %q,\n" .Hostname -}} - {{ StringIfSet "SSLCommonName: %q,\n" .SSLCommonName -}} - {{ StringSliceIfSet "Protocols: []string{%s},\n" .Protocols -}} - {{ StringSliceIfSet "SignatureVersions: []string{%s},\n" .SignatureVersions -}} - {{ if or .CredentialScope.Region .CredentialScope.Service -}} - CredentialScope: credentialScope{ - {{ StringIfSet "Region: %q,\n" .CredentialScope.Region -}} - {{ StringIfSet "Service: %q,\n" .CredentialScope.Service -}} - }, - {{- end }} - {{ BoxedBoolIfSet "HasDualStack: %s,\n" .HasDualStack -}} - {{ StringIfSet "DualStackHostname: %q,\n" .DualStackHostname -}} - -} -{{- end }} -` diff --git a/vendor/github.com/aws/aws-sdk-go/aws/errors.go b/vendor/github.com/aws/aws-sdk-go/aws/errors.go deleted file mode 100644 index 5766361..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/errors.go +++ /dev/null @@ -1,17 +0,0 @@ -package aws - -import "github.com/aws/aws-sdk-go/aws/awserr" - -var ( - // ErrMissingRegion is an error that is returned if region configuration is - // not found. - // - // @readonly - ErrMissingRegion = awserr.New("MissingRegion", "could not find region configuration", nil) - - // ErrMissingEndpoint is an error that is returned if an endpoint cannot be - // resolved for a service. - // - // @readonly - ErrMissingEndpoint = awserr.New("MissingEndpoint", "'Endpoint' configuration is required for this service", nil) -) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go b/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go deleted file mode 100644 index 91a6f27..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go +++ /dev/null @@ -1,12 +0,0 @@ -package aws - -// JSONValue is a representation of a grab bag type that will be marshaled -// into a json string. This type can be used just like any other map. -// -// Example: -// -// values := aws.JSONValue{ -// "Foo": "Bar", -// } -// values["Baz"] = "Qux" -type JSONValue map[string]interface{} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/logger.go deleted file mode 100644 index 6ed15b2..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/logger.go +++ /dev/null @@ -1,118 +0,0 @@ -package aws - -import ( - "log" - "os" -) - -// A LogLevelType defines the level logging should be performed at. Used to instruct -// the SDK which statements should be logged. -type LogLevelType uint - -// LogLevel returns the pointer to a LogLevel. Should be used to workaround -// not being able to take the address of a non-composite literal. -func LogLevel(l LogLevelType) *LogLevelType { - return &l -} - -// Value returns the LogLevel value or the default value LogOff if the LogLevel -// is nil. Safe to use on nil value LogLevelTypes. 
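These log levels are wired up through aws.Config; a minimal sketch of a session that logs request and response bodies (the helper name is hypothetical):

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
    )

    func newDebugSession() (*session.Session, error) {
        // LogDebugWithHTTPBody is a sub level that also enables LogDebug,
        // so headers and bodies of each request/response are logged.
        cfg := aws.NewConfig().WithLogLevel(aws.LogDebugWithHTTPBody)
        return session.NewSession(cfg)
    }
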
-func (l *LogLevelType) Value() LogLevelType { - if l != nil { - return *l - } - return LogOff -} - -// Matches returns true if the v LogLevel is enabled by this LogLevel. Should be -// used with logging sub levels. Is safe to use on nil value LogLevelTypes. If -// LogLevel is nil, will default to LogOff comparison. -func (l *LogLevelType) Matches(v LogLevelType) bool { - c := l.Value() - return c&v == v -} - -// AtLeast returns true if this LogLevel is at least high enough to satisfies v. -// Is safe to use on nil value LogLevelTypes. If LogLevel is nil, will default -// to LogOff comparison. -func (l *LogLevelType) AtLeast(v LogLevelType) bool { - c := l.Value() - return c >= v -} - -const ( - // LogOff states that no logging should be performed by the SDK. This is the - // default state of the SDK, and should be use to disable all logging. - LogOff LogLevelType = iota * 0x1000 - - // LogDebug state that debug output should be logged by the SDK. This should - // be used to inspect request made and responses received. - LogDebug -) - -// Debug Logging Sub Levels -const ( - // LogDebugWithSigning states that the SDK should log request signing and - // presigning events. This should be used to log the signing details of - // requests for debugging. Will also enable LogDebug. - LogDebugWithSigning LogLevelType = LogDebug | (1 << iota) - - // LogDebugWithHTTPBody states the SDK should log HTTP request and response - // HTTP bodys in addition to the headers and path. This should be used to - // see the body content of requests and responses made while using the SDK - // Will also enable LogDebug. - LogDebugWithHTTPBody - - // LogDebugWithRequestRetries states the SDK should log when service requests will - // be retried. This should be used to log when you want to log when service - // requests are being retried. Will also enable LogDebug. - LogDebugWithRequestRetries - - // LogDebugWithRequestErrors states the SDK should log when service requests fail - // to build, send, validate, or unmarshal. - LogDebugWithRequestErrors - - // LogDebugWithEventStreamBody states the SDK should log EventStream - // request and response bodys. This should be used to log the EventStream - // wire unmarshaled message content of requests and responses made while - // using the SDK Will also enable LogDebug. - LogDebugWithEventStreamBody -) - -// A Logger is a minimalistic interface for the SDK to log messages to. Should -// be used to provide custom logging writers for the SDK to use. -type Logger interface { - Log(...interface{}) -} - -// A LoggerFunc is a convenience type to convert a function taking a variadic -// list of arguments and wrap it so the Logger interface can be used. -// -// Example: -// s3.New(sess, &aws.Config{Logger: aws.LoggerFunc(func(args ...interface{}) { -// fmt.Fprintln(os.Stdout, args...) -// })}) -type LoggerFunc func(...interface{}) - -// Log calls the wrapped function with the arguments provided -func (f LoggerFunc) Log(args ...interface{}) { - f(args...) -} - -// NewDefaultLogger returns a Logger which will write log messages to stdout, and -// use same formatting runes as the stdlib log.Logger -func NewDefaultLogger() Logger { - return &defaultLogger{ - logger: log.New(os.Stdout, "", log.LstdFlags), - } -} - -// A defaultLogger provides a minimalistic logger satisfying the Logger interface. -type defaultLogger struct { - logger *log.Logger -} - -// Log logs the parameters to the stdlib logger. See log.Println. 
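LoggerFunc makes it cheap to redirect where SDK logging lands; a sketch that sends output to stderr via the standard library (the variable names are hypothetical):

    import (
        "log"
        "os"

        "github.com/aws/aws-sdk-go/aws"
    )

    var stderr = log.New(os.Stderr, "aws-sdk ", log.LstdFlags)

    // Adapt a plain function to the SDK's Logger interface; output goes to
    // stderr with timestamps instead of the stdout default.
    var sdkLogger aws.Logger = aws.LoggerFunc(func(args ...interface{}) {
        stderr.Println(args...)
    })

    // wired up with: aws.NewConfig().WithLogger(sdkLogger)
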
-func (l defaultLogger) Log(args ...interface{}) { - l.logger.Println(args...) -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go deleted file mode 100644 index 271da43..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go +++ /dev/null @@ -1,19 +0,0 @@ -// +build !appengine,!plan9 - -package request - -import ( - "net" - "os" - "syscall" -) - -func isErrConnectionReset(err error) bool { - if opErr, ok := err.(*net.OpError); ok { - if sysErr, ok := opErr.Err.(*os.SyscallError); ok { - return sysErr.Err == syscall.ECONNRESET - } - } - - return false -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error_other.go b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error_other.go deleted file mode 100644 index daf9eca..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error_other.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build appengine plan9 - -package request - -import ( - "strings" -) - -func isErrConnectionReset(err error) bool { - return strings.Contains(err.Error(), "connection reset") -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go deleted file mode 100644 index 605a72d..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go +++ /dev/null @@ -1,274 +0,0 @@ -package request - -import ( - "fmt" - "strings" -) - -// A Handlers provides a collection of request handlers for various -// stages of handling requests. -type Handlers struct { - Validate HandlerList - Build HandlerList - Sign HandlerList - Send HandlerList - ValidateResponse HandlerList - Unmarshal HandlerList - UnmarshalStream HandlerList - UnmarshalMeta HandlerList - UnmarshalError HandlerList - Retry HandlerList - AfterRetry HandlerList - Complete HandlerList -} - -// Copy returns of this handler's lists. -func (h *Handlers) Copy() Handlers { - return Handlers{ - Validate: h.Validate.copy(), - Build: h.Build.copy(), - Sign: h.Sign.copy(), - Send: h.Send.copy(), - ValidateResponse: h.ValidateResponse.copy(), - Unmarshal: h.Unmarshal.copy(), - UnmarshalStream: h.UnmarshalStream.copy(), - UnmarshalError: h.UnmarshalError.copy(), - UnmarshalMeta: h.UnmarshalMeta.copy(), - Retry: h.Retry.copy(), - AfterRetry: h.AfterRetry.copy(), - Complete: h.Complete.copy(), - } -} - -// Clear removes callback functions for all handlers -func (h *Handlers) Clear() { - h.Validate.Clear() - h.Build.Clear() - h.Send.Clear() - h.Sign.Clear() - h.Unmarshal.Clear() - h.UnmarshalStream.Clear() - h.UnmarshalMeta.Clear() - h.UnmarshalError.Clear() - h.ValidateResponse.Clear() - h.Retry.Clear() - h.AfterRetry.Clear() - h.Complete.Clear() -} - -// A HandlerListRunItem represents an entry in the HandlerList which -// is being run. -type HandlerListRunItem struct { - Index int - Handler NamedHandler - Request *Request -} - -// A HandlerList manages zero or more handlers in a list. -type HandlerList struct { - list []NamedHandler - - // Called after each request handler in the list is called. If set - // and the func returns true the HandlerList will continue to iterate - // over the request handlers. If false is returned the HandlerList - // will stop iterating. - // - // Should be used if extra logic to be performed between each handler - // in the list. 
This can be used to terminate a list's iteration - // based on a condition such as error like, HandlerListStopOnError. - // Or for logging like HandlerListLogItem. - AfterEachFn func(item HandlerListRunItem) bool -} - -// A NamedHandler is a struct that contains a name and function callback. -type NamedHandler struct { - Name string - Fn func(*Request) -} - -// copy creates a copy of the handler list. -func (l *HandlerList) copy() HandlerList { - n := HandlerList{ - AfterEachFn: l.AfterEachFn, - } - if len(l.list) == 0 { - return n - } - - n.list = append(make([]NamedHandler, 0, len(l.list)), l.list...) - return n -} - -// Clear clears the handler list. -func (l *HandlerList) Clear() { - l.list = l.list[0:0] -} - -// Len returns the number of handlers in the list. -func (l *HandlerList) Len() int { - return len(l.list) -} - -// PushBack pushes handler f to the back of the handler list. -func (l *HandlerList) PushBack(f func(*Request)) { - l.PushBackNamed(NamedHandler{"__anonymous", f}) -} - -// PushBackNamed pushes named handler f to the back of the handler list. -func (l *HandlerList) PushBackNamed(n NamedHandler) { - if cap(l.list) == 0 { - l.list = make([]NamedHandler, 0, 5) - } - l.list = append(l.list, n) -} - -// PushFront pushes handler f to the front of the handler list. -func (l *HandlerList) PushFront(f func(*Request)) { - l.PushFrontNamed(NamedHandler{"__anonymous", f}) -} - -// PushFrontNamed pushes named handler f to the front of the handler list. -func (l *HandlerList) PushFrontNamed(n NamedHandler) { - if cap(l.list) == len(l.list) { - // Allocating new list required - l.list = append([]NamedHandler{n}, l.list...) - } else { - // Enough room to prepend into list. - l.list = append(l.list, NamedHandler{}) - copy(l.list[1:], l.list) - l.list[0] = n - } -} - -// Remove removes a NamedHandler n -func (l *HandlerList) Remove(n NamedHandler) { - l.RemoveByName(n.Name) -} - -// RemoveByName removes a NamedHandler by name. -func (l *HandlerList) RemoveByName(name string) { - for i := 0; i < len(l.list); i++ { - m := l.list[i] - if m.Name == name { - // Shift array preventing creating new arrays - copy(l.list[i:], l.list[i+1:]) - l.list[len(l.list)-1] = NamedHandler{} - l.list = l.list[:len(l.list)-1] - - // decrement list so next check to length is correct - i-- - } - } -} - -// SwapNamed will swap out any existing handlers with the same name as the -// passed in NamedHandler returning true if handlers were swapped. False is -// returned otherwise. -func (l *HandlerList) SwapNamed(n NamedHandler) (swapped bool) { - for i := 0; i < len(l.list); i++ { - if l.list[i].Name == n.Name { - l.list[i].Fn = n.Fn - swapped = true - } - } - - return swapped -} - -// Swap will swap out all handlers matching the name passed in. The matched -// handlers will be swapped in. True is returned if the handlers were swapped. -func (l *HandlerList) Swap(name string, replace NamedHandler) bool { - var swapped bool - - for i := 0; i < len(l.list); i++ { - if l.list[i].Name == name { - l.list[i] = replace - swapped = true - } - } - - return swapped -} - -// SetBackNamed will replace the named handler if it exists in the handler list. -// If the handler does not exist the handler will be added to the end of the list. -func (l *HandlerList) SetBackNamed(n NamedHandler) { - if !l.SwapNamed(n) { - l.PushBackNamed(n) - } -} - -// SetFrontNamed will replace the named handler if it exists in the handler list. -// If the handler does not exist the handler will be added to the beginning of -// the list. 
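The push/swap/remove operations above are typically exercised through a session's Handlers; a sketch that traces every outgoing call (the handler name and function are hypothetical):

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/request"
        "github.com/aws/aws-sdk-go/aws/session"
    )

    func attachTrace(sess *session.Session) {
        // A named handler can later be targeted by RemoveByName or Swap.
        sess.Handlers.Send.PushFrontNamed(request.NamedHandler{
            Name: "example.trace",
            Fn: func(r *request.Request) {
                fmt.Println("sending", r.ClientInfo.ServiceName, r.Operation.Name)
            },
        })
    }
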
-func (l *HandlerList) SetFrontNamed(n NamedHandler) { - if !l.SwapNamed(n) { - l.PushFrontNamed(n) - } -} - -// Run executes all handlers in the list with a given request object. -func (l *HandlerList) Run(r *Request) { - for i, h := range l.list { - h.Fn(r) - item := HandlerListRunItem{ - Index: i, Handler: h, Request: r, - } - if l.AfterEachFn != nil && !l.AfterEachFn(item) { - return - } - } -} - -// HandlerListLogItem logs the request handler and the state of the -// request's Error value. Always returns true to continue iterating -// request handlers in a HandlerList. -func HandlerListLogItem(item HandlerListRunItem) bool { - if item.Request.Config.Logger == nil { - return true - } - item.Request.Config.Logger.Log("DEBUG: RequestHandler", - item.Index, item.Handler.Name, item.Request.Error) - - return true -} - -// HandlerListStopOnError returns false to stop the HandlerList iterating -// over request handlers if Request.Error is not nil. True otherwise -// to continue iterating. -func HandlerListStopOnError(item HandlerListRunItem) bool { - return item.Request.Error == nil -} - -// WithAppendUserAgent will add a string to the user agent prefixed with a -// single white space. -func WithAppendUserAgent(s string) Option { - return func(r *Request) { - r.Handlers.Build.PushBack(func(r2 *Request) { - AddToUserAgent(r, s) - }) - } -} - -// MakeAddToUserAgentHandler will add the name/version pair to the User-Agent request -// header. If the extra parameters are provided they will be added as metadata to the -// name/version pair resulting in the following format. -// "name/version (extra0; extra1; ...)" -// The user agent part will be concatenated with this current request's user agent string. -func MakeAddToUserAgentHandler(name, version string, extra ...string) func(*Request) { - ua := fmt.Sprintf("%s/%s", name, version) - if len(extra) > 0 { - ua += fmt.Sprintf(" (%s)", strings.Join(extra, "; ")) - } - return func(r *Request) { - AddToUserAgent(r, ua) - } -} - -// MakeAddToUserAgentFreeFormHandler adds the input to the User-Agent request header. -// The input string will be concatenated with the current request's user agent string. 
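MakeAddToUserAgentHandler pairs naturally with the Build handler list; a sketch in the same style as the previous one, with a hypothetical name, version, and extra:

    import (
        "github.com/aws/aws-sdk-go/aws/request"
        "github.com/aws/aws-sdk-go/aws/session"
    )

    func tagUserAgent(sess *session.Session) {
        // Appends "my-app/1.2.3 (linux)" to each request's User-Agent.
        sess.Handlers.Build.PushBackNamed(request.NamedHandler{
            Name: "example.user-agent",
            Fn:   request.MakeAddToUserAgentHandler("my-app", "1.2.3", "linux"),
        })
    }
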
-func MakeAddToUserAgentFreeFormHandler(s string) func(*Request) { - return func(r *Request) { - AddToUserAgent(r, s) - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go deleted file mode 100644 index 79f7960..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go +++ /dev/null @@ -1,24 +0,0 @@ -package request - -import ( - "io" - "net/http" - "net/url" -) - -func copyHTTPRequest(r *http.Request, body io.ReadCloser) *http.Request { - req := new(http.Request) - *req = *r - req.URL = &url.URL{} - *req.URL = *r.URL - req.Body = body - - req.Header = http.Header{} - for k, v := range r.Header { - for _, vv := range v { - req.Header.Add(k, vv) - } - } - - return req -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go deleted file mode 100644 index b0c2ef4..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go +++ /dev/null @@ -1,60 +0,0 @@ -package request - -import ( - "io" - "sync" - - "github.com/aws/aws-sdk-go/internal/sdkio" -) - -// offsetReader is a thread-safe io.ReadCloser to prevent racing -// with retrying requests -type offsetReader struct { - buf io.ReadSeeker - lock sync.Mutex - closed bool -} - -func newOffsetReader(buf io.ReadSeeker, offset int64) *offsetReader { - reader := &offsetReader{} - buf.Seek(offset, sdkio.SeekStart) - - reader.buf = buf - return reader -} - -// Close will close the instance of the offset reader's access to -// the underlying io.ReadSeeker. -func (o *offsetReader) Close() error { - o.lock.Lock() - defer o.lock.Unlock() - o.closed = true - return nil -} - -// Read is a thread-safe read of the underlying io.ReadSeeker -func (o *offsetReader) Read(p []byte) (int, error) { - o.lock.Lock() - defer o.lock.Unlock() - - if o.closed { - return 0, io.EOF - } - - return o.buf.Read(p) -} - -// Seek is a thread-safe seeking operation. -func (o *offsetReader) Seek(offset int64, whence int) (int64, error) { - o.lock.Lock() - defer o.lock.Unlock() - - return o.buf.Seek(offset, whence) -} - -// CloseAndCopy will return a new offsetReader with a copy of the old buffer -// and close the old buffer. -func (o *offsetReader) CloseAndCopy(offset int64) *offsetReader { - o.Close() - return newOffsetReader(o.buf, offset) -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go deleted file mode 100644 index 75f0fe0..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go +++ /dev/null @@ -1,657 +0,0 @@ -package request - -import ( - "bytes" - "fmt" - "io" - "net" - "net/http" - "net/url" - "reflect" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/client/metadata" - "github.com/aws/aws-sdk-go/internal/sdkio" -) - -const ( - // ErrCodeSerialization is the serialization error code that is received - // during protocol unmarshaling. - ErrCodeSerialization = "SerializationError" - - // ErrCodeRead is an error that is returned during HTTP reads. - ErrCodeRead = "ReadError" - - // ErrCodeResponseTimeout is the connection timeout error that is received - // during body reads. 
- ErrCodeResponseTimeout = "ResponseTimeout" - - // ErrCodeInvalidPresignExpire is returned when the expire time provided to - // presign is invalid - ErrCodeInvalidPresignExpire = "InvalidPresignExpireError" - - // CanceledErrorCode is the error code that will be returned by an - // API request that was canceled. Requests given a aws.Context may - // return this error when canceled. - CanceledErrorCode = "RequestCanceled" -) - -// A Request is the service request to be made. -type Request struct { - Config aws.Config - ClientInfo metadata.ClientInfo - Handlers Handlers - - Retryer - AttemptTime time.Time - Time time.Time - Operation *Operation - HTTPRequest *http.Request - HTTPResponse *http.Response - Body io.ReadSeeker - BodyStart int64 // offset from beginning of Body that the request body starts - Params interface{} - Error error - Data interface{} - RequestID string - RetryCount int - Retryable *bool - RetryDelay time.Duration - NotHoist bool - SignedHeaderVals http.Header - LastSignedAt time.Time - DisableFollowRedirects bool - - // A value greater than 0 instructs the request to be signed as Presigned URL - // You should not set this field directly. Instead use Request's - // Presign or PresignRequest methods. - ExpireTime time.Duration - - context aws.Context - - built bool - - // Need to persist an intermediate body between the input Body and HTTP - // request body because the HTTP Client's transport can maintain a reference - // to the HTTP request's body after the client has returned. This value is - // safe to use concurrently and wrap the input Body for each HTTP request. - safeBody *offsetReader -} - -// An Operation is the service API operation to be made. -type Operation struct { - Name string - HTTPMethod string - HTTPPath string - *Paginator - - BeforePresignFn func(r *Request) error -} - -// New returns a new Request pointer for the service API -// operation and parameters. -// -// Params is any value of input parameters to be the request payload. -// Data is pointer value to an object which the request's response -// payload will be deserialized to. -func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers, - retryer Retryer, operation *Operation, params interface{}, data interface{}) *Request { - - method := operation.HTTPMethod - if method == "" { - method = "POST" - } - - httpReq, _ := http.NewRequest(method, "", nil) - - var err error - httpReq.URL, err = url.Parse(clientInfo.Endpoint + operation.HTTPPath) - if err != nil { - httpReq.URL = &url.URL{} - err = awserr.New("InvalidEndpointURL", "invalid endpoint uri", err) - } - - SanitizeHostForHeader(httpReq) - - r := &Request{ - Config: cfg, - ClientInfo: clientInfo, - Handlers: handlers.Copy(), - - Retryer: retryer, - AttemptTime: time.Now(), - Time: time.Now(), - ExpireTime: 0, - Operation: operation, - HTTPRequest: httpReq, - Body: nil, - Params: params, - Error: err, - Data: data, - } - r.SetBufferBody([]byte{}) - - return r -} - -// A Option is a functional option that can augment or modify a request when -// using a WithContext API operation method. -type Option func(*Request) - -// WithGetResponseHeader builds a request Option which will retrieve a single -// header value from the HTTP Response. If there are multiple values for the -// header key use WithGetResponseHeaders instead to access the http.Header -// map directly. The passed in val pointer must be non-nil. -// -// This Option can be used multiple times with a single API operation. 
-// -// var id2, versionID string -// svc.PutObjectWithContext(ctx, params, -// request.WithGetResponseHeader("x-amz-id-2", &id2), -// request.WithGetResponseHeader("x-amz-version-id", &versionID), -// ) -func WithGetResponseHeader(key string, val *string) Option { - return func(r *Request) { - r.Handlers.Complete.PushBack(func(req *Request) { - *val = req.HTTPResponse.Header.Get(key) - }) - } -} - -// WithGetResponseHeaders builds a request Option which will retrieve the -// headers from the HTTP response and assign them to the passed in headers -// variable. The passed in headers pointer must be non-nil. -// -// var headers http.Header -// svc.PutObjectWithContext(ctx, params, request.WithGetResponseHeaders(&headers)) -func WithGetResponseHeaders(headers *http.Header) Option { - return func(r *Request) { - r.Handlers.Complete.PushBack(func(req *Request) { - *headers = req.HTTPResponse.Header - }) - } -} - -// WithLogLevel is a request option that will set the request to use a specific -// log level when the request is made. -// -// svc.PutObjectWithContext(ctx, params, request.WithLogLevel(aws.LogDebugWithHTTPBody) -func WithLogLevel(l aws.LogLevelType) Option { - return func(r *Request) { - r.Config.LogLevel = aws.LogLevel(l) - } -} - -// ApplyOptions will apply each option to the request calling them in the order -// the were provided. -func (r *Request) ApplyOptions(opts ...Option) { - for _, opt := range opts { - opt(r) - } -} - -// Context will always returns a non-nil context. If Request does not have a -// context aws.BackgroundContext will be returned. -func (r *Request) Context() aws.Context { - if r.context != nil { - return r.context - } - return aws.BackgroundContext() -} - -// SetContext adds a Context to the current request that can be used to cancel -// a in-flight request. The Context value must not be nil, or this method will -// panic. -// -// Unlike http.Request.WithContext, SetContext does not return a copy of the -// Request. It is not safe to use use a single Request value for multiple -// requests. A new Request should be created for each API operation request. -// -// Go 1.6 and below: -// The http.Request's Cancel field will be set to the Done() value of -// the context. This will overwrite the Cancel field's value. -// -// Go 1.7 and above: -// The http.Request.WithContext will be used to set the context on the underlying -// http.Request. This will create a shallow copy of the http.Request. The SDK -// may create sub contexts in the future for nested requests such as retries. -func (r *Request) SetContext(ctx aws.Context) { - if ctx == nil { - panic("context cannot be nil") - } - setRequestContext(r, ctx) -} - -// WillRetry returns if the request's can be retried. -func (r *Request) WillRetry() bool { - if !aws.IsReaderSeekable(r.Body) && r.HTTPRequest.Body != NoBody { - return false - } - return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries() -} - -// ParamsFilled returns if the request's parameters have been populated -// and the parameters are valid. False is returned if no parameters are -// provided or invalid. -func (r *Request) ParamsFilled() bool { - return r.Params != nil && reflect.ValueOf(r.Params).Elem().IsValid() -} - -// DataFilled returns true if the request's data for response deserialization -// target has been set and is a valid. False is returned if data is not -// set, or is invalid. 
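SetContext and Send combine into the usual timeout pattern; a sketch, assuming a freshly built Request (the helper name is hypothetical):

    import (
        "context"
        "time"

        "github.com/aws/aws-sdk-go/aws/request"
    )

    func sendWithTimeout(req *request.Request) error {
        ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
        defer cancel()

        // SetContext panics on nil and mutates the request in place, so a
        // fresh Request should be built for each API call.
        req.SetContext(ctx)
        return req.Send()
    }
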
-func (r *Request) DataFilled() bool { - return r.Data != nil && reflect.ValueOf(r.Data).Elem().IsValid() -} - -// SetBufferBody will set the request's body bytes that will be sent to -// the service API. -func (r *Request) SetBufferBody(buf []byte) { - r.SetReaderBody(bytes.NewReader(buf)) -} - -// SetStringBody sets the body of the request to be backed by a string. -func (r *Request) SetStringBody(s string) { - r.SetReaderBody(strings.NewReader(s)) -} - -// SetReaderBody will set the request's body reader. -func (r *Request) SetReaderBody(reader io.ReadSeeker) { - r.Body = reader - r.BodyStart, _ = reader.Seek(0, sdkio.SeekCurrent) // Get the Bodies current offset. - r.ResetBody() -} - -// Presign returns the request's signed URL. Error will be returned -// if the signing fails. -// -// It is invalid to create a presigned URL with a expire duration 0 or less. An -// error is returned if expire duration is 0 or less. -func (r *Request) Presign(expire time.Duration) (string, error) { - r = r.copy() - - // Presign requires all headers be hoisted. There is no way to retrieve - // the signed headers not hoisted without this. Making the presigned URL - // useless. - r.NotHoist = false - - u, _, err := getPresignedURL(r, expire) - return u, err -} - -// PresignRequest behaves just like presign, with the addition of returning a -// set of headers that were signed. -// -// It is invalid to create a presigned URL with a expire duration 0 or less. An -// error is returned if expire duration is 0 or less. -// -// Returns the URL string for the API operation with signature in the query string, -// and the HTTP headers that were included in the signature. These headers must -// be included in any HTTP request made with the presigned URL. -// -// To prevent hoisting any headers to the query string set NotHoist to true on -// this Request value prior to calling PresignRequest. -func (r *Request) PresignRequest(expire time.Duration) (string, http.Header, error) { - r = r.copy() - return getPresignedURL(r, expire) -} - -// IsPresigned returns true if the request represents a presigned API url. -func (r *Request) IsPresigned() bool { - return r.ExpireTime != 0 -} - -func getPresignedURL(r *Request, expire time.Duration) (string, http.Header, error) { - if expire <= 0 { - return "", nil, awserr.New( - ErrCodeInvalidPresignExpire, - "presigned URL requires an expire duration greater than 0", - nil, - ) - } - - r.ExpireTime = expire - - if r.Operation.BeforePresignFn != nil { - if err := r.Operation.BeforePresignFn(r); err != nil { - return "", nil, err - } - } - - if err := r.Sign(); err != nil { - return "", nil, err - } - - return r.HTTPRequest.URL.String(), r.SignedHeaderVals, nil -} - -func debugLogReqError(r *Request, stage string, retrying bool, err error) { - if !r.Config.LogLevel.Matches(aws.LogDebugWithRequestErrors) { - return - } - - retryStr := "not retrying" - if retrying { - retryStr = "will retry" - } - - r.Config.Logger.Log(fmt.Sprintf("DEBUG: %s %s/%s failed, %s, error %v", - stage, r.ClientInfo.ServiceName, r.Operation.Name, retryStr, err)) -} - -// Build will build the request's object so it can be signed and sent -// to the service. Build will also validate all the request's parameters. -// Any additional build Handlers set on this request will be run -// in the order they were set. -// -// The request will only be built once. Multiple calls to build will have -// no effect. 
-// -// If any Validate or Build errors occur the build will stop and the error -// which occurred will be returned. -func (r *Request) Build() error { - if !r.built { - r.Handlers.Validate.Run(r) - if r.Error != nil { - debugLogReqError(r, "Validate Request", false, r.Error) - return r.Error - } - r.Handlers.Build.Run(r) - if r.Error != nil { - debugLogReqError(r, "Build Request", false, r.Error) - return r.Error - } - r.built = true - } - - return r.Error -} - -// Sign will sign the request, returning error if errors are encountered. -// -// Sign will build the request prior to signing. All Sign Handlers will -// be executed in the order they were set. -func (r *Request) Sign() error { - r.Build() - if r.Error != nil { - debugLogReqError(r, "Build Request", false, r.Error) - return r.Error - } - - r.Handlers.Sign.Run(r) - return r.Error -} - -func (r *Request) getNextRequestBody() (io.ReadCloser, error) { - if r.safeBody != nil { - r.safeBody.Close() - } - - r.safeBody = newOffsetReader(r.Body, r.BodyStart) - - // Go 1.8 tightened and clarified the rules code needs to use when building - // requests with the http package. Go 1.8 removed the automatic detection - // of if the Request.Body was empty, or actually had bytes in it. The SDK - // always sets the Request.Body even if it is empty and should not actually - // be sent. This is incorrect. - // - // Go 1.8 did add a http.NoBody value that the SDK can use to tell the http - // client that the request really should be sent without a body. The - // Request.Body cannot be set to nil, which is preferable, because the - // field is exported and could introduce nil pointer dereferences for users - // of the SDK if they used that field. - // - // Related golang/go#18257 - l, err := aws.SeekerLen(r.Body) - if err != nil { - return nil, awserr.New(ErrCodeSerialization, "failed to compute request body size", err) - } - - var body io.ReadCloser - if l == 0 { - body = NoBody - } else if l > 0 { - body = r.safeBody - } else { - // Hack to prevent sending bodies for methods where the body - // should be ignored by the server. Sending bodies on these - // methods without an associated ContentLength will cause the - // request to socket timeout because the server does not handle - // Transfer-Encoding: chunked bodies for these methods. - // - // This would only happen if a aws.ReaderSeekerCloser was used with - // a io.Reader that was not also an io.Seeker, or did not implement - // Len() method. - switch r.Operation.HTTPMethod { - case "GET", "HEAD", "DELETE": - body = NoBody - default: - body = r.safeBody - } - } - - return body, nil -} - -// GetBody will return an io.ReadSeeker of the Request's underlying -// input body with a concurrency safe wrapper. -func (r *Request) GetBody() io.ReadSeeker { - return r.safeBody -} - -// Send will send the request, returning error if errors are encountered. -// -// Send will sign the request prior to sending. All Send Handlers will -// be executed in the order they were set. -// -// Canceling a request is non-deterministic. If a request has been canceled, -// then the transport will choose, randomly, one of the state channels during -// reads or getting the connection. -// -// readLoop() and getConn(req *Request, cm connectMethod) -// https://github.com/golang/go/blob/master/src/net/http/transport.go -// -// Send will not close the request.Request's body. -func (r *Request) Send() error { - defer func() { - // Regardless of success or failure of the request trigger the Complete - // request handlers. 
- r.Handlers.Complete.Run(r) - }() - - for { - r.AttemptTime = time.Now() - if aws.BoolValue(r.Retryable) { - if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) { - r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d", - r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount)) - } - - // The previous http.Request will have a reference to the r.Body - // and the HTTP Client's Transport may still be reading from - // the request's body even though the Client's Do returned. - r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, nil) - r.ResetBody() - - // Closing response body to ensure that no response body is leaked - // between retry attempts. - if r.HTTPResponse != nil && r.HTTPResponse.Body != nil { - r.HTTPResponse.Body.Close() - } - } - - r.Sign() - if r.Error != nil { - return r.Error - } - - r.Retryable = nil - - r.Handlers.Send.Run(r) - if r.Error != nil { - if !shouldRetryCancel(r) { - return r.Error - } - - err := r.Error - r.Handlers.Retry.Run(r) - r.Handlers.AfterRetry.Run(r) - if r.Error != nil { - debugLogReqError(r, "Send Request", false, err) - return r.Error - } - debugLogReqError(r, "Send Request", true, err) - continue - } - r.Handlers.UnmarshalMeta.Run(r) - r.Handlers.ValidateResponse.Run(r) - if r.Error != nil { - r.Handlers.UnmarshalError.Run(r) - err := r.Error - - r.Handlers.Retry.Run(r) - r.Handlers.AfterRetry.Run(r) - if r.Error != nil { - debugLogReqError(r, "Validate Response", false, err) - return r.Error - } - debugLogReqError(r, "Validate Response", true, err) - continue - } - - r.Handlers.Unmarshal.Run(r) - if r.Error != nil { - err := r.Error - r.Handlers.Retry.Run(r) - r.Handlers.AfterRetry.Run(r) - if r.Error != nil { - debugLogReqError(r, "Unmarshal Response", false, err) - return r.Error - } - debugLogReqError(r, "Unmarshal Response", true, err) - continue - } - - break - } - - return nil -} - -// copy will copy a request which will allow for local manipulation of the -// request. -func (r *Request) copy() *Request { - req := &Request{} - *req = *r - req.Handlers = r.Handlers.Copy() - op := *r.Operation - req.Operation = &op - return req -} - -// AddToUserAgent adds the string to the end of the request's current user agent. -func AddToUserAgent(r *Request, s string) { - curUA := r.HTTPRequest.Header.Get("User-Agent") - if len(curUA) > 0 { - s = curUA + " " + s - } - r.HTTPRequest.Header.Set("User-Agent", s) -} - -func shouldRetryCancel(r *Request) bool { - awsErr, ok := r.Error.(awserr.Error) - timeoutErr := false - errStr := r.Error.Error() - if ok { - if awsErr.Code() == CanceledErrorCode { - return false - } - err := awsErr.OrigErr() - netErr, netOK := err.(net.Error) - timeoutErr = netOK && netErr.Temporary() - if urlErr, ok := err.(*url.Error); !timeoutErr && ok { - errStr = urlErr.Err.Error() - } - } - - // There can be two types of canceled errors here. - // The first being a net.Error and the other being an error. - // If the request was timed out, we want to continue the retry - // process. Otherwise, return the canceled error. 
- return timeoutErr || - (errStr != "net/http: request canceled" && - errStr != "net/http: request canceled while waiting for connection") - -} - -// SanitizeHostForHeader removes default port from host and updates request.Host -func SanitizeHostForHeader(r *http.Request) { - host := getHost(r) - port := portOnly(host) - if port != "" && isDefaultPort(r.URL.Scheme, port) { - r.Host = stripPort(host) - } -} - -// Returns host from request -func getHost(r *http.Request) string { - if r.Host != "" { - return r.Host - } - - return r.URL.Host -} - -// Hostname returns u.Host, without any port number. -// -// If Host is an IPv6 literal with a port number, Hostname returns the -// IPv6 literal without the square brackets. IPv6 literals may include -// a zone identifier. -// -// Copied from the Go 1.8 standard library (net/url) -func stripPort(hostport string) string { - colon := strings.IndexByte(hostport, ':') - if colon == -1 { - return hostport - } - if i := strings.IndexByte(hostport, ']'); i != -1 { - return strings.TrimPrefix(hostport[:i], "[") - } - return hostport[:colon] -} - -// Port returns the port part of u.Host, without the leading colon. -// If u.Host doesn't contain a port, Port returns an empty string. -// -// Copied from the Go 1.8 standard library (net/url) -func portOnly(hostport string) string { - colon := strings.IndexByte(hostport, ':') - if colon == -1 { - return "" - } - if i := strings.Index(hostport, "]:"); i != -1 { - return hostport[i+len("]:"):] - } - if strings.Contains(hostport, "]") { - return "" - } - return hostport[colon+len(":"):] -} - -// Returns true if the specified URI is using the standard port -// (i.e. port 80 for HTTP URIs or 443 for HTTPS URIs) -func isDefaultPort(scheme, port string) bool { - if port == "" { - return true - } - - lowerCaseScheme := strings.ToLower(scheme) - if (lowerCaseScheme == "http" && port == "80") || (lowerCaseScheme == "https" && port == "443") { - return true - } - - return false -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go deleted file mode 100644 index e36e468..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go +++ /dev/null @@ -1,39 +0,0 @@ -// +build !go1.8 - -package request - -import "io" - -// NoBody is an io.ReadCloser with no bytes. Read always returns EOF -// and Close always returns nil. It can be used in an outgoing client -// request to explicitly signal that a request has zero bytes. -// An alternative, however, is to simply set Request.Body to nil. -// -// Copy of Go 1.8 NoBody type from net/http/http.go -type noBody struct{} - -func (noBody) Read([]byte) (int, error) { return 0, io.EOF } -func (noBody) Close() error { return nil } -func (noBody) WriteTo(io.Writer) (int64, error) { return 0, nil } - -// NoBody is an empty reader that will trigger the Go HTTP client to not include -// and body in the HTTP request. -var NoBody = noBody{} - -// ResetBody rewinds the request body back to its starting position, and -// sets the HTTP Request body reference. When the body is read prior -// to being sent in the HTTP request it will need to be rewound. -// -// ResetBody will automatically be called by the SDK's build handler, but if -// the request is being used directly ResetBody must be called before the request -// is Sent. SetStringBody, SetBufferBody, and SetReaderBody will automatically -// call ResetBody. 
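The default-port logic here is observable through the exported SanitizeHostForHeader; a runnable sketch (host and paths are illustrative):

    import (
        "fmt"
        "net/http"

        "github.com/aws/aws-sdk-go/aws/request"
    )

    func main() {
        // Default port: the Host header has the port stripped.
        a, _ := http.NewRequest("GET", "https://example.com:443/key", nil)
        request.SanitizeHostForHeader(a)
        fmt.Println(a.Host) // "example.com"

        // Non-default port: Request.Host is left unset, so the transport
        // falls back to URL.Host, keeping ":8443".
        b, _ := http.NewRequest("GET", "https://example.com:8443/key", nil)
        request.SanitizeHostForHeader(b)
        fmt.Println(b.Host == "") // true
    }
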
-func (r *Request) ResetBody() { - body, err := r.getNextRequestBody() - if err != nil { - r.Error = err - return - } - - r.HTTPRequest.Body = body -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go deleted file mode 100644 index 7c6a800..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go +++ /dev/null @@ -1,33 +0,0 @@ -// +build go1.8 - -package request - -import ( - "net/http" -) - -// NoBody is a http.NoBody reader instructing Go HTTP client to not include -// and body in the HTTP request. -var NoBody = http.NoBody - -// ResetBody rewinds the request body back to its starting position, and -// sets the HTTP Request body reference. When the body is read prior -// to being sent in the HTTP request it will need to be rewound. -// -// ResetBody will automatically be called by the SDK's build handler, but if -// the request is being used directly ResetBody must be called before the request -// is Sent. SetStringBody, SetBufferBody, and SetReaderBody will automatically -// call ResetBody. -// -// Will also set the Go 1.8's http.Request.GetBody member to allow retrying -// PUT/POST redirects. -func (r *Request) ResetBody() { - body, err := r.getNextRequestBody() - if err != nil { - r.Error = err - return - } - - r.HTTPRequest.Body = body - r.HTTPRequest.GetBody = r.getNextRequestBody -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go deleted file mode 100644 index a7365cd..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go +++ /dev/null @@ -1,14 +0,0 @@ -// +build go1.7 - -package request - -import "github.com/aws/aws-sdk-go/aws" - -// setContext updates the Request to use the passed in context for cancellation. -// Context will also be used for request retry delay. -// -// Creates shallow copy of the http.Request with the WithContext method. -func setRequestContext(r *Request, ctx aws.Context) { - r.context = ctx - r.HTTPRequest = r.HTTPRequest.WithContext(ctx) -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go deleted file mode 100644 index 307fa07..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go +++ /dev/null @@ -1,14 +0,0 @@ -// +build !go1.7 - -package request - -import "github.com/aws/aws-sdk-go/aws" - -// setContext updates the Request to use the passed in context for cancellation. -// Context will also be used for request retry delay. -// -// Creates shallow copy of the http.Request with the WithContext method. -func setRequestContext(r *Request, ctx aws.Context) { - r.context = ctx - r.HTTPRequest.Cancel = ctx.Done() -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go deleted file mode 100644 index a633ed5..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go +++ /dev/null @@ -1,264 +0,0 @@ -package request - -import ( - "reflect" - "sync/atomic" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awsutil" -) - -// A Pagination provides paginating of SDK API operations which are paginatable. -// Generally you should not use this type directly, but use the "Pages" API -// operations method to automatically perform pagination for you. 
Such as, -// "S3.ListObjectsPages", and "S3.ListObjectsPagesWithContext" methods. -// -// Pagination differs from a Paginator type in that pagination is the type that -// does the pagination between API operations, and Paginator defines the -// configuration that will be used per page request. -// -// cont := true -// for p.Next() && cont { -// data := p.Page().(*s3.ListObjectsOutput) -// // process the page's data -// } -// return p.Err() -// -// See service client API operation Pages methods for examples how the SDK will -// use the Pagination type. -type Pagination struct { - // Function to return a Request value for each pagination request. - // Any configuration or handlers that need to be applied to the request - // prior to getting the next page should be done here before the request - // returned. - // - // NewRequest should always be built from the same API operations. It is - // undefined if different API operations are returned on subsequent calls. - NewRequest func() (*Request, error) - // EndPageOnSameToken, when enabled, will allow the paginator to stop on - // token that are the same as its previous tokens. - EndPageOnSameToken bool - - started bool - prevTokens []interface{} - nextTokens []interface{} - - err error - curPage interface{} -} - -// HasNextPage will return true if Pagination is able to determine that the API -// operation has additional pages. False will be returned if there are no more -// pages remaining. -// -// Will always return true if Next has not been called yet. -func (p *Pagination) HasNextPage() bool { - if !p.started { - return true - } - - hasNextPage := len(p.nextTokens) != 0 - if p.EndPageOnSameToken { - return hasNextPage && !awsutil.DeepEqual(p.nextTokens, p.prevTokens) - } - return hasNextPage -} - -// Err returns the error Pagination encountered when retrieving the next page. -func (p *Pagination) Err() error { - return p.err -} - -// Page returns the current page. Page should only be called after a successful -// call to Next. It is undefined what Page will return if Page is called after -// Next returns false. -func (p *Pagination) Page() interface{} { - return p.curPage -} - -// Next will attempt to retrieve the next page for the API operation. When a page -// is retrieved true will be returned. If the page cannot be retrieved, or there -// are no more pages false will be returned. -// -// Use the Page method to retrieve the current page data. The data will need -// to be cast to the API operation's output type. -// -// Use the Err method to determine if an error occurred if Page returns false. -func (p *Pagination) Next() bool { - if !p.HasNextPage() { - return false - } - - req, err := p.NewRequest() - if err != nil { - p.err = err - return false - } - - if p.started { - for i, intok := range req.Operation.InputTokens { - awsutil.SetValueAtPath(req.Params, intok, p.nextTokens[i]) - } - } - p.started = true - - err = req.Send() - if err != nil { - p.err = err - return false - } - - p.prevTokens = p.nextTokens - p.nextTokens = req.nextPageTokens() - p.curPage = req.Data - - return true -} - -// A Paginator is the configuration data that defines how an API operation -// should be paginated. This type is used by the API service models to define -// the generated pagination config for service APIs. -// -// The Pagination type is what provides iterating between pages of an API. It -// is only used to store the token metadata the SDK should use for performing -// pagination. 
-type Paginator struct { - InputTokens []string - OutputTokens []string - LimitToken string - TruncationToken string -} - -// nextPageTokens returns the tokens to use when asking for the next page of data. -func (r *Request) nextPageTokens() []interface{} { - if r.Operation.Paginator == nil { - return nil - } - if r.Operation.TruncationToken != "" { - tr, _ := awsutil.ValuesAtPath(r.Data, r.Operation.TruncationToken) - if len(tr) == 0 { - return nil - } - - switch v := tr[0].(type) { - case *bool: - if !aws.BoolValue(v) { - return nil - } - case bool: - if v == false { - return nil - } - } - } - - tokens := []interface{}{} - tokenAdded := false - for _, outToken := range r.Operation.OutputTokens { - vs, _ := awsutil.ValuesAtPath(r.Data, outToken) - if len(vs) == 0 { - tokens = append(tokens, nil) - continue - } - v := vs[0] - - switch tv := v.(type) { - case *string: - if len(aws.StringValue(tv)) == 0 { - tokens = append(tokens, nil) - continue - } - case string: - if len(tv) == 0 { - tokens = append(tokens, nil) - continue - } - } - - tokenAdded = true - tokens = append(tokens, v) - } - if !tokenAdded { - return nil - } - - return tokens -} - -// Ensure a deprecated item is only logged once instead of each time its used. -func logDeprecatedf(logger aws.Logger, flag *int32, msg string) { - if logger == nil { - return - } - if atomic.CompareAndSwapInt32(flag, 0, 1) { - logger.Log(msg) - } -} - -var ( - logDeprecatedHasNextPage int32 - logDeprecatedNextPage int32 - logDeprecatedEachPage int32 -) - -// HasNextPage returns true if this request has more pages of data available. -// -// Deprecated Use Pagination type for configurable pagination of API operations -func (r *Request) HasNextPage() bool { - logDeprecatedf(r.Config.Logger, &logDeprecatedHasNextPage, - "Request.HasNextPage deprecated. Use Pagination type for configurable pagination of API operations") - - return len(r.nextPageTokens()) > 0 -} - -// NextPage returns a new Request that can be executed to return the next -// page of result data. Call .Send() on this request to execute it. -// -// Deprecated Use Pagination type for configurable pagination of API operations -func (r *Request) NextPage() *Request { - logDeprecatedf(r.Config.Logger, &logDeprecatedNextPage, - "Request.NextPage deprecated. Use Pagination type for configurable pagination of API operations") - - tokens := r.nextPageTokens() - if len(tokens) == 0 { - return nil - } - - data := reflect.New(reflect.TypeOf(r.Data).Elem()).Interface() - nr := New(r.Config, r.ClientInfo, r.Handlers, r.Retryer, r.Operation, awsutil.CopyOf(r.Params), data) - for i, intok := range nr.Operation.InputTokens { - awsutil.SetValueAtPath(nr.Params, intok, tokens[i]) - } - return nr -} - -// EachPage iterates over each page of a paginated request object. The fn -// parameter should be a function with the following sample signature: -// -// func(page *T, lastPage bool) bool { -// return true // return false to stop iterating -// } -// -// Where "T" is the structure type matching the output structure of the given -// operation. For example, a request object generated by -// DynamoDB.ListTablesRequest() would expect to see dynamodb.ListTablesOutput -// as the structure "T". The lastPage value represents whether the page is -// the last page of data or not. The return value of this function should -// return true to keep iterating or false to stop. 
-// -// Deprecated Use Pagination type for configurable pagination of API operations -func (r *Request) EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error { - logDeprecatedf(r.Config.Logger, &logDeprecatedEachPage, - "Request.EachPage deprecated. Use Pagination type for configurable pagination of API operations") - - for page := r; page != nil; page = page.NextPage() { - if err := page.Send(); err != nil { - return err - } - if getNextPage := fn(page.Data, !page.HasNextPage()); !getNextPage { - return page.Error - } - } - - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go deleted file mode 100644 index 7d52702..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go +++ /dev/null @@ -1,161 +0,0 @@ -package request - -import ( - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" -) - -// Retryer is an interface to control retry logic for a given service. -// The default implementation used by most services is the client.DefaultRetryer -// structure, which contains basic retry logic using exponential backoff. -type Retryer interface { - RetryRules(*Request) time.Duration - ShouldRetry(*Request) bool - MaxRetries() int -} - -// WithRetryer sets a config Retryer value to the given Config returning it -// for chaining. -func WithRetryer(cfg *aws.Config, retryer Retryer) *aws.Config { - cfg.Retryer = retryer - return cfg -} - -// retryableCodes is a collection of service response codes which are retry-able -// without any further action. -var retryableCodes = map[string]struct{}{ - "RequestError": {}, - "RequestTimeout": {}, - ErrCodeResponseTimeout: {}, - "RequestTimeoutException": {}, // Glacier's flavor of RequestTimeout -} - -var throttleCodes = map[string]struct{}{ - "ProvisionedThroughputExceededException": {}, - "Throttling": {}, - "ThrottlingException": {}, - "RequestLimitExceeded": {}, - "RequestThrottled": {}, - "TooManyRequestsException": {}, // Lambda functions - "PriorRequestNotComplete": {}, // Route53 -} - -// credsExpiredCodes is a collection of error codes which signify the credentials -// need to be refreshed. Expired tokens require refreshing of credentials, and -// resigning before the request can be retried. -var credsExpiredCodes = map[string]struct{}{ - "ExpiredToken": {}, - "ExpiredTokenException": {}, - "RequestExpired": {}, // EC2 Only -} - -func isCodeThrottle(code string) bool { - _, ok := throttleCodes[code] - return ok -} - -func isCodeRetryable(code string) bool { - if _, ok := retryableCodes[code]; ok { - return true - } - - return isCodeExpiredCreds(code) -} - -func isCodeExpiredCreds(code string) bool { - _, ok := credsExpiredCodes[code] - return ok -} - -var validParentCodes = map[string]struct{}{ - ErrCodeSerialization: {}, - ErrCodeRead: {}, -} - -type temporaryError interface { - Temporary() bool -} - -func isNestedErrorRetryable(parentErr awserr.Error) bool { - if parentErr == nil { - return false - } - - if _, ok := validParentCodes[parentErr.Code()]; !ok { - return false - } - - err := parentErr.OrigErr() - if err == nil { - return false - } - - if aerr, ok := err.(awserr.Error); ok { - return isCodeRetryable(aerr.Code()) - } - - if t, ok := err.(temporaryError); ok { - return t.Temporary() || isErrConnectionReset(err) - } - - return isErrConnectionReset(err) -} - -// IsErrorRetryable returns whether the error is retryable, based on its Code. -// Returns false if error is nil. 
-func IsErrorRetryable(err error) bool { - if err != nil { - if aerr, ok := err.(awserr.Error); ok { - return isCodeRetryable(aerr.Code()) || isNestedErrorRetryable(aerr) - } - } - return false -} - -// IsErrorThrottle returns whether the error is to be throttled based on its code. -// Returns false if error is nil. -func IsErrorThrottle(err error) bool { - if err != nil { - if aerr, ok := err.(awserr.Error); ok { - return isCodeThrottle(aerr.Code()) - } - } - return false -} - -// IsErrorExpiredCreds returns whether the error code is a credential expiry error. -// Returns false if error is nil. -func IsErrorExpiredCreds(err error) bool { - if err != nil { - if aerr, ok := err.(awserr.Error); ok { - return isCodeExpiredCreds(aerr.Code()) - } - } - return false -} - -// IsErrorRetryable returns whether the error is retryable, based on its Code. -// Returns false if the request has no Error set. -// -// Alias for the utility function IsErrorRetryable -func (r *Request) IsErrorRetryable() bool { - return IsErrorRetryable(r.Error) -} - -// IsErrorThrottle returns whether the error is to be throttled based on its code. -// Returns false if the request has no Error set -// -// Alias for the utility function IsErrorThrottle -func (r *Request) IsErrorThrottle() bool { - return IsErrorThrottle(r.Error) -} - -// IsErrorExpired returns whether the error code is a credential expiry error. -// Returns false if the request has no Error set. -// -// Alias for the utility function IsErrorExpiredCreds -func (r *Request) IsErrorExpired() bool { - return IsErrorExpiredCreds(r.Error) -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go deleted file mode 100644 index 09a44eb..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go +++ /dev/null @@ -1,94 +0,0 @@ -package request - -import ( - "io" - "time" - - "github.com/aws/aws-sdk-go/aws/awserr" -) - -var timeoutErr = awserr.New( - ErrCodeResponseTimeout, - "read on body has reached the timeout limit", - nil, -) - -type readResult struct { - n int - err error -} - -// timeoutReadCloser will handle body reads that take too long. -// We will return a ErrReadTimeout error if a timeout occurs. -type timeoutReadCloser struct { - reader io.ReadCloser - duration time.Duration -} - -// Read will spin off a goroutine to call the reader's Read method. We will -// select on the timer's channel or the read's channel. Whoever completes first -// will be returned. -func (r *timeoutReadCloser) Read(b []byte) (int, error) { - timer := time.NewTimer(r.duration) - c := make(chan readResult, 1) - - go func() { - n, err := r.reader.Read(b) - timer.Stop() - c <- readResult{n: n, err: err} - }() - - select { - case data := <-c: - return data.n, data.err - case <-timer.C: - return 0, timeoutErr - } -} - -func (r *timeoutReadCloser) Close() error { - return r.reader.Close() -} - -const ( - // HandlerResponseTimeout is what we use to signify the name of the - // response timeout handler. - HandlerResponseTimeout = "ResponseTimeoutHandler" -) - -// adaptToResponseTimeoutError is a handler that will replace any top level error -// to a ErrCodeResponseTimeout, if its child is that. 
-func adaptToResponseTimeoutError(req *Request) { - if err, ok := req.Error.(awserr.Error); ok { - aerr, ok := err.OrigErr().(awserr.Error) - if ok && aerr.Code() == ErrCodeResponseTimeout { - req.Error = aerr - } - } -} - -// WithResponseReadTimeout is a request option that will wrap the body in a timeout read closer. -// This will allow for per read timeouts. If a timeout occurred, we will return the -// ErrCodeResponseTimeout. -// -// svc.PutObjectWithContext(ctx, params, request.WithTimeoutReadCloser(30 * time.Second) -func WithResponseReadTimeout(duration time.Duration) Option { - return func(r *Request) { - - var timeoutHandler = NamedHandler{ - HandlerResponseTimeout, - func(req *Request) { - req.HTTPResponse.Body = &timeoutReadCloser{ - reader: req.HTTPResponse.Body, - duration: duration, - } - }} - - // remove the handler so we are not stomping over any new durations. - r.Handlers.Send.RemoveByName(HandlerResponseTimeout) - r.Handlers.Send.PushBackNamed(timeoutHandler) - - r.Handlers.Unmarshal.PushBack(adaptToResponseTimeoutError) - r.Handlers.UnmarshalError.PushBack(adaptToResponseTimeoutError) - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go b/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go deleted file mode 100644 index 4012462..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go +++ /dev/null @@ -1,234 +0,0 @@ -package request - -import ( - "bytes" - "fmt" - - "github.com/aws/aws-sdk-go/aws/awserr" -) - -const ( - // InvalidParameterErrCode is the error code for invalid parameters errors - InvalidParameterErrCode = "InvalidParameter" - // ParamRequiredErrCode is the error code for required parameter errors - ParamRequiredErrCode = "ParamRequiredError" - // ParamMinValueErrCode is the error code for fields with too low of a - // number value. - ParamMinValueErrCode = "ParamMinValueError" - // ParamMinLenErrCode is the error code for fields without enough elements. - ParamMinLenErrCode = "ParamMinLenError" -) - -// Validator provides a way for types to perform validation logic on their -// input values that external code can use to determine if a type's values -// are valid. -type Validator interface { - Validate() error -} - -// An ErrInvalidParams provides wrapping of invalid parameter errors found when -// validating API operation input parameters. -type ErrInvalidParams struct { - // Context is the base context of the invalid parameter group. - Context string - errs []ErrInvalidParam -} - -// Add adds a new invalid parameter error to the collection of invalid -// parameters. The context of the invalid parameter will be updated to reflect -// this collection. -func (e *ErrInvalidParams) Add(err ErrInvalidParam) { - err.SetContext(e.Context) - e.errs = append(e.errs, err) -} - -// AddNested adds the invalid parameter errors from another ErrInvalidParams -// value into this collection. The nested errors will have their nested context -// updated and base context to reflect the merging. -// -// Use for nested validations errors. 
-func (e *ErrInvalidParams) AddNested(nestedCtx string, nested ErrInvalidParams) { - for _, err := range nested.errs { - err.SetContext(e.Context) - err.AddNestedContext(nestedCtx) - e.errs = append(e.errs, err) - } -} - -// Len returns the number of invalid parameter errors -func (e ErrInvalidParams) Len() int { - return len(e.errs) -} - -// Code returns the code of the error -func (e ErrInvalidParams) Code() string { - return InvalidParameterErrCode -} - -// Message returns the message of the error -func (e ErrInvalidParams) Message() string { - return fmt.Sprintf("%d validation error(s) found.", len(e.errs)) -} - -// Error returns the string formatted form of the invalid parameters. -func (e ErrInvalidParams) Error() string { - w := &bytes.Buffer{} - fmt.Fprintf(w, "%s: %s\n", e.Code(), e.Message()) - - for _, err := range e.errs { - fmt.Fprintf(w, "- %s\n", err.Message()) - } - - return w.String() -} - -// OrigErr returns the invalid parameters as a awserr.BatchedErrors value -func (e ErrInvalidParams) OrigErr() error { - return awserr.NewBatchError( - InvalidParameterErrCode, e.Message(), e.OrigErrs()) -} - -// OrigErrs returns a slice of the invalid parameters -func (e ErrInvalidParams) OrigErrs() []error { - errs := make([]error, len(e.errs)) - for i := 0; i < len(errs); i++ { - errs[i] = e.errs[i] - } - - return errs -} - -// An ErrInvalidParam represents an invalid parameter error type. -type ErrInvalidParam interface { - awserr.Error - - // Field name the error occurred on. - Field() string - - // SetContext updates the context of the error. - SetContext(string) - - // AddNestedContext updates the error's context to include a nested level. - AddNestedContext(string) -} - -type errInvalidParam struct { - context string - nestedContext string - field string - code string - msg string -} - -// Code returns the error code for the type of invalid parameter. -func (e *errInvalidParam) Code() string { - return e.code -} - -// Message returns the reason the parameter was invalid, and its context. -func (e *errInvalidParam) Message() string { - return fmt.Sprintf("%s, %s.", e.msg, e.Field()) -} - -// Error returns the string version of the invalid parameter error. -func (e *errInvalidParam) Error() string { - return fmt.Sprintf("%s: %s", e.code, e.Message()) -} - -// OrigErr returns nil, Implemented for awserr.Error interface. -func (e *errInvalidParam) OrigErr() error { - return nil -} - -// Field Returns the field and context the error occurred. -func (e *errInvalidParam) Field() string { - field := e.context - if len(field) > 0 { - field += "." - } - if len(e.nestedContext) > 0 { - field += fmt.Sprintf("%s.", e.nestedContext) - } - field += e.field - - return field -} - -// SetContext updates the base context of the error. -func (e *errInvalidParam) SetContext(ctx string) { - e.context = ctx -} - -// AddNestedContext prepends a context to the field's path. -func (e *errInvalidParam) AddNestedContext(ctx string) { - if len(e.nestedContext) == 0 { - e.nestedContext = ctx - } else { - e.nestedContext = fmt.Sprintf("%s.%s", ctx, e.nestedContext) - } - -} - -// An ErrParamRequired represents an required parameter error. -type ErrParamRequired struct { - errInvalidParam -} - -// NewErrParamRequired creates a new required parameter error. 
-func NewErrParamRequired(field string) *ErrParamRequired { - return &ErrParamRequired{ - errInvalidParam{ - code: ParamRequiredErrCode, - field: field, - msg: fmt.Sprintf("missing required field"), - }, - } -} - -// An ErrParamMinValue represents a minimum value parameter error. -type ErrParamMinValue struct { - errInvalidParam - min float64 -} - -// NewErrParamMinValue creates a new minimum value parameter error. -func NewErrParamMinValue(field string, min float64) *ErrParamMinValue { - return &ErrParamMinValue{ - errInvalidParam: errInvalidParam{ - code: ParamMinValueErrCode, - field: field, - msg: fmt.Sprintf("minimum field value of %v", min), - }, - min: min, - } -} - -// MinValue returns the field's require minimum value. -// -// float64 is returned for both int and float min values. -func (e *ErrParamMinValue) MinValue() float64 { - return e.min -} - -// An ErrParamMinLen represents a minimum length parameter error. -type ErrParamMinLen struct { - errInvalidParam - min int -} - -// NewErrParamMinLen creates a new minimum length parameter error. -func NewErrParamMinLen(field string, min int) *ErrParamMinLen { - return &ErrParamMinLen{ - errInvalidParam: errInvalidParam{ - code: ParamMinLenErrCode, - field: field, - msg: fmt.Sprintf("minimum field size of %v", min), - }, - min: min, - } -} - -// MinLen returns the field's required minimum length. -func (e *ErrParamMinLen) MinLen() int { - return e.min -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go b/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go deleted file mode 100644 index 4601f88..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go +++ /dev/null @@ -1,295 +0,0 @@ -package request - -import ( - "fmt" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/awsutil" -) - -// WaiterResourceNotReadyErrorCode is the error code returned by a waiter when -// the waiter's max attempts have been exhausted. -const WaiterResourceNotReadyErrorCode = "ResourceNotReady" - -// A WaiterOption is a function that will update the Waiter value's fields to -// configure the waiter. -type WaiterOption func(*Waiter) - -// WithWaiterMaxAttempts returns the maximum number of times the waiter should -// attempt to check the resource for the target state. -func WithWaiterMaxAttempts(max int) WaiterOption { - return func(w *Waiter) { - w.MaxAttempts = max - } -} - -// WaiterDelay will return a delay the waiter should pause between attempts to -// check the resource state. The passed in attempt is the number of times the -// Waiter has checked the resource state. -// -// Attempt is the number of attempts the Waiter has made checking the resource -// state. -type WaiterDelay func(attempt int) time.Duration - -// ConstantWaiterDelay returns a WaiterDelay that will always return a constant -// delay the waiter should use between attempts. It ignores the number of -// attempts made. -func ConstantWaiterDelay(delay time.Duration) WaiterDelay { - return func(attempt int) time.Duration { - return delay - } -} - -// WithWaiterDelay will set the Waiter to use the WaiterDelay passed in. -func WithWaiterDelay(delayer WaiterDelay) WaiterOption { - return func(w *Waiter) { - w.Delay = delayer - } -} - -// WithWaiterLogger returns a waiter option to set the logger a waiter -// should use to log warnings and errors to. 
-func WithWaiterLogger(logger aws.Logger) WaiterOption { - return func(w *Waiter) { - w.Logger = logger - } -} - -// WithWaiterRequestOptions returns a waiter option setting the request -// options for each request the waiter makes. Appends to waiter's request -// options already set. -func WithWaiterRequestOptions(opts ...Option) WaiterOption { - return func(w *Waiter) { - w.RequestOptions = append(w.RequestOptions, opts...) - } -} - -// A Waiter provides the functionality to perform a blocking call which will -// wait for a resource state to be satisfied by a service. -// -// This type should not be used directly. The API operations provided in the -// service packages prefixed with "WaitUntil" should be used instead. -type Waiter struct { - Name string - Acceptors []WaiterAcceptor - Logger aws.Logger - - MaxAttempts int - Delay WaiterDelay - - RequestOptions []Option - NewRequest func([]Option) (*Request, error) - SleepWithContext func(aws.Context, time.Duration) error -} - -// ApplyOptions updates the waiter with the list of waiter options provided. -func (w *Waiter) ApplyOptions(opts ...WaiterOption) { - for _, fn := range opts { - fn(w) - } -} - -// WaiterState are states the waiter uses based on WaiterAcceptor definitions -// to identify if the resource state the waiter is waiting on has occurred. -type WaiterState int - -// String returns the string representation of the waiter state. -func (s WaiterState) String() string { - switch s { - case SuccessWaiterState: - return "success" - case FailureWaiterState: - return "failure" - case RetryWaiterState: - return "retry" - default: - return "unknown waiter state" - } -} - -// States the waiter acceptors will use to identify target resource states. -const ( - SuccessWaiterState WaiterState = iota // waiter successful - FailureWaiterState // waiter failed - RetryWaiterState // waiter needs to be retried -) - -// WaiterMatchMode is the mode that the waiter will use to match the WaiterAcceptor -// definition's Expected attribute. -type WaiterMatchMode int - -// Modes the waiter will use when inspecting API response to identify target -// resource states. -const ( - PathAllWaiterMatch WaiterMatchMode = iota // match on all paths - PathWaiterMatch // match on specific path - PathAnyWaiterMatch // match on any path - PathListWaiterMatch // match on list of paths - StatusWaiterMatch // match on status code - ErrorWaiterMatch // match on error -) - -// String returns the string representation of the waiter match mode. -func (m WaiterMatchMode) String() string { - switch m { - case PathAllWaiterMatch: - return "pathAll" - case PathWaiterMatch: - return "path" - case PathAnyWaiterMatch: - return "pathAny" - case PathListWaiterMatch: - return "pathList" - case StatusWaiterMatch: - return "status" - case ErrorWaiterMatch: - return "error" - default: - return "unknown waiter match mode" - } -} - -// WaitWithContext will make requests for the API operation using NewRequest to -// build API requests. The request's response will be compared against the -// Waiter's Acceptors to determine the successful state of the resource the -// waiter is inspecting. -// -// The passed in context must not be nil. If it is nil a panic will occur. The -// Context will be used to cancel the waiter's pending requests and retry delays. -// Use aws.BackgroundContext if no context is available. -// -// The waiter will continue until the target state defined by the Acceptors, -// or the max attempts expires. 
-// -// Will return the WaiterResourceNotReadyErrorCode error code if the waiter's -// retryer ShouldRetry returns false. This normally will happen when the max -// wait attempts expires. -func (w Waiter) WaitWithContext(ctx aws.Context) error { - - for attempt := 1; ; attempt++ { - req, err := w.NewRequest(w.RequestOptions) - if err != nil { - waiterLogf(w.Logger, "unable to create request %v", err) - return err - } - req.Handlers.Build.PushBack(MakeAddToUserAgentFreeFormHandler("Waiter")) - err = req.Send() - - // See if any of the acceptors match the request's response, or error - for _, a := range w.Acceptors { - if matched, matchErr := a.match(w.Name, w.Logger, req, err); matched { - return matchErr - } - } - - // The Waiter should only check the resource state MaxAttempts times - // This is here instead of in the for loop above to prevent delaying - // unnecessary when the waiter will not retry. - if attempt == w.MaxAttempts { - break - } - - // Delay to wait before inspecting the resource again - delay := w.Delay(attempt) - if sleepFn := req.Config.SleepDelay; sleepFn != nil { - // Support SleepDelay for backwards compatibility and testing - sleepFn(delay) - } else { - sleepCtxFn := w.SleepWithContext - if sleepCtxFn == nil { - sleepCtxFn = aws.SleepWithContext - } - - if err := sleepCtxFn(ctx, delay); err != nil { - return awserr.New(CanceledErrorCode, "waiter context canceled", err) - } - } - } - - return awserr.New(WaiterResourceNotReadyErrorCode, "exceeded wait attempts", nil) -} - -// A WaiterAcceptor provides the information needed to wait for an API operation -// to complete. -type WaiterAcceptor struct { - State WaiterState - Matcher WaiterMatchMode - Argument string - Expected interface{} -} - -// match returns if the acceptor found a match with the passed in request -// or error. True is returned if the acceptor made a match, error is returned -// if there was an error attempting to perform the match. -func (a *WaiterAcceptor) match(name string, l aws.Logger, req *Request, err error) (bool, error) { - result := false - var vals []interface{} - - switch a.Matcher { - case PathAllWaiterMatch, PathWaiterMatch: - // Require all matches to be equal for result to match - vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument) - if len(vals) == 0 { - break - } - result = true - for _, val := range vals { - if !awsutil.DeepEqual(val, a.Expected) { - result = false - break - } - } - case PathAnyWaiterMatch: - // Only a single match needs to equal for the result to match - vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument) - for _, val := range vals { - if awsutil.DeepEqual(val, a.Expected) { - result = true - break - } - } - case PathListWaiterMatch: - // ignored matcher - case StatusWaiterMatch: - s := a.Expected.(int) - result = s == req.HTTPResponse.StatusCode - case ErrorWaiterMatch: - if aerr, ok := err.(awserr.Error); ok { - result = aerr.Code() == a.Expected.(string) - } - default: - waiterLogf(l, "WARNING: Waiter %s encountered unexpected matcher: %s", - name, a.Matcher) - } - - if !result { - // If there was no matching result found there is nothing more to do - // for this response, retry the request. 
- return false, nil - } - - switch a.State { - case SuccessWaiterState: - // waiter completed - return true, nil - case FailureWaiterState: - // Waiter failure state triggered - return true, awserr.New(WaiterResourceNotReadyErrorCode, - "failed waiting for successful resource state", err) - case RetryWaiterState: - // clear the error and retry the operation - return false, nil - default: - waiterLogf(l, "WARNING: Waiter %s encountered unexpected state: %s", - name, a.State) - return false, nil - } -} - -func waiterLogf(logger aws.Logger, msg string, args ...interface{}) { - if logger != nil { - logger.Log(fmt.Sprintf(msg, args...)) - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/types.go b/vendor/github.com/aws/aws-sdk-go/aws/types.go deleted file mode 100644 index 8b6f234..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/types.go +++ /dev/null @@ -1,201 +0,0 @@ -package aws - -import ( - "io" - "sync" - - "github.com/aws/aws-sdk-go/internal/sdkio" -) - -// ReadSeekCloser wraps a io.Reader returning a ReaderSeekerCloser. Should -// only be used with an io.Reader that is also an io.Seeker. Doing so may -// cause request signature errors, or request body's not sent for GET, HEAD -// and DELETE HTTP methods. -// -// Deprecated: Should only be used with io.ReadSeeker. If using for -// S3 PutObject to stream content use s3manager.Uploader instead. -func ReadSeekCloser(r io.Reader) ReaderSeekerCloser { - return ReaderSeekerCloser{r} -} - -// ReaderSeekerCloser represents a reader that can also delegate io.Seeker and -// io.Closer interfaces to the underlying object if they are available. -type ReaderSeekerCloser struct { - r io.Reader -} - -// IsReaderSeekable returns if the underlying reader type can be seeked. A -// io.Reader might not actually be seekable if it is the ReaderSeekerCloser -// type. -func IsReaderSeekable(r io.Reader) bool { - switch v := r.(type) { - case ReaderSeekerCloser: - return v.IsSeeker() - case *ReaderSeekerCloser: - return v.IsSeeker() - case io.ReadSeeker: - return true - default: - return false - } -} - -// Read reads from the reader up to size of p. The number of bytes read, and -// error if it occurred will be returned. -// -// If the reader is not an io.Reader zero bytes read, and nil error will be returned. -// -// Performs the same functionality as io.Reader Read -func (r ReaderSeekerCloser) Read(p []byte) (int, error) { - switch t := r.r.(type) { - case io.Reader: - return t.Read(p) - } - return 0, nil -} - -// Seek sets the offset for the next Read to offset, interpreted according to -// whence: 0 means relative to the origin of the file, 1 means relative to the -// current offset, and 2 means relative to the end. Seek returns the new offset -// and an error, if any. -// -// If the ReaderSeekerCloser is not an io.Seeker nothing will be done. -func (r ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) { - switch t := r.r.(type) { - case io.Seeker: - return t.Seek(offset, whence) - } - return int64(0), nil -} - -// IsSeeker returns if the underlying reader is also a seeker. -func (r ReaderSeekerCloser) IsSeeker() bool { - _, ok := r.r.(io.Seeker) - return ok -} - -// HasLen returns the length of the underlying reader if the value implements -// the Len() int method. -func (r ReaderSeekerCloser) HasLen() (int, bool) { - type lenner interface { - Len() int - } - - if lr, ok := r.r.(lenner); ok { - return lr.Len(), true - } - - return 0, false -} - -// GetLen returns the length of the bytes remaining in the underlying reader. 
-// Checks first for Len(), then io.Seeker to determine the size of the -// underlying reader. -// -// Will return -1 if the length cannot be determined. -func (r ReaderSeekerCloser) GetLen() (int64, error) { - if l, ok := r.HasLen(); ok { - return int64(l), nil - } - - if s, ok := r.r.(io.Seeker); ok { - return seekerLen(s) - } - - return -1, nil -} - -// SeekerLen attempts to get the number of bytes remaining at the seeker's -// current position. Returns the number of bytes remaining or error. -func SeekerLen(s io.Seeker) (int64, error) { - // Determine if the seeker is actually seekable. ReaderSeekerCloser - // hides the fact that a io.Readers might not actually be seekable. - switch v := s.(type) { - case ReaderSeekerCloser: - return v.GetLen() - case *ReaderSeekerCloser: - return v.GetLen() - } - - return seekerLen(s) -} - -func seekerLen(s io.Seeker) (int64, error) { - curOffset, err := s.Seek(0, sdkio.SeekCurrent) - if err != nil { - return 0, err - } - - endOffset, err := s.Seek(0, sdkio.SeekEnd) - if err != nil { - return 0, err - } - - _, err = s.Seek(curOffset, sdkio.SeekStart) - if err != nil { - return 0, err - } - - return endOffset - curOffset, nil -} - -// Close closes the ReaderSeekerCloser. -// -// If the ReaderSeekerCloser is not an io.Closer nothing will be done. -func (r ReaderSeekerCloser) Close() error { - switch t := r.r.(type) { - case io.Closer: - return t.Close() - } - return nil -} - -// A WriteAtBuffer provides a in memory buffer supporting the io.WriterAt interface -// Can be used with the s3manager.Downloader to download content to a buffer -// in memory. Safe to use concurrently. -type WriteAtBuffer struct { - buf []byte - m sync.Mutex - - // GrowthCoeff defines the growth rate of the internal buffer. By - // default, the growth rate is 1, where expanding the internal - // buffer will allocate only enough capacity to fit the new expected - // length. - GrowthCoeff float64 -} - -// NewWriteAtBuffer creates a WriteAtBuffer with an internal buffer -// provided by buf. -func NewWriteAtBuffer(buf []byte) *WriteAtBuffer { - return &WriteAtBuffer{buf: buf} -} - -// WriteAt writes a slice of bytes to a buffer starting at the position provided -// The number of bytes written will be returned, or error. Can overwrite previous -// written slices if the write ats overlap. -func (b *WriteAtBuffer) WriteAt(p []byte, pos int64) (n int, err error) { - pLen := len(p) - expLen := pos + int64(pLen) - b.m.Lock() - defer b.m.Unlock() - if int64(len(b.buf)) < expLen { - if int64(cap(b.buf)) < expLen { - if b.GrowthCoeff < 1 { - b.GrowthCoeff = 1 - } - newBuf := make([]byte, expLen, int64(b.GrowthCoeff*float64(expLen))) - copy(newBuf, b.buf) - b.buf = newBuf - } - b.buf = b.buf[:expLen] - } - copy(b.buf[pos:], p) - return pLen, nil -} - -// Bytes returns a slice of bytes written to the buffer. -func (b *WriteAtBuffer) Bytes() []byte { - b.m.Lock() - defer b.m.Unlock() - return b.buf -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/url.go b/vendor/github.com/aws/aws-sdk-go/aws/url.go deleted file mode 100644 index 6192b24..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/url.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build go1.8 - -package aws - -import "net/url" - -// URLHostname will extract the Hostname without port from the URL value. -// -// Wrapper of net/url#URL.Hostname for backwards Go version compatibility. 
-func URLHostname(url *url.URL) string { - return url.Hostname() -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go deleted file mode 100644 index 0210d27..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go +++ /dev/null @@ -1,29 +0,0 @@ -// +build !go1.8 - -package aws - -import ( - "net/url" - "strings" -) - -// URLHostname will extract the Hostname without port from the URL value. -// -// Copy of Go 1.8's net/url#URL.Hostname functionality. -func URLHostname(url *url.URL) string { - return stripPort(url.Host) - -} - -// stripPort is copy of Go 1.8 url#URL.Hostname functionality. -// https://golang.org/src/net/url/url.go -func stripPort(hostport string) string { - colon := strings.IndexByte(hostport, ':') - if colon == -1 { - return hostport - } - if i := strings.IndexByte(hostport, ']'); i != -1 { - return strings.TrimPrefix(hostport[:i], "[") - } - return hostport[:colon] -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go deleted file mode 100644 index 423209b..0000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ /dev/null @@ -1,8 +0,0 @@ -// Package aws provides core functionality for making requests to AWS services. -package aws - -// SDKName is the name of this AWS SDK -const SDKName = "aws-sdk-go" - -// SDKVersion is the version of this SDK -const SDKVersion = "1.15.27" diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go deleted file mode 100644 index 5aa9137..0000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go +++ /dev/null @@ -1,10 +0,0 @@ -// +build !go1.7 - -package sdkio - -// Copy of Go 1.7 io package's Seeker constants. 
-const ( - SeekStart = 0 // seek relative to the origin of the file - SeekCurrent = 1 // seek relative to the current offset - SeekEnd = 2 // seek relative to the end -) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go deleted file mode 100644 index e5f0056..0000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build go1.7 - -package sdkio - -import "io" - -// Alias for Go 1.7 io package Seeker constants -const ( - SeekStart = io.SeekStart // seek relative to the origin of the file - SeekCurrent = io.SeekCurrent // seek relative to the current offset - SeekEnd = io.SeekEnd // seek relative to the end -) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go deleted file mode 100644 index 0c9802d..0000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go +++ /dev/null @@ -1,29 +0,0 @@ -package sdkrand - -import ( - "math/rand" - "sync" - "time" -) - -// lockedSource is a thread-safe implementation of rand.Source -type lockedSource struct { - lk sync.Mutex - src rand.Source -} - -func (r *lockedSource) Int63() (n int64) { - r.lk.Lock() - n = r.src.Int63() - r.lk.Unlock() - return -} - -func (r *lockedSource) Seed(seed int64) { - r.lk.Lock() - r.src.Seed(seed) - r.lk.Unlock() -} - -// SeededRand is a new RNG using a thread safe implementation of rand.Source -var SeededRand = rand.New(&lockedSource{src: rand.NewSource(time.Now().UnixNano())}) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go deleted file mode 100644 index 38ea61a..0000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go +++ /dev/null @@ -1,23 +0,0 @@ -package sdkuri - -import ( - "path" - "strings" -) - -// PathJoin will join the elements of the path delimited by the "/" -// character. Similar to path.Join with the exception the trailing "/" -// character is preserved if present. -func PathJoin(elems ...string) string { - if len(elems) == 0 { - return "" - } - - hasTrailing := strings.HasSuffix(elems[len(elems)-1], "/") - str := path.Join(elems...) - if hasTrailing && str != "/" { - str += "/" - } - - return str -} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go deleted file mode 100644 index ebcbc2b..0000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go +++ /dev/null @@ -1,40 +0,0 @@ -package shareddefaults - -import ( - "os" - "path/filepath" - "runtime" -) - -// SharedCredentialsFilename returns the SDK's default file path -// for the shared credentials file. -// -// Builds the shared config file path based on the OS's platform. -// -// - Linux/Unix: $HOME/.aws/credentials -// - Windows: %USERPROFILE%\.aws\credentials -func SharedCredentialsFilename() string { - return filepath.Join(UserHomeDir(), ".aws", "credentials") -} - -// SharedConfigFilename returns the SDK's default file path for -// the shared config file. -// -// Builds the shared config file path based on the OS's platform. 
-//
-// - Linux/Unix: $HOME/.aws/config
-// - Windows: %USERPROFILE%\.aws\config
-func SharedConfigFilename() string {
- return filepath.Join(UserHomeDir(), ".aws", "config")
-}
-
-// UserHomeDir returns the home directory for the user the process is
-// running under.
-func UserHomeDir() string {
- if runtime.GOOS == "windows" { // Windows
- return os.Getenv("USERPROFILE")
- }
-
- // *nix
- return os.Getenv("HOME")
-}
diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE
deleted file mode 100644
index bc52e96..0000000
--- a/vendor/github.com/davecgh/go-spew/LICENSE
+++ /dev/null
@@ -1,15 +0,0 @@
-ISC License
-
-Copyright (c) 2012-2016 Dave Collins <dave@davec.name>
-
-Permission to use, copy, modify, and/or distribute this software for any
-purpose with or without fee is hereby granted, provided that the above
-copyright notice and this permission notice appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go
deleted file mode 100644
index 7929947..0000000
--- a/vendor/github.com/davecgh/go-spew/spew/bypass.go
+++ /dev/null
@@ -1,145 +0,0 @@
-// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
-//
-// Permission to use, copy, modify, and distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-// NOTE: Due to the following build constraints, this file will only be compiled
-// when the code is not running on Google App Engine, compiled by GopherJS, and
-// "-tags safe" is not added to the go build command line. The "disableunsafe"
-// tag is deprecated and thus should not be used.
-// Go versions prior to 1.4 are disabled because they use a different layout
-// for interfaces which make the implementation of unsafeReflectValue more complex.
-// +build !js,!appengine,!safe,!disableunsafe,go1.4
-
-package spew
-
-import (
- "reflect"
- "unsafe"
-)
-
-const (
- // UnsafeDisabled is a build-time constant which specifies whether or
- // not access to the unsafe package is available.
- UnsafeDisabled = false
-
- // ptrSize is the size of a pointer on the current arch.
- ptrSize = unsafe.Sizeof((*byte)(nil))
-)
-
-type flag uintptr
-
-var (
- // flagRO indicates whether the value field of a reflect.Value
- // is read-only.
- flagRO flag
-
- // flagAddr indicates whether the address of the reflect.Value's
- // value may be taken.
- flagAddr flag
-)
-
-// flagKindMask holds the bits that make up the kind
-// part of the flags field. In all the supported versions,
-// it is in the lower 5 bits.
-const flagKindMask = flag(0x1f)
-
-// Different versions of Go have used different
-// bit layouts for the flags type. This table
-// records the known combinations.
-var okFlags = []struct {
- ro, addr flag
-}{{
- // From Go 1.4 to 1.5
- ro: 1 << 5,
- addr: 1 << 7,
-}, {
- // Up to Go tip.
- ro: 1<<5 | 1<<6,
- addr: 1 << 8,
-}}
-
-var flagValOffset = func() uintptr {
- field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
- if !ok {
- panic("reflect.Value has no flag field")
- }
- return field.Offset
-}()
-
-// flagField returns a pointer to the flag field of a reflect.Value.
-func flagField(v *reflect.Value) *flag {
- return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset))
-}
-
-// unsafeReflectValue converts the passed reflect.Value into a one that bypasses
-// the typical safety restrictions preventing access to unaddressable and
-// unexported data. It works by digging the raw pointer to the underlying
-// value out of the protected value and generating a new unprotected (unsafe)
-// reflect.Value to it.
-//
-// This allows us to check for implementations of the Stringer and error
-// interfaces to be used for pretty printing ordinarily unaddressable and
-// inaccessible values such as unexported struct fields.
-func unsafeReflectValue(v reflect.Value) reflect.Value {
- if !v.IsValid() || (v.CanInterface() && v.CanAddr()) {
- return v
- }
- flagFieldPtr := flagField(&v)
- *flagFieldPtr &^= flagRO
- *flagFieldPtr |= flagAddr
- return v
-}
-
-// Sanity checks against future reflect package changes
-// to the type or semantics of the Value.flag field.
-func init() {
- field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
- if !ok {
- panic("reflect.Value has no flag field")
- }
- if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() {
- panic("reflect.Value flag field has changed kind")
- }
- type t0 int
- var t struct {
- A t0
- // t0 will have flagEmbedRO set.
- t0
- // a will have flagStickyRO set
- a t0
- }
- vA := reflect.ValueOf(t).FieldByName("A")
- va := reflect.ValueOf(t).FieldByName("a")
- vt0 := reflect.ValueOf(t).FieldByName("t0")
-
- // Infer flagRO from the difference between the flags
- // for the (otherwise identical) fields in t.
- flagPublic := *flagField(&vA)
- flagWithRO := *flagField(&va) | *flagField(&vt0)
- flagRO = flagPublic ^ flagWithRO
-
- // Infer flagAddr from the difference between a value
- // taken from a pointer and not.
- vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A")
- flagNoPtr := *flagField(&vA)
- flagPtr := *flagField(&vPtrA)
- flagAddr = flagNoPtr ^ flagPtr
-
- // Check that the inferred flags tally with one of the known versions.
- for _, f := range okFlags {
- if flagRO == f.ro && flagAddr == f.addr {
- return
- }
- }
- panic("reflect.Value read-only flag has changed semantics")
-}
diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
deleted file mode 100644
index 205c28d..0000000
--- a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
-//
-// Permission to use, copy, modify, and distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-// NOTE: Due to the following build constraints, this file will only be compiled
-// when the code is running on Google App Engine, compiled by GopherJS, or
-// "-tags safe" is added to the go build command line. The "disableunsafe"
-// tag is deprecated and thus should not be used.
-// +build js appengine safe disableunsafe !go1.4
-
-package spew
-
-import "reflect"
-
-const (
- // UnsafeDisabled is a build-time constant which specifies whether or
- // not access to the unsafe package is available.
- UnsafeDisabled = true
-)
-
-// unsafeReflectValue typically converts the passed reflect.Value into a one
-// that bypasses the typical safety restrictions preventing access to
-// unaddressable and unexported data. However, doing this relies on access to
-// the unsafe package. This is a stub version which simply returns the passed
-// reflect.Value when the unsafe package is not available.
-func unsafeReflectValue(v reflect.Value) reflect.Value {
- return v
-}
diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go
deleted file mode 100644
index 1be8ce9..0000000
--- a/vendor/github.com/davecgh/go-spew/spew/common.go
+++ /dev/null
@@ -1,341 +0,0 @@
-/*
- * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
- *
- * Permission to use, copy, modify, and distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-package spew
-
-import (
- "bytes"
- "fmt"
- "io"
- "reflect"
- "sort"
- "strconv"
-)
-
-// Some constants in the form of bytes to avoid string overhead. This mirrors
-// the technique used in the fmt package.
-var (
- panicBytes = []byte("(PANIC=")
- plusBytes = []byte("+")
- iBytes = []byte("i")
- trueBytes = []byte("true")
- falseBytes = []byte("false")
- interfaceBytes = []byte("(interface {})")
- commaNewlineBytes = []byte(",\n")
- newlineBytes = []byte("\n")
- openBraceBytes = []byte("{")
- openBraceNewlineBytes = []byte("{\n")
- closeBraceBytes = []byte("}")
- asteriskBytes = []byte("*")
- colonBytes = []byte(":")
- colonSpaceBytes = []byte(": ")
- openParenBytes = []byte("(")
- closeParenBytes = []byte(")")
- spaceBytes = []byte(" ")
- pointerChainBytes = []byte("->")
- nilAngleBytes = []byte("<nil>")
- maxNewlineBytes = []byte("<max depth reached>\n")
- maxShortBytes = []byte("<max>")
- circularBytes = []byte("<already shown>")
- circularShortBytes = []byte("<shown>")
- invalidAngleBytes = []byte("<invalid>")
- openBracketBytes = []byte("[")
- closeBracketBytes = []byte("]")
- percentBytes = []byte("%")
- precisionBytes = []byte(".")
- openAngleBytes = []byte("<")
- closeAngleBytes = []byte(">")
- openMapBytes = []byte("map[")
- closeMapBytes = []byte("]")
- lenEqualsBytes = []byte("len=")
- capEqualsBytes = []byte("cap=")
-)
-
-// hexDigits is used to map a decimal value to a hex digit.
-var hexDigits = "0123456789abcdef"
-
-// catchPanic handles any panics that might occur during the handleMethods
-// calls.
-func catchPanic(w io.Writer, v reflect.Value) {
- if err := recover(); err != nil {
- w.Write(panicBytes)
- fmt.Fprintf(w, "%v", err)
- w.Write(closeParenBytes)
- }
-}
-
-// handleMethods attempts to call the Error and String methods on the underlying
-// type the passed reflect.Value represents and outputes the result to Writer w.
-//
-// It handles panics in any called methods by catching and displaying the error
-// as the formatted value.
-func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) {
- // We need an interface to check if the type implements the error or
- // Stringer interface. However, the reflect package won't give us an
- // interface on certain things like unexported struct fields in order
- // to enforce visibility rules. We use unsafe, when it's available,
- // to bypass these restrictions since this package does not mutate the
- // values.
- if !v.CanInterface() {
- if UnsafeDisabled {
- return false
- }
-
- v = unsafeReflectValue(v)
- }
-
- // Choose whether or not to do error and Stringer interface lookups against
- // the base type or a pointer to the base type depending on settings.
- // Technically calling one of these methods with a pointer receiver can
- // mutate the value, however, types which choose to satisify an error or
- // Stringer interface with a pointer receiver should not be mutating their
- // state inside these interface methods.
- if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() {
- v = unsafeReflectValue(v)
- }
- if v.CanAddr() {
- v = v.Addr()
- }
-
- // Is it an error or Stringer?
- switch iface := v.Interface().(type) {
- case error:
- defer catchPanic(w, v)
- if cs.ContinueOnMethod {
- w.Write(openParenBytes)
- w.Write([]byte(iface.Error()))
- w.Write(closeParenBytes)
- w.Write(spaceBytes)
- return false
- }
-
- w.Write([]byte(iface.Error()))
- return true
-
- case fmt.Stringer:
- defer catchPanic(w, v)
- if cs.ContinueOnMethod {
- w.Write(openParenBytes)
- w.Write([]byte(iface.String()))
- w.Write(closeParenBytes)
- w.Write(spaceBytes)
- return false
- }
- w.Write([]byte(iface.String()))
- return true
- }
- return false
-}
-
-// printBool outputs a boolean value as true or false to Writer w.
-func printBool(w io.Writer, val bool) { - if val { - w.Write(trueBytes) - } else { - w.Write(falseBytes) - } -} - -// printInt outputs a signed integer value to Writer w. -func printInt(w io.Writer, val int64, base int) { - w.Write([]byte(strconv.FormatInt(val, base))) -} - -// printUint outputs an unsigned integer value to Writer w. -func printUint(w io.Writer, val uint64, base int) { - w.Write([]byte(strconv.FormatUint(val, base))) -} - -// printFloat outputs a floating point value using the specified precision, -// which is expected to be 32 or 64bit, to Writer w. -func printFloat(w io.Writer, val float64, precision int) { - w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision))) -} - -// printComplex outputs a complex value using the specified float precision -// for the real and imaginary parts to Writer w. -func printComplex(w io.Writer, c complex128, floatPrecision int) { - r := real(c) - w.Write(openParenBytes) - w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision))) - i := imag(c) - if i >= 0 { - w.Write(plusBytes) - } - w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision))) - w.Write(iBytes) - w.Write(closeParenBytes) -} - -// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x' -// prefix to Writer w. -func printHexPtr(w io.Writer, p uintptr) { - // Null pointer. - num := uint64(p) - if num == 0 { - w.Write(nilAngleBytes) - return - } - - // Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix - buf := make([]byte, 18) - - // It's simpler to construct the hex string right to left. - base := uint64(16) - i := len(buf) - 1 - for num >= base { - buf[i] = hexDigits[num%base] - num /= base - i-- - } - buf[i] = hexDigits[num] - - // Add '0x' prefix. - i-- - buf[i] = 'x' - i-- - buf[i] = '0' - - // Strip unused leading bytes. - buf = buf[i:] - w.Write(buf) -} - -// valuesSorter implements sort.Interface to allow a slice of reflect.Value -// elements to be sorted. -type valuesSorter struct { - values []reflect.Value - strings []string // either nil or same len and values - cs *ConfigState -} - -// newValuesSorter initializes a valuesSorter instance, which holds a set of -// surrogate keys on which the data should be sorted. It uses flags in -// ConfigState to decide if and how to populate those surrogate keys. -func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface { - vs := &valuesSorter{values: values, cs: cs} - if canSortSimply(vs.values[0].Kind()) { - return vs - } - if !cs.DisableMethods { - vs.strings = make([]string, len(values)) - for i := range vs.values { - b := bytes.Buffer{} - if !handleMethods(cs, &b, vs.values[i]) { - vs.strings = nil - break - } - vs.strings[i] = b.String() - } - } - if vs.strings == nil && cs.SpewKeys { - vs.strings = make([]string, len(values)) - for i := range vs.values { - vs.strings[i] = Sprintf("%#v", vs.values[i].Interface()) - } - } - return vs -} - -// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted -// directly, or whether it should be considered for sorting by surrogate keys -// (if the ConfigState allows it). -func canSortSimply(kind reflect.Kind) bool { - // This switch parallels valueSortLess, except for the default case. 
- switch kind { - case reflect.Bool: - return true - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - return true - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - return true - case reflect.Float32, reflect.Float64: - return true - case reflect.String: - return true - case reflect.Uintptr: - return true - case reflect.Array: - return true - } - return false -} - -// Len returns the number of values in the slice. It is part of the -// sort.Interface implementation. -func (s *valuesSorter) Len() int { - return len(s.values) -} - -// Swap swaps the values at the passed indices. It is part of the -// sort.Interface implementation. -func (s *valuesSorter) Swap(i, j int) { - s.values[i], s.values[j] = s.values[j], s.values[i] - if s.strings != nil { - s.strings[i], s.strings[j] = s.strings[j], s.strings[i] - } -} - -// valueSortLess returns whether the first value should sort before the second -// value. It is used by valueSorter.Less as part of the sort.Interface -// implementation. -func valueSortLess(a, b reflect.Value) bool { - switch a.Kind() { - case reflect.Bool: - return !a.Bool() && b.Bool() - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - return a.Int() < b.Int() - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - return a.Uint() < b.Uint() - case reflect.Float32, reflect.Float64: - return a.Float() < b.Float() - case reflect.String: - return a.String() < b.String() - case reflect.Uintptr: - return a.Uint() < b.Uint() - case reflect.Array: - // Compare the contents of both arrays. - l := a.Len() - for i := 0; i < l; i++ { - av := a.Index(i) - bv := b.Index(i) - if av.Interface() == bv.Interface() { - continue - } - return valueSortLess(av, bv) - } - } - return a.String() < b.String() -} - -// Less returns whether the value at index i should sort before the -// value at index j. It is part of the sort.Interface implementation. -func (s *valuesSorter) Less(i, j int) bool { - if s.strings == nil { - return valueSortLess(s.values[i], s.values[j]) - } - return s.strings[i] < s.strings[j] -} - -// sortValues is a sort function that handles both native types and any type that -// can be converted to error or Stringer. Other inputs are sorted according to -// their Value.String() value to ensure display stability. -func sortValues(values []reflect.Value, cs *ConfigState) { - if len(values) == 0 { - return - } - sort.Sort(newValuesSorter(values, cs)) -} diff --git a/vendor/github.com/davecgh/go-spew/spew/config.go b/vendor/github.com/davecgh/go-spew/spew/config.go deleted file mode 100644 index 2e3d22f..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/config.go +++ /dev/null @@ -1,306 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-package spew
-
-import (
-	"bytes"
-	"fmt"
-	"io"
-	"os"
-)
-
-// ConfigState houses the configuration options used by spew to format and
-// display values. There is a global instance, Config, that is used to control
-// all top-level Formatter and Dump functionality. Each ConfigState instance
-// provides methods equivalent to the top-level functions.
-//
-// The zero value for ConfigState provides no indentation. You would typically
-// want to set it to a space or a tab.
-//
-// Alternatively, you can use NewDefaultConfig to get a ConfigState instance
-// with default settings. See the documentation of NewDefaultConfig for default
-// values.
-type ConfigState struct {
-	// Indent specifies the string to use for each indentation level. The
-	// global config instance that all top-level functions use set this to a
-	// single space by default. If you would like more indentation, you might
-	// set this to a tab with "\t" or perhaps two spaces with "  ".
-	Indent string
-
-	// MaxDepth controls the maximum number of levels to descend into nested
-	// data structures. The default, 0, means there is no limit.
-	//
-	// NOTE: Circular data structures are properly detected, so it is not
-	// necessary to set this value unless you specifically want to limit deeply
-	// nested data structures.
-	MaxDepth int
-
-	// DisableMethods specifies whether or not error and Stringer interfaces are
-	// invoked for types that implement them.
-	DisableMethods bool
-
-	// DisablePointerMethods specifies whether or not to check for and invoke
-	// error and Stringer interfaces on types which only accept a pointer
-	// receiver when the current type is not a pointer.
-	//
-	// NOTE: This might be an unsafe action since calling one of these methods
-	// with a pointer receiver could technically mutate the value, however,
-	// in practice, types which choose to satisfy an error or Stringer
-	// interface with a pointer receiver should not be mutating their state
-	// inside these interface methods. As a result, this option relies on
-	// access to the unsafe package, so it will not have any effect when
-	// running in environments without access to the unsafe package such as
-	// Google App Engine or with the "safe" build tag specified.
-	DisablePointerMethods bool
-
-	// DisablePointerAddresses specifies whether to disable the printing of
-	// pointer addresses. This is useful when diffing data structures in tests.
-	DisablePointerAddresses bool
-
-	// DisableCapacities specifies whether to disable the printing of capacities
-	// for arrays, slices, maps and channels. This is useful when diffing
-	// data structures in tests.
-	DisableCapacities bool
-
-	// ContinueOnMethod specifies whether or not recursion should continue once
-	// a custom error or Stringer interface is invoked. The default, false,
-	// means it will print the results of invoking the custom error or Stringer
-	// interface and return immediately instead of continuing to recurse into
-	// the internals of the data type.
-	//
-	// NOTE: This flag does not have any effect if method invocation is disabled
-	// via the DisableMethods or DisablePointerMethods options.
- ContinueOnMethod bool - - // SortKeys specifies map keys should be sorted before being printed. Use - // this to have a more deterministic, diffable output. Note that only - // native types (bool, int, uint, floats, uintptr and string) and types - // that support the error or Stringer interfaces (if methods are - // enabled) are supported, with other types sorted according to the - // reflect.Value.String() output which guarantees display stability. - SortKeys bool - - // SpewKeys specifies that, as a last resort attempt, map keys should - // be spewed to strings and sorted by those strings. This is only - // considered if SortKeys is true. - SpewKeys bool -} - -// Config is the active configuration of the top-level functions. -// The configuration can be changed by modifying the contents of spew.Config. -var Config = ConfigState{Indent: " "} - -// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the formatted string as a value that satisfies error. See NewFormatter -// for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) { - return fmt.Errorf(format, c.convertArgs(a)...) -} - -// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprint(w, c.convertArgs(a)...) -} - -// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { - return fmt.Fprintf(w, format, c.convertArgs(a)...) -} - -// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it -// passed with a Formatter interface returned by c.NewFormatter. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprintln(w, c.convertArgs(a)...) -} - -// Print is a wrapper for fmt.Print that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Print(c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Print(a ...interface{}) (n int, err error) { - return fmt.Print(c.convertArgs(a)...) -} - -// Printf is a wrapper for fmt.Printf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. 
It returns
-// the number of bytes written and any write error encountered. See
-// NewFormatter for formatting details.
-//
-// This function is shorthand for the following syntax:
-//
-//	fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b))
-func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) {
-	return fmt.Printf(format, c.convertArgs(a)...)
-}
-
-// Println is a wrapper for fmt.Println that treats each argument as if it were
-// passed with a Formatter interface returned by c.NewFormatter. It returns
-// the number of bytes written and any write error encountered. See
-// NewFormatter for formatting details.
-//
-// This function is shorthand for the following syntax:
-//
-//	fmt.Println(c.NewFormatter(a), c.NewFormatter(b))
-func (c *ConfigState) Println(a ...interface{}) (n int, err error) {
-	return fmt.Println(c.convertArgs(a)...)
-}
-
-// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
-// passed with a Formatter interface returned by c.NewFormatter. It returns
-// the resulting string. See NewFormatter for formatting details.
-//
-// This function is shorthand for the following syntax:
-//
-//	fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b))
-func (c *ConfigState) Sprint(a ...interface{}) string {
-	return fmt.Sprint(c.convertArgs(a)...)
-}
-
-// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
-// passed with a Formatter interface returned by c.NewFormatter. It returns
-// the resulting string. See NewFormatter for formatting details.
-//
-// This function is shorthand for the following syntax:
-//
-//	fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b))
-func (c *ConfigState) Sprintf(format string, a ...interface{}) string {
-	return fmt.Sprintf(format, c.convertArgs(a)...)
-}
-
-// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
-// were passed with a Formatter interface returned by c.NewFormatter. It
-// returns the resulting string. See NewFormatter for formatting details.
-//
-// This function is shorthand for the following syntax:
-//
-//	fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b))
-func (c *ConfigState) Sprintln(a ...interface{}) string {
-	return fmt.Sprintln(c.convertArgs(a)...)
-}
-
-/*
-NewFormatter returns a custom formatter that satisfies the fmt.Formatter
-interface. As a result, it integrates cleanly with standard fmt package
-printing functions. The formatter is useful for inline printing of smaller data
-types similar to the standard %v format specifier.
-
-The custom formatter only responds to the %v (most compact), %+v (adds pointer
-addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb
-combinations. Any other verbs such as %x and %q will be sent to the
-standard fmt package for formatting. In addition, the custom formatter ignores
-the width and precision arguments (however they will still work on the format
-specifiers not handled by the custom formatter).
-
-Typically this function shouldn't be called directly. It is much easier to make
-use of the custom formatter by calling one of the convenience functions such as
-c.Printf, c.Println, or c.Fprintf.
-*/
-func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter {
-	return newFormatter(c, v)
-}
-
-// Fdump formats and displays the passed arguments to io.Writer w. It formats
-// exactly the same as Dump.
-func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) {
-	fdump(c, w, a...)
-} - -/* -Dump displays the passed parameters to standard out with newlines, customizable -indentation, and additional debug information such as complete types and all -pointer addresses used to indirect to the final value. It provides the -following features over the built-in printing facilities provided by the fmt -package: - - * Pointers are dereferenced and followed - * Circular data structures are detected and handled properly - * Custom Stringer/error interfaces are optionally invoked, including - on unexported types - * Custom types which only implement the Stringer/error interfaces via - a pointer receiver are optionally invoked when passing non-pointer - variables - * Byte arrays and slices are dumped like the hexdump -C command which - includes offsets, byte values in hex, and ASCII output - -The configuration options are controlled by modifying the public members -of c. See ConfigState for options documentation. - -See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to -get the formatted result as a string. -*/ -func (c *ConfigState) Dump(a ...interface{}) { - fdump(c, os.Stdout, a...) -} - -// Sdump returns a string with the passed arguments formatted exactly the same -// as Dump. -func (c *ConfigState) Sdump(a ...interface{}) string { - var buf bytes.Buffer - fdump(c, &buf, a...) - return buf.String() -} - -// convertArgs accepts a slice of arguments and returns a slice of the same -// length with each argument converted to a spew Formatter interface using -// the ConfigState associated with s. -func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) { - formatters = make([]interface{}, len(args)) - for index, arg := range args { - formatters[index] = newFormatter(c, arg) - } - return formatters -} - -// NewDefaultConfig returns a ConfigState with the following default settings. -// -// Indent: " " -// MaxDepth: 0 -// DisableMethods: false -// DisablePointerMethods: false -// ContinueOnMethod: false -// SortKeys: false -func NewDefaultConfig() *ConfigState { - return &ConfigState{Indent: " "} -} diff --git a/vendor/github.com/davecgh/go-spew/spew/doc.go b/vendor/github.com/davecgh/go-spew/spew/doc.go deleted file mode 100644 index aacaac6..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/doc.go +++ /dev/null @@ -1,211 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -/* -Package spew implements a deep pretty printer for Go data structures to aid in -debugging. 
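Since the bulk of this removal is the ConfigState plumbing shown above, here is a minimal, self-contained sketch of how that API is consumed once it resolves from the go.mod module cache instead of vendor/. Illustrative only; the playbook value and its fields are hypothetical, not taken from sql-runner:

	// Illustrative sketch, not part of this diff.
	package main

	import (
		"os"

		"github.com/davecgh/go-spew/spew"
	)

	func main() {
		cfg := spew.NewDefaultConfig() // Indent: " ", no depth limit, unsorted keys
		cfg.SortKeys = true            // deterministic, diffable map output

		playbook := map[string]interface{}{ // hypothetical value to dump
			"steps":  []string{"create schema", "load", "analyze"},
			"dryRun": false,
		}

		cfg.Dump(playbook)             // pretty-print to os.Stdout
		cfg.Fdump(os.Stderr, playbook) // ...or to any io.Writer
		out := cfg.Sdump(playbook)     // ...or capture as a string
		_ = out
	}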
- -A quick overview of the additional features spew provides over the built-in -printing facilities for Go data types are as follows: - - * Pointers are dereferenced and followed - * Circular data structures are detected and handled properly - * Custom Stringer/error interfaces are optionally invoked, including - on unexported types - * Custom types which only implement the Stringer/error interfaces via - a pointer receiver are optionally invoked when passing non-pointer - variables - * Byte arrays and slices are dumped like the hexdump -C command which - includes offsets, byte values in hex, and ASCII output (only when using - Dump style) - -There are two different approaches spew allows for dumping Go data structures: - - * Dump style which prints with newlines, customizable indentation, - and additional debug information such as types and all pointer addresses - used to indirect to the final value - * A custom Formatter interface that integrates cleanly with the standard fmt - package and replaces %v, %+v, %#v, and %#+v to provide inline printing - similar to the default %v while providing the additional functionality - outlined above and passing unsupported format verbs such as %x and %q - along to fmt - -Quick Start - -This section demonstrates how to quickly get started with spew. See the -sections below for further details on formatting and configuration options. - -To dump a variable with full newlines, indentation, type, and pointer -information use Dump, Fdump, or Sdump: - spew.Dump(myVar1, myVar2, ...) - spew.Fdump(someWriter, myVar1, myVar2, ...) - str := spew.Sdump(myVar1, myVar2, ...) - -Alternatively, if you would prefer to use format strings with a compacted inline -printing style, use the convenience wrappers Printf, Fprintf, etc with -%v (most compact), %+v (adds pointer addresses), %#v (adds types), or -%#+v (adds types and pointer addresses): - spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) - spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) - spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) - spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) - -Configuration Options - -Configuration of spew is handled by fields in the ConfigState type. For -convenience, all of the top-level functions use a global state available -via the spew.Config global. - -It is also possible to create a ConfigState instance that provides methods -equivalent to the top-level functions. This allows concurrent configuration -options. See the ConfigState documentation for more details. - -The following configuration options are available: - * Indent - String to use for each indentation level for Dump functions. - It is a single space by default. A popular alternative is "\t". - - * MaxDepth - Maximum number of levels to descend into nested data structures. - There is no limit by default. - - * DisableMethods - Disables invocation of error and Stringer interface methods. - Method invocation is enabled by default. - - * DisablePointerMethods - Disables invocation of error and Stringer interface methods on types - which only accept pointer receivers from non-pointer variables. - Pointer method invocation is enabled by default. - - * DisablePointerAddresses - DisablePointerAddresses specifies whether to disable the printing of - pointer addresses. This is useful when diffing data structures in tests. - - * DisableCapacities - DisableCapacities specifies whether to disable the printing of - capacities for arrays, slices, maps and channels. 
This is useful when
-		diffing data structures in tests.
-
-	* ContinueOnMethod
-		Enables recursion into types after invoking error and Stringer interface
-		methods. Recursion after method invocation is disabled by default.
-
-	* SortKeys
-		Specifies map keys should be sorted before being printed. Use
-		this to have a more deterministic, diffable output. Note that
-		only native types (bool, int, uint, floats, uintptr and string)
-		and types which implement error or Stringer interfaces are
-		supported, with other types sorted according to the
-		reflect.Value.String() output which guarantees display
-		stability. Natural map order is used by default.
-
-	* SpewKeys
-		Specifies that, as a last resort attempt, map keys should be
-		spewed to strings and sorted by those strings. This is only
-		considered if SortKeys is true.
-
-Dump Usage
-
-Simply call spew.Dump with a list of variables you want to dump:
-
-	spew.Dump(myVar1, myVar2, ...)
-
-You may also call spew.Fdump if you would prefer to output to an arbitrary
-io.Writer. For example, to dump to standard error:
-
-	spew.Fdump(os.Stderr, myVar1, myVar2, ...)
-
-A third option is to call spew.Sdump to get the formatted output as a string:
-
-	str := spew.Sdump(myVar1, myVar2, ...)
-
-Sample Dump Output
-
-See the Dump example for details on the setup of the types and variables being
-shown here.
-
-	(main.Foo) {
-	 unexportedField: (*main.Bar)(0xf84002e210)({
-	  flag: (main.Flag) flagTwo,
-	  data: (uintptr) <nil>
-	 }),
-	 ExportedField: (map[interface {}]interface {}) (len=1) {
-	  (string) (len=3) "one": (bool) true
-	 }
-	}
-
-Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C
-command as shown.
-	([]uint8) (len=32 cap=32) {
-	 00000000  11 12 13 14 15 16 17 18  19 1a 1b 1c 1d 1e 1f 20  |............... |
-	 00000010  21 22 23 24 25 26 27 28  29 2a 2b 2c 2d 2e 2f 30  |!"#$%&'()*+,-./0|
-	 00000020  31 32                                             |12|
-	}
-
-Custom Formatter
-
-Spew provides a custom formatter that implements the fmt.Formatter interface
-so that it integrates cleanly with standard fmt package printing functions. The
-formatter is useful for inline printing of smaller data types similar to the
-standard %v format specifier.
-
-The custom formatter only responds to the %v (most compact), %+v (adds pointer
-addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
-combinations. Any other verbs such as %x and %q will be sent to the
-standard fmt package for formatting. In addition, the custom formatter ignores
-the width and precision arguments (however they will still work on the format
-specifiers not handled by the custom formatter).
-
-Custom Formatter Usage
-
-The simplest way to make use of the spew custom formatter is to call one of the
-convenience functions such as spew.Printf, spew.Println, or spew.Fprintf. The
-functions have syntax you are most likely already familiar with:
-
-	spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
-	spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
-	spew.Println(myVar, myVar2)
-	spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
-	spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
-
-See the Index for the full list of convenience functions.
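To make the verb combinations above concrete, a runnable counterpart (illustrative only; the bar type is hypothetical, and the circular pointer exercises the <shown> marker that appears in the sample output below):

	// Illustrative sketch, not part of this diff.
	package main

	import "github.com/davecgh/go-spew/spew"

	type bar struct {
		i int
		p *bar
	}

	func main() {
		b := &bar{i: 1}
		b.p = b // circular reference; spew prints <shown> instead of recursing

		spew.Printf("%v\n", b)   // most compact
		spew.Printf("%+v\n", b)  // adds pointer addresses
		spew.Printf("%#v\n", b)  // adds types
		spew.Printf("%#+v\n", b) // adds types and pointer addresses
	}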
-
-Sample Formatter Output
-
-Double pointer to a uint8:
-	  %v: <**>5
-	 %+v: <**>(0xf8400420d0->0xf8400420c8)5
-	 %#v: (**uint8)5
-	%#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
-
-Pointer to circular struct with a uint8 field and a pointer to itself:
-	  %v: <*>{1 <*><shown>}
-	 %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
-	 %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
-	%#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}
-
-See the Printf example for details on the setup of variables being shown
-here.
-
-Errors
-
-Since it is possible for custom Stringer/error interfaces to panic, spew
-detects them and handles them internally by printing the panic information
-inline with the output. Since spew is intended to provide deep pretty printing
-capabilities on structures, it intentionally does not return any errors.
-*/
-package spew
diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go
deleted file mode 100644
index f78d89f..0000000
--- a/vendor/github.com/davecgh/go-spew/spew/dump.go
+++ /dev/null
@@ -1,509 +0,0 @@
-/*
- * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
- *
- * Permission to use, copy, modify, and distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-package spew
-
-import (
-	"bytes"
-	"encoding/hex"
-	"fmt"
-	"io"
-	"os"
-	"reflect"
-	"regexp"
-	"strconv"
-	"strings"
-)
-
-var (
-	// uint8Type is a reflect.Type representing a uint8. It is used to
-	// convert cgo types to uint8 slices for hexdumping.
-	uint8Type = reflect.TypeOf(uint8(0))
-
-	// cCharRE is a regular expression that matches a cgo char.
-	// It is used to detect character arrays to hexdump them.
-	cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`)
-
-	// cUnsignedCharRE is a regular expression that matches a cgo unsigned
-	// char. It is used to detect unsigned character arrays to hexdump
-	// them.
-	cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`)
-
-	// cUint8tCharRE is a regular expression that matches a cgo uint8_t.
-	// It is used to detect uint8_t arrays to hexdump them.
-	cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`)
-)
-
-// dumpState contains information about the state of a dump operation.
-type dumpState struct {
-	w                io.Writer
-	depth            int
-	pointers         map[uintptr]int
-	ignoreNextType   bool
-	ignoreNextIndent bool
-	cs               *ConfigState
-}
-
-// indent performs indentation according to the depth level and cs.Indent
-// option.
-func (d *dumpState) indent() {
-	if d.ignoreNextIndent {
-		d.ignoreNextIndent = false
-		return
-	}
-	d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth))
-}
-
-// unpackValue returns values inside of non-nil interfaces when possible.
-// This is useful for data types like structs, arrays, slices, and maps which
-// can contain varying types packed inside an interface.
-func (d *dumpState) unpackValue(v reflect.Value) reflect.Value { - if v.Kind() == reflect.Interface && !v.IsNil() { - v = v.Elem() - } - return v -} - -// dumpPtr handles formatting of pointers by indirecting them as necessary. -func (d *dumpState) dumpPtr(v reflect.Value) { - // Remove pointers at or below the current depth from map used to detect - // circular refs. - for k, depth := range d.pointers { - if depth >= d.depth { - delete(d.pointers, k) - } - } - - // Keep list of all dereferenced pointers to show later. - pointerChain := make([]uintptr, 0) - - // Figure out how many levels of indirection there are by dereferencing - // pointers and unpacking interfaces down the chain while detecting circular - // references. - nilFound := false - cycleFound := false - indirects := 0 - ve := v - for ve.Kind() == reflect.Ptr { - if ve.IsNil() { - nilFound = true - break - } - indirects++ - addr := ve.Pointer() - pointerChain = append(pointerChain, addr) - if pd, ok := d.pointers[addr]; ok && pd < d.depth { - cycleFound = true - indirects-- - break - } - d.pointers[addr] = d.depth - - ve = ve.Elem() - if ve.Kind() == reflect.Interface { - if ve.IsNil() { - nilFound = true - break - } - ve = ve.Elem() - } - } - - // Display type information. - d.w.Write(openParenBytes) - d.w.Write(bytes.Repeat(asteriskBytes, indirects)) - d.w.Write([]byte(ve.Type().String())) - d.w.Write(closeParenBytes) - - // Display pointer information. - if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 { - d.w.Write(openParenBytes) - for i, addr := range pointerChain { - if i > 0 { - d.w.Write(pointerChainBytes) - } - printHexPtr(d.w, addr) - } - d.w.Write(closeParenBytes) - } - - // Display dereferenced value. - d.w.Write(openParenBytes) - switch { - case nilFound: - d.w.Write(nilAngleBytes) - - case cycleFound: - d.w.Write(circularBytes) - - default: - d.ignoreNextType = true - d.dump(ve) - } - d.w.Write(closeParenBytes) -} - -// dumpSlice handles formatting of arrays and slices. Byte (uint8 under -// reflection) arrays and slices are dumped in hexdump -C fashion. -func (d *dumpState) dumpSlice(v reflect.Value) { - // Determine whether this type should be hex dumped or not. Also, - // for types which should be hexdumped, try to use the underlying data - // first, then fall back to trying to convert them to a uint8 slice. - var buf []uint8 - doConvert := false - doHexDump := false - numEntries := v.Len() - if numEntries > 0 { - vt := v.Index(0).Type() - vts := vt.String() - switch { - // C types that need to be converted. - case cCharRE.MatchString(vts): - fallthrough - case cUnsignedCharRE.MatchString(vts): - fallthrough - case cUint8tCharRE.MatchString(vts): - doConvert = true - - // Try to use existing uint8 slices and fall back to converting - // and copying if that fails. - case vt.Kind() == reflect.Uint8: - // We need an addressable interface to convert the type - // to a byte slice. However, the reflect package won't - // give us an interface on certain things like - // unexported struct fields in order to enforce - // visibility rules. We use unsafe, when available, to - // bypass these restrictions since this package does not - // mutate the values. - vs := v - if !vs.CanInterface() || !vs.CanAddr() { - vs = unsafeReflectValue(vs) - } - if !UnsafeDisabled { - vs = vs.Slice(0, numEntries) - - // Use the existing uint8 slice if it can be - // type asserted. 
- iface := vs.Interface() - if slice, ok := iface.([]uint8); ok { - buf = slice - doHexDump = true - break - } - } - - // The underlying data needs to be converted if it can't - // be type asserted to a uint8 slice. - doConvert = true - } - - // Copy and convert the underlying type if needed. - if doConvert && vt.ConvertibleTo(uint8Type) { - // Convert and copy each element into a uint8 byte - // slice. - buf = make([]uint8, numEntries) - for i := 0; i < numEntries; i++ { - vv := v.Index(i) - buf[i] = uint8(vv.Convert(uint8Type).Uint()) - } - doHexDump = true - } - } - - // Hexdump the entire slice as needed. - if doHexDump { - indent := strings.Repeat(d.cs.Indent, d.depth) - str := indent + hex.Dump(buf) - str = strings.Replace(str, "\n", "\n"+indent, -1) - str = strings.TrimRight(str, d.cs.Indent) - d.w.Write([]byte(str)) - return - } - - // Recursively call dump for each item. - for i := 0; i < numEntries; i++ { - d.dump(d.unpackValue(v.Index(i))) - if i < (numEntries - 1) { - d.w.Write(commaNewlineBytes) - } else { - d.w.Write(newlineBytes) - } - } -} - -// dump is the main workhorse for dumping a value. It uses the passed reflect -// value to figure out what kind of object we are dealing with and formats it -// appropriately. It is a recursive function, however circular data structures -// are detected and handled properly. -func (d *dumpState) dump(v reflect.Value) { - // Handle invalid reflect values immediately. - kind := v.Kind() - if kind == reflect.Invalid { - d.w.Write(invalidAngleBytes) - return - } - - // Handle pointers specially. - if kind == reflect.Ptr { - d.indent() - d.dumpPtr(v) - return - } - - // Print type information unless already handled elsewhere. - if !d.ignoreNextType { - d.indent() - d.w.Write(openParenBytes) - d.w.Write([]byte(v.Type().String())) - d.w.Write(closeParenBytes) - d.w.Write(spaceBytes) - } - d.ignoreNextType = false - - // Display length and capacity if the built-in len and cap functions - // work with the value's kind and the len/cap itself is non-zero. - valueLen, valueCap := 0, 0 - switch v.Kind() { - case reflect.Array, reflect.Slice, reflect.Chan: - valueLen, valueCap = v.Len(), v.Cap() - case reflect.Map, reflect.String: - valueLen = v.Len() - } - if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 { - d.w.Write(openParenBytes) - if valueLen != 0 { - d.w.Write(lenEqualsBytes) - printInt(d.w, int64(valueLen), 10) - } - if !d.cs.DisableCapacities && valueCap != 0 { - if valueLen != 0 { - d.w.Write(spaceBytes) - } - d.w.Write(capEqualsBytes) - printInt(d.w, int64(valueCap), 10) - } - d.w.Write(closeParenBytes) - d.w.Write(spaceBytes) - } - - // Call Stringer/error interfaces if they exist and the handle methods flag - // is enabled - if !d.cs.DisableMethods { - if (kind != reflect.Invalid) && (kind != reflect.Interface) { - if handled := handleMethods(d.cs, d.w, v); handled { - return - } - } - } - - switch kind { - case reflect.Invalid: - // Do nothing. We should never get here since invalid has already - // been handled above. 
- - case reflect.Bool: - printBool(d.w, v.Bool()) - - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - printInt(d.w, v.Int(), 10) - - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - printUint(d.w, v.Uint(), 10) - - case reflect.Float32: - printFloat(d.w, v.Float(), 32) - - case reflect.Float64: - printFloat(d.w, v.Float(), 64) - - case reflect.Complex64: - printComplex(d.w, v.Complex(), 32) - - case reflect.Complex128: - printComplex(d.w, v.Complex(), 64) - - case reflect.Slice: - if v.IsNil() { - d.w.Write(nilAngleBytes) - break - } - fallthrough - - case reflect.Array: - d.w.Write(openBraceNewlineBytes) - d.depth++ - if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { - d.indent() - d.w.Write(maxNewlineBytes) - } else { - d.dumpSlice(v) - } - d.depth-- - d.indent() - d.w.Write(closeBraceBytes) - - case reflect.String: - d.w.Write([]byte(strconv.Quote(v.String()))) - - case reflect.Interface: - // The only time we should get here is for nil interfaces due to - // unpackValue calls. - if v.IsNil() { - d.w.Write(nilAngleBytes) - } - - case reflect.Ptr: - // Do nothing. We should never get here since pointers have already - // been handled above. - - case reflect.Map: - // nil maps should be indicated as different than empty maps - if v.IsNil() { - d.w.Write(nilAngleBytes) - break - } - - d.w.Write(openBraceNewlineBytes) - d.depth++ - if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { - d.indent() - d.w.Write(maxNewlineBytes) - } else { - numEntries := v.Len() - keys := v.MapKeys() - if d.cs.SortKeys { - sortValues(keys, d.cs) - } - for i, key := range keys { - d.dump(d.unpackValue(key)) - d.w.Write(colonSpaceBytes) - d.ignoreNextIndent = true - d.dump(d.unpackValue(v.MapIndex(key))) - if i < (numEntries - 1) { - d.w.Write(commaNewlineBytes) - } else { - d.w.Write(newlineBytes) - } - } - } - d.depth-- - d.indent() - d.w.Write(closeBraceBytes) - - case reflect.Struct: - d.w.Write(openBraceNewlineBytes) - d.depth++ - if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { - d.indent() - d.w.Write(maxNewlineBytes) - } else { - vt := v.Type() - numFields := v.NumField() - for i := 0; i < numFields; i++ { - d.indent() - vtf := vt.Field(i) - d.w.Write([]byte(vtf.Name)) - d.w.Write(colonSpaceBytes) - d.ignoreNextIndent = true - d.dump(d.unpackValue(v.Field(i))) - if i < (numFields - 1) { - d.w.Write(commaNewlineBytes) - } else { - d.w.Write(newlineBytes) - } - } - } - d.depth-- - d.indent() - d.w.Write(closeBraceBytes) - - case reflect.Uintptr: - printHexPtr(d.w, uintptr(v.Uint())) - - case reflect.UnsafePointer, reflect.Chan, reflect.Func: - printHexPtr(d.w, v.Pointer()) - - // There were not any other types at the time this code was written, but - // fall back to letting the default fmt package handle it in case any new - // types are added. - default: - if v.CanInterface() { - fmt.Fprintf(d.w, "%v", v.Interface()) - } else { - fmt.Fprintf(d.w, "%v", v.String()) - } - } -} - -// fdump is a helper function to consolidate the logic from the various public -// methods which take varying writers and config states. -func fdump(cs *ConfigState, w io.Writer, a ...interface{}) { - for _, arg := range a { - if arg == nil { - w.Write(interfaceBytes) - w.Write(spaceBytes) - w.Write(nilAngleBytes) - w.Write(newlineBytes) - continue - } - - d := dumpState{w: w, cs: cs} - d.pointers = make(map[uintptr]int) - d.dump(reflect.ValueOf(arg)) - d.w.Write(newlineBytes) - } -} - -// Fdump formats and displays the passed arguments to io.Writer w. 
It formats -// exactly the same as Dump. -func Fdump(w io.Writer, a ...interface{}) { - fdump(&Config, w, a...) -} - -// Sdump returns a string with the passed arguments formatted exactly the same -// as Dump. -func Sdump(a ...interface{}) string { - var buf bytes.Buffer - fdump(&Config, &buf, a...) - return buf.String() -} - -/* -Dump displays the passed parameters to standard out with newlines, customizable -indentation, and additional debug information such as complete types and all -pointer addresses used to indirect to the final value. It provides the -following features over the built-in printing facilities provided by the fmt -package: - - * Pointers are dereferenced and followed - * Circular data structures are detected and handled properly - * Custom Stringer/error interfaces are optionally invoked, including - on unexported types - * Custom types which only implement the Stringer/error interfaces via - a pointer receiver are optionally invoked when passing non-pointer - variables - * Byte arrays and slices are dumped like the hexdump -C command which - includes offsets, byte values in hex, and ASCII output - -The configuration options are controlled by an exported package global, -spew.Config. See ConfigState for options documentation. - -See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to -get the formatted result as a string. -*/ -func Dump(a ...interface{}) { - fdump(&Config, os.Stdout, a...) -} diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go deleted file mode 100644 index b04edb7..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/format.go +++ /dev/null @@ -1,419 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "bytes" - "fmt" - "reflect" - "strconv" - "strings" -) - -// supportedFlags is a list of all the character flags supported by fmt package. -const supportedFlags = "0-+# " - -// formatState implements the fmt.Formatter interface and contains information -// about the state of a formatting operation. The NewFormatter function can -// be used to get a new Formatter which can be used directly as arguments -// in standard fmt package printing calls. -type formatState struct { - value interface{} - fs fmt.State - depth int - pointers map[uintptr]int - ignoreNextType bool - cs *ConfigState -} - -// buildDefaultFormat recreates the original format string without precision -// and width information to pass in to fmt.Sprintf in the case of an -// unrecognized type. Unless new types are added to the language, this -// function won't ever be called. 
-func (f *formatState) buildDefaultFormat() (format string) { - buf := bytes.NewBuffer(percentBytes) - - for _, flag := range supportedFlags { - if f.fs.Flag(int(flag)) { - buf.WriteRune(flag) - } - } - - buf.WriteRune('v') - - format = buf.String() - return format -} - -// constructOrigFormat recreates the original format string including precision -// and width information to pass along to the standard fmt package. This allows -// automatic deferral of all format strings this package doesn't support. -func (f *formatState) constructOrigFormat(verb rune) (format string) { - buf := bytes.NewBuffer(percentBytes) - - for _, flag := range supportedFlags { - if f.fs.Flag(int(flag)) { - buf.WriteRune(flag) - } - } - - if width, ok := f.fs.Width(); ok { - buf.WriteString(strconv.Itoa(width)) - } - - if precision, ok := f.fs.Precision(); ok { - buf.Write(precisionBytes) - buf.WriteString(strconv.Itoa(precision)) - } - - buf.WriteRune(verb) - - format = buf.String() - return format -} - -// unpackValue returns values inside of non-nil interfaces when possible and -// ensures that types for values which have been unpacked from an interface -// are displayed when the show types flag is also set. -// This is useful for data types like structs, arrays, slices, and maps which -// can contain varying types packed inside an interface. -func (f *formatState) unpackValue(v reflect.Value) reflect.Value { - if v.Kind() == reflect.Interface { - f.ignoreNextType = false - if !v.IsNil() { - v = v.Elem() - } - } - return v -} - -// formatPtr handles formatting of pointers by indirecting them as necessary. -func (f *formatState) formatPtr(v reflect.Value) { - // Display nil if top level pointer is nil. - showTypes := f.fs.Flag('#') - if v.IsNil() && (!showTypes || f.ignoreNextType) { - f.fs.Write(nilAngleBytes) - return - } - - // Remove pointers at or below the current depth from map used to detect - // circular refs. - for k, depth := range f.pointers { - if depth >= f.depth { - delete(f.pointers, k) - } - } - - // Keep list of all dereferenced pointers to possibly show later. - pointerChain := make([]uintptr, 0) - - // Figure out how many levels of indirection there are by derferencing - // pointers and unpacking interfaces down the chain while detecting circular - // references. - nilFound := false - cycleFound := false - indirects := 0 - ve := v - for ve.Kind() == reflect.Ptr { - if ve.IsNil() { - nilFound = true - break - } - indirects++ - addr := ve.Pointer() - pointerChain = append(pointerChain, addr) - if pd, ok := f.pointers[addr]; ok && pd < f.depth { - cycleFound = true - indirects-- - break - } - f.pointers[addr] = f.depth - - ve = ve.Elem() - if ve.Kind() == reflect.Interface { - if ve.IsNil() { - nilFound = true - break - } - ve = ve.Elem() - } - } - - // Display type or indirection level depending on flags. - if showTypes && !f.ignoreNextType { - f.fs.Write(openParenBytes) - f.fs.Write(bytes.Repeat(asteriskBytes, indirects)) - f.fs.Write([]byte(ve.Type().String())) - f.fs.Write(closeParenBytes) - } else { - if nilFound || cycleFound { - indirects += strings.Count(ve.Type().String(), "*") - } - f.fs.Write(openAngleBytes) - f.fs.Write([]byte(strings.Repeat("*", indirects))) - f.fs.Write(closeAngleBytes) - } - - // Display pointer information depending on flags. 
- if f.fs.Flag('+') && (len(pointerChain) > 0) { - f.fs.Write(openParenBytes) - for i, addr := range pointerChain { - if i > 0 { - f.fs.Write(pointerChainBytes) - } - printHexPtr(f.fs, addr) - } - f.fs.Write(closeParenBytes) - } - - // Display dereferenced value. - switch { - case nilFound: - f.fs.Write(nilAngleBytes) - - case cycleFound: - f.fs.Write(circularShortBytes) - - default: - f.ignoreNextType = true - f.format(ve) - } -} - -// format is the main workhorse for providing the Formatter interface. It -// uses the passed reflect value to figure out what kind of object we are -// dealing with and formats it appropriately. It is a recursive function, -// however circular data structures are detected and handled properly. -func (f *formatState) format(v reflect.Value) { - // Handle invalid reflect values immediately. - kind := v.Kind() - if kind == reflect.Invalid { - f.fs.Write(invalidAngleBytes) - return - } - - // Handle pointers specially. - if kind == reflect.Ptr { - f.formatPtr(v) - return - } - - // Print type information unless already handled elsewhere. - if !f.ignoreNextType && f.fs.Flag('#') { - f.fs.Write(openParenBytes) - f.fs.Write([]byte(v.Type().String())) - f.fs.Write(closeParenBytes) - } - f.ignoreNextType = false - - // Call Stringer/error interfaces if they exist and the handle methods - // flag is enabled. - if !f.cs.DisableMethods { - if (kind != reflect.Invalid) && (kind != reflect.Interface) { - if handled := handleMethods(f.cs, f.fs, v); handled { - return - } - } - } - - switch kind { - case reflect.Invalid: - // Do nothing. We should never get here since invalid has already - // been handled above. - - case reflect.Bool: - printBool(f.fs, v.Bool()) - - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - printInt(f.fs, v.Int(), 10) - - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - printUint(f.fs, v.Uint(), 10) - - case reflect.Float32: - printFloat(f.fs, v.Float(), 32) - - case reflect.Float64: - printFloat(f.fs, v.Float(), 64) - - case reflect.Complex64: - printComplex(f.fs, v.Complex(), 32) - - case reflect.Complex128: - printComplex(f.fs, v.Complex(), 64) - - case reflect.Slice: - if v.IsNil() { - f.fs.Write(nilAngleBytes) - break - } - fallthrough - - case reflect.Array: - f.fs.Write(openBracketBytes) - f.depth++ - if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { - f.fs.Write(maxShortBytes) - } else { - numEntries := v.Len() - for i := 0; i < numEntries; i++ { - if i > 0 { - f.fs.Write(spaceBytes) - } - f.ignoreNextType = true - f.format(f.unpackValue(v.Index(i))) - } - } - f.depth-- - f.fs.Write(closeBracketBytes) - - case reflect.String: - f.fs.Write([]byte(v.String())) - - case reflect.Interface: - // The only time we should get here is for nil interfaces due to - // unpackValue calls. - if v.IsNil() { - f.fs.Write(nilAngleBytes) - } - - case reflect.Ptr: - // Do nothing. We should never get here since pointers have already - // been handled above. 
- - case reflect.Map: - // nil maps should be indicated as different than empty maps - if v.IsNil() { - f.fs.Write(nilAngleBytes) - break - } - - f.fs.Write(openMapBytes) - f.depth++ - if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { - f.fs.Write(maxShortBytes) - } else { - keys := v.MapKeys() - if f.cs.SortKeys { - sortValues(keys, f.cs) - } - for i, key := range keys { - if i > 0 { - f.fs.Write(spaceBytes) - } - f.ignoreNextType = true - f.format(f.unpackValue(key)) - f.fs.Write(colonBytes) - f.ignoreNextType = true - f.format(f.unpackValue(v.MapIndex(key))) - } - } - f.depth-- - f.fs.Write(closeMapBytes) - - case reflect.Struct: - numFields := v.NumField() - f.fs.Write(openBraceBytes) - f.depth++ - if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { - f.fs.Write(maxShortBytes) - } else { - vt := v.Type() - for i := 0; i < numFields; i++ { - if i > 0 { - f.fs.Write(spaceBytes) - } - vtf := vt.Field(i) - if f.fs.Flag('+') || f.fs.Flag('#') { - f.fs.Write([]byte(vtf.Name)) - f.fs.Write(colonBytes) - } - f.format(f.unpackValue(v.Field(i))) - } - } - f.depth-- - f.fs.Write(closeBraceBytes) - - case reflect.Uintptr: - printHexPtr(f.fs, uintptr(v.Uint())) - - case reflect.UnsafePointer, reflect.Chan, reflect.Func: - printHexPtr(f.fs, v.Pointer()) - - // There were not any other types at the time this code was written, but - // fall back to letting the default fmt package handle it if any get added. - default: - format := f.buildDefaultFormat() - if v.CanInterface() { - fmt.Fprintf(f.fs, format, v.Interface()) - } else { - fmt.Fprintf(f.fs, format, v.String()) - } - } -} - -// Format satisfies the fmt.Formatter interface. See NewFormatter for usage -// details. -func (f *formatState) Format(fs fmt.State, verb rune) { - f.fs = fs - - // Use standard formatting for verbs that are not v. - if verb != 'v' { - format := f.constructOrigFormat(verb) - fmt.Fprintf(fs, format, f.value) - return - } - - if f.value == nil { - if fs.Flag('#') { - fs.Write(interfaceBytes) - } - fs.Write(nilAngleBytes) - return - } - - f.format(reflect.ValueOf(f.value)) -} - -// newFormatter is a helper function to consolidate the logic from the various -// public methods which take varying config states. -func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter { - fs := &formatState{value: v, cs: cs} - fs.pointers = make(map[uintptr]int) - return fs -} - -/* -NewFormatter returns a custom formatter that satisfies the fmt.Formatter -interface. As a result, it integrates cleanly with standard fmt package -printing functions. The formatter is useful for inline printing of smaller data -types similar to the standard %v format specifier. - -The custom formatter only responds to the %v (most compact), %+v (adds pointer -addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb -combinations. Any other verbs such as %x and %q will be sent to the the -standard fmt package for formatting. In addition, the custom formatter ignores -the width and precision arguments (however they will still work on the format -specifiers not handled by the custom formatter). - -Typically this function shouldn't be called directly. It is much easier to make -use of the custom formatter by calling one of the convenience functions such as -Printf, Println, or Fprintf. 
-*/ -func NewFormatter(v interface{}) fmt.Formatter { - return newFormatter(&Config, v) -} diff --git a/vendor/github.com/davecgh/go-spew/spew/spew.go b/vendor/github.com/davecgh/go-spew/spew/spew.go deleted file mode 100644 index 32c0e33..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/spew.go +++ /dev/null @@ -1,148 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "fmt" - "io" -) - -// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the formatted string as a value that satisfies error. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Errorf(format string, a ...interface{}) (err error) { - return fmt.Errorf(format, convertArgs(a)...) -} - -// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b)) -func Fprint(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprint(w, convertArgs(a)...) -} - -// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { - return fmt.Fprintf(w, format, convertArgs(a)...) -} - -// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it -// passed with a default Formatter interface returned by NewFormatter. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b)) -func Fprintln(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprintln(w, convertArgs(a)...) -} - -// Print is a wrapper for fmt.Print that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. 
-// -// This function is shorthand for the following syntax: -// -// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b)) -func Print(a ...interface{}) (n int, err error) { - return fmt.Print(convertArgs(a)...) -} - -// Printf is a wrapper for fmt.Printf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Printf(format string, a ...interface{}) (n int, err error) { - return fmt.Printf(format, convertArgs(a)...) -} - -// Println is a wrapper for fmt.Println that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b)) -func Println(a ...interface{}) (n int, err error) { - return fmt.Println(convertArgs(a)...) -} - -// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b)) -func Sprint(a ...interface{}) string { - return fmt.Sprint(convertArgs(a)...) -} - -// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Sprintf(format string, a ...interface{}) string { - return fmt.Sprintf(format, convertArgs(a)...) -} - -// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it -// were passed with a default Formatter interface returned by NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b)) -func Sprintln(a ...interface{}) string { - return fmt.Sprintln(convertArgs(a)...) -} - -// convertArgs accepts a slice of arguments and returns a slice of the same -// length with each argument converted to a default spew Formatter interface. 
-func convertArgs(args []interface{}) (formatters []interface{}) { - formatters = make([]interface{}, len(args)) - for index, arg := range args { - formatters[index] = NewFormatter(arg) - } - return formatters -} diff --git a/vendor/github.com/go-ini/ini/.gitignore b/vendor/github.com/go-ini/ini/.gitignore deleted file mode 100644 index 1241112..0000000 --- a/vendor/github.com/go-ini/ini/.gitignore +++ /dev/null @@ -1,6 +0,0 @@ -testdata/conf_out.ini -ini.sublime-project -ini.sublime-workspace -testdata/conf_reflect.ini -.idea -/.vscode diff --git a/vendor/github.com/go-ini/ini/.travis.yml b/vendor/github.com/go-ini/ini/.travis.yml deleted file mode 100644 index 4a237ea..0000000 --- a/vendor/github.com/go-ini/ini/.travis.yml +++ /dev/null @@ -1,16 +0,0 @@ -sudo: false -language: go -go: - - 1.6.x - - 1.7.x - - 1.8.x - - 1.9.x - - 1.10.x - -script: - - go get golang.org/x/tools/cmd/cover - - go get github.com/smartystreets/goconvey - - mkdir -p $HOME/gopath/src/gopkg.in - - ln -s $HOME/gopath/src/github.com/go-ini/ini $HOME/gopath/src/gopkg.in/ini.v1 - - cd $HOME/gopath/src/gopkg.in/ini.v1 - - go test -v -cover -race diff --git a/vendor/github.com/go-ini/ini/LICENSE b/vendor/github.com/go-ini/ini/LICENSE deleted file mode 100644 index d361bbc..0000000 --- a/vendor/github.com/go-ini/ini/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ -Apache License -Version 2.0, January 2004 -http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - -"License" shall mean the terms and conditions for use, reproduction, and -distribution as defined by Sections 1 through 9 of this document. - -"Licensor" shall mean the copyright owner or entity authorized by the copyright -owner that is granting the License. - -"Legal Entity" shall mean the union of the acting entity and all other entities -that control, are controlled by, or are under common control with that entity. -For the purposes of this definition, "control" means (i) the power, direct or -indirect, to cause the direction or management of such entity, whether by -contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the -outstanding shares, or (iii) beneficial ownership of such entity. - -"You" (or "Your") shall mean an individual or Legal Entity exercising -permissions granted by this License. - -"Source" form shall mean the preferred form for making modifications, including -but not limited to software source code, documentation source, and configuration -files. - -"Object" form shall mean any form resulting from mechanical transformation or -translation of a Source form, including but not limited to compiled object code, -generated documentation, and conversions to other media types. - -"Work" shall mean the work of authorship, whether in Source or Object form, made -available under the License, as indicated by a copyright notice that is included -in or attached to the work (an example is provided in the Appendix below). - -"Derivative Works" shall mean any work, whether in Source or Object form, that -is based on (or derived from) the Work and for which the editorial revisions, -annotations, elaborations, or other modifications represent, as a whole, an -original work of authorship. For the purposes of this License, Derivative Works -shall not include works that remain separable from, or merely link (or bind by -name) to the interfaces of, the Work and Derivative Works thereof. 
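The spew convenience wrappers removed above (Printf, Sprintf, and friends) all funnel their arguments through NewFormatter via convertArgs. A minimal usage sketch of that vendored API — the conn type and values are hypothetical:

```go
package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

type conn struct {
	Host string
	Port int
}

func main() {
	c := &conn{Host: "localhost", Port: 5432}

	// Each argument is wrapped by NewFormatter, so the custom
	// %v/%+v/%#v/%#+v handling applies; %x, %q, etc. fall through to fmt.
	spew.Printf("conn: %#+v\n", c)

	// The Sprint-style variants return the formatted string instead.
	fmt.Println(spew.Sprintf("compact: %v", c))
}
```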
- -"Contribution" shall mean any work of authorship, including the original version -of the Work and any modifications or additions to that Work or Derivative Works -thereof, that is intentionally submitted to Licensor for inclusion in the Work -by the copyright owner or by an individual or Legal Entity authorized to submit -on behalf of the copyright owner. For the purposes of this definition, -"submitted" means any form of electronic, verbal, or written communication sent -to the Licensor or its representatives, including but not limited to -communication on electronic mailing lists, source code control systems, and -issue tracking systems that are managed by, or on behalf of, the Licensor for -the purpose of discussing and improving the Work, but excluding communication -that is conspicuously marked or otherwise designated in writing by the copyright -owner as "Not a Contribution." - -"Contributor" shall mean Licensor and any individual or Legal Entity on behalf -of whom a Contribution has been received by Licensor and subsequently -incorporated within the Work. - -2. Grant of Copyright License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable copyright license to reproduce, prepare Derivative Works of, -publicly display, publicly perform, sublicense, and distribute the Work and such -Derivative Works in Source or Object form. - -3. Grant of Patent License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable (except as stated in this section) patent license to make, have -made, use, offer to sell, sell, import, and otherwise transfer the Work, where -such license applies only to those patent claims licensable by such Contributor -that are necessarily infringed by their Contribution(s) alone or by combination -of their Contribution(s) with the Work to which such Contribution(s) was -submitted. If You institute patent litigation against any entity (including a -cross-claim or counterclaim in a lawsuit) alleging that the Work or a -Contribution incorporated within the Work constitutes direct or contributory -patent infringement, then any patent licenses granted to You under this License -for that Work shall terminate as of the date such litigation is filed. - -4. Redistribution. 
- -You may reproduce and distribute copies of the Work or Derivative Works thereof -in any medium, with or without modifications, and in Source or Object form, -provided that You meet the following conditions: - -You must give any other recipients of the Work or Derivative Works a copy of -this License; and -You must cause any modified files to carry prominent notices stating that You -changed the files; and -You must retain, in the Source form of any Derivative Works that You distribute, -all copyright, patent, trademark, and attribution notices from the Source form -of the Work, excluding those notices that do not pertain to any part of the -Derivative Works; and -If the Work includes a "NOTICE" text file as part of its distribution, then any -Derivative Works that You distribute must include a readable copy of the -attribution notices contained within such NOTICE file, excluding those notices -that do not pertain to any part of the Derivative Works, in at least one of the -following places: within a NOTICE text file distributed as part of the -Derivative Works; within the Source form or documentation, if provided along -with the Derivative Works; or, within a display generated by the Derivative -Works, if and wherever such third-party notices normally appear. The contents of -the NOTICE file are for informational purposes only and do not modify the -License. You may add Your own attribution notices within Derivative Works that -You distribute, alongside or as an addendum to the NOTICE text from the Work, -provided that such additional attribution notices cannot be construed as -modifying the License. -You may add Your own copyright statement to Your modifications and may provide -additional or different license terms and conditions for use, reproduction, or -distribution of Your modifications, or for any such Derivative Works as a whole, -provided Your use, reproduction, and distribution of the Work otherwise complies -with the conditions stated in this License. - -5. Submission of Contributions. - -Unless You explicitly state otherwise, any Contribution intentionally submitted -for inclusion in the Work by You to the Licensor shall be under the terms and -conditions of this License, without any additional terms or conditions. -Notwithstanding the above, nothing herein shall supersede or modify the terms of -any separate license agreement you may have executed with Licensor regarding -such Contributions. - -6. Trademarks. - -This License does not grant permission to use the trade names, trademarks, -service marks, or product names of the Licensor, except as required for -reasonable and customary use in describing the origin of the Work and -reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. - -Unless required by applicable law or agreed to in writing, Licensor provides the -Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, -including, without limitation, any warranties or conditions of TITLE, -NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are -solely responsible for determining the appropriateness of using or -redistributing the Work and assume any risks associated with Your exercise of -permissions under this License. - -8. Limitation of Liability. 
- -In no event and under no legal theory, whether in tort (including negligence), -contract, or otherwise, unless required by applicable law (such as deliberate -and grossly negligent acts) or agreed to in writing, shall any Contributor be -liable to You for damages, including any direct, indirect, special, incidental, -or consequential damages of any character arising as a result of this License or -out of the use or inability to use the Work (including but not limited to -damages for loss of goodwill, work stoppage, computer failure or malfunction, or -any and all other commercial damages or losses), even if such Contributor has -been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. - -While redistributing the Work or Derivative Works thereof, You may choose to -offer, and charge a fee for, acceptance of support, warranty, indemnity, or -other liability obligations and/or rights consistent with this License. However, -in accepting such obligations, You may act only on Your own behalf and on Your -sole responsibility, not on behalf of any other Contributor, and only if You -agree to indemnify, defend, and hold each Contributor harmless for any liability -incurred by, or claims asserted against, such Contributor by reason of your -accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work - -To apply the Apache License to your work, attach the following boilerplate -notice, with the fields enclosed by brackets "[]" replaced with your own -identifying information. (Don't include the brackets!) The text should be -enclosed in the appropriate comment syntax for the file format. We also -recommend that a file or class name and description of purpose be included on -the same "printed page" as the copyright notice for easier identification within -third-party archives. - - Copyright 2014 Unknwon - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/go-ini/ini/Makefile b/vendor/github.com/go-ini/ini/Makefile deleted file mode 100644 index af27ff0..0000000 --- a/vendor/github.com/go-ini/ini/Makefile +++ /dev/null @@ -1,15 +0,0 @@ -.PHONY: build test bench vet coverage - -build: vet bench - -test: - go test -v -cover -race - -bench: - go test -v -cover -race -test.bench=. 
-test.benchmem - -vet: - go vet - -coverage: - go test -coverprofile=c.out && go tool cover -html=c.out && rm c.out diff --git a/vendor/github.com/go-ini/ini/README.md b/vendor/github.com/go-ini/ini/README.md deleted file mode 100644 index 988dcea..0000000 --- a/vendor/github.com/go-ini/ini/README.md +++ /dev/null @@ -1,44 +0,0 @@ -INI [![Build Status](https://travis-ci.org/go-ini/ini.svg?branch=master)](https://travis-ci.org/go-ini/ini) [![Sourcegraph](https://img.shields.io/badge/view%20on-Sourcegraph-brightgreen.svg)](https://sourcegraph.com/github.com/go-ini/ini) -=== - -![](https://avatars0.githubusercontent.com/u/10216035?v=3&s=200) - -Package ini provides INI file read and write functionality in Go. - -## Features - -- Load from multiple data sources(`[]byte`, file and `io.ReadCloser`) with overwrites. -- Read with recursion values. -- Read with parent-child sections. -- Read with auto-increment key names. -- Read with multiple-line values. -- Read with tons of helper methods. -- Read and convert values to Go types. -- Read and **WRITE** comments of sections and keys. -- Manipulate sections, keys and comments with ease. -- Keep sections and keys in order as you parse and save. - -## Installation - -To use a tagged revision: - -```sh -$ go get gopkg.in/ini.v1 -``` - -To use with latest changes: - -```sh -$ go get github.com/go-ini/ini -``` - -Please add `-u` flag to update in the future. - -## Getting Help - -- [Getting Started](https://ini.unknwon.io/docs/intro/getting_started) -- [API Documentation](https://gowalker.org/gopkg.in/ini.v1) - -## License - -This project is under Apache v2 License. See the [LICENSE](LICENSE) file for the full license text. diff --git a/vendor/github.com/go-ini/ini/error.go b/vendor/github.com/go-ini/ini/error.go deleted file mode 100644 index 80afe74..0000000 --- a/vendor/github.com/go-ini/ini/error.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2016 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package ini - -import ( - "fmt" -) - -type ErrDelimiterNotFound struct { - Line string -} - -func IsErrDelimiterNotFound(err error) bool { - _, ok := err.(ErrDelimiterNotFound) - return ok -} - -func (err ErrDelimiterNotFound) Error() string { - return fmt.Sprintf("key-value delimiter not found: %s", err.Line) -} diff --git a/vendor/github.com/go-ini/ini/file.go b/vendor/github.com/go-ini/ini/file.go deleted file mode 100644 index 1a3186b..0000000 --- a/vendor/github.com/go-ini/ini/file.go +++ /dev/null @@ -1,414 +0,0 @@ -// Copyright 2017 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
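The error.go file removed above defines the package's one exported error type, detected via the IsErrDelimiterNotFound helper. A minimal sketch, assuming default LoadOptions and a hypothetical malformed input:

```go
package main

import (
	"fmt"

	ini "github.com/go-ini/ini"
)

func main() {
	// A line with no '=' or ':' delimiter should fail to parse
	// under the default LoadOptions.
	_, err := ini.Load([]byte("not-a-key-value-line"))
	if ini.IsErrDelimiterNotFound(err) {
		fmt.Println("malformed line:", err)
	}
}
```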
See the -// License for the specific language governing permissions and limitations -// under the License. - -package ini - -import ( - "bytes" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "strings" - "sync" -) - -// File represents a combination of a or more INI file(s) in memory. -type File struct { - options LoadOptions - dataSources []dataSource - - // Should make things safe, but sometimes doesn't matter. - BlockMode bool - lock sync.RWMutex - - // To keep data in order. - sectionList []string - // Actual data is stored here. - sections map[string]*Section - - NameMapper - ValueMapper -} - -// newFile initializes File object with given data sources. -func newFile(dataSources []dataSource, opts LoadOptions) *File { - return &File{ - BlockMode: true, - dataSources: dataSources, - sections: make(map[string]*Section), - sectionList: make([]string, 0, 10), - options: opts, - } -} - -// Empty returns an empty file object. -func Empty() *File { - // Ignore error here, we sure our data is good. - f, _ := Load([]byte("")) - return f -} - -// NewSection creates a new section. -func (f *File) NewSection(name string) (*Section, error) { - if len(name) == 0 { - return nil, errors.New("error creating new section: empty section name") - } else if f.options.Insensitive && name != DEFAULT_SECTION { - name = strings.ToLower(name) - } - - if f.BlockMode { - f.lock.Lock() - defer f.lock.Unlock() - } - - if inSlice(name, f.sectionList) { - return f.sections[name], nil - } - - f.sectionList = append(f.sectionList, name) - f.sections[name] = newSection(f, name) - return f.sections[name], nil -} - -// NewRawSection creates a new section with an unparseable body. -func (f *File) NewRawSection(name, body string) (*Section, error) { - section, err := f.NewSection(name) - if err != nil { - return nil, err - } - - section.isRawSection = true - section.rawBody = body - return section, nil -} - -// NewSections creates a list of sections. -func (f *File) NewSections(names ...string) (err error) { - for _, name := range names { - if _, err = f.NewSection(name); err != nil { - return err - } - } - return nil -} - -// GetSection returns section by given name. -func (f *File) GetSection(name string) (*Section, error) { - if len(name) == 0 { - name = DEFAULT_SECTION - } - if f.options.Insensitive { - name = strings.ToLower(name) - } - - if f.BlockMode { - f.lock.RLock() - defer f.lock.RUnlock() - } - - sec := f.sections[name] - if sec == nil { - return nil, fmt.Errorf("section '%s' does not exist", name) - } - return sec, nil -} - -// Section assumes named section exists and returns a zero-value when not. -func (f *File) Section(name string) *Section { - sec, err := f.GetSection(name) - if err != nil { - // Note: It's OK here because the only possible error is empty section name, - // but if it's empty, this piece of code won't be executed. - sec, _ = f.NewSection(name) - return sec - } - return sec -} - -// Section returns list of Section. -func (f *File) Sections() []*Section { - if f.BlockMode { - f.lock.RLock() - defer f.lock.RUnlock() - } - - sections := make([]*Section, len(f.sectionList)) - for i, name := range f.sectionList { - sections[i] = f.sections[name] - } - return sections -} - -// ChildSections returns a list of child sections of given section name. -func (f *File) ChildSections(name string) []*Section { - return f.Section(name).ChildSections() -} - -// SectionStrings returns list of section names. 
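The section-management methods above differ mainly in error behavior: GetSection reports unknown names, while Section silently creates them. A short sketch — section names are hypothetical:

```go
package main

import (
	"fmt"
	"log"

	ini "github.com/go-ini/ini"
)

func main() {
	f := ini.Empty()

	if _, err := f.NewSection("database"); err != nil {
		log.Fatal(err)
	}

	// GetSection reports unknown names; Section creates them on demand.
	if _, err := f.GetSection("missing"); err != nil {
		fmt.Println(err) // section 'missing' does not exist
	}
	_ = f.Section("cache")

	fmt.Println(f.SectionStrings()) // [DEFAULT database cache]
}
```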
-func (f *File) SectionStrings() []string { - list := make([]string, len(f.sectionList)) - copy(list, f.sectionList) - return list -} - -// DeleteSection deletes a section. -func (f *File) DeleteSection(name string) { - if f.BlockMode { - f.lock.Lock() - defer f.lock.Unlock() - } - - if len(name) == 0 { - name = DEFAULT_SECTION - } - - for i, s := range f.sectionList { - if s == name { - f.sectionList = append(f.sectionList[:i], f.sectionList[i+1:]...) - delete(f.sections, name) - return - } - } -} - -func (f *File) reload(s dataSource) error { - r, err := s.ReadCloser() - if err != nil { - return err - } - defer r.Close() - - return f.parse(r) -} - -// Reload reloads and parses all data sources. -func (f *File) Reload() (err error) { - for _, s := range f.dataSources { - if err = f.reload(s); err != nil { - // In loose mode, we create an empty default section for nonexistent files. - if os.IsNotExist(err) && f.options.Loose { - f.parse(bytes.NewBuffer(nil)) - continue - } - return err - } - } - return nil -} - -// Append appends one or more data sources and reloads automatically. -func (f *File) Append(source interface{}, others ...interface{}) error { - ds, err := parseDataSource(source) - if err != nil { - return err - } - f.dataSources = append(f.dataSources, ds) - for _, s := range others { - ds, err = parseDataSource(s) - if err != nil { - return err - } - f.dataSources = append(f.dataSources, ds) - } - return f.Reload() -} - -func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) { - equalSign := "=" - if PrettyFormat || PrettyEqual { - equalSign = " = " - } - - // Use buffer to make sure target is safe until finish encoding. - buf := bytes.NewBuffer(nil) - for i, sname := range f.sectionList { - sec := f.Section(sname) - if len(sec.Comment) > 0 { - // Support multiline comments - lines := strings.Split(sec.Comment, LineBreak) - for i := range lines { - if lines[i][0] != '#' && lines[i][0] != ';' { - lines[i] = "; " + lines[i] - } else { - lines[i] = lines[i][:1] + " " + strings.TrimSpace(lines[i][1:]) - } - - if _, err := buf.WriteString(lines[i] + LineBreak); err != nil { - return nil, err - } - } - } - - if i > 0 || DefaultHeader { - if _, err := buf.WriteString("[" + sname + "]" + LineBreak); err != nil { - return nil, err - } - } else { - // Write nothing if default section is empty - if len(sec.keyList) == 0 { - continue - } - } - - if sec.isRawSection { - if _, err := buf.WriteString(sec.rawBody); err != nil { - return nil, err - } - - if PrettySection { - // Put a line between sections - if _, err := buf.WriteString(LineBreak); err != nil { - return nil, err - } - } - continue - } - - // Count and generate alignment length and buffer spaces using the - // longest key. Keys may be modifed if they contain certain characters so - // we need to take that into account in our calculation. 
- alignLength := 0 - if PrettyFormat { - for _, kname := range sec.keyList { - keyLength := len(kname) - // First case will surround key by ` and second by """ - if strings.ContainsAny(kname, "\"=:") { - keyLength += 2 - } else if strings.Contains(kname, "`") { - keyLength += 6 - } - - if keyLength > alignLength { - alignLength = keyLength - } - } - } - alignSpaces := bytes.Repeat([]byte(" "), alignLength) - - KEY_LIST: - for _, kname := range sec.keyList { - key := sec.Key(kname) - if len(key.Comment) > 0 { - if len(indent) > 0 && sname != DEFAULT_SECTION { - buf.WriteString(indent) - } - - // Support multiline comments - lines := strings.Split(key.Comment, LineBreak) - for i := range lines { - if lines[i][0] != '#' && lines[i][0] != ';' { - lines[i] = "; " + lines[i] - } else { - lines[i] = lines[i][:1] + " " + strings.TrimSpace(lines[i][1:]) - } - - if _, err := buf.WriteString(lines[i] + LineBreak); err != nil { - return nil, err - } - } - } - - if len(indent) > 0 && sname != DEFAULT_SECTION { - buf.WriteString(indent) - } - - switch { - case key.isAutoIncrement: - kname = "-" - case strings.ContainsAny(kname, "\"=:"): - kname = "`" + kname + "`" - case strings.Contains(kname, "`"): - kname = `"""` + kname + `"""` - } - - for _, val := range key.ValueWithShadows() { - if _, err := buf.WriteString(kname); err != nil { - return nil, err - } - - if key.isBooleanType { - if kname != sec.keyList[len(sec.keyList)-1] { - buf.WriteString(LineBreak) - } - continue KEY_LIST - } - - // Write out alignment spaces before "=" sign - if PrettyFormat { - buf.Write(alignSpaces[:alignLength-len(kname)]) - } - - // In case key value contains "\n", "`", "\"", "#" or ";" - if strings.ContainsAny(val, "\n`") { - val = `"""` + val + `"""` - } else if !f.options.IgnoreInlineComment && strings.ContainsAny(val, "#;") { - val = "`" + val + "`" - } - if _, err := buf.WriteString(equalSign + val + LineBreak); err != nil { - return nil, err - } - } - - for _, val := range key.nestedValues { - if _, err := buf.WriteString(indent + " " + val + LineBreak); err != nil { - return nil, err - } - } - } - - if PrettySection { - // Put a line between sections - if _, err := buf.WriteString(LineBreak); err != nil { - return nil, err - } - } - } - - return buf, nil -} - -// WriteToIndent writes content into io.Writer with given indention. -// If PrettyFormat has been set to be true, -// it will align "=" sign with spaces under each section. -func (f *File) WriteToIndent(w io.Writer, indent string) (int64, error) { - buf, err := f.writeToBuffer(indent) - if err != nil { - return 0, err - } - return buf.WriteTo(w) -} - -// WriteTo writes file content into io.Writer. -func (f *File) WriteTo(w io.Writer) (int64, error) { - return f.WriteToIndent(w, "") -} - -// SaveToIndent writes content to file system with given value indention. -func (f *File) SaveToIndent(filename, indent string) error { - // Note: Because we are truncating with os.Create, - // so it's safer to save to a temporary file location and rename afte done. - buf, err := f.writeToBuffer(indent) - if err != nil { - return err - } - - return ioutil.WriteFile(filename, buf.Bytes(), 0666) -} - -// SaveTo writes content to file system. 
-func (f *File) SaveTo(filename string) error { - return f.SaveToIndent(filename, "") -} diff --git a/vendor/github.com/go-ini/ini/ini.go b/vendor/github.com/go-ini/ini/ini.go deleted file mode 100644 index cb55997..0000000 --- a/vendor/github.com/go-ini/ini/ini.go +++ /dev/null @@ -1,211 +0,0 @@ -// +build go1.6 - -// Copyright 2014 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -// Package ini provides INI file read and write functionality in Go. -package ini - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "os" - "regexp" - "runtime" -) - -const ( - // Name for default section. You can use this constant or the string literal. - // In most of cases, an empty string is all you need to access the section. - DEFAULT_SECTION = "DEFAULT" - - // Maximum allowed depth when recursively substituing variable names. - _DEPTH_VALUES = 99 - _VERSION = "1.38.2" -) - -// Version returns current package version literal. -func Version() string { - return _VERSION -} - -var ( - // Delimiter to determine or compose a new line. - // This variable will be changed to "\r\n" automatically on Windows - // at package init time. - LineBreak = "\n" - - // Variable regexp pattern: %(variable)s - varPattern = regexp.MustCompile(`%\(([^\)]+)\)s`) - - // Indicate whether to align "=" sign with spaces to produce pretty output - // or reduce all possible spaces for compact format. - PrettyFormat = true - - // Place spaces around "=" sign even when PrettyFormat is false - PrettyEqual = false - - // Explicitly write DEFAULT section header - DefaultHeader = false - - // Indicate whether to put a line between sections - PrettySection = true -) - -func init() { - if runtime.GOOS == "windows" { - LineBreak = "\r\n" - } -} - -func inSlice(str string, s []string) bool { - for _, v := range s { - if str == v { - return true - } - } - return false -} - -// dataSource is an interface that returns object which can be read and closed. -type dataSource interface { - ReadCloser() (io.ReadCloser, error) -} - -// sourceFile represents an object that contains content on the local file system. -type sourceFile struct { - name string -} - -func (s sourceFile) ReadCloser() (_ io.ReadCloser, err error) { - return os.Open(s.name) -} - -// sourceData represents an object that contains content in memory. -type sourceData struct { - data []byte -} - -func (s *sourceData) ReadCloser() (io.ReadCloser, error) { - return ioutil.NopCloser(bytes.NewReader(s.data)), nil -} - -// sourceReadCloser represents an input stream with Close method. 
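The package-level variables above (PrettyFormat, LineBreak, and so on) control how File.WriteTo, defined earlier in file.go, serializes content. A minimal sketch; note that Section.Key lives in the package's section.go, which is not part of this hunk:

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	ini "github.com/go-ini/ini"
)

func main() {
	f := ini.Empty()
	// Section.Key creates the key if it does not exist yet.
	f.Section("server").Key("port").SetValue("8080")

	// With PrettyFormat off, writeToBuffer emits compact "key=value" pairs.
	ini.PrettyFormat = false

	var buf bytes.Buffer
	if _, err := f.WriteTo(&buf); err != nil {
		log.Fatal(err)
	}
	fmt.Print(buf.String())
	// [server]
	// port=8080
}
```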
-type sourceReadCloser struct { - reader io.ReadCloser -} - -func (s *sourceReadCloser) ReadCloser() (io.ReadCloser, error) { - return s.reader, nil -} - -func parseDataSource(source interface{}) (dataSource, error) { - switch s := source.(type) { - case string: - return sourceFile{s}, nil - case []byte: - return &sourceData{s}, nil - case io.ReadCloser: - return &sourceReadCloser{s}, nil - default: - return nil, fmt.Errorf("error parsing data source: unknown type '%s'", s) - } -} - -type LoadOptions struct { - // Loose indicates whether the parser should ignore nonexistent files or return error. - Loose bool - // Insensitive indicates whether the parser forces all section and key names to lowercase. - Insensitive bool - // IgnoreContinuation indicates whether to ignore continuation lines while parsing. - IgnoreContinuation bool - // IgnoreInlineComment indicates whether to ignore comments at the end of value and treat it as part of value. - IgnoreInlineComment bool - // SkipUnrecognizableLines indicates whether to skip unrecognizable lines that do not conform to key/value pairs. - SkipUnrecognizableLines bool - // AllowBooleanKeys indicates whether to allow boolean type keys or treat as value is missing. - // This type of keys are mostly used in my.cnf. - AllowBooleanKeys bool - // AllowShadows indicates whether to keep track of keys with same name under same section. - AllowShadows bool - // AllowNestedValues indicates whether to allow AWS-like nested values. - // Docs: http://docs.aws.amazon.com/cli/latest/topic/config-vars.html#nested-values - AllowNestedValues bool - // AllowPythonMultilineValues indicates whether to allow Python-like multi-line values. - // Docs: https://docs.python.org/3/library/configparser.html#supported-ini-file-structure - // Relevant quote: Values can also span multiple lines, as long as they are indented deeper - // than the first line of the value. - AllowPythonMultilineValues bool - // SpaceBeforeInlineComment indicates whether to allow comment symbols (\# and \;) inside value. - // Docs: https://docs.python.org/2/library/configparser.html - // Quote: Comments may appear on their own in an otherwise empty line, or may be entered in lines holding values or section names. - // In the latter case, they need to be preceded by a whitespace character to be recognized as a comment. - SpaceBeforeInlineComment bool - // UnescapeValueDoubleQuotes indicates whether to unescape double quotes inside value to regular format - // when value is surrounded by double quotes, e.g. key="a \"value\"" => key=a "value" - UnescapeValueDoubleQuotes bool - // UnescapeValueCommentSymbols indicates to unescape comment symbols (\# and \;) inside value to regular format - // when value is NOT surrounded by any quotes. - // Note: UNSTABLE, behavior might change to only unescape inside double quotes but may noy necessary at all. - UnescapeValueCommentSymbols bool - // UnparseableSections stores a list of blocks that are allowed with raw content which do not otherwise - // conform to key/value pairs. Specify the names of those blocks here. 
- UnparseableSections []string -} - -func LoadSources(opts LoadOptions, source interface{}, others ...interface{}) (_ *File, err error) { - sources := make([]dataSource, len(others)+1) - sources[0], err = parseDataSource(source) - if err != nil { - return nil, err - } - for i := range others { - sources[i+1], err = parseDataSource(others[i]) - if err != nil { - return nil, err - } - } - f := newFile(sources, opts) - if err = f.Reload(); err != nil { - return nil, err - } - return f, nil -} - -// Load loads and parses from INI data sources. -// Arguments can be mixed of file name with string type, or raw data in []byte. -// It will return error if list contains nonexistent files. -func Load(source interface{}, others ...interface{}) (*File, error) { - return LoadSources(LoadOptions{}, source, others...) -} - -// LooseLoad has exactly same functionality as Load function -// except it ignores nonexistent files instead of returning error. -func LooseLoad(source interface{}, others ...interface{}) (*File, error) { - return LoadSources(LoadOptions{Loose: true}, source, others...) -} - -// InsensitiveLoad has exactly same functionality as Load function -// except it forces all section and key names to be lowercased. -func InsensitiveLoad(source interface{}, others ...interface{}) (*File, error) { - return LoadSources(LoadOptions{Insensitive: true}, source, others...) -} - -// ShadowLoad has exactly same functionality as Load function -// except it allows have shadow keys. -func ShadowLoad(source interface{}, others ...interface{}) (*File, error) { - return LoadSources(LoadOptions{AllowShadows: true}, source, others...) -} diff --git a/vendor/github.com/go-ini/ini/key.go b/vendor/github.com/go-ini/ini/key.go deleted file mode 100644 index 7c8566a..0000000 --- a/vendor/github.com/go-ini/ini/key.go +++ /dev/null @@ -1,751 +0,0 @@ -// Copyright 2014 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package ini - -import ( - "bytes" - "errors" - "fmt" - "strconv" - "strings" - "time" -) - -// Key represents a key under a section. -type Key struct { - s *Section - Comment string - name string - value string - isAutoIncrement bool - isBooleanType bool - - isShadow bool - shadows []*Key - - nestedValues []string -} - -// newKey simply return a key object with given values. -func newKey(s *Section, name, val string) *Key { - return &Key{ - s: s, - name: name, - value: val, - } -} - -func (k *Key) addShadow(val string) error { - if k.isShadow { - return errors.New("cannot add shadow to another shadow key") - } else if k.isAutoIncrement || k.isBooleanType { - return errors.New("cannot add shadow to auto-increment or boolean key") - } - - shadow := newKey(k.s, k.name, val) - shadow.isShadow = true - k.shadows = append(k.shadows, shadow) - return nil -} - -// AddShadow adds a new shadow key to itself. 
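ShadowLoad above is the shorthand for LoadSources with AllowShadows set, letting one key accumulate several values instead of overwriting. A minimal sketch with hypothetical input:

```go
package main

import (
	"fmt"
	"log"

	ini "github.com/go-ini/ini"
)

func main() {
	data := []byte("host = primary\nhost = replica\n")

	// ShadowLoad sets AllowShadows, so the duplicate key keeps both values.
	f, err := ini.ShadowLoad(data)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(f.Section("").Key("host").ValueWithShadows())
	// [primary replica]
}
```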
-func (k *Key) AddShadow(val string) error { - if !k.s.f.options.AllowShadows { - return errors.New("shadow key is not allowed") - } - return k.addShadow(val) -} - -func (k *Key) addNestedValue(val string) error { - if k.isAutoIncrement || k.isBooleanType { - return errors.New("cannot add nested value to auto-increment or boolean key") - } - - k.nestedValues = append(k.nestedValues, val) - return nil -} - -func (k *Key) AddNestedValue(val string) error { - if !k.s.f.options.AllowNestedValues { - return errors.New("nested value is not allowed") - } - return k.addNestedValue(val) -} - -// ValueMapper represents a mapping function for values, e.g. os.ExpandEnv -type ValueMapper func(string) string - -// Name returns name of key. -func (k *Key) Name() string { - return k.name -} - -// Value returns raw value of key for performance purpose. -func (k *Key) Value() string { - return k.value -} - -// ValueWithShadows returns raw values of key and its shadows if any. -func (k *Key) ValueWithShadows() []string { - if len(k.shadows) == 0 { - return []string{k.value} - } - vals := make([]string, len(k.shadows)+1) - vals[0] = k.value - for i := range k.shadows { - vals[i+1] = k.shadows[i].value - } - return vals -} - -// NestedValues returns nested values stored in the key. -// It is possible returned value is nil if no nested values stored in the key. -func (k *Key) NestedValues() []string { - return k.nestedValues -} - -// transformValue takes a raw value and transforms to its final string. -func (k *Key) transformValue(val string) string { - if k.s.f.ValueMapper != nil { - val = k.s.f.ValueMapper(val) - } - - // Fail-fast if no indicate char found for recursive value - if !strings.Contains(val, "%") { - return val - } - for i := 0; i < _DEPTH_VALUES; i++ { - vr := varPattern.FindString(val) - if len(vr) == 0 { - break - } - - // Take off leading '%(' and trailing ')s'. - noption := strings.TrimLeft(vr, "%(") - noption = strings.TrimRight(noption, ")s") - - // Search in the same section. - nk, err := k.s.GetKey(noption) - if err != nil || k == nk { - // Search again in default section. - nk, _ = k.s.f.Section("").GetKey(noption) - } - - // Substitute by new value and take off leading '%(' and trailing ')s'. - val = strings.Replace(val, vr, nk.value, -1) - } - return val -} - -// String returns string representation of value. -func (k *Key) String() string { - return k.transformValue(k.value) -} - -// Validate accepts a validate function which can -// return modifed result as key value. -func (k *Key) Validate(fn func(string) string) string { - return fn(k.String()) -} - -// parseBool returns the boolean value represented by the string. -// -// It accepts 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On, -// 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off. -// Any other value returns an error. -func parseBool(str string) (value bool, err error) { - switch str { - case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "y", "ON", "on", "On": - return true, nil - case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "n", "OFF", "off", "Off": - return false, nil - } - return false, fmt.Errorf("parsing \"%s\": invalid syntax", str) -} - -// Bool returns bool type value. -func (k *Key) Bool() (bool, error) { - return parseBool(k.String()) -} - -// Float64 returns float64 type value. -func (k *Key) Float64() (float64, error) { - return strconv.ParseFloat(k.String(), 64) -} - -// Int returns int type value. 
-func (k *Key) Int() (int, error) { - return strconv.Atoi(k.String()) -} - -// Int64 returns int64 type value. -func (k *Key) Int64() (int64, error) { - return strconv.ParseInt(k.String(), 10, 64) -} - -// Uint returns uint type valued. -func (k *Key) Uint() (uint, error) { - u, e := strconv.ParseUint(k.String(), 10, 64) - return uint(u), e -} - -// Uint64 returns uint64 type value. -func (k *Key) Uint64() (uint64, error) { - return strconv.ParseUint(k.String(), 10, 64) -} - -// Duration returns time.Duration type value. -func (k *Key) Duration() (time.Duration, error) { - return time.ParseDuration(k.String()) -} - -// TimeFormat parses with given format and returns time.Time type value. -func (k *Key) TimeFormat(format string) (time.Time, error) { - return time.Parse(format, k.String()) -} - -// Time parses with RFC3339 format and returns time.Time type value. -func (k *Key) Time() (time.Time, error) { - return k.TimeFormat(time.RFC3339) -} - -// MustString returns default value if key value is empty. -func (k *Key) MustString(defaultVal string) string { - val := k.String() - if len(val) == 0 { - k.value = defaultVal - return defaultVal - } - return val -} - -// MustBool always returns value without error, -// it returns false if error occurs. -func (k *Key) MustBool(defaultVal ...bool) bool { - val, err := k.Bool() - if len(defaultVal) > 0 && err != nil { - k.value = strconv.FormatBool(defaultVal[0]) - return defaultVal[0] - } - return val -} - -// MustFloat64 always returns value without error, -// it returns 0.0 if error occurs. -func (k *Key) MustFloat64(defaultVal ...float64) float64 { - val, err := k.Float64() - if len(defaultVal) > 0 && err != nil { - k.value = strconv.FormatFloat(defaultVal[0], 'f', -1, 64) - return defaultVal[0] - } - return val -} - -// MustInt always returns value without error, -// it returns 0 if error occurs. -func (k *Key) MustInt(defaultVal ...int) int { - val, err := k.Int() - if len(defaultVal) > 0 && err != nil { - k.value = strconv.FormatInt(int64(defaultVal[0]), 10) - return defaultVal[0] - } - return val -} - -// MustInt64 always returns value without error, -// it returns 0 if error occurs. -func (k *Key) MustInt64(defaultVal ...int64) int64 { - val, err := k.Int64() - if len(defaultVal) > 0 && err != nil { - k.value = strconv.FormatInt(defaultVal[0], 10) - return defaultVal[0] - } - return val -} - -// MustUint always returns value without error, -// it returns 0 if error occurs. -func (k *Key) MustUint(defaultVal ...uint) uint { - val, err := k.Uint() - if len(defaultVal) > 0 && err != nil { - k.value = strconv.FormatUint(uint64(defaultVal[0]), 10) - return defaultVal[0] - } - return val -} - -// MustUint64 always returns value without error, -// it returns 0 if error occurs. -func (k *Key) MustUint64(defaultVal ...uint64) uint64 { - val, err := k.Uint64() - if len(defaultVal) > 0 && err != nil { - k.value = strconv.FormatUint(defaultVal[0], 10) - return defaultVal[0] - } - return val -} - -// MustDuration always returns value without error, -// it returns zero value if error occurs. -func (k *Key) MustDuration(defaultVal ...time.Duration) time.Duration { - val, err := k.Duration() - if len(defaultVal) > 0 && err != nil { - k.value = defaultVal[0].String() - return defaultVal[0] - } - return val -} - -// MustTimeFormat always parses with given format and returns value without error, -// it returns zero value if error occurs. 
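The Must* accessors above never return an error; when a default is supplied and parsing fails, the default is written back into the key and returned. A short sketch with a hypothetical config:

```go
package main

import (
	"fmt"
	"log"

	ini "github.com/go-ini/ini"
)

func main() {
	cfg, err := ini.Load([]byte("[server]\nport = not-a-number\n"))
	if err != nil {
		log.Fatal(err)
	}

	// Parsing "not-a-number" fails, so MustInt stores and returns 8080.
	port := cfg.Section("server").Key("port").MustInt(8080)
	fmt.Println(port) // 8080
}
```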
-func (k *Key) MustTimeFormat(format string, defaultVal ...time.Time) time.Time { - val, err := k.TimeFormat(format) - if len(defaultVal) > 0 && err != nil { - k.value = defaultVal[0].Format(format) - return defaultVal[0] - } - return val -} - -// MustTime always parses with RFC3339 format and returns value without error, -// it returns zero value if error occurs. -func (k *Key) MustTime(defaultVal ...time.Time) time.Time { - return k.MustTimeFormat(time.RFC3339, defaultVal...) -} - -// In always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) In(defaultVal string, candidates []string) string { - val := k.String() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InFloat64 always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InFloat64(defaultVal float64, candidates []float64) float64 { - val := k.MustFloat64() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InInt always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InInt(defaultVal int, candidates []int) int { - val := k.MustInt() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InInt64 always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InInt64(defaultVal int64, candidates []int64) int64 { - val := k.MustInt64() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InUint always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InUint(defaultVal uint, candidates []uint) uint { - val := k.MustUint() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InUint64 always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InUint64(defaultVal uint64, candidates []uint64) uint64 { - val := k.MustUint64() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InTimeFormat always parses with given format and returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InTimeFormat(format string, defaultVal time.Time, candidates []time.Time) time.Time { - val := k.MustTimeFormat(format) - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InTime always parses with RFC3339 format and returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InTime(defaultVal time.Time, candidates []time.Time) time.Time { - return k.InTimeFormat(time.RFC3339, defaultVal, candidates) -} - -// RangeFloat64 checks if value is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeFloat64(defaultVal, min, max float64) float64 { - val := k.MustFloat64() - if val < min || val > max { - return defaultVal - } - return val -} - -// RangeInt checks if value is in given range inclusively, -// and returns default value if it's not. 
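The In* and Range* helpers validate a parsed value against a candidate list or an inclusive range, falling back to the given default. A sketch with hypothetical keys and bounds:

```go
package main

import (
	"fmt"
	"log"

	ini "github.com/go-ini/ini"
)

func main() {
	cfg, err := ini.Load([]byte("level = trace\nworkers = 99\n"))
	if err != nil {
		log.Fatal(err)
	}
	sec := cfg.Section("")

	// "trace" is not among the candidates, so the default wins.
	fmt.Println(sec.Key("level").In("info", []string{"debug", "info", "warn"}))
	// 99 falls outside [1, 64], so the default wins here too.
	fmt.Println(sec.Key("workers").RangeInt(8, 1, 64))
}
```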
-func (k *Key) RangeInt(defaultVal, min, max int) int { - val := k.MustInt() - if val < min || val > max { - return defaultVal - } - return val -} - -// RangeInt64 checks if value is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeInt64(defaultVal, min, max int64) int64 { - val := k.MustInt64() - if val < min || val > max { - return defaultVal - } - return val -} - -// RangeTimeFormat checks if value with given format is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeTimeFormat(format string, defaultVal, min, max time.Time) time.Time { - val := k.MustTimeFormat(format) - if val.Unix() < min.Unix() || val.Unix() > max.Unix() { - return defaultVal - } - return val -} - -// RangeTime checks if value with RFC3339 format is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeTime(defaultVal, min, max time.Time) time.Time { - return k.RangeTimeFormat(time.RFC3339, defaultVal, min, max) -} - -// Strings returns list of string divided by given delimiter. -func (k *Key) Strings(delim string) []string { - str := k.String() - if len(str) == 0 { - return []string{} - } - - runes := []rune(str) - vals := make([]string, 0, 2) - var buf bytes.Buffer - escape := false - idx := 0 - for { - if escape { - escape = false - if runes[idx] != '\\' && !strings.HasPrefix(string(runes[idx:]), delim) { - buf.WriteRune('\\') - } - buf.WriteRune(runes[idx]) - } else { - if runes[idx] == '\\' { - escape = true - } else if strings.HasPrefix(string(runes[idx:]), delim) { - idx += len(delim) - 1 - vals = append(vals, strings.TrimSpace(buf.String())) - buf.Reset() - } else { - buf.WriteRune(runes[idx]) - } - } - idx += 1 - if idx == len(runes) { - break - } - } - - if buf.Len() > 0 { - vals = append(vals, strings.TrimSpace(buf.String())) - } - - return vals -} - -// StringsWithShadows returns list of string divided by given delimiter. -// Shadows will also be appended if any. -func (k *Key) StringsWithShadows(delim string) []string { - vals := k.ValueWithShadows() - results := make([]string, 0, len(vals)*2) - for i := range vals { - if len(vals) == 0 { - continue - } - - results = append(results, strings.Split(vals[i], delim)...) - } - - for i := range results { - results[i] = k.transformValue(strings.TrimSpace(results[i])) - } - return results -} - -// Float64s returns list of float64 divided by given delimiter. Any invalid input will be treated as zero value. -func (k *Key) Float64s(delim string) []float64 { - vals, _ := k.parseFloat64s(k.Strings(delim), true, false) - return vals -} - -// Ints returns list of int divided by given delimiter. Any invalid input will be treated as zero value. -func (k *Key) Ints(delim string) []int { - vals, _ := k.parseInts(k.Strings(delim), true, false) - return vals -} - -// Int64s returns list of int64 divided by given delimiter. Any invalid input will be treated as zero value. -func (k *Key) Int64s(delim string) []int64 { - vals, _ := k.parseInt64s(k.Strings(delim), true, false) - return vals -} - -// Uints returns list of uint divided by given delimiter. Any invalid input will be treated as zero value. -func (k *Key) Uints(delim string) []uint { - vals, _ := k.parseUints(k.Strings(delim), true, false) - return vals -} - -// Uint64s returns list of uint64 divided by given delimiter. Any invalid input will be treated as zero value. 
-func (k *Key) Uint64s(delim string) []uint64 { - vals, _ := k.parseUint64s(k.Strings(delim), true, false) - return vals -} - -// TimesFormat parses with given format and returns list of time.Time divided by given delimiter. -// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC). -func (k *Key) TimesFormat(format, delim string) []time.Time { - vals, _ := k.parseTimesFormat(format, k.Strings(delim), true, false) - return vals -} - -// Times parses with RFC3339 format and returns list of time.Time divided by given delimiter. -// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC). -func (k *Key) Times(delim string) []time.Time { - return k.TimesFormat(time.RFC3339, delim) -} - -// ValidFloat64s returns list of float64 divided by given delimiter. If some value is not float, then -// it will not be included to result list. -func (k *Key) ValidFloat64s(delim string) []float64 { - vals, _ := k.parseFloat64s(k.Strings(delim), false, false) - return vals -} - -// ValidInts returns list of int divided by given delimiter. If some value is not integer, then it will -// not be included to result list. -func (k *Key) ValidInts(delim string) []int { - vals, _ := k.parseInts(k.Strings(delim), false, false) - return vals -} - -// ValidInt64s returns list of int64 divided by given delimiter. If some value is not 64-bit integer, -// then it will not be included to result list. -func (k *Key) ValidInt64s(delim string) []int64 { - vals, _ := k.parseInt64s(k.Strings(delim), false, false) - return vals -} - -// ValidUints returns list of uint divided by given delimiter. If some value is not unsigned integer, -// then it will not be included to result list. -func (k *Key) ValidUints(delim string) []uint { - vals, _ := k.parseUints(k.Strings(delim), false, false) - return vals -} - -// ValidUint64s returns list of uint64 divided by given delimiter. If some value is not 64-bit unsigned -// integer, then it will not be included to result list. -func (k *Key) ValidUint64s(delim string) []uint64 { - vals, _ := k.parseUint64s(k.Strings(delim), false, false) - return vals -} - -// ValidTimesFormat parses with given format and returns list of time.Time divided by given delimiter. -func (k *Key) ValidTimesFormat(format, delim string) []time.Time { - vals, _ := k.parseTimesFormat(format, k.Strings(delim), false, false) - return vals -} - -// ValidTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter. -func (k *Key) ValidTimes(delim string) []time.Time { - return k.ValidTimesFormat(time.RFC3339, delim) -} - -// StrictFloat64s returns list of float64 divided by given delimiter or error on first invalid input. -func (k *Key) StrictFloat64s(delim string) ([]float64, error) { - return k.parseFloat64s(k.Strings(delim), false, true) -} - -// StrictInts returns list of int divided by given delimiter or error on first invalid input. -func (k *Key) StrictInts(delim string) ([]int, error) { - return k.parseInts(k.Strings(delim), false, true) -} - -// StrictInt64s returns list of int64 divided by given delimiter or error on first invalid input. -func (k *Key) StrictInt64s(delim string) ([]int64, error) { - return k.parseInt64s(k.Strings(delim), false, true) -} - -// StrictUints returns list of uint divided by given delimiter or error on first invalid input. 
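The list accessors come in three flavors: plain (invalid entries become zero values), Valid* (invalid entries are dropped), and Strict* (the first invalid entry is an error). A sketch with hypothetical input:

```go
package main

import (
	"fmt"
	"log"

	ini "github.com/go-ini/ini"
)

func main() {
	cfg, err := ini.Load([]byte("hosts = a, b, c\nports = 80,oops,443\n"))
	if err != nil {
		log.Fatal(err)
	}
	sec := cfg.Section("")

	fmt.Println(sec.Key("hosts").Strings(","))   // [a b c]
	fmt.Println(sec.Key("ports").ValidInts(",")) // [80 443] -- "oops" dropped
	if _, err := sec.Key("ports").StrictInts(","); err != nil {
		fmt.Println("strict:", err) // fails on "oops"
	}
}
```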
-func (k *Key) StrictUints(delim string) ([]uint, error) { - return k.parseUints(k.Strings(delim), false, true) -} - -// StrictUint64s returns list of uint64 divided by given delimiter or error on first invalid input. -func (k *Key) StrictUint64s(delim string) ([]uint64, error) { - return k.parseUint64s(k.Strings(delim), false, true) -} - -// StrictTimesFormat parses with given format and returns list of time.Time divided by given delimiter -// or error on first invalid input. -func (k *Key) StrictTimesFormat(format, delim string) ([]time.Time, error) { - return k.parseTimesFormat(format, k.Strings(delim), false, true) -} - -// StrictTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter -// or error on first invalid input. -func (k *Key) StrictTimes(delim string) ([]time.Time, error) { - return k.StrictTimesFormat(time.RFC3339, delim) -} - -// parseFloat64s transforms strings to float64s. -func (k *Key) parseFloat64s(strs []string, addInvalid, returnOnInvalid bool) ([]float64, error) { - vals := make([]float64, 0, len(strs)) - for _, str := range strs { - val, err := strconv.ParseFloat(str, 64) - if err != nil && returnOnInvalid { - return nil, err - } - if err == nil || addInvalid { - vals = append(vals, val) - } - } - return vals, nil -} - -// parseInts transforms strings to ints. -func (k *Key) parseInts(strs []string, addInvalid, returnOnInvalid bool) ([]int, error) { - vals := make([]int, 0, len(strs)) - for _, str := range strs { - val, err := strconv.Atoi(str) - if err != nil && returnOnInvalid { - return nil, err - } - if err == nil || addInvalid { - vals = append(vals, val) - } - } - return vals, nil -} - -// parseInt64s transforms strings to int64s. -func (k *Key) parseInt64s(strs []string, addInvalid, returnOnInvalid bool) ([]int64, error) { - vals := make([]int64, 0, len(strs)) - for _, str := range strs { - val, err := strconv.ParseInt(str, 10, 64) - if err != nil && returnOnInvalid { - return nil, err - } - if err == nil || addInvalid { - vals = append(vals, val) - } - } - return vals, nil -} - -// parseUints transforms strings to uints. -func (k *Key) parseUints(strs []string, addInvalid, returnOnInvalid bool) ([]uint, error) { - vals := make([]uint, 0, len(strs)) - for _, str := range strs { - val, err := strconv.ParseUint(str, 10, 0) - if err != nil && returnOnInvalid { - return nil, err - } - if err == nil || addInvalid { - vals = append(vals, uint(val)) - } - } - return vals, nil -} - -// parseUint64s transforms strings to uint64s. -func (k *Key) parseUint64s(strs []string, addInvalid, returnOnInvalid bool) ([]uint64, error) { - vals := make([]uint64, 0, len(strs)) - for _, str := range strs { - val, err := strconv.ParseUint(str, 10, 64) - if err != nil && returnOnInvalid { - return nil, err - } - if err == nil || addInvalid { - vals = append(vals, val) - } - } - return vals, nil -} - -// parseTimesFormat transforms strings to times in given format. -func (k *Key) parseTimesFormat(format string, strs []string, addInvalid, returnOnInvalid bool) ([]time.Time, error) { - vals := make([]time.Time, 0, len(strs)) - for _, str := range strs { - val, err := time.Parse(format, str) - if err != nil && returnOnInvalid { - return nil, err - } - if err == nil || addInvalid { - vals = append(vals, val) - } - } - return vals, nil -} - -// SetValue changes key value. 
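Rounding out key.go, SetValue (continued below) mutates the key in place under the file lock when BlockMode is on, which is the default. A minimal sketch with a hypothetical key:

```go
package main

import (
	"fmt"
	"log"

	ini "github.com/go-ini/ini"
)

func main() {
	cfg, err := ini.Load([]byte("port = 8080\n"))
	if err != nil {
		log.Fatal(err)
	}

	// SetValue holds the file lock while BlockMode is on (the default),
	// so concurrent readers see a consistent value.
	cfg.Section("").Key("port").SetValue("9090")
	fmt.Println(cfg.Section("").Key("port").String()) // 9090
}
```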
-func (k *Key) SetValue(v string) { - if k.s.f.BlockMode { - k.s.f.lock.Lock() - defer k.s.f.lock.Unlock() - } - - k.value = v - k.s.keysHash[k.name] = v -} diff --git a/vendor/github.com/go-ini/ini/parser.go b/vendor/github.com/go-ini/ini/parser.go deleted file mode 100644 index 3daf54c..0000000 --- a/vendor/github.com/go-ini/ini/parser.go +++ /dev/null @@ -1,494 +0,0 @@ -// Copyright 2015 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package ini - -import ( - "bufio" - "bytes" - "fmt" - "io" - "regexp" - "strconv" - "strings" - "unicode" -) - -var pythonMultiline = regexp.MustCompile("^(\\s+)([^\n]+)") - -type tokenType int - -const ( - _TOKEN_INVALID tokenType = iota - _TOKEN_COMMENT - _TOKEN_SECTION - _TOKEN_KEY -) - -type parser struct { - buf *bufio.Reader - isEOF bool - count int - comment *bytes.Buffer -} - -func newParser(r io.Reader) *parser { - return &parser{ - buf: bufio.NewReader(r), - count: 1, - comment: &bytes.Buffer{}, - } -} - -// BOM handles header of UTF-8, UTF-16 LE and UTF-16 BE's BOM format. -// http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding -func (p *parser) BOM() error { - mask, err := p.buf.Peek(2) - if err != nil && err != io.EOF { - return err - } else if len(mask) < 2 { - return nil - } - - switch { - case mask[0] == 254 && mask[1] == 255: - fallthrough - case mask[0] == 255 && mask[1] == 254: - p.buf.Read(mask) - case mask[0] == 239 && mask[1] == 187: - mask, err := p.buf.Peek(3) - if err != nil && err != io.EOF { - return err - } else if len(mask) < 3 { - return nil - } - if mask[2] == 191 { - p.buf.Read(mask) - } - } - return nil -} - -func (p *parser) readUntil(delim byte) ([]byte, error) { - data, err := p.buf.ReadBytes(delim) - if err != nil { - if err == io.EOF { - p.isEOF = true - } else { - return nil, err - } - } - return data, nil -} - -func cleanComment(in []byte) ([]byte, bool) { - i := bytes.IndexAny(in, "#;") - if i == -1 { - return nil, false - } - return in[i:], true -} - -func readKeyName(in []byte) (string, int, error) { - line := string(in) - - // Check if key name surrounded by quotes. 
- var keyQuote string - if line[0] == '"' { - if len(line) > 6 && string(line[0:3]) == `"""` { - keyQuote = `"""` - } else { - keyQuote = `"` - } - } else if line[0] == '`' { - keyQuote = "`" - } - - // Get out key name - endIdx := -1 - if len(keyQuote) > 0 { - startIdx := len(keyQuote) - // FIXME: fail case -> """"""name"""=value - pos := strings.Index(line[startIdx:], keyQuote) - if pos == -1 { - return "", -1, fmt.Errorf("missing closing key quote: %s", line) - } - pos += startIdx - - // Find key-value delimiter - i := strings.IndexAny(line[pos+startIdx:], "=:") - if i < 0 { - return "", -1, ErrDelimiterNotFound{line} - } - endIdx = pos + i - return strings.TrimSpace(line[startIdx:pos]), endIdx + startIdx + 1, nil - } - - endIdx = strings.IndexAny(line, "=:") - if endIdx < 0 { - return "", -1, ErrDelimiterNotFound{line} - } - return strings.TrimSpace(line[0:endIdx]), endIdx + 1, nil -} - -func (p *parser) readMultilines(line, val, valQuote string) (string, error) { - for { - data, err := p.readUntil('\n') - if err != nil { - return "", err - } - next := string(data) - - pos := strings.LastIndex(next, valQuote) - if pos > -1 { - val += next[:pos] - - comment, has := cleanComment([]byte(next[pos:])) - if has { - p.comment.Write(bytes.TrimSpace(comment)) - } - break - } - val += next - if p.isEOF { - return "", fmt.Errorf("missing closing key quote from '%s' to '%s'", line, next) - } - } - return val, nil -} - -func (p *parser) readContinuationLines(val string) (string, error) { - for { - data, err := p.readUntil('\n') - if err != nil { - return "", err - } - next := strings.TrimSpace(string(data)) - - if len(next) == 0 { - break - } - val += next - if val[len(val)-1] != '\\' { - break - } - val = val[:len(val)-1] - } - return val, nil -} - -// hasSurroundedQuote check if and only if the first and last characters -// are quotes \" or \'. -// It returns false if any other parts also contain same kind of quotes. 
-func hasSurroundedQuote(in string, quote byte) bool { - return len(in) >= 2 && in[0] == quote && in[len(in)-1] == quote && - strings.IndexByte(in[1:], quote) == len(in)-2 -} - -func (p *parser) readValue(in []byte, - parserBufferSize int, - ignoreContinuation, ignoreInlineComment, unescapeValueDoubleQuotes, unescapeValueCommentSymbols, allowPythonMultilines, spaceBeforeInlineComment bool) (string, error) { - - line := strings.TrimLeftFunc(string(in), unicode.IsSpace) - if len(line) == 0 { - return "", nil - } - - var valQuote string - if len(line) > 3 && string(line[0:3]) == `"""` { - valQuote = `"""` - } else if line[0] == '`' { - valQuote = "`" - } else if unescapeValueDoubleQuotes && line[0] == '"' { - valQuote = `"` - } - - if len(valQuote) > 0 { - startIdx := len(valQuote) - pos := strings.LastIndex(line[startIdx:], valQuote) - // Check for multi-line value - if pos == -1 { - return p.readMultilines(line, line[startIdx:], valQuote) - } - - if unescapeValueDoubleQuotes && valQuote == `"` { - return strings.Replace(line[startIdx:pos+startIdx], `\"`, `"`, -1), nil - } - return line[startIdx : pos+startIdx], nil - } - - lastChar := line[len(line)-1] - // Won't be able to reach here if value only contains whitespace - line = strings.TrimSpace(line) - trimmedLastChar := line[len(line)-1] - - // Check continuation lines when desired - if !ignoreContinuation && trimmedLastChar == '\\' { - return p.readContinuationLines(line[:len(line)-1]) - } - - // Check if ignore inline comment - if !ignoreInlineComment { - var i int - if spaceBeforeInlineComment { - i = strings.Index(line, " #") - if i == -1 { - i = strings.Index(line, " ;") - } - - } else { - i = strings.IndexAny(line, "#;") - } - - if i > -1 { - p.comment.WriteString(line[i:]) - line = strings.TrimSpace(line[:i]) - } - - } - - // Trim single and double quotes - if hasSurroundedQuote(line, '\'') || - hasSurroundedQuote(line, '"') { - line = line[1 : len(line)-1] - } else if len(valQuote) == 0 && unescapeValueCommentSymbols { - if strings.Contains(line, `\;`) { - line = strings.Replace(line, `\;`, ";", -1) - } - if strings.Contains(line, `\#`) { - line = strings.Replace(line, `\#`, "#", -1) - } - } else if allowPythonMultilines && lastChar == '\n' { - parserBufferPeekResult, _ := p.buf.Peek(parserBufferSize) - peekBuffer := bytes.NewBuffer(parserBufferPeekResult) - - identSize := -1 - val := line - - for { - peekData, peekErr := peekBuffer.ReadBytes('\n') - if peekErr != nil { - if peekErr == io.EOF { - return val, nil - } - return "", peekErr - } - - peekMatches := pythonMultiline.FindStringSubmatch(string(peekData)) - if len(peekMatches) != 3 { - return val, nil - } - - currentIdentSize := len(peekMatches[1]) - // NOTE: Return if not a python-ini multi-line value. - if currentIdentSize < 0 { - return val, nil - } - identSize = currentIdentSize - - // NOTE: Just advance the parser reader (buffer) in-sync with the peek buffer. - _, err := p.readUntil('\n') - if err != nil { - return "", err - } - - val += fmt.Sprintf("\n%s", peekMatches[2]) - } - - // NOTE: If it was a Python multi-line value, - // return the appended value. - if identSize > 0 { - return val, nil - } - } - - return line, nil -} - -// parse parses data through an io.Reader. -func (f *File) parse(reader io.Reader) (err error) { - p := newParser(reader) - if err = p.BOM(); err != nil { - return fmt.Errorf("BOM: %v", err) - } - - // Ignore error because default section name is never empty string. 
-	name := DEFAULT_SECTION
-	if f.options.Insensitive {
-		name = strings.ToLower(DEFAULT_SECTION)
-	}
-	section, _ := f.NewSection(name)
-
-	// This "last" is not strictly equivalent to "previous one" if current key is not the first nested key
-	var isLastValueEmpty bool
-	var lastRegularKey *Key
-
-	var line []byte
-	var inUnparseableSection bool
-
-	// NOTE: Iterate and increase `currentPeekSize` until
-	// the size of the parser buffer is found.
-	// TODO(unknwon): When Golang 1.10 is the lowest version supported, replace with `parserBufferSize := p.buf.Size()`.
-	parserBufferSize := 0
-	// NOTE: Peek 1kb at a time.
-	currentPeekSize := 1024
-
-	if f.options.AllowPythonMultilineValues {
-		for {
-			peekBytes, _ := p.buf.Peek(currentPeekSize)
-			peekBytesLength := len(peekBytes)
-
-			if parserBufferSize >= peekBytesLength {
-				break
-			}
-
-			currentPeekSize *= 2
-			parserBufferSize = peekBytesLength
-		}
-	}
-
-	for !p.isEOF {
-		line, err = p.readUntil('\n')
-		if err != nil {
-			return err
-		}
-
-		if f.options.AllowNestedValues &&
-			isLastValueEmpty && len(line) > 0 {
-			if line[0] == ' ' || line[0] == '\t' {
-				lastRegularKey.addNestedValue(string(bytes.TrimSpace(line)))
-				continue
-			}
-		}
-
-		line = bytes.TrimLeftFunc(line, unicode.IsSpace)
-		if len(line) == 0 {
-			continue
-		}
-
-		// Comments
-		if line[0] == '#' || line[0] == ';' {
-			// Note: we do not care about the ending line break;
-			// it is needed for adding a second line,
-			// so just clean it once at the end when set to value.
-			p.comment.Write(line)
-			continue
-		}
-
-		// Section
-		if line[0] == '[' {
-			// Read to the next ']' (TODO: support quoted strings)
-			closeIdx := bytes.LastIndexByte(line, ']')
-			if closeIdx == -1 {
-				return fmt.Errorf("unclosed section: %s", line)
-			}
-
-			name := string(line[1:closeIdx])
-			section, err = f.NewSection(name)
-			if err != nil {
-				return err
-			}
-
-			comment, has := cleanComment(line[closeIdx+1:])
-			if has {
-				p.comment.Write(comment)
-			}
-
-			section.Comment = strings.TrimSpace(p.comment.String())
-
-			// Reset auto-counter and comments
-			p.comment.Reset()
-			p.count = 1
-
-			inUnparseableSection = false
-			for i := range f.options.UnparseableSections {
-				if f.options.UnparseableSections[i] == name ||
-					(f.options.Insensitive && strings.ToLower(f.options.UnparseableSections[i]) == strings.ToLower(name)) {
-					inUnparseableSection = true
-					continue
-				}
-			}
-			continue
-		}
-
-		if inUnparseableSection {
-			section.isRawSection = true
-			section.rawBody += string(line)
-			continue
-		}
-
-		kname, offset, err := readKeyName(line)
-		if err != nil {
-			// Treat as boolean key when desired, and whole line is key name.
-			if IsErrDelimiterNotFound(err) {
-				switch {
-				case f.options.AllowBooleanKeys:
-					kname, err := p.readValue(line,
-						parserBufferSize,
-						f.options.IgnoreContinuation,
-						f.options.IgnoreInlineComment,
-						f.options.UnescapeValueDoubleQuotes,
-						f.options.UnescapeValueCommentSymbols,
-						f.options.AllowPythonMultilineValues,
-						f.options.SpaceBeforeInlineComment)
-					if err != nil {
-						return err
-					}
-					key, err := section.NewBooleanKey(kname)
-					if err != nil {
-						return err
-					}
-					key.Comment = strings.TrimSpace(p.comment.String())
-					p.comment.Reset()
-					continue
-
-				case f.options.SkipUnrecognizableLines:
-					continue
-				}
-			}
-			return err
-		}
-
-		// Auto increment.
- isAutoIncr := false - if kname == "-" { - isAutoIncr = true - kname = "#" + strconv.Itoa(p.count) - p.count++ - } - - value, err := p.readValue(line[offset:], - parserBufferSize, - f.options.IgnoreContinuation, - f.options.IgnoreInlineComment, - f.options.UnescapeValueDoubleQuotes, - f.options.UnescapeValueCommentSymbols, - f.options.AllowPythonMultilineValues, - f.options.SpaceBeforeInlineComment) - if err != nil { - return err - } - isLastValueEmpty = len(value) == 0 - - key, err := section.NewKey(kname, value) - if err != nil { - return err - } - key.isAutoIncrement = isAutoIncr - key.Comment = strings.TrimSpace(p.comment.String()) - p.comment.Reset() - lastRegularKey = key - } - return nil -} diff --git a/vendor/github.com/go-ini/ini/section.go b/vendor/github.com/go-ini/ini/section.go deleted file mode 100644 index 340a1ef..0000000 --- a/vendor/github.com/go-ini/ini/section.go +++ /dev/null @@ -1,258 +0,0 @@ -// Copyright 2014 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package ini - -import ( - "errors" - "fmt" - "strings" -) - -// Section represents a config section. -type Section struct { - f *File - Comment string - name string - keys map[string]*Key - keyList []string - keysHash map[string]string - - isRawSection bool - rawBody string -} - -func newSection(f *File, name string) *Section { - return &Section{ - f: f, - name: name, - keys: make(map[string]*Key), - keyList: make([]string, 0, 10), - keysHash: make(map[string]string), - } -} - -// Name returns name of Section. -func (s *Section) Name() string { - return s.name -} - -// Body returns rawBody of Section if the section was marked as unparseable. -// It still follows the other rules of the INI format surrounding leading/trailing whitespace. -func (s *Section) Body() string { - return strings.TrimSpace(s.rawBody) -} - -// SetBody updates body content only if section is raw. -func (s *Section) SetBody(body string) { - if !s.isRawSection { - return - } - s.rawBody = body -} - -// NewKey creates a new key to given section. -func (s *Section) NewKey(name, val string) (*Key, error) { - if len(name) == 0 { - return nil, errors.New("error creating new key: empty key name") - } else if s.f.options.Insensitive { - name = strings.ToLower(name) - } - - if s.f.BlockMode { - s.f.lock.Lock() - defer s.f.lock.Unlock() - } - - if inSlice(name, s.keyList) { - if s.f.options.AllowShadows { - if err := s.keys[name].addShadow(val); err != nil { - return nil, err - } - } else { - s.keys[name].value = val - s.keysHash[name] = val - } - return s.keys[name], nil - } - - s.keyList = append(s.keyList, name) - s.keys[name] = newKey(s, name, val) - s.keysHash[name] = val - return s.keys[name], nil -} - -// NewBooleanKey creates a new boolean type key to given section. -func (s *Section) NewBooleanKey(name string) (*Key, error) { - key, err := s.NewKey(name, "true") - if err != nil { - return nil, err - } - - key.isBooleanType = true - return key, nil -} - -// GetKey returns key in section by given name. 
-func (s *Section) GetKey(name string) (*Key, error) { - // FIXME: change to section level lock? - if s.f.BlockMode { - s.f.lock.RLock() - } - if s.f.options.Insensitive { - name = strings.ToLower(name) - } - key := s.keys[name] - if s.f.BlockMode { - s.f.lock.RUnlock() - } - - if key == nil { - // Check if it is a child-section. - sname := s.name - for { - if i := strings.LastIndex(sname, "."); i > -1 { - sname = sname[:i] - sec, err := s.f.GetSection(sname) - if err != nil { - continue - } - return sec.GetKey(name) - } else { - break - } - } - return nil, fmt.Errorf("error when getting key of section '%s': key '%s' not exists", s.name, name) - } - return key, nil -} - -// HasKey returns true if section contains a key with given name. -func (s *Section) HasKey(name string) bool { - key, _ := s.GetKey(name) - return key != nil -} - -// Haskey is a backwards-compatible name for HasKey. -// TODO: delete me in v2 -func (s *Section) Haskey(name string) bool { - return s.HasKey(name) -} - -// HasValue returns true if section contains given raw value. -func (s *Section) HasValue(value string) bool { - if s.f.BlockMode { - s.f.lock.RLock() - defer s.f.lock.RUnlock() - } - - for _, k := range s.keys { - if value == k.value { - return true - } - } - return false -} - -// Key assumes named Key exists in section and returns a zero-value when not. -func (s *Section) Key(name string) *Key { - key, err := s.GetKey(name) - if err != nil { - // It's OK here because the only possible error is empty key name, - // but if it's empty, this piece of code won't be executed. - key, _ = s.NewKey(name, "") - return key - } - return key -} - -// Keys returns list of keys of section. -func (s *Section) Keys() []*Key { - keys := make([]*Key, len(s.keyList)) - for i := range s.keyList { - keys[i] = s.Key(s.keyList[i]) - } - return keys -} - -// ParentKeys returns list of keys of parent section. -func (s *Section) ParentKeys() []*Key { - var parentKeys []*Key - sname := s.name - for { - if i := strings.LastIndex(sname, "."); i > -1 { - sname = sname[:i] - sec, err := s.f.GetSection(sname) - if err != nil { - continue - } - parentKeys = append(parentKeys, sec.Keys()...) - } else { - break - } - - } - return parentKeys -} - -// KeyStrings returns list of key names of section. -func (s *Section) KeyStrings() []string { - list := make([]string, len(s.keyList)) - copy(list, s.keyList) - return list -} - -// KeysHash returns keys hash consisting of names and values. -func (s *Section) KeysHash() map[string]string { - if s.f.BlockMode { - s.f.lock.RLock() - defer s.f.lock.RUnlock() - } - - hash := map[string]string{} - for key, value := range s.keysHash { - hash[key] = value - } - return hash -} - -// DeleteKey deletes a key from section. -func (s *Section) DeleteKey(name string) { - if s.f.BlockMode { - s.f.lock.Lock() - defer s.f.lock.Unlock() - } - - for i, k := range s.keyList { - if k == name { - s.keyList = append(s.keyList[:i], s.keyList[i+1:]...) - delete(s.keys, name) - return - } - } -} - -// ChildSections returns a list of child sections of current section. -// For example, "[parent.child1]" and "[parent.child12]" are child sections -// of section "[parent]". -func (s *Section) ChildSections() []*Section { - prefix := s.name + "." 
- children := make([]*Section, 0, 3) - for _, name := range s.f.sectionList { - if strings.HasPrefix(name, prefix) { - children = append(children, s.f.sections[name]) - } - } - return children -} diff --git a/vendor/github.com/go-ini/ini/struct.go b/vendor/github.com/go-ini/ini/struct.go deleted file mode 100644 index 9719dc6..0000000 --- a/vendor/github.com/go-ini/ini/struct.go +++ /dev/null @@ -1,512 +0,0 @@ -// Copyright 2014 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package ini - -import ( - "bytes" - "errors" - "fmt" - "reflect" - "strings" - "time" - "unicode" -) - -// NameMapper represents a ini tag name mapper. -type NameMapper func(string) string - -// Built-in name getters. -var ( - // AllCapsUnderscore converts to format ALL_CAPS_UNDERSCORE. - AllCapsUnderscore NameMapper = func(raw string) string { - newstr := make([]rune, 0, len(raw)) - for i, chr := range raw { - if isUpper := 'A' <= chr && chr <= 'Z'; isUpper { - if i > 0 { - newstr = append(newstr, '_') - } - } - newstr = append(newstr, unicode.ToUpper(chr)) - } - return string(newstr) - } - // TitleUnderscore converts to format title_underscore. - TitleUnderscore NameMapper = func(raw string) string { - newstr := make([]rune, 0, len(raw)) - for i, chr := range raw { - if isUpper := 'A' <= chr && chr <= 'Z'; isUpper { - if i > 0 { - newstr = append(newstr, '_') - } - chr -= ('A' - 'a') - } - newstr = append(newstr, chr) - } - return string(newstr) - } -) - -func (s *Section) parseFieldName(raw, actual string) string { - if len(actual) > 0 { - return actual - } - if s.f.NameMapper != nil { - return s.f.NameMapper(raw) - } - return raw -} - -func parseDelim(actual string) string { - if len(actual) > 0 { - return actual - } - return "," -} - -var reflectTime = reflect.TypeOf(time.Now()).Kind() - -// setSliceWithProperType sets proper values to slice based on its type. 
-func setSliceWithProperType(key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error {
-	var strs []string
-	if allowShadow {
-		strs = key.StringsWithShadows(delim)
-	} else {
-		strs = key.Strings(delim)
-	}
-
-	numVals := len(strs)
-	if numVals == 0 {
-		return nil
-	}
-
-	var vals interface{}
-	var err error
-
-	sliceOf := field.Type().Elem().Kind()
-	switch sliceOf {
-	case reflect.String:
-		vals = strs
-	case reflect.Int:
-		vals, err = key.parseInts(strs, true, false)
-	case reflect.Int64:
-		vals, err = key.parseInt64s(strs, true, false)
-	case reflect.Uint:
-		vals, err = key.parseUints(strs, true, false)
-	case reflect.Uint64:
-		vals, err = key.parseUint64s(strs, true, false)
-	case reflect.Float64:
-		vals, err = key.parseFloat64s(strs, true, false)
-	case reflectTime:
-		vals, err = key.parseTimesFormat(time.RFC3339, strs, true, false)
-	default:
-		return fmt.Errorf("unsupported type '[]%s'", sliceOf)
-	}
-	if err != nil && isStrict {
-		return err
-	}
-
-	slice := reflect.MakeSlice(field.Type(), numVals, numVals)
-	for i := 0; i < numVals; i++ {
-		switch sliceOf {
-		case reflect.String:
-			slice.Index(i).Set(reflect.ValueOf(vals.([]string)[i]))
-		case reflect.Int:
-			slice.Index(i).Set(reflect.ValueOf(vals.([]int)[i]))
-		case reflect.Int64:
-			slice.Index(i).Set(reflect.ValueOf(vals.([]int64)[i]))
-		case reflect.Uint:
-			slice.Index(i).Set(reflect.ValueOf(vals.([]uint)[i]))
-		case reflect.Uint64:
-			slice.Index(i).Set(reflect.ValueOf(vals.([]uint64)[i]))
-		case reflect.Float64:
-			slice.Index(i).Set(reflect.ValueOf(vals.([]float64)[i]))
-		case reflectTime:
-			slice.Index(i).Set(reflect.ValueOf(vals.([]time.Time)[i]))
-		}
-	}
-	field.Set(slice)
-	return nil
-}
-
-func wrapStrictError(err error, isStrict bool) error {
-	if isStrict {
-		return err
-	}
-	return nil
-}
-
-// setWithProperType sets proper value to field based on its type,
-// but it does not return an error on failed parsing,
-// because we want to use the default value that is already assigned to the struct.
-func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error { - switch t.Kind() { - case reflect.String: - if len(key.String()) == 0 { - return nil - } - field.SetString(key.String()) - case reflect.Bool: - boolVal, err := key.Bool() - if err != nil { - return wrapStrictError(err, isStrict) - } - field.SetBool(boolVal) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - durationVal, err := key.Duration() - // Skip zero value - if err == nil && int64(durationVal) > 0 { - field.Set(reflect.ValueOf(durationVal)) - return nil - } - - intVal, err := key.Int64() - if err != nil { - return wrapStrictError(err, isStrict) - } - field.SetInt(intVal) - // byte is an alias for uint8, so supporting uint8 breaks support for byte - case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: - durationVal, err := key.Duration() - // Skip zero value - if err == nil && int(durationVal) > 0 { - field.Set(reflect.ValueOf(durationVal)) - return nil - } - - uintVal, err := key.Uint64() - if err != nil { - return wrapStrictError(err, isStrict) - } - field.SetUint(uintVal) - - case reflect.Float32, reflect.Float64: - floatVal, err := key.Float64() - if err != nil { - return wrapStrictError(err, isStrict) - } - field.SetFloat(floatVal) - case reflectTime: - timeVal, err := key.Time() - if err != nil { - return wrapStrictError(err, isStrict) - } - field.Set(reflect.ValueOf(timeVal)) - case reflect.Slice: - return setSliceWithProperType(key, field, delim, allowShadow, isStrict) - default: - return fmt.Errorf("unsupported type '%s'", t) - } - return nil -} - -func parseTagOptions(tag string) (rawName string, omitEmpty bool, allowShadow bool) { - opts := strings.SplitN(tag, ",", 3) - rawName = opts[0] - if len(opts) > 1 { - omitEmpty = opts[1] == "omitempty" - } - if len(opts) > 2 { - allowShadow = opts[2] == "allowshadow" - } - return rawName, omitEmpty, allowShadow -} - -func (s *Section) mapTo(val reflect.Value, isStrict bool) error { - if val.Kind() == reflect.Ptr { - val = val.Elem() - } - typ := val.Type() - - for i := 0; i < typ.NumField(); i++ { - field := val.Field(i) - tpField := typ.Field(i) - - tag := tpField.Tag.Get("ini") - if tag == "-" { - continue - } - - rawName, _, allowShadow := parseTagOptions(tag) - fieldName := s.parseFieldName(tpField.Name, rawName) - if len(fieldName) == 0 || !field.CanSet() { - continue - } - - isAnonymous := tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous - isStruct := tpField.Type.Kind() == reflect.Struct - if isAnonymous { - field.Set(reflect.New(tpField.Type.Elem())) - } - - if isAnonymous || isStruct { - if sec, err := s.f.GetSection(fieldName); err == nil { - if err = sec.mapTo(field, isStrict); err != nil { - return fmt.Errorf("error mapping field(%s): %v", fieldName, err) - } - continue - } - } - - if key, err := s.GetKey(fieldName); err == nil { - delim := parseDelim(tpField.Tag.Get("delim")) - if err = setWithProperType(tpField.Type, key, field, delim, allowShadow, isStrict); err != nil { - return fmt.Errorf("error mapping field(%s): %v", fieldName, err) - } - } - } - return nil -} - -// MapTo maps section to given struct. 
-func (s *Section) MapTo(v interface{}) error {
-	typ := reflect.TypeOf(v)
-	val := reflect.ValueOf(v)
-	if typ.Kind() == reflect.Ptr {
-		typ = typ.Elem()
-		val = val.Elem()
-	} else {
-		return errors.New("cannot map to non-pointer struct")
-	}
-
-	return s.mapTo(val, false)
-}
-
-// StrictMapTo maps section to given struct in strict mode,
-// which returns all possible errors including value parsing errors.
-func (s *Section) StrictMapTo(v interface{}) error {
-	typ := reflect.TypeOf(v)
-	val := reflect.ValueOf(v)
-	if typ.Kind() == reflect.Ptr {
-		typ = typ.Elem()
-		val = val.Elem()
-	} else {
-		return errors.New("cannot map to non-pointer struct")
-	}
-
-	return s.mapTo(val, true)
-}
-
-// MapTo maps file to given struct.
-func (f *File) MapTo(v interface{}) error {
-	return f.Section("").MapTo(v)
-}
-
-// StrictMapTo maps file to given struct in strict mode,
-// which returns all possible errors including value parsing errors.
-func (f *File) StrictMapTo(v interface{}) error {
-	return f.Section("").StrictMapTo(v)
-}
-
-// MapToWithMapper maps data sources to given struct with name mapper.
-func MapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error {
-	cfg, err := Load(source, others...)
-	if err != nil {
-		return err
-	}
-	cfg.NameMapper = mapper
-	return cfg.MapTo(v)
-}
-
-// StrictMapToWithMapper maps data sources to given struct with name mapper in strict mode,
-// which returns all possible errors including value parsing errors.
-func StrictMapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error {
-	cfg, err := Load(source, others...)
-	if err != nil {
-		return err
-	}
-	cfg.NameMapper = mapper
-	return cfg.StrictMapTo(v)
-}
-
-// MapTo maps data sources to given struct.
-func MapTo(v, source interface{}, others ...interface{}) error {
-	return MapToWithMapper(v, nil, source, others...)
-}
-
-// StrictMapTo maps data sources to given struct in strict mode,
-// which returns all possible errors including value parsing errors.
-func StrictMapTo(v, source interface{}, others ...interface{}) error {
-	return StrictMapToWithMapper(v, nil, source, others...)
-}
-
-// reflectSliceWithProperType does the opposite of setSliceWithProperType.
-func reflectSliceWithProperType(key *Key, field reflect.Value, delim string) error {
-	slice := field.Slice(0, field.Len())
-	if field.Len() == 0 {
-		return nil
-	}
-
-	var buf bytes.Buffer
-	sliceOf := field.Type().Elem().Kind()
-	for i := 0; i < field.Len(); i++ {
-		switch sliceOf {
-		case reflect.String:
-			buf.WriteString(slice.Index(i).String())
-		case reflect.Int, reflect.Int64:
-			buf.WriteString(fmt.Sprint(slice.Index(i).Int()))
-		case reflect.Uint, reflect.Uint64:
-			buf.WriteString(fmt.Sprint(slice.Index(i).Uint()))
-		case reflect.Float64:
-			buf.WriteString(fmt.Sprint(slice.Index(i).Float()))
-		case reflectTime:
-			buf.WriteString(slice.Index(i).Interface().(time.Time).Format(time.RFC3339))
-		default:
-			return fmt.Errorf("unsupported type '[]%s'", sliceOf)
-		}
-		buf.WriteString(delim)
-	}
-	key.SetValue(buf.String()[:buf.Len()-1])
-	return nil
-}
-
-// reflectWithProperType does the opposite of setWithProperType.
-func reflectWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error {
-	switch t.Kind() {
-	case reflect.String:
-		key.SetValue(field.String())
-	case reflect.Bool:
-		key.SetValue(fmt.Sprint(field.Bool()))
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-		key.SetValue(fmt.Sprint(field.Int()))
-	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
-		key.SetValue(fmt.Sprint(field.Uint()))
-	case reflect.Float32, reflect.Float64:
-		key.SetValue(fmt.Sprint(field.Float()))
-	case reflectTime:
-		key.SetValue(fmt.Sprint(field.Interface().(time.Time).Format(time.RFC3339)))
-	case reflect.Slice:
-		return reflectSliceWithProperType(key, field, delim)
-	default:
-		return fmt.Errorf("unsupported type '%s'", t)
-	}
-	return nil
-}
-
-// CR: copied from encoding/json/encode.go with modifications for time.Time support.
-// TODO: add more test coverage.
-func isEmptyValue(v reflect.Value) bool {
-	switch v.Kind() {
-	case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
-		return v.Len() == 0
-	case reflect.Bool:
-		return !v.Bool()
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-		return v.Int() == 0
-	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
-		return v.Uint() == 0
-	case reflect.Float32, reflect.Float64:
-		return v.Float() == 0
-	case reflect.Interface, reflect.Ptr:
-		return v.IsNil()
-	case reflectTime:
-		t, ok := v.Interface().(time.Time)
-		return ok && t.IsZero()
-	}
-	return false
-}
-
-func (s *Section) reflectFrom(val reflect.Value) error {
-	if val.Kind() == reflect.Ptr {
-		val = val.Elem()
-	}
-	typ := val.Type()
-
-	for i := 0; i < typ.NumField(); i++ {
-		field := val.Field(i)
-		tpField := typ.Field(i)
-
-		tag := tpField.Tag.Get("ini")
-		if tag == "-" {
-			continue
-		}
-
-		opts := strings.SplitN(tag, ",", 2)
-		if len(opts) == 2 && opts[1] == "omitempty" && isEmptyValue(field) {
-			continue
-		}
-
-		fieldName := s.parseFieldName(tpField.Name, opts[0])
-		if len(fieldName) == 0 || !field.CanSet() {
-			continue
-		}
-
-		if (tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous) ||
-			(tpField.Type.Kind() == reflect.Struct && tpField.Type.Name() != "Time") {
-			// Note: The only error here is section doesn't exist.
-			sec, err := s.f.GetSection(fieldName)
-			if err != nil {
-				// Note: fieldName can never be empty here, ignore error.
-				sec, _ = s.f.NewSection(fieldName)
-			}
-
-			// Add comment from comment tag
-			if len(sec.Comment) == 0 {
-				sec.Comment = tpField.Tag.Get("comment")
-			}
-
-			if err = sec.reflectFrom(field); err != nil {
-				return fmt.Errorf("error reflecting field (%s): %v", fieldName, err)
-			}
-			continue
-		}
-
-		// Note: Same reason as section above.
-		key, err := s.GetKey(fieldName)
-		if err != nil {
-			key, _ = s.NewKey(fieldName, "")
-		}
-
-		// Add comment from comment tag
-		if len(key.Comment) == 0 {
-			key.Comment = tpField.Tag.Get("comment")
-		}
-
-		if err = reflectWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil {
-			return fmt.Errorf("error reflecting field (%s): %v", fieldName, err)
-		}
-
-	}
-	return nil
-}
-
-// ReflectFrom reflects section from given struct.
-func (s *Section) ReflectFrom(v interface{}) error {
-	typ := reflect.TypeOf(v)
-	val := reflect.ValueOf(v)
-	if typ.Kind() == reflect.Ptr {
-		typ = typ.Elem()
-		val = val.Elem()
-	} else {
-		return errors.New("cannot reflect from non-pointer struct")
-	}
-
-	return s.reflectFrom(val)
-}
-
-// ReflectFrom reflects file from given struct.
-func (f *File) ReflectFrom(v interface{}) error {
-	return f.Section("").ReflectFrom(v)
-}
-
-// ReflectFromWithMapper reflects data sources from given struct with name mapper.
-func ReflectFromWithMapper(cfg *File, v interface{}, mapper NameMapper) error {
-	cfg.NameMapper = mapper
-	return cfg.ReflectFrom(v)
-}
-
-// ReflectFrom reflects data sources from given struct.
-func ReflectFrom(cfg *File, v interface{}) error {
-	return ReflectFromWithMapper(cfg, v, nil)
-}
diff --git a/vendor/github.com/go-pg/pg/.travis.yml b/vendor/github.com/go-pg/pg/.travis.yml
deleted file mode 100644
index 5ae69b8..0000000
--- a/vendor/github.com/go-pg/pg/.travis.yml
+++ /dev/null
@@ -1,23 +0,0 @@
-dist: trusty
-language: go
-
-addons:
-  postgresql: "9.6"
-
-go:
-  - 1.9.x
-  - 1.10.x
-  - tip
-
-matrix:
-  allow_failures:
-    - go: tip
-
-before_install:
-  - psql -U postgres -c "CREATE EXTENSION hstore"
-
-install:
-  - go get github.com/jinzhu/inflection
-  - go get gopkg.in/check.v1
-  - go get github.com/onsi/ginkgo
-  - go get github.com/onsi/gomega
diff --git a/vendor/github.com/go-pg/pg/CHANGELOG.md b/vendor/github.com/go-pg/pg/CHANGELOG.md
deleted file mode 100644
index e99e449..0000000
--- a/vendor/github.com/go-pg/pg/CHANGELOG.md
+++ /dev/null
@@ -1,70 +0,0 @@
-# Changelog
-
-## Unreleased
-
-- Added Options.MinIdleConns.
-- Options.MaxAge renamed to Options.MaxConnAge.
-- PoolStats.FreeConns is renamed to PoolStats.IdleConns.
-- New hook BeforeSelectQuery.
-- `,override` is renamed to `,inherit`.
-
-## v6.15
-
-- Dialer.KeepAlive is set to 5 minutes by default.
-
-## v6.14
-
-- Fields ignored with `sql:"-"` tag are no longer considered by ORM relation detector.
-
-## v6.12
-
-- `Insert`, `Update`, and `Delete` can return `pg.ErrNoRows` and `pg.ErrMultiRows` when `Returning` is used and model expects single row.
-
-## v6.11
-
-- `db.Model(&strct).Update()` and `db.Model(&strct).Delete()` no longer add WHERE condition based on primary key when there are no conditions. Instead you should use `db.Update(&strct)` or `db.Model(&strct).WherePK().Update()`.
-
-## v6.10
-
-- `?Columns` is renamed to `?TableColumns`. `?Columns` is changed to produce column names without table alias.
-
-## v6.9
-
-- `pg:"fk"` tag now accepts SQL names instead of Go names, e.g. `pg:"fk:ParentId"` becomes `pg:"fk:parent_id"`. Old code should continue working in most cases, but it is strongly advised to start using the new convention.
-- uint and uint64 SQL type is changed from decimal to bigint according to the lesser of two evils principle. Use `sql:"type:decimal"` to get old behavior.
-
-## v6.8
-
-- `CreateTable` no longer adds ON DELETE hook by default. To get old behavior users should add `sql:"on_delete:CASCADE"` tag on foreign key field.
-
-## v6
-
- - `types.Result` is renamed to `orm.Result`.
 - - Added `OnQueryProcessed` event that can be used to log / report queries timing (see the sketch just below). Query logger is removed.
 - - `orm.URLValues` is renamed to `orm.URLFilters`. It no longer adds ORDER clause.
 - - `orm.Pager` is renamed to `orm.Pagination`.
 - - Support for net.IP and net.IPNet.
 - - Support for context.Context.
 - - Bulk/multi updates.
 - - Query.WhereGroup for enclosing conditions in parentheses.
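A minimal sketch of the v6 `OnQueryProcessed` hook mentioned in the changelog above, assuming a reachable PostgreSQL instance (the connection options here are placeholders, and `pg.Scan` is assumed from the go-pg API; `OnQueryProcessed` and `QueryProcessedEvent` themselves appear in the vendored `hook.go` removed further down in this diff):

```go
package main

import (
	"log"
	"time"

	"github.com/go-pg/pg"
)

func main() {
	// Placeholder credentials; adjust for a real database.
	db := pg.Connect(&pg.Options{User: "postgres"})
	defer db.Close()

	// Log every processed query together with how long it took.
	db.OnQueryProcessed(func(ev *pg.QueryProcessedEvent) {
		query, err := ev.FormattedQuery()
		if err != nil {
			log.Printf("formatting query failed: %v", err)
			return
		}
		log.Printf("%s took %s", query, time.Since(ev.StartTime))
	})

	var one int
	if _, err := db.QueryOne(pg.Scan(&one), "SELECT 1"); err != nil {
		log.Fatal(err)
	}
}
```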
-
-## v5
-
- - All fields are nullable by default. `,null` tag is replaced with `,notnull`.
 - - `Result.Affected` renamed to `Result.RowsAffected`.
 - - Added `Result.RowsReturned`.
 - - `Create` renamed to `Insert`, `BeforeCreate` to `BeforeInsert`, `AfterCreate` to `AfterInsert`.
 - - Indexed placeholders support, e.g. `db.Exec("SELECT ?0 + ?0", 1)` (a short sketch follows the vendored Makefile below).
 - - Named placeholders are evaluated when query is executed.
 - - Added Update and Delete hooks.
 - - Order reworked to quote column names. OrderExpr added to bypass Order quoting restrictions.
 - - Group reworked to quote column names. GroupExpr added to bypass Group quoting restrictions.
-
-## v4
-
- - `Options.Host` and `Options.Port` merged into `Options.Addr`.
 - - Added `Options.MaxRetries`. Now queries are not retried by default.
 - - `LoadInto` renamed to `Scan`, `ColumnLoader` renamed to `ColumnScanner`, LoadColumn renamed to ScanColumn, `NewRecord() interface{}` changed to `NewModel() ColumnScanner`, `AppendQuery(dst []byte) []byte` changed to `AppendValue(dst []byte, quote bool) ([]byte, error)`.
 - - Structs, maps and slices are marshalled to JSON by default.
 - - Added support for scanning slices, e.g. scanning `[]int`.
 - - Added object relational mapping.
diff --git a/vendor/github.com/go-pg/pg/LICENSE b/vendor/github.com/go-pg/pg/LICENSE
deleted file mode 100644
index 7751509..0000000
--- a/vendor/github.com/go-pg/pg/LICENSE
+++ /dev/null
@@ -1,24 +0,0 @@
-Copyright (c) 2013 github.com/go-pg/pg Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
-  * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
-  * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/go-pg/pg/Makefile b/vendor/github.com/go-pg/pg/Makefile
deleted file mode 100644
index 5c67b33..0000000
--- a/vendor/github.com/go-pg/pg/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-all:
-	go test ./...
-	go test ./... -short -race
-	env GOOS=linux GOARCH=386 go test ./...
-	go vet ./...
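And a quick sketch of the v5 indexed-placeholder behaviour referenced in the changelog above, again illustrative only: it assumes a reachable PostgreSQL instance with placeholder credentials, and `pg.Scan` is assumed from the go-pg API:

```go
package main

import (
	"fmt"

	"github.com/go-pg/pg"
)

func main() {
	// Placeholder credentials; adjust for a real database.
	db := pg.Connect(&pg.Options{User: "postgres"})
	defer db.Close()

	// Indexed placeholders (v5+): ?0 refers to the first parameter,
	// so a single value can be reused several times in one query.
	var sum int
	if _, err := db.QueryOne(pg.Scan(&sum), "SELECT ?0 + ?0", 1); err != nil {
		panic(err)
	}
	fmt.Println(sum) // prints 2
}
```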
diff --git a/vendor/github.com/go-pg/pg/README.md b/vendor/github.com/go-pg/pg/README.md deleted file mode 100644 index 8e652ff..0000000 --- a/vendor/github.com/go-pg/pg/README.md +++ /dev/null @@ -1,167 +0,0 @@ -# PostgreSQL client and ORM for Golang - -[![Build Status](https://travis-ci.org/go-pg/pg.svg?branch=master)](https://travis-ci.org/go-pg/pg) -[![GoDoc](https://godoc.org/github.com/go-pg/pg?status.svg)](https://godoc.org/github.com/go-pg/pg) - -## Features: - -- Basic types: integers, floats, string, bool, time.Time, net.IP, net.IPNet. -- sql.NullBool, sql.NullString, sql.NullInt64, sql.NullFloat64 and [pg.NullTime](http://godoc.org/github.com/go-pg/pg#NullTime). -- [sql.Scanner](http://golang.org/pkg/database/sql/#Scanner) and [sql/driver.Valuer](http://golang.org/pkg/database/sql/driver/#Valuer) interfaces. -- Structs, maps and arrays are marshalled as JSON by default. -- PostgreSQL multidimensional Arrays using [array tag](https://godoc.org/github.com/go-pg/pg#example-DB-Model-PostgresArrayStructTag) and [Array wrapper](https://godoc.org/github.com/go-pg/pg#example-Array). -- Hstore using [hstore tag](https://godoc.org/github.com/go-pg/pg#example-DB-Model-HstoreStructTag) and [Hstore wrapper](https://godoc.org/github.com/go-pg/pg#example-Hstore). -- [Composite types](https://godoc.org/github.com/go-pg/pg#example-DB-Model-CompositeType). -- All struct fields are nullable by default and zero values (empty string, 0, zero time, empty map or slice) are marshalled as SQL `NULL`. `sql:",notnull"` tag is used to reverse this behaviour. -- [Transactions](http://godoc.org/github.com/go-pg/pg#example-DB-Begin). -- [Prepared statements](http://godoc.org/github.com/go-pg/pg#example-DB-Prepare). -- [Notifications](http://godoc.org/github.com/go-pg/pg#example-Listener) using `LISTEN` and `NOTIFY`. -- [Copying data](http://godoc.org/github.com/go-pg/pg#example-DB-CopyFrom) using `COPY FROM` and `COPY TO`. -- [Timeouts](http://godoc.org/github.com/go-pg/pg#Options). -- Automatic connection pooling with [circuit breaker](https://en.wikipedia.org/wiki/Circuit_breaker_design_pattern) support. -- Queries retries on network errors. -- Working with models using [ORM](https://godoc.org/github.com/go-pg/pg#example-DB-Model) and [SQL](https://godoc.org/github.com/go-pg/pg#example-DB-Query). -- Scanning variables using [ORM](https://godoc.org/github.com/go-pg/pg#example-DB-Select-SomeColumnsIntoVars) and [SQL](https://godoc.org/github.com/go-pg/pg#example-Scan). -- [SelectOrInsert](https://godoc.org/github.com/go-pg/pg#example-DB-Insert-SelectOrInsert) using on-conflict. -- [INSERT ... ON CONFLICT DO UPDATE](https://godoc.org/github.com/go-pg/pg#example-DB-Insert-OnConflictDoUpdate) using ORM. -- Bulk/batch [inserts](https://godoc.org/github.com/go-pg/pg#example-DB-Insert-BulkInsert), [updates](https://godoc.org/github.com/go-pg/pg#example-DB-Update-BulkUpdate), and [deletes](https://godoc.org/github.com/go-pg/pg#example-DB-Delete-BulkDelete). -- Common table expressions using [WITH](https://godoc.org/github.com/go-pg/pg#example-DB-Select-With) and [WrapWith](https://godoc.org/github.com/go-pg/pg#example-DB-Select-WrapWith). -- [CountEstimate](https://godoc.org/github.com/go-pg/pg#example-DB-Model-CountEstimate) using `EXPLAIN` to get [estimated number of matching rows](https://wiki.postgresql.org/wiki/Count_estimate). 
-- ORM supports [has one](https://godoc.org/github.com/go-pg/pg#example-DB-Model-HasOne), [belongs to](https://godoc.org/github.com/go-pg/pg#example-DB-Model-BelongsTo), [has many](https://godoc.org/github.com/go-pg/pg#example-DB-Model-HasMany), and [many to many](https://godoc.org/github.com/go-pg/pg#example-DB-Model-ManyToMany) with composite/multi-column primary keys. -- [Soft deletes](https://godoc.org/github.com/go-pg/pg#example-DB-Model-SoftDelete). -- [Creating tables from structs](https://godoc.org/github.com/go-pg/pg#example-DB-CreateTable). -- [Pagination](https://godoc.org/github.com/go-pg/pg/orm#Pagination) and [URL filters](https://godoc.org/github.com/go-pg/pg/orm#URLFilters) helpers. -- [ForEach](https://godoc.org/github.com/go-pg/pg#example-DB-Model-ForEach) that calls a function for each row returned by the query without loading all rows into the memory. -- Works with PgBouncer in transaction pooling mode. -- [Migrations](https://github.com/go-pg/migrations). -- [Sharding](https://github.com/go-pg/sharding). - -## Get Started - -```shell -go get -u github.com/go-pg/pg -``` - -- [Wiki](https://github.com/go-pg/pg/wiki) -- [API docs](http://godoc.org/github.com/go-pg/pg) -- [Examples](http://godoc.org/github.com/go-pg/pg#pkg-examples) - -## Look & Feel - -```go -package pg_test - -import ( - "fmt" - - "github.com/go-pg/pg" - "github.com/go-pg/pg/orm" -) - -type User struct { - Id int64 - Name string - Emails []string -} - -func (u User) String() string { - return fmt.Sprintf("User<%d %s %v>", u.Id, u.Name, u.Emails) -} - -type Story struct { - Id int64 - Title string - AuthorId int64 - Author *User -} - -func (s Story) String() string { - return fmt.Sprintf("Story<%d %s %s>", s.Id, s.Title, s.Author) -} - -func ExampleDB_Model() { - db := pg.Connect(&pg.Options{ - User: "postgres", - }) - defer db.Close() - - err := createSchema(db) - if err != nil { - panic(err) - } - - user1 := &User{ - Name: "admin", - Emails: []string{"admin1@admin", "admin2@admin"}, - } - err = db.Insert(user1) - if err != nil { - panic(err) - } - - err = db.Insert(&User{ - Name: "root", - Emails: []string{"root1@root", "root2@root"}, - }) - if err != nil { - panic(err) - } - - story1 := &Story{ - Title: "Cool story", - AuthorId: user1.Id, - } - err = db.Insert(story1) - if err != nil { - panic(err) - } - - // Select user by primary key. - user := &User{Id: user1.Id} - err = db.Select(user) - if err != nil { - panic(err) - } - - // Select all users. - var users []User - err = db.Model(&users).Select() - if err != nil { - panic(err) - } - - // Select story and associated author in one query. - story := new(Story) - err = db.Model(story). - Relation("Author"). - Where("story.id = ?", story1.Id). 
-		Select()
-	if err != nil {
-		panic(err)
-	}
-
-	fmt.Println(user)
-	fmt.Println(users)
-	fmt.Println(story)
-	// Output: User<1 admin [admin1@admin admin2@admin]>
-	// [User<1 admin [admin1@admin admin2@admin]> User<2 root [root1@root root2@root]>]
-	// Story<1 Cool story User<1 admin [admin1@admin admin2@admin]>>
-}
-
-func createSchema(db *pg.DB) error {
-	for _, model := range []interface{}{(*User)(nil), (*Story)(nil)} {
-		err := db.CreateTable(model, &orm.CreateTableOptions{
-			Temp: true,
-		})
-		if err != nil {
-			return err
-		}
-	}
-	return nil
-}
-```
-
-## See also
-
-- [Golang msgpack](https://github.com/vmihailenco/msgpack)
-- [Golang message task queue](https://github.com/go-msgqueue/msgqueue)
diff --git a/vendor/github.com/go-pg/pg/db.go b/vendor/github.com/go-pg/pg/db.go
deleted file mode 100644
index c8bbac0..0000000
--- a/vendor/github.com/go-pg/pg/db.go
+++ /dev/null
@@ -1,517 +0,0 @@
-package pg
-
-import (
-	"context"
-	"fmt"
-	"io"
-	"time"
-
-	"github.com/go-pg/pg/internal"
-	"github.com/go-pg/pg/internal/pool"
-	"github.com/go-pg/pg/orm"
-)
-
-// Connect connects to a database using provided options.
-//
-// The returned DB is safe for concurrent use by multiple goroutines
-// and maintains its own connection pool.
-func Connect(opt *Options) *DB {
-	opt.init()
-	return &DB{
-		opt:  opt,
-		pool: newConnPool(opt),
-	}
-}
-
-// DB is a database handle representing a pool of zero or more
-// underlying connections. It's safe for concurrent use by multiple
-// goroutines.
-type DB struct {
-	opt   *Options
-	pool  pool.Pooler
-	fmter orm.Formatter
-
-	queryProcessedHooks []queryProcessedHook
-
-	ctx context.Context
-}
-
-var _ orm.DB = (*DB)(nil)
-
-func (db *DB) String() string {
-	return fmt.Sprintf("DB<Addr=%q%s>", db.opt.Addr, db.fmter)
-}
-
-// Options returns read-only Options that were used to connect to the DB.
-func (db *DB) Options() *Options {
-	return db.opt
-}
-
-// Context returns DB context.
-func (db *DB) Context() context.Context {
-	if db.ctx != nil {
-		return db.ctx
-	}
-	return context.Background()
-}
-
-// WithContext returns a copy of the DB that uses the ctx.
-func (db *DB) WithContext(ctx context.Context) *DB {
-	return &DB{
-		opt:   db.opt,
-		pool:  db.pool,
-		fmter: db.fmter,
-
-		queryProcessedHooks: copyQueryProcessedHooks(db.queryProcessedHooks),
-
-		ctx: ctx,
-	}
-}
-
-// WithTimeout returns a copy of the DB that uses d as the read/write timeout.
-func (db *DB) WithTimeout(d time.Duration) *DB {
-	newopt := *db.opt
-	newopt.ReadTimeout = d
-	newopt.WriteTimeout = d
-
-	return &DB{
-		opt:   &newopt,
-		pool:  db.pool,
-		fmter: db.fmter,
-
-		queryProcessedHooks: copyQueryProcessedHooks(db.queryProcessedHooks),
-
-		ctx: db.ctx,
-	}
-}
-
-// WithParam returns a copy of the DB that replaces the param with the value
-// in queries.
-func (db *DB) WithParam(param string, value interface{}) *DB {
-	return &DB{
-		opt:   db.opt,
-		pool:  db.pool,
-		fmter: db.fmter.WithParam(param, value),
-
-		queryProcessedHooks: copyQueryProcessedHooks(db.queryProcessedHooks),
-
-		ctx: db.ctx,
-	}
-}
-
-// Param returns value for the param.
-func (db *DB) Param(param string) interface{} {
-	return db.fmter.Param(param)
-}
-
-type PoolStats pool.Stats
-
-// PoolStats returns connection pool stats.
-func (db *DB) PoolStats() *PoolStats { - stats := db.pool.Stats() - return (*PoolStats)(stats) -} - -func (db *DB) retryBackoff(retry int) time.Duration { - return internal.RetryBackoff(retry, db.opt.MinRetryBackoff, db.opt.MaxRetryBackoff) -} - -func (db *DB) conn() (*pool.Conn, error) { - cn, err := db.pool.Get() - if err != nil { - return nil, err - } - - if cn.InitedAt.IsZero() { - cn.InitedAt = time.Now() - err = db.initConn(cn) - if err != nil { - db.pool.Remove(cn) - return nil, err - } - } - - return cn, nil -} - -func (db *DB) initConn(cn *pool.Conn) error { - if db.opt.TLSConfig != nil { - err := db.enableSSL(cn, db.opt.TLSConfig) - if err != nil { - return err - } - } - - err := db.startup(cn, db.opt.User, db.opt.Password, db.opt.Database, db.opt.ApplicationName) - if err != nil { - return err - } - - if db.opt.OnConnect != nil { - dbConn := &DB{ - opt: db.opt, - pool: pool.NewSingleConnPool(cn), - fmter: db.fmter, - } - return db.opt.OnConnect(dbConn) - } - - return nil -} - -func (db *DB) freeConn(cn *pool.Conn, err error) { - if !isBadConn(err, false) { - db.pool.Put(cn) - } else { - db.pool.Remove(cn) - } -} - -func (db *DB) shouldRetry(err error) bool { - if err == nil { - return false - } - if pgerr, ok := err.(Error); ok { - switch pgerr.Field('C') { - case "40001": // serialization_failure - return true - case "55000": // attempted to delete invisible tuple - return true - case "57014": // statement_timeout - return db.opt.RetryStatementTimeout - default: - return false - } - } - return isNetworkError(err) -} - -// Close closes the database client, releasing any open resources. -// -// It is rare to Close a DB, as the DB handle is meant to be -// long-lived and shared between many goroutines. -func (db *DB) Close() error { - return db.pool.Close() -} - -// Exec executes a query ignoring returned rows. The params are for any -// placeholders in the query. -func (db *DB) Exec(query interface{}, params ...interface{}) (res orm.Result, err error) { - for attempt := 0; attempt <= db.opt.MaxRetries; attempt++ { - var cn *pool.Conn - - if attempt >= 1 { - time.Sleep(db.retryBackoff(attempt - 1)) - } - - cn, err = db.conn() - if err != nil { - continue - } - - start := time.Now() - res, err = db.simpleQuery(cn, query, params...) - db.freeConn(cn, err) - db.queryProcessed(db, start, query, params, attempt, res, err) - - if !db.shouldRetry(err) { - break - } - } - return res, err -} - -// ExecOne acts like Exec, but query must affect only one row. It -// returns ErrNoRows error when query returns zero rows or -// ErrMultiRows when query returns multiple rows. -func (db *DB) ExecOne(query interface{}, params ...interface{}) (orm.Result, error) { - res, err := db.Exec(query, params...) - if err != nil { - return nil, err - } - - if err := internal.AssertOneRow(res.RowsAffected()); err != nil { - return nil, err - } - return res, nil -} - -// Query executes a query that returns rows, typically a SELECT. -// The params are for any placeholders in the query. -func (db *DB) Query(model, query interface{}, params ...interface{}) (res orm.Result, err error) { - for attempt := 0; attempt <= db.opt.MaxRetries; attempt++ { - var cn *pool.Conn - - if attempt >= 1 { - time.Sleep(db.retryBackoff(attempt - 1)) - } - - cn, err = db.conn() - if err != nil { - continue - } - - start := time.Now() - res, err = db.simpleQueryData(cn, model, query, params...) 
- db.freeConn(cn, err) - db.queryProcessed(db, start, query, params, attempt, res, err) - - if !db.shouldRetry(err) { - break - } - } - if err != nil { - return nil, err - } - - if mod := res.Model(); mod != nil && res.RowsReturned() > 0 { - if err = mod.AfterQuery(db); err != nil { - return res, err - } - } - - return res, nil -} - -// QueryOne acts like Query, but query must return only one row. It -// returns ErrNoRows error when query returns zero rows or -// ErrMultiRows when query returns multiple rows. -func (db *DB) QueryOne(model, query interface{}, params ...interface{}) (orm.Result, error) { - res, err := db.Query(model, query, params...) - if err != nil { - return nil, err - } - - if err := internal.AssertOneRow(res.RowsAffected()); err != nil { - return nil, err - } - return res, nil -} - -// Listen listens for notifications sent with NOTIFY command. -func (db *DB) Listen(channels ...string) *Listener { - ln := &Listener{ - db: db, - } - ln.init() - _ = ln.Listen(channels...) - return ln -} - -// CopyFrom copies data from reader to a table. -func (db *DB) CopyFrom(r io.Reader, query interface{}, params ...interface{}) (orm.Result, error) { - cn, err := db.conn() - if err != nil { - return nil, err - } - - res, err := db.copyFrom(cn, r, query, params...) - db.freeConn(cn, err) - return res, err -} - -func (db *DB) copyFrom(cn *pool.Conn, r io.Reader, query interface{}, params ...interface{}) (orm.Result, error) { - err := cn.WithWriter(db.opt.WriteTimeout, func(wb *pool.WriteBuffer) error { - return writeQueryMsg(wb, db, query, params...) - }) - if err != nil { - return nil, err - } - - err = cn.WithReader(db.opt.ReadTimeout, func(rd *pool.Reader) error { - return readCopyInResponse(rd) - }) - if err != nil { - return nil, err - } - - for { - err = cn.WithWriter(db.opt.WriteTimeout, func(wb *pool.WriteBuffer) error { - return writeCopyData(wb, r) - }) - if err != nil { - if err == io.EOF { - break - } - return nil, err - } - } - - err = cn.WithWriter(db.opt.WriteTimeout, func(wb *pool.WriteBuffer) error { - writeCopyDone(wb) - return nil - }) - if err != nil { - return nil, err - } - - var res orm.Result - err = cn.WithReader(db.opt.ReadTimeout, func(rd *pool.Reader) error { - res, err = readReadyForQuery(rd) - return err - }) - if err != nil { - return nil, err - } - - return res, nil -} - -// CopyTo copies data from a table to writer. -func (db *DB) CopyTo(w io.Writer, query interface{}, params ...interface{}) (orm.Result, error) { - cn, err := db.conn() - if err != nil { - return nil, err - } - - res, err := db.copyTo(cn, w, query, params...) - if err != nil { - db.freeConn(cn, err) - return nil, err - } - - db.pool.Put(cn) - return res, nil -} - -func (db *DB) copyTo(cn *pool.Conn, w io.Writer, query interface{}, params ...interface{}) (orm.Result, error) { - err := cn.WithWriter(db.opt.WriteTimeout, func(wb *pool.WriteBuffer) error { - return writeQueryMsg(wb, db, query, params...) - }) - if err != nil { - return nil, err - } - - var res orm.Result - err = cn.WithReader(db.opt.ReadTimeout, func(rd *pool.Reader) error { - err := readCopyOutResponse(rd) - if err != nil { - return err - } - - res, err = readCopyData(rd, w) - return err - }) - if err != nil { - return nil, err - } - - return res, nil -} - -// Model returns new query for the model. -func (db *DB) Model(model ...interface{}) *orm.Query { - return orm.NewQuery(db, model...) -} - -// Select selects the model by primary key. 
-func (db *DB) Select(model interface{}) error {
-	return orm.Select(db, model)
-}
-
-// Insert inserts the model updating primary keys if they are empty.
-func (db *DB) Insert(model ...interface{}) error {
-	return orm.Insert(db, model...)
-}
-
-// Update updates the model by primary key.
-func (db *DB) Update(model interface{}) error {
-	return orm.Update(db, model)
-}
-
-// Delete deletes the model by primary key.
-func (db *DB) Delete(model interface{}) error {
-	return orm.Delete(db, model)
-}
-
-// ForceDelete forces delete of the model with deleted_at column.
-func (db *DB) ForceDelete(model interface{}) error {
-	return orm.ForceDelete(db, model)
-}
-
-// CreateTable creates table for the model. It recognizes the following field tags:
-//   - notnull - sets NOT NULL constraint.
-//   - unique - sets UNIQUE constraint.
-//   - default:value - sets default value.
-func (db *DB) CreateTable(model interface{}, opt *orm.CreateTableOptions) error {
-	return orm.CreateTable(db, model, opt)
-}
-
-// DropTable drops table for the model.
-func (db *DB) DropTable(model interface{}, opt *orm.DropTableOptions) error {
-	return orm.DropTable(db, model, opt)
-}
-
-func (db *DB) CreateComposite(model interface{}, opt *orm.CreateCompositeOptions) error {
-	return orm.CreateComposite(db, model, opt)
-}
-
-func (db *DB) DropComposite(model interface{}, opt *orm.DropCompositeOptions) error {
-	return orm.DropComposite(db, model, opt)
-}
-
-func (db *DB) FormatQuery(dst []byte, query string, params ...interface{}) []byte {
-	return db.fmter.Append(dst, query, params...)
-}
-
-func (db *DB) cancelRequest(processId, secretKey int32) error {
-	cn, err := db.pool.NewConn()
-	if err != nil {
-		return err
-	}
-
-	err = cn.WithWriter(db.opt.WriteTimeout, func(wb *pool.WriteBuffer) error {
-		writeCancelRequestMsg(wb, processId, secretKey)
-		return nil
-	})
-	if err != nil {
-		return err
-	}
-
-	cn.Close()
-	return nil
-}
-
-func (db *DB) simpleQuery(
-	cn *pool.Conn, query interface{}, params ...interface{},
-) (orm.Result, error) {
-	err := cn.WithWriter(db.opt.WriteTimeout, func(wb *pool.WriteBuffer) error {
-		return writeQueryMsg(wb, db, query, params...)
-	})
-	if err != nil {
-		return nil, err
-	}
-
-	var res orm.Result
-	err = cn.WithReader(db.opt.ReadTimeout, func(rd *pool.Reader) error {
-		res, err = readSimpleQuery(rd)
-		return err
-	})
-	if err != nil {
-		return nil, err
-	}
-
-	return res, nil
-}
-
-func (db *DB) simpleQueryData(
-	cn *pool.Conn, model, query interface{}, params ...interface{},
-) (orm.Result, error) {
-	err := cn.WithWriter(db.opt.WriteTimeout, func(wb *pool.WriteBuffer) error {
-		return writeQueryMsg(wb, db, query, params...)
-	})
-	if err != nil {
-		return nil, err
-	}
-
-	var res orm.Result
-	err = cn.WithReader(db.opt.ReadTimeout, func(rd *pool.Reader) error {
-		res, err = readSimpleQueryData(rd, model)
-		return err
-	})
-	if err != nil {
-		return nil, err
-	}
-
-	return res, nil
-}
diff --git a/vendor/github.com/go-pg/pg/doc.go b/vendor/github.com/go-pg/pg/doc.go
deleted file mode 100644
index f1d8eb1..0000000
--- a/vendor/github.com/go-pg/pg/doc.go
+++ /dev/null
@@ -1,4 +0,0 @@
-/*
-Package github.com/go-pg/pg implements a PostgreSQL client.
-*/ -package pg diff --git a/vendor/github.com/go-pg/pg/error.go b/vendor/github.com/go-pg/pg/error.go deleted file mode 100644 index 9952076..0000000 --- a/vendor/github.com/go-pg/pg/error.go +++ /dev/null @@ -1,61 +0,0 @@ -package pg - -import ( - "io" - "net" - - "github.com/go-pg/pg/internal" -) - -// ErrNoRows is returned by QueryOne and ExecOne when query returned zero rows -// but at least one row is expected. -var ErrNoRows = internal.ErrNoRows - -// ErrMultiRows is returned by QueryOne and ExecOne when query returned -// multiple rows but exactly one row is expected. -var ErrMultiRows = internal.ErrMultiRows - -// Error represents an error returned by PostgreSQL server -// using PostgreSQL ErrorResponse protocol. -// -// https://www.postgresql.org/docs/10/static/protocol-message-formats.html -type Error interface { - // Field returns a string value associated with an error code. - // - // https://www.postgresql.org/docs/10/static/protocol-error-fields.html - Field(byte) string - - // IntegrityViolation reports whether an error is a part of - // Integrity Constraint Violation class of errors. - // - // https://www.postgresql.org/docs/10/static/errcodes-appendix.html - IntegrityViolation() bool -} - -var _ Error = (*internal.PGError)(nil) - -func isBadConn(err error, allowTimeout bool) bool { - if err == nil { - return false - } - if _, ok := err.(internal.Error); ok { - return false - } - if pgErr, ok := err.(Error); ok && pgErr.Field('S') != "FATAL" { - return false - } - if allowTimeout { - if netErr, ok := err.(net.Error); ok && netErr.Timeout() { - return false - } - } - return true -} - -func isNetworkError(err error) bool { - if err == io.EOF { - return true - } - _, ok := err.(net.Error) - return ok -} diff --git a/vendor/github.com/go-pg/pg/hook.go b/vendor/github.com/go-pg/pg/hook.go deleted file mode 100644 index ff6723b..0000000 --- a/vendor/github.com/go-pg/pg/hook.go +++ /dev/null @@ -1,145 +0,0 @@ -package pg - -import ( - "fmt" - "runtime" - "strings" - "time" - - "github.com/go-pg/pg/orm" -) - -type dummyDB struct { - orm.DB -} - -var _ orm.DB = dummyDB{} - -func (dummyDB) FormatQuery(dst []byte, query string, params ...interface{}) []byte { - return append(dst, query...) -} - -type QueryProcessedEvent struct { - StartTime time.Time - Func string - File string - Line int - - DB orm.DB - Query interface{} - Params []interface{} - Attempt int - Result orm.Result - Error error -} - -func (ev *QueryProcessedEvent) UnformattedQuery() (string, error) { - b, err := queryString(ev.Query) - if err != nil { - return "", err - } - return string(b), nil -} - -func (ev *QueryProcessedEvent) FormattedQuery() (string, error) { - b, err := appendQuery(nil, ev.DB, ev.Query, ev.Params...) - if err != nil { - return "", err - } - return string(b), nil -} - -func queryString(query interface{}) ([]byte, error) { - switch query := query.(type) { - case orm.QueryAppender: - query = query.Copy() - query.Query().DB(dummyDB{}) - return query.AppendQuery(nil) - case string: - return dummyDB{}.FormatQuery(nil, query), nil - default: - return nil, fmt.Errorf("pg: can't append %T", query) - } -} - -type queryProcessedHook func(*QueryProcessedEvent) - -// OnQueryProcessed calls the fn with QueryProcessedEvent -// when query is processed. 
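// Editor's note: a usage sketch (not part of the diff) for the hook
// registration method defined just below. Assumes db is an open *pg.DB and
// the log and time packages are imported.
func enableQueryLogging(db *pg.DB) {
	db.OnQueryProcessed(func(ev *pg.QueryProcessedEvent) {
		query, err := ev.FormattedQuery()
		if err != nil {
			return // the event is still delivered; just skip logging it
		}
		log.Printf("query %q took %s", query, time.Since(ev.StartTime))
	})
}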
-func (db *DB) OnQueryProcessed(fn func(*QueryProcessedEvent)) { - db.queryProcessedHooks = append(db.queryProcessedHooks, fn) -} - -func (db *DB) queryProcessed( - ormDB orm.DB, - start time.Time, - query interface{}, - params []interface{}, - attempt int, - res orm.Result, - err error, -) { - if len(db.queryProcessedHooks) == 0 { - return - } - - funcName, file, line := fileLine(2) - event := &QueryProcessedEvent{ - StartTime: start, - Func: funcName, - File: file, - Line: line, - - DB: ormDB, - Query: query, - Params: params, - Attempt: attempt, - Result: res, - Error: err, - } - for _, hook := range db.queryProcessedHooks { - hook(event) - } -} - -const packageName = "github.com/go-pg/pg" - -func fileLine(depth int) (string, string, int) { - for i := depth; ; i++ { - pc, file, line, ok := runtime.Caller(i) - if !ok { - break - } - if strings.Contains(file, packageName) { - continue - } - _, funcName := packageFuncName(pc) - return funcName, file, line - } - return "", "", 0 -} - -func packageFuncName(pc uintptr) (string, string) { - f := runtime.FuncForPC(pc) - if f == nil { - return "", "" - } - - packageName := "" - funcName := f.Name() - - if ind := strings.LastIndex(funcName, "/"); ind > 0 { - packageName += funcName[:ind+1] - funcName = funcName[ind+1:] - } - if ind := strings.Index(funcName, "."); ind > 0 { - packageName += funcName[:ind] - funcName = funcName[ind+1:] - } - - return packageName, funcName -} - -func copyQueryProcessedHooks(s []queryProcessedHook) []queryProcessedHook { - return s[:len(s):len(s)] -} diff --git a/vendor/github.com/go-pg/pg/internal/error.go b/vendor/github.com/go-pg/pg/internal/error.go deleted file mode 100644 index c6ff3d1..0000000 --- a/vendor/github.com/go-pg/pg/internal/error.go +++ /dev/null @@ -1,59 +0,0 @@ -package internal - -import ( - "fmt" -) - -var ErrNoRows = Errorf("pg: no rows in result set") -var ErrMultiRows = Errorf("pg: multiple rows in result set") - -type Error struct { - s string -} - -func Errorf(s string, args ...interface{}) Error { - return Error{s: fmt.Sprintf(s, args...)} -} - -func (err Error) Error() string { - return err.s -} - -type PGError struct { - m map[byte]string -} - -func NewPGError(m map[byte]string) PGError { - return PGError{ - m: m, - } -} - -func (err PGError) Field(k byte) string { - return err.m[k] -} - -func (err PGError) IntegrityViolation() bool { - switch err.Field('C') { - case "23000", "23001", "23502", "23503", "23505", "23514", "23P01": - return true - default: - return false - } -} - -func (err PGError) Error() string { - return fmt.Sprintf("%s #%s %s", - err.Field('S'), err.Field('C'), err.Field('M')) -} - -func AssertOneRow(l int) error { - switch { - case l == 0: - return ErrNoRows - case l > 1: - return ErrMultiRows - default: - return nil - } -} diff --git a/vendor/github.com/go-pg/pg/internal/internal.go b/vendor/github.com/go-pg/pg/internal/internal.go deleted file mode 100644 index ad3fc3c..0000000 --- a/vendor/github.com/go-pg/pg/internal/internal.go +++ /dev/null @@ -1,24 +0,0 @@ -package internal - -import ( - "math/rand" - "time" -) - -// Retry backoff with jitter sleep to prevent overloaded conditions during intervals -// https://www.awsarchitectureblog.com/2015/03/backoff.html -func RetryBackoff(retry int, minBackoff, maxBackoff time.Duration) time.Duration { - if retry < 0 { - retry = 0 - } - - backoff := minBackoff << uint(retry) - if backoff > maxBackoff || backoff < minBackoff { - backoff = maxBackoff - } - - if backoff == 0 { - return 0 - } - return 
time.Duration(rand.Int63n(int64(backoff))) -} diff --git a/vendor/github.com/go-pg/pg/internal/log.go b/vendor/github.com/go-pg/pg/internal/log.go deleted file mode 100644 index fd14222..0000000 --- a/vendor/github.com/go-pg/pg/internal/log.go +++ /dev/null @@ -1,15 +0,0 @@ -package internal - -import ( - "fmt" - "log" -) - -var Logger *log.Logger - -func Logf(s string, args ...interface{}) { - if Logger == nil { - return - } - Logger.Output(2, fmt.Sprintf(s, args...)) -} diff --git a/vendor/github.com/go-pg/pg/internal/parser/array_parser.go b/vendor/github.com/go-pg/pg/internal/parser/array_parser.go deleted file mode 100644 index 524d124..0000000 --- a/vendor/github.com/go-pg/pg/internal/parser/array_parser.go +++ /dev/null @@ -1,92 +0,0 @@ -package parser - -import ( - "bytes" - "fmt" -) - -type ArrayParser struct { - *Parser - - stickyErr error -} - -func NewArrayParser(b []byte) *ArrayParser { - var err error - if len(b) < 2 || b[0] != '{' || b[len(b)-1] != '}' { - err = fmt.Errorf("pg: can't parse array: %s", string(b)) - } else { - b = b[1 : len(b)-1] - } - return &ArrayParser{ - Parser: New(b), - - stickyErr: err, - } -} - -func (p *ArrayParser) NextElem() ([]byte, error) { - if p.stickyErr != nil { - return nil, p.stickyErr - } - - switch c := p.Peek(); c { - case '"': - p.Advance() - b := p.readSubstring() - - if p.Valid() { - if err := p.MustSkip(','); err != nil { - return nil, err - } - } - - return b, nil - case '{': - b := p.readSubArray() - if b != nil { - b = append(b, '}') - } - - if p.Valid() { - if err := p.MustSkip(','); err != nil { - return nil, err - } - } - - return b, nil - default: - b, _ := p.ReadSep(',') - if bytes.Equal(b, pgNull) { - b = nil - } - return b, nil - } -} - -func (p *ArrayParser) readSubArray() []byte { - var b []byte - for p.Valid() { - c := p.Read() - switch c { - case '"': - b = append(b, '"') - for { - bb, ok := p.ReadSep('"') - b = append(b, bb...) 
- stop := len(b) > 0 && b[len(b)-1] != '\\' - if ok { - b = append(b, '"') - } - if stop { - break - } - } - case '}': - return b - default: - b = append(b, c) - } - } - return b -} diff --git a/vendor/github.com/go-pg/pg/internal/parser/composite_parser.go b/vendor/github.com/go-pg/pg/internal/parser/composite_parser.go deleted file mode 100644 index 2b918ea..0000000 --- a/vendor/github.com/go-pg/pg/internal/parser/composite_parser.go +++ /dev/null @@ -1,53 +0,0 @@ -package parser - -import ( - "fmt" -) - -type CompositeParser struct { - *Parser - - stickyErr error -} - -func NewCompositeParser(b []byte) *CompositeParser { - var err error - if len(b) < 2 || b[0] != '(' || b[len(b)-1] != ')' { - err = fmt.Errorf("pg: can't parse composite value: %s", string(b)) - } else { - b = b[1 : len(b)-1] - } - return &CompositeParser{ - Parser: New(b), - - stickyErr: err, - } -} - -func (p *CompositeParser) NextElem() ([]byte, error) { - if p.stickyErr != nil { - return nil, p.stickyErr - } - - switch c := p.Peek(); c { - case '"': - b, err := p.ReadString() - if err != nil { - return nil, err - } - - if p.Valid() { - if err := p.MustSkip(','); err != nil { - return nil, err - } - } - - return b, nil - default: - b, _ := p.ReadSep(',') - if len(b) == 0 { // NULL - b = nil - } - return b, nil - } -} diff --git a/vendor/github.com/go-pg/pg/internal/parser/hstore_parser.go b/vendor/github.com/go-pg/pg/internal/parser/hstore_parser.go deleted file mode 100644 index f51561b..0000000 --- a/vendor/github.com/go-pg/pg/internal/parser/hstore_parser.go +++ /dev/null @@ -1,40 +0,0 @@ -package parser - -import "fmt" - -type HstoreParser struct { - *Parser -} - -func NewHstoreParser(b []byte) *HstoreParser { - return &HstoreParser{ - Parser: New(b), - } -} - -func (p *HstoreParser) NextKey() ([]byte, error) { - if p.Skip(',') { - p.Skip(' ') - } - - if !p.Skip('"') { - return nil, fmt.Errorf("pg: can't parse hstore key: %q", p.Bytes()) - } - - key := p.readSubstring() - if !(p.Skip('=') && p.Skip('>')) { - return nil, fmt.Errorf("pg: can't parse hstore key: %q", p.Bytes()) - } - - return key, nil -} - -func (p *HstoreParser) NextValue() ([]byte, error) { - if !p.Skip('"') { - return nil, fmt.Errorf("pg: can't parse hstore value: %q", p.Bytes()) - } - - value := p.readSubstring() - p.SkipBytes([]byte(", ")) - return value, nil -} diff --git a/vendor/github.com/go-pg/pg/internal/parser/parser.go b/vendor/github.com/go-pg/pg/internal/parser/parser.go deleted file mode 100644 index d94b7be..0000000 --- a/vendor/github.com/go-pg/pg/internal/parser/parser.go +++ /dev/null @@ -1,170 +0,0 @@ -package parser - -import ( - "bytes" - "fmt" - "strconv" - - "github.com/go-pg/pg/internal" -) - -type Parser struct { - b []byte -} - -func New(b []byte) *Parser { - return &Parser{ - b: b, - } -} - -func NewString(s string) *Parser { - return New(internal.StringToBytes(s)) -} - -func (p *Parser) Bytes() []byte { - return p.b -} - -func (p *Parser) Valid() bool { - return len(p.b) > 0 -} - -func (p *Parser) Read() byte { - if p.Valid() { - c := p.b[0] - p.Advance() - return c - } - return 0 -} - -func (p *Parser) Peek() byte { - if p.Valid() { - return p.b[0] - } - return 0 -} - -func (p *Parser) Advance() { - p.b = p.b[1:] -} - -func (p *Parser) Skip(c byte) bool { - if p.Peek() == c { - p.Advance() - return true - } - return false -} - -func (p *Parser) MustSkip(c byte) error { - if p.Skip(c) { - return nil - } - return fmt.Errorf("expecting '%c', got %q", c, p.Bytes()) -} - -func (p *Parser) SkipBytes(b []byte) bool { - if 
len(b) > len(p.b) { - return false - } - if !bytes.Equal(p.b[:len(b)], b) { - return false - } - p.b = p.b[len(b):] - return true -} - -func (p *Parser) ReadSep(c byte) ([]byte, bool) { - ind := bytes.IndexByte(p.b, c) - if ind == -1 { - b := p.b - p.b = p.b[len(p.b):] - return b, false - } - - b := p.b[:ind] - p.b = p.b[ind+1:] - return b, true -} - -func (p *Parser) ReadIdentifier() (s string, numeric bool) { - end := len(p.b) - numeric = true - for i, ch := range p.b { - if isNum(ch) { - continue - } - if isAlpha(ch) || (i > 0 && ch == '_') { - numeric = false - continue - } - end = i - break - } - if end == 0 { - return "", false - } - b := p.b[:end] - p.b = p.b[end:] - return internal.BytesToString(b), numeric -} - -func (p *Parser) ReadNumber() int { - end := len(p.b) - for i, ch := range p.b { - if !isNum(ch) { - end = i - break - } - } - if end <= 0 { - return 0 - } - n, _ := strconv.Atoi(string(p.b[:end])) - p.b = p.b[end:] - return n -} - -func (p *Parser) ReadString() ([]byte, error) { - quote := p.Read() - b, ok := p.ReadSep(quote) - if !ok { - return nil, fmt.Errorf("can't find closing quote") - } - return b, nil -} - -func (p *Parser) readSubstring() []byte { - var b []byte - for p.Valid() { - c := p.Read() - switch c { - case '\\': - switch p.Peek() { - case '\\': - b = append(b, '\\') - p.Advance() - case '"': - b = append(b, '"') - p.Advance() - default: - b = append(b, c) - } - case '\'': - switch p.Peek() { - case '\'': - b = append(b, '\'') - p.Advance() - default: - b = append(b, c) - } - case '"': - return b - default: - b = append(b, c) - } - } - return b -} diff --git a/vendor/github.com/go-pg/pg/internal/parser/util.go b/vendor/github.com/go-pg/pg/internal/parser/util.go deleted file mode 100644 index 23844e3..0000000 --- a/vendor/github.com/go-pg/pg/internal/parser/util.go +++ /dev/null @@ -1,15 +0,0 @@ -package parser - -var pgNull = []byte("NULL") - -func isNum(c byte) bool { - return c >= '0' && c <= '9' -} - -func isAlpha(c byte) bool { - return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') -} - -func isAlnum(c byte) bool { - return isAlpha(c) || isNum(c) -} diff --git a/vendor/github.com/go-pg/pg/internal/pool/conn.go b/vendor/github.com/go-pg/pg/internal/pool/conn.go deleted file mode 100644 index b1900ee..0000000 --- a/vendor/github.com/go-pg/pg/internal/pool/conn.go +++ /dev/null @@ -1,129 +0,0 @@ -package pool - -import ( - "net" - "strconv" - "sync/atomic" - "time" -) - -var noDeadline = time.Time{} - -type Conn struct { - netConn net.Conn - - buf []byte - rd *Reader - rdLocked bool - wb *WriteBuffer - - InitedAt time.Time - pooled bool - usedAt atomic.Value - - ProcessId int32 - SecretKey int32 - - _lastId int64 -} - -func NewConn(netConn net.Conn) *Conn { - cn := &Conn{ - buf: makeBuffer(), - rd: NewReader(NewElasticBufReader(netConn)), - wb: NewWriteBuffer(), - } - cn.SetNetConn(netConn) - cn.SetUsedAt(time.Now()) - return cn -} - -func (cn *Conn) UsedAt() time.Time { - return cn.usedAt.Load().(time.Time) -} - -func (cn *Conn) SetUsedAt(tm time.Time) { - cn.usedAt.Store(tm) -} - -func (cn *Conn) RemoteAddr() net.Addr { - return cn.netConn.RemoteAddr() -} - -func (cn *Conn) SetNetConn(netConn net.Conn) { - cn.netConn = netConn - cn.rd.Reset(netConn) -} - -func (cn *Conn) NetConn() net.Conn { - return cn.netConn -} - -func (cn *Conn) NextId() string { - cn._lastId++ - return strconv.FormatInt(cn._lastId, 10) -} - -func (cn *Conn) setReadTimeout(timeout time.Duration) error { - now := time.Now() - cn.SetUsedAt(now) - if timeout > 0 { - return 
cn.netConn.SetReadDeadline(now.Add(timeout)) - } - return cn.netConn.SetReadDeadline(noDeadline) -} - -func (cn *Conn) setWriteTimeout(timeout time.Duration) error { - now := time.Now() - cn.SetUsedAt(now) - if timeout > 0 { - return cn.netConn.SetWriteDeadline(now.Add(timeout)) - } - return cn.netConn.SetWriteDeadline(noDeadline) -} - -func (cn *Conn) LockReaderBuffer() { - cn.rdLocked = true - cn.rd.ResetBuffer(makeBuffer()) -} - -func (cn *Conn) WithReader(timeout time.Duration, fn func(rd *Reader) error) error { - _ = cn.setReadTimeout(timeout) - - if !cn.rdLocked { - cn.rd.ResetBuffer(cn.buf) - } - - err := fn(cn.rd) - - if !cn.rdLocked { - cn.buf = cn.rd.Buffer() - } - - return err -} - -func (cn *Conn) WithWriter(timeout time.Duration, fn func(wb *WriteBuffer) error) error { - _ = cn.setWriteTimeout(timeout) - - cn.wb.ResetBuffer(cn.buf) - - firstErr := fn(cn.wb) - - _, err := cn.netConn.Write(cn.wb.Bytes) - cn.buf = cn.wb.Buffer() - if err != nil && firstErr == nil { - firstErr = err - } - - return firstErr -} - -func (cn *Conn) Close() error { - return cn.netConn.Close() -} - -func makeBuffer() []byte { - const defaulBufSize = 4096 - return make([]byte, defaulBufSize) -} diff --git a/vendor/github.com/go-pg/pg/internal/pool/elastic_reader.go b/vendor/github.com/go-pg/pg/internal/pool/elastic_reader.go deleted file mode 100644 index b4f8a6e..0000000 --- a/vendor/github.com/go-pg/pg/internal/pool/elastic_reader.go +++ /dev/null @@ -1,185 +0,0 @@ -package pool - -import ( - "bytes" - "errors" - "io" -) - -const defaultBufSize = 4096 - -// ElasticBufReader is like bufio.Reader but instead of returning ErrBufferFull -// it automatically grows the buffer. -type ElasticBufReader struct { - buf []byte - rd io.Reader // reader provided by the client - r, w int // buf read and write positions - err error -} - -func NewElasticBufReader(rd io.Reader) *ElasticBufReader { - return &ElasticBufReader{ - rd: rd, - } -} - -func (b *ElasticBufReader) Reset(rd io.Reader) { - b.rd = rd - b.r, b.w = 0, 0 - b.err = nil -} - -func (b *ElasticBufReader) Buffer() []byte { - return b.buf -} - -func (b *ElasticBufReader) ResetBuffer(buf []byte) { - b.buf = buf - b.r, b.w = 0, 0 - b.err = nil -} - -// Buffered returns the number of bytes that can be read from the current buffer. -func (b *ElasticBufReader) Buffered() int { - return b.w - b.r -} - -func (b *ElasticBufReader) Bytes() []byte { - return b.buf[b.r:b.w] -} - -var errNegativeRead = errors.New("bufio: reader returned negative count from Read") - -// fill reads a new chunk into the buffer. -func (b *ElasticBufReader) fill() { - // Slide existing data to beginning. - if b.r > 0 { - copy(b.buf, b.buf[b.r:b.w]) - b.w -= b.r - b.r = 0 - } - - if b.w >= len(b.buf) { - panic("bufio: tried to fill full buffer") - } - - // Read new data: try a limited number of times. - const maxConsecutiveEmptyReads = 100 - for i := maxConsecutiveEmptyReads; i > 0; i-- { - n, err := b.rd.Read(b.buf[b.w:]) - if n < 0 { - panic(errNegativeRead) - } - b.w += n - if err != nil { - b.err = err - return - } - if n > 0 { - return - } - } - b.err = io.ErrNoProgress -} - -func (b *ElasticBufReader) readErr() error { - err := b.err - b.err = nil - return err -} - -func (b *ElasticBufReader) ReadSlice(delim byte) (line []byte, err error) { - for { - // Search buffer. - if i := bytes.IndexByte(b.buf[b.r:b.w], delim); i >= 0 { - line = b.buf[b.r : b.r+i+1] - b.r += i + 1 - break - } - - // Pending error? 
- if b.err != nil { - line = b.buf[b.r:b.w] - b.r = b.w - err = b.readErr() - break - } - - // Buffer full? - if b.Buffered() >= len(b.buf) { - b.grow(len(b.buf) + defaultBufSize) - } - - b.fill() // buffer is not full - } - - return -} - -func (b *ElasticBufReader) ReadLine() (line []byte, err error) { - line, err = b.ReadSlice('\n') - if len(line) == 0 { - if err != nil { - line = nil - } - return - } - err = nil - - if line[len(line)-1] == '\n' { - drop := 1 - if len(line) > 1 && line[len(line)-2] == '\r' { - drop = 2 - } - line = line[:len(line)-drop] - } - return -} - -func (b *ElasticBufReader) ReadByte() (byte, error) { - for b.r == b.w { - if b.err != nil { - return 0, b.readErr() - } - b.fill() // buffer is empty - } - c := b.buf[b.r] - b.r++ - return c, nil -} - -func (b *ElasticBufReader) ReadN(n int) ([]byte, error) { - b.grow(n) - for b.Buffered() < n { - // Pending error? - if b.err != nil { - buf := b.buf[b.r:b.w] - b.r = b.w - return buf, b.readErr() - } - - b.fill() - } - - buf := b.buf[b.r : b.r+n] - b.r += n - return buf, nil -} - -func (b *ElasticBufReader) grow(n int) { - if b.w-b.r >= n { - return - } - - // Slide existing data to beginning. - if b.r > 0 { - copy(b.buf, b.buf[b.r:b.w]) - b.w -= b.r - b.r = 0 - } - - // Extend buffer if needed. - if d := n - len(b.buf); d > 0 { - b.buf = append(b.buf, make([]byte, d)...) - } -} diff --git a/vendor/github.com/go-pg/pg/internal/pool/pool.go b/vendor/github.com/go-pg/pg/internal/pool/pool.go deleted file mode 100644 index 4bdd1e0..0000000 --- a/vendor/github.com/go-pg/pg/internal/pool/pool.go +++ /dev/null @@ -1,476 +0,0 @@ -package pool - -import ( - "errors" - "net" - "sync" - "sync/atomic" - "time" - - "github.com/go-pg/pg/internal" -) - -var ErrClosed = errors.New("pg: database is closed") -var ErrPoolTimeout = errors.New("pg: connection pool timeout") - -var timers = sync.Pool{ - New: func() interface{} { - t := time.NewTimer(time.Hour) - t.Stop() - return t - }, -} - -// Stats contains pool state information and accumulated stats. 
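// Editor's note: illustration only (not part of the diff). This pool lives in
// an internal package, so applications cannot import it directly; the sketch
// below shows how the Stats snapshot defined next could be consumed if the
// code lived alongside the pool (log import assumed).
func logPoolStats(p Pooler) {
	s := p.Stats()
	if s == nil { // e.g. SingleConnPool returns no stats
		return
	}
	log.Printf("pool: hits=%d misses=%d timeouts=%d total=%d idle=%d stale=%d",
		s.Hits, s.Misses, s.Timeouts, s.TotalConns, s.IdleConns, s.StaleConns)
}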
-type Stats struct { - Hits uint32 // number of times free connection was found in the pool - Misses uint32 // number of times free connection was NOT found in the pool - Timeouts uint32 // number of times a wait timeout occurred - - TotalConns uint32 // number of total connections in the pool - IdleConns uint32 // number of idle connections in the pool - StaleConns uint32 // number of stale connections removed from the pool -} - -type Pooler interface { - NewConn() (*Conn, error) - CloseConn(*Conn) error - - Get() (*Conn, error) - Put(*Conn) - Remove(*Conn) - - Len() int - IdleLen() int - Stats() *Stats - - Close() error -} - -type Options struct { - Dialer func() (net.Conn, error) - OnClose func(*Conn) error - - PoolSize int - MinIdleConns int - MaxConnAge time.Duration - PoolTimeout time.Duration - IdleTimeout time.Duration - IdleCheckFrequency time.Duration -} - -type ConnPool struct { - opt *Options - - dialErrorsNum uint32 // atomic - - lastDialErrorMu sync.RWMutex - lastDialError error - - queue chan struct{} - - connsMu sync.Mutex - conns []*Conn - idleConns []*Conn - poolSize int - idleConnsLen int - - stats Stats - - _closed uint32 // atomic -} - -var _ Pooler = (*ConnPool)(nil) - -func NewConnPool(opt *Options) *ConnPool { - p := &ConnPool{ - opt: opt, - - queue: make(chan struct{}, opt.PoolSize), - conns: make([]*Conn, 0, opt.PoolSize), - idleConns: make([]*Conn, 0, opt.PoolSize), - } - - for i := 0; i < opt.MinIdleConns; i++ { - p.checkMinIdleConns() - } - - if opt.IdleTimeout > 0 && opt.IdleCheckFrequency > 0 { - go p.reaper(opt.IdleCheckFrequency) - } - - return p -} - -func (p *ConnPool) checkMinIdleConns() { - if p.opt.MinIdleConns == 0 { - return - } - if p.poolSize < p.opt.PoolSize && p.idleConnsLen < p.opt.MinIdleConns { - p.poolSize++ - p.idleConnsLen++ - go p.addIdleConn() - } -} - -func (p *ConnPool) addIdleConn() { - cn, err := p.newConn(true) - if err != nil { - return - } - - p.connsMu.Lock() - p.conns = append(p.conns, cn) - p.idleConns = append(p.idleConns, cn) - p.connsMu.Unlock() -} - -func (p *ConnPool) NewConn() (*Conn, error) { - return p._NewConn(false) -} - -func (p *ConnPool) _NewConn(pooled bool) (*Conn, error) { - cn, err := p.newConn(pooled) - if err != nil { - return nil, err - } - - p.connsMu.Lock() - p.conns = append(p.conns, cn) - if pooled { - if p.poolSize < p.opt.PoolSize { - p.poolSize++ - } else { - cn.pooled = false - } - } - p.connsMu.Unlock() - return cn, nil -} - -func (p *ConnPool) newConn(pooled bool) (*Conn, error) { - if p.closed() { - return nil, ErrClosed - } - - if atomic.LoadUint32(&p.dialErrorsNum) >= uint32(p.opt.PoolSize) { - return nil, p.getLastDialError() - } - - netConn, err := p.opt.Dialer() - if err != nil { - p.setLastDialError(err) - if atomic.AddUint32(&p.dialErrorsNum, 1) == uint32(p.opt.PoolSize) { - go p.tryDial() - } - return nil, err - } - - cn := NewConn(netConn) - cn.pooled = pooled - return cn, nil -} - -func (p *ConnPool) tryDial() { - for { - if p.closed() { - return - } - - conn, err := p.opt.Dialer() - if err != nil { - p.setLastDialError(err) - time.Sleep(time.Second) - continue - } - - atomic.StoreUint32(&p.dialErrorsNum, 0) - _ = conn.Close() - return - } -} - -func (p *ConnPool) setLastDialError(err error) { - p.lastDialErrorMu.Lock() - p.lastDialError = err - p.lastDialErrorMu.Unlock() -} - -func (p *ConnPool) getLastDialError() error { - p.lastDialErrorMu.RLock() - err := p.lastDialError - p.lastDialErrorMu.RUnlock() - return err -} - -// Get returns existed connection from the pool or creates a new 
one. -func (p *ConnPool) Get() (*Conn, error) { - if p.closed() { - return nil, ErrClosed - } - - err := p.waitTurn() - if err != nil { - return nil, err - } - - for { - p.connsMu.Lock() - cn := p.popIdle() - p.connsMu.Unlock() - - if cn == nil { - break - } - - if p.isStaleConn(cn) { - _ = p.CloseConn(cn) - continue - } - - atomic.AddUint32(&p.stats.Hits, 1) - return cn, nil - } - - atomic.AddUint32(&p.stats.Misses, 1) - - newcn, err := p._NewConn(true) - if err != nil { - p.freeTurn() - return nil, err - } - - return newcn, nil -} - -func (p *ConnPool) getTurn() { - p.queue <- struct{}{} -} - -func (p *ConnPool) waitTurn() error { - select { - case p.queue <- struct{}{}: - return nil - default: - timer := timers.Get().(*time.Timer) - timer.Reset(p.opt.PoolTimeout) - - select { - case p.queue <- struct{}{}: - if !timer.Stop() { - <-timer.C - } - timers.Put(timer) - return nil - case <-timer.C: - timers.Put(timer) - atomic.AddUint32(&p.stats.Timeouts, 1) - return ErrPoolTimeout - } - } -} - -func (p *ConnPool) freeTurn() { - <-p.queue -} - -func (p *ConnPool) popIdle() *Conn { - if len(p.idleConns) == 0 { - return nil - } - - idx := len(p.idleConns) - 1 - cn := p.idleConns[idx] - p.idleConns = p.idleConns[:idx] - p.idleConnsLen-- - p.checkMinIdleConns() - return cn -} - -func (p *ConnPool) Put(cn *Conn) { - if !cn.pooled { - p.Remove(cn) - return - } - - p.connsMu.Lock() - p.idleConns = append(p.idleConns, cn) - p.idleConnsLen++ - p.connsMu.Unlock() - p.freeTurn() -} - -func (p *ConnPool) Remove(cn *Conn) { - p.removeConn(cn) - p.freeTurn() - _ = p.closeConn(cn) -} - -func (p *ConnPool) CloseConn(cn *Conn) error { - p.removeConn(cn) - return p.closeConn(cn) -} - -func (p *ConnPool) removeConn(cn *Conn) { - p.connsMu.Lock() - for i, c := range p.conns { - if c == cn { - p.conns = append(p.conns[:i], p.conns[i+1:]...) - if cn.pooled { - p.poolSize-- - p.checkMinIdleConns() - } - break - } - } - p.connsMu.Unlock() -} - -func (p *ConnPool) closeConn(cn *Conn) error { - if p.opt.OnClose != nil { - _ = p.opt.OnClose(cn) - } - return cn.Close() -} - -// Len returns total number of connections. -func (p *ConnPool) Len() int { - p.connsMu.Lock() - n := len(p.conns) - p.connsMu.Unlock() - return n -} - -// IdleLen returns number of idle connections. 
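// Editor's note: a standalone sketch (not part of the diff) of the pattern
// that waitTurn/freeTurn above implement: a buffered channel used as a
// counting semaphore with a timed wait. The real pool also recycles timers
// through a sync.Pool instead of calling time.After, avoiding an allocation
// per wait; this simplified version trades that optimization for brevity.
package main

import (
	"errors"
	"fmt"
	"time"
)

var errPoolTimeout = errors.New("pool timeout")

// semaphore is a buffered channel whose capacity bounds concurrency.
type semaphore chan struct{}

// acquire takes a slot or gives up after timeout, like waitTurn.
func (s semaphore) acquire(timeout time.Duration) error {
	select {
	case s <- struct{}{}: // a slot was free
		return nil
	case <-time.After(timeout):
		return errPoolTimeout
	}
}

// release frees a slot, like freeTurn.
func (s semaphore) release() { <-s }

func main() {
	sem := make(semaphore, 2) // pool size of 2
	if err := sem.acquire(time.Second); err != nil {
		fmt.Println(err)
		return
	}
	defer sem.release()
	fmt.Println("holding one of two connection slots")
}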
-func (p *ConnPool) IdleLen() int { - p.connsMu.Lock() - n := p.idleConnsLen - p.connsMu.Unlock() - return n -} - -func (p *ConnPool) Stats() *Stats { - idleLen := p.IdleLen() - return &Stats{ - Hits: atomic.LoadUint32(&p.stats.Hits), - Misses: atomic.LoadUint32(&p.stats.Misses), - Timeouts: atomic.LoadUint32(&p.stats.Timeouts), - - TotalConns: uint32(p.Len()), - IdleConns: uint32(idleLen), - StaleConns: atomic.LoadUint32(&p.stats.StaleConns), - } -} - -func (p *ConnPool) closed() bool { - return atomic.LoadUint32(&p._closed) == 1 -} - -func (p *ConnPool) Filter(fn func(*Conn) bool) error { - var firstErr error - p.connsMu.Lock() - for _, cn := range p.conns { - if fn(cn) { - if err := p.closeConn(cn); err != nil && firstErr == nil { - firstErr = err - } - } - } - p.connsMu.Unlock() - return firstErr -} - -func (p *ConnPool) Close() error { - if !atomic.CompareAndSwapUint32(&p._closed, 0, 1) { - return ErrClosed - } - - var firstErr error - p.connsMu.Lock() - for _, cn := range p.conns { - if err := p.closeConn(cn); err != nil && firstErr == nil { - firstErr = err - } - } - p.conns = nil - p.poolSize = 0 - p.idleConns = nil - p.idleConnsLen = 0 - p.connsMu.Unlock() - - return firstErr -} - -func (p *ConnPool) reapStaleConn() *Conn { - if len(p.idleConns) == 0 { - return nil - } - - cn := p.idleConns[0] - if !p.isStaleConn(cn) { - return nil - } - - p.idleConns = append(p.idleConns[:0], p.idleConns[1:]...) - p.idleConnsLen-- - - return cn -} - -func (p *ConnPool) ReapStaleConns() (int, error) { - var n int - for { - p.getTurn() - - p.connsMu.Lock() - cn := p.reapStaleConn() - p.connsMu.Unlock() - - if cn != nil { - p.removeConn(cn) - } - - p.freeTurn() - - if cn != nil { - p.closeConn(cn) - n++ - } else { - break - } - } - return n, nil -} - -func (p *ConnPool) reaper(frequency time.Duration) { - ticker := time.NewTicker(frequency) - defer ticker.Stop() - - for range ticker.C { - if p.closed() { - break - } - n, err := p.ReapStaleConns() - if err != nil { - internal.Logf("ReapStaleConns failed: %s", err) - continue - } - atomic.AddUint32(&p.stats.StaleConns, uint32(n)) - } -} - -func (p *ConnPool) isStaleConn(cn *Conn) bool { - if p.opt.IdleTimeout == 0 && p.opt.MaxConnAge == 0 { - return false - } - - now := time.Now() - if p.opt.IdleTimeout > 0 && now.Sub(cn.UsedAt()) >= p.opt.IdleTimeout { - return true - } - if p.opt.MaxConnAge > 0 && now.Sub(cn.InitedAt) >= p.opt.MaxConnAge { - return true - } - - return false -} diff --git a/vendor/github.com/go-pg/pg/internal/pool/pool_single.go b/vendor/github.com/go-pg/pg/internal/pool/pool_single.go deleted file mode 100644 index b35b78a..0000000 --- a/vendor/github.com/go-pg/pg/internal/pool/pool_single.go +++ /dev/null @@ -1,53 +0,0 @@ -package pool - -type SingleConnPool struct { - cn *Conn -} - -var _ Pooler = (*SingleConnPool)(nil) - -func NewSingleConnPool(cn *Conn) *SingleConnPool { - return &SingleConnPool{ - cn: cn, - } -} - -func (p *SingleConnPool) NewConn() (*Conn, error) { - panic("not implemented") -} - -func (p *SingleConnPool) CloseConn(*Conn) error { - panic("not implemented") -} - -func (p *SingleConnPool) Get() (*Conn, error) { - return p.cn, nil -} - -func (p *SingleConnPool) Put(cn *Conn) { - if p.cn != cn { - panic("p.cn != cn") - } -} - -func (p *SingleConnPool) Remove(cn *Conn) { - if p.cn != cn { - panic("p.cn != cn") - } -} - -func (p *SingleConnPool) Len() int { - return 1 -} - -func (p *SingleConnPool) IdleLen() int { - return 0 -} - -func (p *SingleConnPool) Stats() *Stats { - return nil -} - -func (p *SingleConnPool) 
Close() error { - return nil -} diff --git a/vendor/github.com/go-pg/pg/internal/pool/reader.go b/vendor/github.com/go-pg/pg/internal/pool/reader.go deleted file mode 100644 index 9eedd47..0000000 --- a/vendor/github.com/go-pg/pg/internal/pool/reader.go +++ /dev/null @@ -1,71 +0,0 @@ -package pool - -import ( - "encoding/binary" - - "github.com/go-pg/pg/internal" -) - -type Reader struct { - *ElasticBufReader - Columns [][]byte -} - -func NewReader(buf *ElasticBufReader) *Reader { - return &Reader{ElasticBufReader: buf} -} - -func (rd *Reader) ReadInt16() (int16, error) { - b, err := rd.ReadN(2) - if err != nil { - return 0, err - } - return int16(binary.BigEndian.Uint16(b)), nil -} - -func (rd *Reader) ReadInt32() (int32, error) { - b, err := rd.ReadN(4) - if err != nil { - return 0, err - } - return int32(binary.BigEndian.Uint32(b)), nil -} - -func (rd *Reader) ReadString() (string, error) { - b, err := rd.ReadSlice(0) - if err != nil { - return "", err - } - return string(b[:len(b)-1]), nil -} - -func (rd *Reader) ReadError() (error, error) { - m := make(map[byte]string) - for { - c, err := rd.ReadByte() - if err != nil { - return nil, err - } - if c == 0 { - break - } - s, err := rd.ReadString() - if err != nil { - return nil, err - } - m[c] = s - } - return internal.NewPGError(m), nil -} - -func (rd *Reader) ReadMessageType() (byte, int, error) { - c, err := rd.ReadByte() - if err != nil { - return 0, 0, err - } - l, err := rd.ReadInt32() - if err != nil { - return 0, 0, err - } - return c, int(l) - 4, nil -} diff --git a/vendor/github.com/go-pg/pg/internal/pool/write_buffer.go b/vendor/github.com/go-pg/pg/internal/pool/write_buffer.go deleted file mode 100644 index 71eb753..0000000 --- a/vendor/github.com/go-pg/pg/internal/pool/write_buffer.go +++ /dev/null @@ -1,96 +0,0 @@ -package pool - -import ( - "encoding/binary" - "io" -) - -type WriteBuffer struct { - Bytes []byte - - msgStart, paramStart int -} - -func NewWriteBuffer() *WriteBuffer { - return new(WriteBuffer) -} - -func (buf *WriteBuffer) Buffer() []byte { - return buf.Bytes[:cap(buf.Bytes)] -} - -func (buf *WriteBuffer) Reset() { - buf.Bytes = buf.Bytes[:0] -} - -func (buf *WriteBuffer) ResetBuffer(b []byte) { - buf.Bytes = b[:0] -} - -func (buf *WriteBuffer) StartMessage(c byte) { - if c == 0 { - buf.msgStart = len(buf.Bytes) - buf.Bytes = append(buf.Bytes, 0, 0, 0, 0) - } else { - buf.msgStart = len(buf.Bytes) + 1 - buf.Bytes = append(buf.Bytes, c, 0, 0, 0, 0) - } -} - -func (buf *WriteBuffer) FinishMessage() { - binary.BigEndian.PutUint32( - buf.Bytes[buf.msgStart:], uint32(len(buf.Bytes)-buf.msgStart)) -} - -func (buf *WriteBuffer) StartParam() { - buf.paramStart = len(buf.Bytes) - buf.Bytes = append(buf.Bytes, 0, 0, 0, 0) -} - -func (buf *WriteBuffer) FinishParam() { - binary.BigEndian.PutUint32( - buf.Bytes[buf.paramStart:], uint32(len(buf.Bytes)-buf.paramStart-4)) -} - -var nullParamLength = int32(-1) - -func (buf *WriteBuffer) FinishNullParam() { - binary.BigEndian.PutUint32( - buf.Bytes[buf.paramStart:], uint32(nullParamLength)) -} - -func (buf *WriteBuffer) Write(b []byte) (int, error) { - buf.Bytes = append(buf.Bytes, b...) 
- return len(b), nil -} - -func (buf *WriteBuffer) WriteInt16(num int16) { - buf.Bytes = append(buf.Bytes, 0, 0) - binary.BigEndian.PutUint16(buf.Bytes[len(buf.Bytes)-2:], uint16(num)) -} - -func (buf *WriteBuffer) WriteInt32(num int32) { - buf.Bytes = append(buf.Bytes, 0, 0, 0, 0) - binary.BigEndian.PutUint32(buf.Bytes[len(buf.Bytes)-4:], uint32(num)) -} - -func (buf *WriteBuffer) WriteString(s string) { - buf.Bytes = append(buf.Bytes, s...) - buf.Bytes = append(buf.Bytes, 0) -} - -func (buf *WriteBuffer) WriteBytes(b []byte) { - buf.Bytes = append(buf.Bytes, b...) - buf.Bytes = append(buf.Bytes, 0) -} - -func (buf *WriteBuffer) WriteByte(c byte) error { - buf.Bytes = append(buf.Bytes, c) - return nil -} - -func (buf *WriteBuffer) ReadFrom(r io.Reader) (int64, error) { - n, err := r.Read(buf.Bytes[len(buf.Bytes):cap(buf.Bytes)]) - buf.Bytes = buf.Bytes[:len(buf.Bytes)+int(n)] - return int64(n), err -} diff --git a/vendor/github.com/go-pg/pg/internal/safe.go b/vendor/github.com/go-pg/pg/internal/safe.go deleted file mode 100644 index 870fe54..0000000 --- a/vendor/github.com/go-pg/pg/internal/safe.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build appengine - -package internal - -func BytesToString(b []byte) string { - return string(b) -} - -func StringToBytes(s string) []byte { - return []byte(s) -} diff --git a/vendor/github.com/go-pg/pg/internal/strconv.go b/vendor/github.com/go-pg/pg/internal/strconv.go deleted file mode 100644 index 9e42ffb..0000000 --- a/vendor/github.com/go-pg/pg/internal/strconv.go +++ /dev/null @@ -1,19 +0,0 @@ -package internal - -import "strconv" - -func Atoi(b []byte) (int, error) { - return strconv.Atoi(BytesToString(b)) -} - -func ParseInt(b []byte, base int, bitSize int) (int64, error) { - return strconv.ParseInt(BytesToString(b), base, bitSize) -} - -func ParseUint(b []byte, base int, bitSize int) (uint64, error) { - return strconv.ParseUint(BytesToString(b), base, bitSize) -} - -func ParseFloat(b []byte, bitSize int) (float64, error) { - return strconv.ParseFloat(BytesToString(b), bitSize) -} diff --git a/vendor/github.com/go-pg/pg/internal/underscore.go b/vendor/github.com/go-pg/pg/internal/underscore.go deleted file mode 100644 index e71c117..0000000 --- a/vendor/github.com/go-pg/pg/internal/underscore.go +++ /dev/null @@ -1,93 +0,0 @@ -package internal - -func IsUpper(c byte) bool { - return c >= 'A' && c <= 'Z' -} - -func IsLower(c byte) bool { - return c >= 'a' && c <= 'z' -} - -func ToUpper(c byte) byte { - return c - 32 -} - -func ToLower(c byte) byte { - return c + 32 -} - -// Underscore converts "CamelCasedString" to "camel_cased_string". 
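// Editor's note: behavior illustration (not part of the diff) for the name
// mapping helpers defined below. The internal package cannot be imported by
// outside code, so treat this as if it lived in the same package (fmt import
// assumed).
func nameMappingExamples() {
	fmt.Println(Underscore("CamelCasedString"))   // camel_cased_string
	fmt.Println(CamelCased("camel_cased_string")) // CamelCasedString
	fmt.Println(ToExported("userId"))             // UserId
}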
-func Underscore(s string) string { - r := make([]byte, 0, len(s)+5) - for i := 0; i < len(s); i++ { - c := s[i] - if IsUpper(c) { - if i > 0 && i+1 < len(s) && (IsLower(s[i-1]) || IsLower(s[i+1])) { - r = append(r, '_', ToLower(c)) - } else { - r = append(r, ToLower(c)) - } - } else { - r = append(r, c) - } - } - return string(r) -} - -func CamelCased(s string) string { - r := make([]byte, 0, len(s)) - upperNext := true - for i := 0; i < len(s); i++ { - c := s[i] - if c == '_' { - upperNext = true - continue - } - if upperNext { - if IsLower(c) { - c = ToUpper(c) - } - upperNext = false - } - r = append(r, c) - } - return string(r) -} - -func ToExported(s string) string { - if len(s) == 0 { - return s - } - if c := s[0]; IsLower(c) { - b := []byte(s) - b[0] = ToUpper(c) - return string(b) - } - return s -} - -func UpperString(s string) string { - if isUpperString(s) { - return s - } - - b := make([]byte, len(s)) - for i := range b { - c := s[i] - if IsLower(c) { - c = ToUpper(c) - } - b[i] = c - } - return string(b) -} - -func isUpperString(s string) bool { - for i := 0; i < len(s); i++ { - c := s[i] - if IsLower(c) { - return false - } - } - return true -} diff --git a/vendor/github.com/go-pg/pg/internal/unsafe.go b/vendor/github.com/go-pg/pg/internal/unsafe.go deleted file mode 100644 index f8bc18d..0000000 --- a/vendor/github.com/go-pg/pg/internal/unsafe.go +++ /dev/null @@ -1,22 +0,0 @@ -// +build !appengine - -package internal - -import ( - "unsafe" -) - -// BytesToString converts byte slice to string. -func BytesToString(b []byte) string { - return *(*string)(unsafe.Pointer(&b)) -} - -// StringToBytes converts string to byte slice. -func StringToBytes(s string) []byte { - return *(*[]byte)(unsafe.Pointer( - &struct { - string - Cap int - }{s, len(s)}, - )) -} diff --git a/vendor/github.com/go-pg/pg/internal/util.go b/vendor/github.com/go-pg/pg/internal/util.go deleted file mode 100644 index 12ec119..0000000 --- a/vendor/github.com/go-pg/pg/internal/util.go +++ /dev/null @@ -1,36 +0,0 @@ -package internal - -import "reflect" - -func MakeSliceNextElemFunc(v reflect.Value) func() reflect.Value { - elemType := v.Type().Elem() - - if elemType.Kind() == reflect.Ptr { - elemType = elemType.Elem() - return func() reflect.Value { - if v.Len() < v.Cap() { - v.Set(v.Slice(0, v.Len()+1)) - elem := v.Index(v.Len() - 1) - if elem.IsNil() { - elem.Set(reflect.New(elemType)) - } - return elem.Elem() - } - - elem := reflect.New(elemType) - v.Set(reflect.Append(v, elem)) - return elem.Elem() - } - } - - zero := reflect.Zero(elemType) - return func() reflect.Value { - if v.Len() < v.Cap() { - v.Set(v.Slice(0, v.Len()+1)) - return v.Index(v.Len() - 1) - } - - v.Set(reflect.Append(v, zero)) - return v.Index(v.Len() - 1) - } -} diff --git a/vendor/github.com/go-pg/pg/listener.go b/vendor/github.com/go-pg/pg/listener.go deleted file mode 100644 index 2eade2b..0000000 --- a/vendor/github.com/go-pg/pg/listener.go +++ /dev/null @@ -1,316 +0,0 @@ -package pg - -import ( - "errors" - "sync" - "time" - - "github.com/go-pg/pg/internal" - "github.com/go-pg/pg/internal/pool" - "github.com/go-pg/pg/types" -) - -const gopgChannel = "gopg:ping" - -var errListenerClosed = errors.New("pg: listener is closed") - -// A notification received with LISTEN command. -type Notification struct { - Channel string - Payload string -} - -// Listener listens for notifications sent with NOTIFY command. -// It's NOT safe for concurrent use by multiple goroutines -// except the Channel API. 
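// Editor's note: a usage sketch (not part of the diff) for the listener type
// defined just below, going through the Channel API, the one entry point that
// is safe for concurrent use. Assumes db is an open *pg.DB and fmt is
// imported.
func consumeNotifications(db *pg.DB) {
	ln := db.Listen("mychannel")
	defer ln.Close()

	for n := range ln.Channel() {
		fmt.Printf("channel=%s payload=%s\n", n.Channel, n.Payload)
	}
}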
-type Listener struct { - db *DB - - channels []string - - mu sync.Mutex - cn *pool.Conn - closed bool - exit chan struct{} - - chOnce sync.Once - ch chan *Notification - pingCh chan struct{} -} - -func (ln *Listener) init() { - ln.exit = make(chan struct{}) -} - -func (ln *Listener) conn() (*pool.Conn, error) { - ln.mu.Lock() - cn, err := ln._conn() - ln.mu.Unlock() - - switch err { - case nil: - return cn, nil - case errListenerClosed: - return nil, err - case pool.ErrClosed: - _ = ln.Close() - return nil, errListenerClosed - default: - internal.Logf("pg: Listen failed: %s", err) - return nil, err - } -} - -func (ln *Listener) _conn() (*pool.Conn, error) { - if ln.closed { - return nil, errListenerClosed - } - - if ln.cn != nil { - return ln.cn, nil - } - - cn, err := ln.db.pool.NewConn() - if err != nil { - return nil, err - } - cn.LockReaderBuffer() - - if cn.InitedAt.IsZero() { - err := ln.db.initConn(cn) - if err != nil { - _ = ln.db.pool.CloseConn(cn) - return nil, err - } - cn.InitedAt = time.Now() - } - - if len(ln.channels) > 0 { - err := ln.listen(cn, ln.channels...) - if err != nil { - _ = ln.db.pool.CloseConn(cn) - return nil, err - } - } - - ln.cn = cn - return cn, nil -} - -func (ln *Listener) releaseConn(cn *pool.Conn, err error, allowTimeout bool) { - ln.mu.Lock() - if ln.cn == cn { - if isBadConn(err, allowTimeout) { - ln._reconnect(err) - } - } - ln.mu.Unlock() -} - -func (ln *Listener) _closeTheCn(reason error) error { - if ln.cn == nil { - return nil - } - if !ln.closed { - internal.Logf("pg: discarding bad listener connection: %s", reason) - } - - err := ln.db.pool.CloseConn(ln.cn) - ln.cn = nil - return err -} - -func (ln *Listener) _reconnect(reason error) { - _ = ln._closeTheCn(reason) - _, _ = ln._conn() -} - -// Close closes the listener, releasing any open resources. -func (ln *Listener) Close() error { - ln.mu.Lock() - defer ln.mu.Unlock() - - if ln.closed { - return errListenerClosed - } - ln.closed = true - close(ln.exit) - - return ln._closeTheCn(errListenerClosed) -} - -// Listen starts listening for notifications on channels. -func (ln *Listener) Listen(channels ...string) error { - cn, err := ln.conn() - if err != nil { - return err - } - - err = ln.listen(cn, channels...) - if err != nil { - ln.releaseConn(cn, err, false) - return err - } - - ln.channels = appendIfNotExists(ln.channels, channels...) - return nil -} - -func (ln *Listener) listen(cn *pool.Conn, channels ...string) error { - err := cn.WithWriter(ln.db.opt.WriteTimeout, func(wb *pool.WriteBuffer) error { - for _, channel := range channels { - err := writeQueryMsg(wb, ln.db, "LISTEN ?", pgChan(channel)) - if err != nil { - return err - } - } - return nil - }) - return err -} - -// Receive indefinitely waits for a notification. This is low-level API -// and in most cases Channel should be used instead. -func (ln *Listener) Receive() (channel string, payload string, err error) { - return ln.ReceiveTimeout(0) -} - -// ReceiveTimeout waits for a notification until timeout is reached. -// This is low-level API and in most cases Channel should be used instead. 
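// Editor's note: a low-level usage sketch (not part of the diff) for the
// method defined just below; most code should prefer the Channel API instead.
// Assumes fmt and time are imported.
func pollOnce(ln *pg.Listener) error {
	channel, payload, err := ln.ReceiveTimeout(5 * time.Second)
	if err != nil {
		return err
	}
	fmt.Printf("channel=%s payload=%s\n", channel, payload)
	return nil
}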
-func (ln *Listener) ReceiveTimeout(timeout time.Duration) (channel, payload string, err error) { - cn, err := ln.conn() - if err != nil { - return "", "", err - } - - err = cn.WithReader(timeout, func(rd *pool.Reader) error { - channel, payload, err = readNotification(rd) - return err - }) - if err != nil { - ln.releaseConn(cn, err, timeout > 0) - return "", "", err - } - - return channel, payload, nil -} - -// Channel returns a channel for concurrently receiving notifications. -// It periodically sends Ping messages to test connection health. -// -// The channel is closed with Listener. Receive* APIs can not be used -// after channel is created. -func (ln *Listener) Channel() <-chan *Notification { - ln.chOnce.Do(ln.initChannel) - return ln.ch -} - -func (ln *Listener) initChannel() { - _ = ln.Listen(gopgChannel) - - ln.ch = make(chan *Notification, 100) - ln.pingCh = make(chan struct{}, 10) - - go func() { - var errCount int - for { - channel, payload, err := ln.Receive() - if err != nil { - if err == errListenerClosed { - close(ln.ch) - return - } - if errCount > 0 { - time.Sleep(ln.db.retryBackoff(errCount)) - } - errCount++ - continue - } - errCount = 0 - - // Any message is as good as a ping. - select { - case ln.pingCh <- struct{}{}: - default: - } - - switch channel { - case gopgChannel: - // ignore - default: - ln.ch <- &Notification{channel, payload} - } - } - }() - - go func() { - const timeout = 5 * time.Second - - timer := time.NewTimer(timeout) - timer.Stop() - - healthy := true - var pingErr error - for { - timer.Reset(timeout) - select { - case <-ln.pingCh: - healthy = true - if !timer.Stop() { - <-timer.C - } - case <-timer.C: - pingErr = ln.ping() - if healthy { - healthy = false - } else { - ln.mu.Lock() - ln._reconnect(pingErr) - ln.mu.Unlock() - } - case <-ln.exit: - return - } - } - }() -} - -func (ln *Listener) ping() error { - _, err := ln.db.Exec("NOTIFY ?", pgChan(gopgChannel)) - return err -} - -func appendIfNotExists(ss []string, es ...string) []string { -loop: - for _, e := range es { - for _, s := range ss { - if s == e { - continue loop - } - } - ss = append(ss, e) - } - return ss -} - -type pgChan string - -var _ types.ValueAppender = pgChan("") - -func (ch pgChan) AppendValue(b []byte, quote int) []byte { - if quote == 0 { - return append(b, ch...) 
- } - - b = append(b, '"') - for _, c := range []byte(ch) { - if c == '"' { - b = append(b, '"', '"') - } else { - b = append(b, c) - } - } - b = append(b, '"') - - return b -} diff --git a/vendor/github.com/go-pg/pg/messages.go b/vendor/github.com/go-pg/pg/messages.go deleted file mode 100644 index 9d71e97..0000000 --- a/vendor/github.com/go-pg/pg/messages.go +++ /dev/null @@ -1,1093 +0,0 @@ -package pg - -import ( - "crypto/md5" - "crypto/tls" - "encoding/hex" - "errors" - "fmt" - "io" - - "github.com/go-pg/pg/internal" - "github.com/go-pg/pg/internal/pool" - "github.com/go-pg/pg/orm" - "github.com/go-pg/pg/types" -) - -const ( - commandCompleteMsg = 'C' - errorResponseMsg = 'E' - noticeResponseMsg = 'N' - parameterStatusMsg = 'S' - authenticationOKMsg = 'R' - backendKeyDataMsg = 'K' - noDataMsg = 'n' - passwordMessageMsg = 'p' - terminateMsg = 'X' - - authenticationOK = 0 - authenticationCleartextPassword = 3 - authenticationMD5Password = 5 - authenticationSASL = 10 - - notificationResponseMsg = 'A' - - describeMsg = 'D' - parameterDescriptionMsg = 't' - - queryMsg = 'Q' - readyForQueryMsg = 'Z' - emptyQueryResponseMsg = 'I' - rowDescriptionMsg = 'T' - dataRowMsg = 'D' - - parseMsg = 'P' - parseCompleteMsg = '1' - - bindMsg = 'B' - bindCompleteMsg = '2' - - executeMsg = 'E' - - syncMsg = 'S' - flushMsg = 'H' - - closeMsg = 'C' - closeCompleteMsg = '3' - - copyInResponseMsg = 'G' - copyOutResponseMsg = 'H' - copyDataMsg = 'd' - copyDoneMsg = 'c' -) - -var errEmptyQuery = internal.Errorf("pg: query is empty") - -func (db *DB) startup(cn *pool.Conn, user, password, database, appName string) error { - err := cn.WithWriter(db.opt.WriteTimeout, func(wb *pool.WriteBuffer) error { - writeStartupMsg(wb, user, database, appName) - return nil - }) - if err != nil { - return err - } - - return cn.WithReader(db.opt.ReadTimeout, func(rd *pool.Reader) error { - for { - c, msgLen, err := rd.ReadMessageType() - if err != nil { - return err - } - - switch c { - case backendKeyDataMsg: - processId, err := rd.ReadInt32() - if err != nil { - return err - } - secretKey, err := rd.ReadInt32() - if err != nil { - return err - } - cn.ProcessId = processId - cn.SecretKey = secretKey - case parameterStatusMsg: - if err := logParameterStatus(rd, msgLen); err != nil { - return err - } - case authenticationOKMsg: - err := db.auth(cn, rd, user, password) - if err != nil { - return err - } - case readyForQueryMsg: - _, err := rd.ReadN(msgLen) - return err - case errorResponseMsg: - e, err := rd.ReadError() - if err != nil { - return err - } - return e - default: - return fmt.Errorf("pg: unknown startup message response: %q", c) - } - } - }) -} - -var errSSLNotSupported = errors.New("pg: SSL is not enabled on the server") - -func (db *DB) enableSSL(cn *pool.Conn, tlsConf *tls.Config) error { - err := cn.WithWriter(db.opt.WriteTimeout, func(wb *pool.WriteBuffer) error { - writeSSLMsg(wb) - return nil - }) - if err != nil { - return err - } - - err = cn.WithReader(db.opt.ReadTimeout, func(rd *pool.Reader) error { - c, err := rd.ReadByte() - if err != nil { - return err - } - if c != 'S' { - return errSSLNotSupported - } - return nil - }) - if err != nil { - return err - } - - cn.SetNetConn(tls.Client(cn.NetConn(), tlsConf)) - return nil -} - -func (db *DB) auth(cn *pool.Conn, rd *pool.Reader, user, password string) error { - num, err := rd.ReadInt32() - if err != nil { - return err - } - - switch num { - case authenticationOK: - return nil - case authenticationCleartextPassword: - return db.authCleartext(cn, rd, 
password) - case authenticationMD5Password: - return db.authMD5(cn, rd, user, password) - case authenticationSASL: - return db.authSASL(cn, rd, user, password) - default: - return fmt.Errorf("pg: unknown authentication message response: %d", num) - } -} - -func (db *DB) authCleartext(cn *pool.Conn, rd *pool.Reader, password string) error { - err := cn.WithWriter(db.opt.WriteTimeout, func(wb *pool.WriteBuffer) error { - writePasswordMsg(wb, password) - return nil - }) - if err != nil { - return err - } - - c, _, err := rd.ReadMessageType() - if err != nil { - return err - } - - switch c { - case authenticationOKMsg: - code, err := rd.ReadInt32() - if err != nil { - return err - } - if code != 0 { - return fmt.Errorf("pg: unexpected authentication code: %d", code) - } - return nil - case errorResponseMsg: - e, err := rd.ReadError() - if err != nil { - return err - } - return e - default: - return fmt.Errorf("pg: unknown password message response: %q", c) - } -} - -func (db *DB) authMD5(cn *pool.Conn, rd *pool.Reader, user, password string) error { - b, err := rd.ReadN(4) - if err != nil { - return err - } - - secret := "md5" + md5s(md5s(password+user)+string(b)) - err = cn.WithWriter(db.opt.WriteTimeout, func(wb *pool.WriteBuffer) error { - writePasswordMsg(wb, secret) - return nil - }) - if err != nil { - return err - } - - c, _, err := rd.ReadMessageType() - if err != nil { - return err - } - switch c { - case authenticationOKMsg: - code, err := rd.ReadInt32() - if err != nil { - return err - } - if code != 0 { - return fmt.Errorf("pg: unexpected authentication code: %d", code) - } - return nil - case errorResponseMsg: - e, err := rd.ReadError() - if err != nil { - return err - } - return e - default: - return fmt.Errorf("pg: unknown password message response: %q", c) - } -} - -func (db *DB) authSASL(cn *pool.Conn, rd *pool.Reader, user, password string) error { - return fmt.Errorf("pg: SASL authentication is not supported") -} - -func md5s(s string) string { - h := md5.New() - h.Write([]byte(s)) - return hex.EncodeToString(h.Sum(nil)) -} - -func writeStartupMsg(buf *pool.WriteBuffer, user, database, appName string) { - buf.StartMessage(0) - buf.WriteInt32(196608) - buf.WriteString("user") - buf.WriteString(user) - buf.WriteString("database") - buf.WriteString(database) - if appName != "" { - buf.WriteString("application_name") - buf.WriteString(appName) - } - buf.WriteString("") - buf.FinishMessage() -} - -func writeSSLMsg(buf *pool.WriteBuffer) { - buf.StartMessage(0) - buf.WriteInt32(80877103) - buf.FinishMessage() -} - -func writePasswordMsg(buf *pool.WriteBuffer, password string) { - buf.StartMessage(passwordMessageMsg) - buf.WriteString(password) - buf.FinishMessage() -} - -func writeFlushMsg(buf *pool.WriteBuffer) { - buf.StartMessage(flushMsg) - buf.FinishMessage() -} - -func writeCancelRequestMsg(buf *pool.WriteBuffer, processId, secretKey int32) { - buf.StartMessage(0) - buf.WriteInt32(80877102) - buf.WriteInt32(processId) - buf.WriteInt32(secretKey) - buf.FinishMessage() -} - -func writeQueryMsg(buf *pool.WriteBuffer, fmter orm.QueryFormatter, query interface{}, params ...interface{}) error { - buf.StartMessage(queryMsg) - bytes, err := appendQuery(buf.Bytes, fmter, query, params...) 
- if err != nil { - buf.Reset() - return err - } - buf.Bytes = bytes - buf.WriteByte(0x0) - buf.FinishMessage() - return nil -} - -func appendQuery(dst []byte, fmter orm.QueryFormatter, query interface{}, params ...interface{}) ([]byte, error) { - switch query := query.(type) { - case orm.QueryAppender: - return query.AppendQuery(dst) - case string: - return fmter.FormatQuery(dst, query, params...), nil - default: - return nil, fmt.Errorf("pg: can't append %T", query) - } -} - -func writeSyncMsg(buf *pool.WriteBuffer) { - buf.StartMessage(syncMsg) - buf.FinishMessage() -} - -func writeParseDescribeSyncMsg(buf *pool.WriteBuffer, name, q string) { - buf.StartMessage(parseMsg) - buf.WriteString(name) - buf.WriteString(q) - buf.WriteInt16(0) - buf.FinishMessage() - - buf.StartMessage(describeMsg) - buf.WriteByte('S') - buf.WriteString(name) - buf.FinishMessage() - - writeSyncMsg(buf) -} - -func readParseDescribeSync(rd *pool.Reader) ([][]byte, error) { - var columns [][]byte - var firstErr error - for { - c, msgLen, err := rd.ReadMessageType() - if err != nil { - return nil, err - } - switch c { - case parseCompleteMsg: - _, err = rd.ReadN(msgLen) - if err != nil { - return nil, err - } - case rowDescriptionMsg: // Response to the DESCRIBE message. - columns, err = readRowDescription(rd, nil) - if err != nil { - return nil, err - } - case parameterDescriptionMsg: // Response to the DESCRIBE message. - _, err := rd.ReadN(msgLen) - if err != nil { - return nil, err - } - case noDataMsg: // Response to the DESCRIBE message. - _, err := rd.ReadN(msgLen) - if err != nil { - return nil, err - } - case readyForQueryMsg: - _, err := rd.ReadN(msgLen) - if err != nil { - return nil, err - } - if firstErr != nil { - return nil, firstErr - } - return columns, err - case errorResponseMsg: - e, err := rd.ReadError() - if err != nil { - return nil, err - } - if firstErr == nil { - firstErr = e - } - case noticeResponseMsg: - if err := logNotice(rd, msgLen); err != nil { - return nil, err - } - case parameterStatusMsg: - if err := logParameterStatus(rd, msgLen); err != nil { - return nil, err - } - default: - return nil, fmt.Errorf("pg: readParseDescribeSync: unexpected message %#x", c) - } - } -} - -// Writes BIND, EXECUTE and SYNC messages. 
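// Editor's note: a hedged sketch (not part of the diff) of how the
// BIND/EXECUTE/SYNC writer below surfaces in the public API through prepared
// statements; method names follow the go-pg API as deleted elsewhere in this
// diff, and db is assumed to be an open *pg.DB.
func preparedSum(db *pg.DB) (int, error) {
	stmt, err := db.Prepare("SELECT $1::int + $2::int") // PARSE/DESCRIBE/SYNC
	if err != nil {
		return 0, err
	}
	defer stmt.Close()

	var sum int
	// Each execution writes BIND, EXECUTE and SYNC via the helper below.
	_, err = stmt.QueryOne(pg.Scan(&sum), 2, 3)
	return sum, err
}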
-func writeBindExecuteMsg(buf *pool.WriteBuffer, name string, params ...interface{}) error { - buf.StartMessage(bindMsg) - buf.WriteString("") - buf.WriteString(name) - buf.WriteInt16(0) - buf.WriteInt16(int16(len(params))) - for _, param := range params { - buf.StartParam() - bytes := types.Append(buf.Bytes, param, 0) - if bytes != nil { - buf.Bytes = bytes - buf.FinishParam() - } else { - buf.FinishNullParam() - } - } - buf.WriteInt16(0) - buf.FinishMessage() - - buf.StartMessage(executeMsg) - buf.WriteString("") - buf.WriteInt32(0) - buf.FinishMessage() - - writeSyncMsg(buf) - - return nil -} - -func writeCloseMsg(buf *pool.WriteBuffer, name string) { - buf.StartMessage(closeMsg) - buf.WriteByte('S') - buf.WriteString(name) - buf.FinishMessage() -} - -func readCloseCompleteMsg(rd *pool.Reader) error { - for { - c, msgLen, err := rd.ReadMessageType() - if err != nil { - return err - } - switch c { - case closeCompleteMsg: - _, err := rd.ReadN(msgLen) - return err - case errorResponseMsg: - e, err := rd.ReadError() - if err != nil { - return err - } - return e - case noticeResponseMsg: - if err := logNotice(rd, msgLen); err != nil { - return err - } - case parameterStatusMsg: - if err := logParameterStatus(rd, msgLen); err != nil { - return err - } - default: - return fmt.Errorf("pg: readCloseCompleteMsg: unexpected message %#x", c) - } - } -} - -func readSimpleQuery(rd *pool.Reader) (*result, error) { - var res result - var firstErr error - for { - c, msgLen, err := rd.ReadMessageType() - if err != nil { - return nil, err - } - - switch c { - case commandCompleteMsg: - b, err := rd.ReadN(msgLen) - if err != nil { - return nil, err - } - if err := res.parse(b); err != nil && firstErr == nil { - firstErr = err - } - case readyForQueryMsg: - _, err := rd.ReadN(msgLen) - if err != nil { - return nil, err - } - if firstErr != nil { - return nil, firstErr - } - return &res, nil - case rowDescriptionMsg: - _, err := rd.ReadN(msgLen) - if err != nil { - return nil, err - } - case dataRowMsg: - _, err := rd.ReadN(msgLen) - if err != nil { - return nil, err - } - res.returned++ - case errorResponseMsg: - e, err := rd.ReadError() - if err != nil { - return nil, err - } - if firstErr == nil { - firstErr = e - } - case emptyQueryResponseMsg: - if firstErr == nil { - firstErr = errEmptyQuery - } - case noticeResponseMsg: - if err := logNotice(rd, msgLen); err != nil { - return nil, err - } - case parameterStatusMsg: - if err := logParameterStatus(rd, msgLen); err != nil { - return nil, err - } - default: - return nil, fmt.Errorf("pg: readSimpleQuery: unexpected message %#x", c) - } - } -} - -func readExtQuery(rd *pool.Reader) (*result, error) { - var res result - var firstErr error - for { - c, msgLen, err := rd.ReadMessageType() - if err != nil { - return nil, err - } - - switch c { - case bindCompleteMsg: - _, err := rd.ReadN(msgLen) - if err != nil { - return nil, err - } - case dataRowMsg: - _, err := rd.ReadN(msgLen) - if err != nil { - return nil, err - } - res.returned++ - case commandCompleteMsg: // Response to the EXECUTE message. - b, err := rd.ReadN(msgLen) - if err != nil { - return nil, err - } - if err := res.parse(b); err != nil && firstErr == nil { - firstErr = err - } - case readyForQueryMsg: // Response to the SYNC message. 
- _, err := rd.ReadN(msgLen) - if err != nil { - return nil, err - } - if firstErr != nil { - return nil, firstErr - } - return &res, nil - case errorResponseMsg: - e, err := rd.ReadError() - if err != nil { - return nil, err - } - if firstErr == nil { - firstErr = e - } - case emptyQueryResponseMsg: - if firstErr == nil { - firstErr = errEmptyQuery - } - case noticeResponseMsg: - if err := logNotice(rd, msgLen); err != nil { - return nil, err - } - case parameterStatusMsg: - if err := logParameterStatus(rd, msgLen); err != nil { - return nil, err - } - default: - return nil, fmt.Errorf("pg: readExtQuery: unexpected message %#x", c) - } - } -} - -func readRowDescription(rd *pool.Reader, columns [][]byte) ([][]byte, error) { - colNum, err := rd.ReadInt16() - if err != nil { - return nil, err - } - - columns = setByteSliceLen(columns, int(colNum)) - for i := 0; i < int(colNum); i++ { - b, err := rd.ReadSlice(0) - if err != nil { - return nil, err - } - columns[i] = append(columns[i][:0], b[:len(b)-1]...) - - _, err = rd.ReadN(18) - if err != nil { - return nil, err - } - } - - return columns, nil -} - -func setByteSliceLen(b [][]byte, n int) [][]byte { - if n <= cap(b) { - return b[:n] - } - b = b[:cap(b)] - b = append(b, make([][]byte, n-cap(b))...) - return b -} - -func readDataRow(rd *pool.Reader, scanner orm.ColumnScanner, columns [][]byte) error { - colNum, err := rd.ReadInt16() - if err != nil { - return err - } - - var firstErr error - for colIdx := int16(0); colIdx < colNum; colIdx++ { - l, err := rd.ReadInt32() - if err != nil { - return err - } - - var b []byte - if l != -1 { // NULL - b, err = rd.ReadN(int(l)) - if err != nil { - return err - } - } - - column := internal.BytesToString(columns[colIdx]) - err = scanner.ScanColumn(int(colIdx), column, b) - if err != nil && firstErr == nil { - firstErr = internal.Errorf(err.Error()) - } - - } - - return firstErr -} - -func newModel(mod interface{}) (orm.Model, error) { - m, err := orm.NewModel(mod) - if err != nil { - return nil, err - } - return m, m.Init() -} - -func readSimpleQueryData(rd *pool.Reader, mod interface{}) (*result, error) { - var res result - var firstErr error - for { - c, msgLen, err := rd.ReadMessageType() - if err != nil { - return nil, err - } - - switch c { - case rowDescriptionMsg: - rd.Columns, err = readRowDescription(rd, rd.Columns[:0]) - if err != nil { - return nil, err - } - - if res.model == nil { - var err error - res.model, err = newModel(mod) - if err != nil { - if firstErr == nil { - firstErr = err - } - res.model = Discard - } - } - case dataRowMsg: - m := res.model.NewModel() - if err := readDataRow(rd, m, rd.Columns); err != nil { - if firstErr == nil { - firstErr = err - } - } else if err := res.model.AddModel(m); err != nil { - if firstErr == nil { - firstErr = err - } - } - - res.returned++ - case commandCompleteMsg: - b, err := rd.ReadN(msgLen) - if err != nil { - return nil, err - } - if err := res.parse(b); err != nil && firstErr == nil { - firstErr = err - } - case readyForQueryMsg: - _, err := rd.ReadN(msgLen) - if err != nil { - return nil, err - } - if firstErr != nil { - return nil, firstErr - } - return &res, nil - case errorResponseMsg: - e, err := rd.ReadError() - if err != nil { - return nil, err - } - if firstErr == nil { - firstErr = e - } - case emptyQueryResponseMsg: - if firstErr == nil { - firstErr = errEmptyQuery - } - case noticeResponseMsg: - if err := logNotice(rd, msgLen); err != nil { - return nil, err - } - case parameterStatusMsg: - if err := logParameterStatus(rd, 
msgLen); err != nil { - return nil, err - } - default: - return nil, fmt.Errorf("pg: readSimpleQueryData: unexpected message %#x", c) - } - } -} - -func readExtQueryData(rd *pool.Reader, mod interface{}, columns [][]byte) (*result, error) { - var res result - var firstErr error - for { - c, msgLen, err := rd.ReadMessageType() - if err != nil { - return nil, err - } - - switch c { - case bindCompleteMsg: - _, err := rd.ReadN(msgLen) - if err != nil { - return nil, err - } - case dataRowMsg: - if res.model == nil { - var err error - res.model, err = newModel(mod) - if err != nil { - if firstErr == nil { - firstErr = err - } - res.model = Discard - } - } - - m := res.model.NewModel() - if err := readDataRow(rd, m, columns); err != nil { - if firstErr == nil { - firstErr = err - } - } else if err := res.model.AddModel(m); err != nil { - if firstErr == nil { - firstErr = err - } - } - - res.returned++ - case commandCompleteMsg: // Response to the EXECUTE message. - b, err := rd.ReadN(msgLen) - if err != nil { - return nil, err - } - if err := res.parse(b); err != nil && firstErr == nil { - firstErr = err - } - case readyForQueryMsg: // Response to the SYNC message. - _, err := rd.ReadN(msgLen) - if err != nil { - return nil, err - } - if firstErr != nil { - return nil, firstErr - } - return &res, nil - case errorResponseMsg: - e, err := rd.ReadError() - if err != nil { - return nil, err - } - if firstErr == nil { - firstErr = e - } - case noticeResponseMsg: - if err := logNotice(rd, msgLen); err != nil { - return nil, err - } - case parameterStatusMsg: - if err := logParameterStatus(rd, msgLen); err != nil { - return nil, err - } - default: - return nil, fmt.Errorf("pg: readExtQueryData: unexpected message %#x", c) - } - } -} - -func readCopyInResponse(rd *pool.Reader) error { - var firstErr error - for { - c, msgLen, err := rd.ReadMessageType() - if err != nil { - return err - } - - switch c { - case copyInResponseMsg: - _, err := rd.ReadN(msgLen) - return err - case errorResponseMsg: - e, err := rd.ReadError() - if err != nil { - return err - } - if firstErr == nil { - firstErr = e - } - case readyForQueryMsg: - _, err := rd.ReadN(msgLen) - if err != nil { - return err - } - return firstErr - case noticeResponseMsg: - if err := logNotice(rd, msgLen); err != nil { - return err - } - case parameterStatusMsg: - if err := logParameterStatus(rd, msgLen); err != nil { - return err - } - default: - return fmt.Errorf("pg: readCopyInResponse: unexpected message %#x", c) - } - } -} - -func readCopyOutResponse(rd *pool.Reader) error { - var firstErr error - for { - c, msgLen, err := rd.ReadMessageType() - if err != nil { - return err - } - - switch c { - case copyOutResponseMsg: - _, err := rd.ReadN(msgLen) - return err - case errorResponseMsg: - e, err := rd.ReadError() - if err != nil { - return err - } - if firstErr == nil { - firstErr = e - } - case readyForQueryMsg: - _, err := rd.ReadN(msgLen) - if err != nil { - return err - } - return firstErr - case noticeResponseMsg: - if err := logNotice(rd, msgLen); err != nil { - return err - } - case parameterStatusMsg: - if err := logParameterStatus(rd, msgLen); err != nil { - return err - } - default: - return fmt.Errorf("pg: readCopyOutResponse: unexpected message %#x", c) - } - } -} - -func readCopyData(rd *pool.Reader, w io.Writer) (*result, error) { - var res result - var firstErr error - for { - c, msgLen, err := rd.ReadMessageType() - if err != nil { - return nil, err - } - - switch c { - case copyDataMsg: - b, err := rd.ReadN(msgLen) - if err != 
nil { - return nil, err - } - - _, err = w.Write(b) - if err != nil { - return nil, err - } - case copyDoneMsg: - _, err := rd.ReadN(msgLen) - if err != nil { - return nil, err - } - case commandCompleteMsg: - b, err := rd.ReadN(msgLen) - if err != nil { - return nil, err - } - if err := res.parse(b); err != nil && firstErr == nil { - firstErr = err - } - case readyForQueryMsg: - _, err := rd.ReadN(msgLen) - if err != nil { - return nil, err - } - if firstErr != nil { - return nil, firstErr - } - return &res, nil - case errorResponseMsg: - e, err := rd.ReadError() - if err != nil { - return nil, err - } - return nil, e - case noticeResponseMsg: - if err := logNotice(rd, msgLen); err != nil { - return nil, err - } - case parameterStatusMsg: - if err := logParameterStatus(rd, msgLen); err != nil { - return nil, err - } - default: - return nil, fmt.Errorf("pg: readCopyData: unexpected message %#x", c) - } - } -} - -func writeCopyData(buf *pool.WriteBuffer, r io.Reader) error { - buf.StartMessage(copyDataMsg) - _, err := buf.ReadFrom(r) - buf.FinishMessage() - return err -} - -func writeCopyDone(buf *pool.WriteBuffer) { - buf.StartMessage(copyDoneMsg) - buf.FinishMessage() -} - -func readReadyForQuery(rd *pool.Reader) (*result, error) { - var res result - var firstErr error - for { - c, msgLen, err := rd.ReadMessageType() - if err != nil { - return nil, err - } - - switch c { - case commandCompleteMsg: - b, err := rd.ReadN(msgLen) - if err != nil { - return nil, err - } - if err := res.parse(b); err != nil && firstErr == nil { - firstErr = err - } - case readyForQueryMsg: - _, err := rd.ReadN(msgLen) - if err != nil { - return nil, err - } - if firstErr != nil { - return nil, firstErr - } - return &res, nil - case errorResponseMsg: - e, err := rd.ReadError() - if err != nil { - return nil, err - } - if firstErr == nil { - firstErr = e - } - case noticeResponseMsg: - if err := logNotice(rd, msgLen); err != nil { - return nil, err - } - case parameterStatusMsg: - if err := logParameterStatus(rd, msgLen); err != nil { - return nil, err - } - default: - return nil, fmt.Errorf("pg: readReadyForQueryOrError: unexpected message %#x", c) - } - } -} - -func readNotification(rd *pool.Reader) (channel, payload string, err error) { - for { - c, msgLen, err := rd.ReadMessageType() - if err != nil { - return "", "", err - } - - switch c { - case commandCompleteMsg: - _, err := rd.ReadN(msgLen) - if err != nil { - return "", "", err - } - case readyForQueryMsg: - _, err := rd.ReadN(msgLen) - if err != nil { - return "", "", err - } - case errorResponseMsg: - e, err := rd.ReadError() - if err != nil { - return "", "", err - } - return "", "", e - case noticeResponseMsg: - if err := logNotice(rd, msgLen); err != nil { - return "", "", err - } - case notificationResponseMsg: - _, err := rd.ReadInt32() - if err != nil { - return "", "", err - } - channel, err = rd.ReadString() - if err != nil { - return "", "", err - } - payload, err = rd.ReadString() - if err != nil { - return "", "", err - } - return channel, payload, nil - default: - return "", "", fmt.Errorf("pg: unexpected message %q", c) - } - } -} - -var terminateMessage = []byte{terminateMsg, 0, 0, 0, 4} - -func terminateConn(cn *pool.Conn) error { - // Don't use cn.Buf because it is racy with user code. 
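// Each frontend message after startup is framed as a one-byte type followed
// by a big-endian int32 length that counts the length field itself.
// Terminate is type 'X' with an empty body, hence the fixed five bytes
// {terminateMsg, 0, 0, 0, 4} above.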
- _, err := cn.NetConn().Write(terminateMessage)
- return err
-}
-
-//------------------------------------------------------------------------------
-
-func logNotice(rd *pool.Reader, msgLen int) error {
- _, err := rd.ReadN(msgLen)
- return err
-}
-
-func logParameterStatus(rd *pool.Reader, msgLen int) error {
- _, err := rd.ReadN(msgLen)
- return err
-}
diff --git a/vendor/github.com/go-pg/pg/options.go b/vendor/github.com/go-pg/pg/options.go
deleted file mode 100644
index 11e2497..0000000
--- a/vendor/github.com/go-pg/pg/options.go
+++ /dev/null
@@ -1,242 +0,0 @@
-package pg
-
-import (
- "crypto/tls"
- "errors"
- "fmt"
- "net"
- "net/url"
- "runtime"
- "strings"
- "time"
-
- "github.com/go-pg/pg/internal/pool"
-)
-
-// Database connection options.
-type Options struct {
- // Network type, either tcp or unix.
- // Default is tcp.
- Network string
- // TCP host:port or Unix socket depending on Network.
- Addr string
-
- // Dialer creates a new network connection and has priority over
- // Network and Addr options.
- Dialer func(network, addr string) (net.Conn, error)
-
- // Hook that is called when a new connection is established.
- OnConnect func(*DB) error
-
- User string
- Password string
- Database string
-
- // ApplicationName is the application name. Used in logs on the Pg side.
- // Only available since PostgreSQL 9.0.
- ApplicationName string
-
- // TLS config for secure connections.
- TLSConfig *tls.Config
-
- // Maximum number of retries before giving up.
- // Default is to not retry failed queries.
- MaxRetries int
- // Whether to retry queries cancelled because of statement_timeout.
- RetryStatementTimeout bool
- // Minimum backoff between each retry.
- // Default is 250 milliseconds; -1 disables backoff.
- MinRetryBackoff time.Duration
- // Maximum backoff between each retry.
- // Default is 4 seconds; -1 disables backoff.
- MaxRetryBackoff time.Duration
-
- // Dial timeout for establishing new connections.
- // Default is 5 seconds.
- DialTimeout time.Duration
-
- // Timeout for socket reads. If reached, commands will fail
- // with a timeout instead of blocking.
- ReadTimeout time.Duration
- // Timeout for socket writes. If reached, commands will fail
- // with a timeout instead of blocking.
- WriteTimeout time.Duration
-
- // Maximum number of socket connections.
- // Default is 10 connections per CPU as reported by runtime.NumCPU.
- PoolSize int
- // Minimum number of idle connections, which is useful when establishing
- // a new connection is slow.
- MinIdleConns int
- // Connection age at which the client retires (closes) the connection.
- // It is useful with proxies like PgBouncer and HAProxy.
- // Default is to not close aged connections.
- MaxConnAge time.Duration
- // Time for which the client waits for a free connection if all
- // connections are busy before returning an error.
- // Default is 30 seconds if ReadTimeout is not defined, otherwise
- // ReadTimeout + 1 second.
- PoolTimeout time.Duration
- // Amount of time after which the client closes idle connections.
- // Should be less than the server's timeout.
- // Default is 5 minutes. -1 disables the idle timeout check.
- IdleTimeout time.Duration
- // Frequency of idle checks made by the idle connections reaper.
- // Default is 1 minute. -1 disables the idle connections reaper,
- // but idle connections are still discarded by the client
- // if IdleTimeout is set.
- IdleCheckFrequency time.Duration
-}
-
-func (opt *Options) init() {
- if opt.Network == "" {
- opt.Network = "tcp"
- }
-
- if opt.Addr == "" {
- switch opt.Network {
- case "tcp":
- opt.Addr = "localhost:5432"
- case "unix":
- opt.Addr = "/var/run/postgresql/.s.PGSQL.5432"
- }
- }
-
- if opt.PoolSize == 0 {
- opt.PoolSize = 10 * runtime.NumCPU()
- }
-
- if opt.PoolTimeout == 0 {
- if opt.ReadTimeout != 0 {
- opt.PoolTimeout = opt.ReadTimeout + time.Second
- } else {
- opt.PoolTimeout = 30 * time.Second
- }
- }
-
- if opt.DialTimeout == 0 {
- opt.DialTimeout = 5 * time.Second
- }
-
- if opt.IdleTimeout == 0 {
- opt.IdleTimeout = 5 * time.Minute
- }
- if opt.IdleCheckFrequency == 0 {
- opt.IdleCheckFrequency = time.Minute
- }
-
- switch opt.MinRetryBackoff {
- case -1:
- opt.MinRetryBackoff = 0
- case 0:
- opt.MinRetryBackoff = 250 * time.Millisecond
- }
- switch opt.MaxRetryBackoff {
- case -1:
- opt.MaxRetryBackoff = 0
- case 0:
- opt.MaxRetryBackoff = 4 * time.Second
- }
-}
-
-// ParseURL parses a URL into options that can be used to connect to PostgreSQL.
-func ParseURL(sURL string) (*Options, error) {
- parsedUrl, err := url.Parse(sURL)
- if err != nil {
- return nil, err
- }
-
- // scheme
- if parsedUrl.Scheme != "postgres" && parsedUrl.Scheme != "postgresql" {
- return nil, errors.New("pg: invalid scheme: " + parsedUrl.Scheme)
- }
-
- // host and port
- options := &Options{
- Addr: parsedUrl.Host,
- }
- if !strings.Contains(options.Addr, ":") {
- options.Addr = options.Addr + ":5432"
- }
-
- // username and password
- if parsedUrl.User != nil {
- options.User = parsedUrl.User.Username()
-
- if password, ok := parsedUrl.User.Password(); ok {
- options.Password = password
- }
- }
-
- if options.User == "" {
- options.User = "postgres"
- }
-
- // database
- if len(strings.Trim(parsedUrl.Path, "/")) > 0 {
- options.Database = parsedUrl.Path[1:]
- } else {
- return nil, errors.New("pg: database name not provided")
- }
-
- // ssl mode
- query, err := url.ParseQuery(parsedUrl.RawQuery)
- if err != nil {
- return nil, err
- }
-
- if sslMode, ok := query["sslmode"]; ok && len(sslMode) > 0 {
- switch sslMode[0] {
- case "allow", "prefer", "require":
- options.TLSConfig = &tls.Config{InsecureSkipVerify: true}
- case "disable":
- options.TLSConfig = nil
- default:
- return nil, fmt.Errorf("pg: sslmode '%v' is not supported", sslMode[0])
- }
- } else {
- options.TLSConfig = &tls.Config{InsecureSkipVerify: true}
- }
-
- delete(query, "sslmode")
-
- if appName, ok := query["application_name"]; ok && len(appName) > 0 {
- options.ApplicationName = appName[0]
- }
-
- delete(query, "application_name")
-
- if len(query) > 0 {
- return nil, errors.New("pg: options other than 'sslmode' and 'application_name' are not supported")
- }
-
- return options, nil
-}
-
-func (opt *Options) getDialer() func() (net.Conn, error) {
- if opt.Dialer != nil {
- return func() (net.Conn, error) {
- return opt.Dialer(opt.Network, opt.Addr)
- }
- }
- return func() (net.Conn, error) {
- netDialer := &net.Dialer{
- Timeout: opt.DialTimeout,
- KeepAlive: 5 * time.Minute,
- }
- return netDialer.Dial(opt.Network, opt.Addr)
- }
-}
-
-func newConnPool(opt *Options) *pool.ConnPool {
- return pool.NewConnPool(&pool.Options{
- Dialer: opt.getDialer(),
- PoolSize: opt.PoolSize,
- PoolTimeout: opt.PoolTimeout,
- IdleTimeout: opt.IdleTimeout,
- IdleCheckFrequency: opt.IdleCheckFrequency,
- OnClose: func(cn *pool.Conn) error {
- return terminateConn(cn)
- },
- })
-}
diff --git a/vendor/github.com/go-pg/pg/orm/composite.go
b/vendor/github.com/go-pg/pg/orm/composite.go deleted file mode 100644 index 45a11ff..0000000 --- a/vendor/github.com/go-pg/pg/orm/composite.go +++ /dev/null @@ -1,56 +0,0 @@ -package orm - -import ( - "fmt" - "reflect" - - "github.com/go-pg/pg/internal/parser" - "github.com/go-pg/pg/types" -) - -func compositeScanner(typ reflect.Type) types.ScannerFunc { - return func(v reflect.Value, b []byte) error { - if !v.CanSet() { - return fmt.Errorf("pg: Scan(nonsettable %s)", v.Type()) - } - - if b == nil { - if !v.IsNil() { - v.Set(reflect.Zero(v.Type())) - } - return nil - } - - table := GetTable(typ) - p := parser.NewCompositeParser(b) - for i := 0; p.Valid(); i++ { - elem, err := p.NextElem() - if err != nil { - return err - } - - field := table.Fields[i] - err = field.ScanValue(v, elem) - if err != nil { - return err - } - } - - return nil - } -} - -func compositeAppender(typ reflect.Type) types.AppenderFunc { - return func(b []byte, v reflect.Value, quote int) []byte { - table := GetTable(typ) - b = append(b, '(') - for i, f := range table.Fields { - if i > 0 { - b = append(b, ',') - } - b = f.AppendValue(b, v, quote) - } - b = append(b, ')') - return b - } -} diff --git a/vendor/github.com/go-pg/pg/orm/composite_create.go b/vendor/github.com/go-pg/pg/orm/composite_create.go deleted file mode 100644 index 2714efa..0000000 --- a/vendor/github.com/go-pg/pg/orm/composite_create.go +++ /dev/null @@ -1,68 +0,0 @@ -package orm - -import ( - "errors" - "strconv" -) - -type CreateCompositeOptions struct { - Varchar int // replaces PostgreSQL data type `text` with `varchar(n)` -} - -func CreateComposite(db DB, model interface{}, opt *CreateCompositeOptions) error { - q := NewQuery(db, model) - _, err := q.db.Exec(createCompositeQuery{ - q: q, - opt: opt, - }) - return err -} - -type createCompositeQuery struct { - q *Query - opt *CreateCompositeOptions -} - -func (q createCompositeQuery) Copy() QueryAppender { - return q -} - -func (q createCompositeQuery) Query() *Query { - return q.q -} - -func (q createCompositeQuery) AppendQuery(b []byte) ([]byte, error) { - if q.q.stickyErr != nil { - return nil, q.q.stickyErr - } - if q.q.model == nil { - return nil, errors.New("pg: Model(nil)") - } - - table := q.q.model.Table() - - b = append(b, "CREATE TYPE "...) - b = append(b, q.q.model.Table().Alias...) - b = append(b, " AS ("...) - - for i, field := range table.Fields { - if i > 0 { - b = append(b, ", "...) - } - - b = append(b, field.Column...) - b = append(b, " "...) - if q.opt != nil && q.opt.Varchar > 0 && - field.SQLType == "text" && !field.HasFlag(customTypeFlag) { - b = append(b, "varchar("...) - b = strconv.AppendInt(b, int64(q.opt.Varchar), 10) - b = append(b, ")"...) - } else { - b = append(b, field.SQLType...) - } - } - - b = append(b, ")"...) 
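// For a hypothetical model with fields id (bigint) and title (text), and
// CreateCompositeOptions{Varchar: 255}, the bytes built above would read:
//
//	CREATE TYPE book AS (id bigint, title varchar(255))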
-
- return b, nil
-}
diff --git a/vendor/github.com/go-pg/pg/orm/composite_drop.go b/vendor/github.com/go-pg/pg/orm/composite_drop.go
deleted file mode 100644
index f219803..0000000
--- a/vendor/github.com/go-pg/pg/orm/composite_drop.go
+++ /dev/null
@@ -1,50 +0,0 @@
-package orm
-
-import "errors"
-
-type DropCompositeOptions struct {
- IfExists bool
- Cascade bool
-}
-
-func DropComposite(db DB, model interface{}, opt *DropCompositeOptions) error {
- q := NewQuery(db, model)
- _, err := q.db.Exec(dropCompositeQuery{
- q: q,
- opt: opt,
- })
- return err
-}
-
-type dropCompositeQuery struct {
- q *Query
- opt *DropCompositeOptions
-}
-
-func (q dropCompositeQuery) Copy() QueryAppender {
- return q
-}
-
-func (q dropCompositeQuery) Query() *Query {
- return q.q
-}
-
-func (q dropCompositeQuery) AppendQuery(b []byte) ([]byte, error) {
- if q.q.stickyErr != nil {
- return nil, q.q.stickyErr
- }
- if q.q.model == nil {
- return nil, errors.New("pg: Model(nil)")
- }
-
- b = append(b, "DROP TYPE "...)
- if q.opt != nil && q.opt.IfExists {
- b = append(b, "IF EXISTS "...)
- }
- b = append(b, q.q.model.Table().Alias...)
- if q.opt != nil && q.opt.Cascade {
- b = append(b, " CASCADE"...)
- }
-
- return b, nil
-}
diff --git a/vendor/github.com/go-pg/pg/orm/count_estimate.go b/vendor/github.com/go-pg/pg/orm/count_estimate.go
deleted file mode 100644
index c8fde32..0000000
--- a/vendor/github.com/go-pg/pg/orm/count_estimate.go
+++ /dev/null
@@ -1,85 +0,0 @@
-package orm
-
-import (
- "fmt"
-
- "github.com/go-pg/pg/internal"
-)
-
-// Placeholder that is replaced with count(*).
-const placeholder = `'_go_pg_placeholder'`
-
-// https://wiki.postgresql.org/wiki/Count_estimate
-var pgCountEstimateFunc = fmt.Sprintf(`
-CREATE OR REPLACE FUNCTION _go_pg_count_estimate_v2(query text, threshold int)
-RETURNS int AS $$
-DECLARE
- rec record;
- nrows int;
-BEGIN
- FOR rec IN EXECUTE 'EXPLAIN ' || query LOOP
- nrows := substring(rec."QUERY PLAN" FROM ' rows=(\d+)');
- EXIT WHEN nrows IS NOT NULL;
- END LOOP;
-
- -- Return the estimation if there are too many rows.
- IF nrows > threshold THEN
- RETURN nrows;
- END IF;
-
- -- Otherwise execute real count query.
- query := replace(query, 'SELECT '%s'', 'SELECT count(*)');
- EXECUTE query INTO nrows;
-
- IF nrows IS NULL THEN
- nrows := 0;
- END IF;
-
- RETURN nrows;
-END;
-$$ LANGUAGE plpgsql;
-`, placeholder)
-
-// CountEstimate uses EXPLAIN to get an estimated number of rows returned by the query.
-// If that number is bigger than the threshold it returns the estimation.
-// Otherwise it executes another query using the count aggregate function and
-// returns the result.
-//
-// Based on https://wiki.postgresql.org/wiki/Count_estimate
-func (q *Query) CountEstimate(threshold int) (int, error) {
- if q.stickyErr != nil {
- return 0, q.stickyErr
- }
-
- query, err := q.countSelectQuery(placeholder).AppendQuery(nil)
- if err != nil {
- return 0, err
- }
-
- for i := 0; i < 3; i++ {
- var count int
- _, err = q.db.QueryOne(
- Scan(&count),
- "SELECT _go_pg_count_estimate_v2(?, ?)",
- string(query), threshold,
- )
- if err != nil {
- if pgerr, ok := err.(internal.PGError); ok && pgerr.Field('C') == "42883" {
- // undefined_function
- err = q.createCountEstimateFunc()
- if err != nil {
- return 0, err
- }
- continue
- }
- }
- return count, err
- }
-
- return 0, err
-}
-
-func (q *Query) createCountEstimateFunc() error {
- _, err := q.db.Exec(pgCountEstimateFunc)
- return err
-}
diff --git a/vendor/github.com/go-pg/pg/orm/delete.go b/vendor/github.com/go-pg/pg/orm/delete.go
deleted file mode 100644
index 5bd1888..0000000
--- a/vendor/github.com/go-pg/pg/orm/delete.go
+++ /dev/null
@@ -1,83 +0,0 @@
-package orm
-
-import (
- "github.com/go-pg/pg/internal"
-)
-
-func Delete(db DB, model interface{}) error {
- res, err := NewQuery(db, model).WherePK().Delete()
- if err != nil {
- return err
- }
- return internal.AssertOneRow(res.RowsAffected())
-}
-
-func ForceDelete(db DB, model interface{}) error {
- res, err := NewQuery(db, model).WherePK().ForceDelete()
- if err != nil {
- return err
- }
- return internal.AssertOneRow(res.RowsAffected())
-}
-
-type deleteQuery struct {
- q *Query
-}
-
-var _ QueryAppender = (*deleteQuery)(nil)
-
-func (q deleteQuery) Copy() QueryAppender {
- return deleteQuery{
- q: q.q.Copy(),
- }
-}
-
-func (q deleteQuery) Query() *Query {
- return q.q
-}
-
-func (q deleteQuery) AppendQuery(b []byte) ([]byte, error) {
- if q.q.stickyErr != nil {
- return nil, q.q.stickyErr
- }
-
- var err error
-
- if len(q.q.with) > 0 {
- b, err = q.q.appendWith(b)
- if err != nil {
- return nil, err
- }
- }
-
- b = append(b, "DELETE FROM "...)
- b = q.q.appendFirstTableWithAlias(b)
-
- if q.q.hasMultiTables() {
- b = append(b, " USING "...)
- b = q.q.appendOtherTables(b)
- }
-
- b = append(b, " WHERE "...)
- value := q.q.model.Value()
- if q.q.isSliceModel() {
- table := q.q.model.Table()
- b = appendColumnAndSliceValue(b, value, table.Alias, table.PKs)
-
- if q.q.hasWhere() {
- b = append(b, " AND "...)
- b = q.q.appendWhere(b)
- }
- } else {
- b, err = q.q.mustAppendWhere(b)
- if err != nil {
- return nil, err
- }
- }
-
- if len(q.q.returning) > 0 {
- b = q.q.appendReturning(b)
- }
-
- return b, nil
-}
diff --git a/vendor/github.com/go-pg/pg/orm/field.go b/vendor/github.com/go-pg/pg/orm/field.go
deleted file mode 100644
index fc954bb..0000000
--- a/vendor/github.com/go-pg/pg/orm/field.go
+++ /dev/null
@@ -1,108 +0,0 @@
-package orm
-
-import (
- "reflect"
-
- "github.com/go-pg/pg/types"
-)
-
-const (
- PrimaryKeyFlag = uint8(1) << iota
- ForeignKeyFlag
- NotNullFlag
- UniqueFlag
- ArrayFlag
- customTypeFlag
-)
-
-type Field struct {
- Field reflect.StructField
- Type reflect.Type
-
- GoName string // struct field name, e.g. Id
- SQLName string // SQL name, e.g. id
- Column types.Q // escaped SQL name, e.g.
"id" - SQLType string - Index []int - Default types.Q - OnDelete string - - flags uint8 - - append types.AppenderFunc - scan types.ScannerFunc - - isZero func(reflect.Value) bool -} - -func indexEqual(ind1, ind2 []int) bool { - if len(ind1) != len(ind2) { - return false - } - for i, ind := range ind1 { - if ind != ind2[i] { - return false - } - } - return true -} - -func (f *Field) Copy() *Field { - cp := *f - cp.Index = cp.Index[:len(f.Index):len(f.Index)] - return &cp -} - -func (f *Field) SetFlag(flag uint8) { - f.flags |= flag -} - -func (f *Field) HasFlag(flag uint8) bool { - return f.flags&flag != 0 -} - -func (f *Field) Value(strct reflect.Value) reflect.Value { - return strct.FieldByIndex(f.Index) -} - -func (f *Field) IsZero(strct reflect.Value) bool { - return f.isZero(f.Value(strct)) -} - -func (f *Field) OmitZero() bool { - return !f.HasFlag(NotNullFlag) -} - -func (f *Field) AppendValue(b []byte, strct reflect.Value, quote int) []byte { - fv := f.Value(strct) - if !f.HasFlag(NotNullFlag) && f.isZero(fv) { - return types.AppendNull(b, quote) - } - return f.append(b, fv, quote) -} - -func (f *Field) ScanValue(strct reflect.Value, b []byte) error { - fv := fieldByIndex(strct, f.Index) - return f.scan(fv, b) -} - -type Method struct { - Index int - - flags int8 - - appender func([]byte, reflect.Value, int) []byte -} - -func (m *Method) Has(flag int8) bool { - return m.flags&flag != 0 -} - -func (m *Method) Value(strct reflect.Value) reflect.Value { - return strct.Method(m.Index).Call(nil)[0] -} - -func (m *Method) AppendValue(dst []byte, strct reflect.Value, quote int) []byte { - mv := m.Value(strct) - return m.appender(dst, mv, quote) -} diff --git a/vendor/github.com/go-pg/pg/orm/format.go b/vendor/github.com/go-pg/pg/orm/format.go deleted file mode 100644 index 9c5f978..0000000 --- a/vendor/github.com/go-pg/pg/orm/format.go +++ /dev/null @@ -1,286 +0,0 @@ -package orm - -import ( - "bytes" - "fmt" - "sort" - "strconv" - "strings" - - "github.com/go-pg/pg/internal" - "github.com/go-pg/pg/internal/parser" - "github.com/go-pg/pg/types" -) - -var formatter Formatter - -type FormatAppender interface { - AppendFormat([]byte, QueryFormatter) []byte -} - -type sepFormatAppender interface { - FormatAppender - AppendSep([]byte) []byte -} - -//------------------------------------------------------------------------------ - -type queryParamsAppender struct { - query string - params []interface{} -} - -var _ FormatAppender = (*queryParamsAppender)(nil) -var _ types.ValueAppender = (*queryParamsAppender)(nil) - -func Q(query string, params ...interface{}) *queryParamsAppender { - return &queryParamsAppender{query, params} -} - -func (q *queryParamsAppender) AppendFormat(b []byte, f QueryFormatter) []byte { - return f.FormatQuery(b, q.query, q.params...) -} - -func (q *queryParamsAppender) AppendValue(b []byte, quote int) []byte { - return q.AppendFormat(b, formatter) -} - -func (q *queryParamsAppender) Value() types.Q { - b := q.AppendValue(nil, 1) - return types.Q(internal.BytesToString(b)) -} - -//------------------------------------------------------------------------------ - -type condGroupAppender struct { - sep string - cond []sepFormatAppender -} - -var _ FormatAppender = (*condAppender)(nil) -var _ sepFormatAppender = (*condAppender)(nil) - -func (q *condGroupAppender) AppendSep(b []byte) []byte { - return append(b, q.sep...) 
-} - -func (q *condGroupAppender) AppendFormat(b []byte, f QueryFormatter) []byte { - b = append(b, '(') - for i, app := range q.cond { - if i > 0 { - b = app.AppendSep(b) - } - b = app.AppendFormat(b, f) - } - b = append(b, ')') - return b -} - -//------------------------------------------------------------------------------ - -type condAppender struct { - sep string - cond string - params []interface{} -} - -var _ FormatAppender = (*condAppender)(nil) -var _ sepFormatAppender = (*condAppender)(nil) - -func (q *condAppender) AppendSep(b []byte) []byte { - return append(b, q.sep...) -} - -func (q *condAppender) AppendFormat(b []byte, f QueryFormatter) []byte { - b = append(b, '(') - b = f.FormatQuery(b, q.cond, q.params...) - b = append(b, ')') - return b -} - -//------------------------------------------------------------------------------ - -type fieldAppender struct { - field string -} - -var _ FormatAppender = (*fieldAppender)(nil) - -func (a fieldAppender) AppendFormat(b []byte, f QueryFormatter) []byte { - return types.AppendField(b, a.field, 1) -} - -//------------------------------------------------------------------------------ - -type Formatter struct { - namedParams map[string]interface{} -} - -func (f Formatter) String() string { - if len(f.namedParams) == 0 { - return "" - } - - var keys []string - for k, _ := range f.namedParams { - keys = append(keys, k) - } - sort.Strings(keys) - - var ss []string - for _, k := range keys { - ss = append(ss, fmt.Sprintf("%s=%v", k, f.namedParams[k])) - } - return " " + strings.Join(ss, " ") -} - -func (f Formatter) copy() Formatter { - var cp Formatter - for param, value := range f.namedParams { - cp.SetParam(param, value) - } - return cp -} - -func (f *Formatter) SetParam(param string, value interface{}) { - if f.namedParams == nil { - f.namedParams = make(map[string]interface{}) - } - f.namedParams[param] = value -} - -func (f *Formatter) WithParam(param string, value interface{}) Formatter { - cp := f.copy() - cp.SetParam(param, value) - return cp -} - -func (f Formatter) Param(param string) interface{} { - return f.namedParams[param] -} - -func (f Formatter) Append(dst []byte, src string, params ...interface{}) []byte { - if (params == nil && f.namedParams == nil) || strings.IndexByte(src, '?') == -1 { - return append(dst, src...) - } - return f.append(dst, parser.NewString(src), params) -} - -func (f Formatter) AppendBytes(dst, src []byte, params ...interface{}) []byte { - if (params == nil && f.namedParams == nil) || bytes.IndexByte(src, '?') == -1 { - return append(dst, src...) - } - return f.append(dst, parser.New(src), params) -} - -func (f Formatter) FormatQuery(dst []byte, query string, params ...interface{}) []byte { - return f.Append(dst, query, params...) -} - -func (f Formatter) append(dst []byte, p *parser.Parser, params []interface{}) []byte { - var paramsIndex int - var namedParamsOnce bool - var tableParams *tableParams - var model tableModel - - if len(params) > 0 { - var ok bool - model, ok = params[len(params)-1].(tableModel) - if ok { - params = params[:len(params)-1] - } - } - - for p.Valid() { - b, ok := p.ReadSep('?') - if !ok { - dst = append(dst, b...) - continue - } - if len(b) > 0 && b[len(b)-1] == '\\' { - dst = append(dst, b[:len(b)-1]...) - dst = append(dst, '?') - continue - } - dst = append(dst, b...) 
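// At this point a literal \? has already been emitted; what follows
// resolves the placeholder itself: a ?0-style numeric suffix indexes into
// params, ?name looks up named params (set via WithParam) or struct/table
// params, and a bare ? consumes the next positional param in order.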
- - if id, numeric := p.ReadIdentifier(); id != "" { - if numeric { - idx, err := strconv.Atoi(id) - if err != nil { - goto restore_param - } - - if idx >= len(params) { - goto restore_param - } - - dst = f.appendParam(dst, params[idx]) - continue - } - - if f.namedParams != nil { - param, paramOK := f.namedParams[id] - if paramOK { - dst = f.appendParam(dst, param) - continue - } - } - - if !namedParamsOnce && len(params) > 0 { - namedParamsOnce = true - tableParams, _ = newTableParams(params[len(params)-1]) - } - - if tableParams != nil { - dst, ok = tableParams.AppendParam(dst, f, id) - if ok { - continue - } - } - - if model != nil { - dst, ok = model.AppendParam(dst, f, id) - if ok { - continue - } - } - - restore_param: - dst = append(dst, '?') - dst = append(dst, id...) - continue - } - - if paramsIndex >= len(params) { - dst = append(dst, '?') - continue - } - - param := params[paramsIndex] - paramsIndex++ - - dst = f.appendParam(dst, param) - } - - return dst -} - -type queryAppender interface { - AppendQuery(dst []byte) ([]byte, error) -} - -func (f Formatter) appendParam(b []byte, param interface{}) []byte { - switch param := param.(type) { - case queryAppender: - bb, err := param.AppendQuery(b) - if err != nil { - return types.AppendError(b, err) - } - return bb - case FormatAppender: - return param.AppendFormat(b, f) - default: - return types.Append(b, param, 1) - } -} diff --git a/vendor/github.com/go-pg/pg/orm/hook.go b/vendor/github.com/go-pg/pg/orm/hook.go deleted file mode 100644 index 45e589c..0000000 --- a/vendor/github.com/go-pg/pg/orm/hook.go +++ /dev/null @@ -1,199 +0,0 @@ -package orm - -import ( - "reflect" -) - -type hookStubs struct{} - -func (hookStubs) AfterQuery(_ DB) error { - return nil -} - -func (hookStubs) BeforeSelectQuery(db DB, q *Query) (*Query, error) { - return q, nil -} - -func (hookStubs) AfterSelect(_ DB) error { - return nil -} - -func (hookStubs) BeforeInsert(_ DB) error { - return nil -} - -func (hookStubs) AfterInsert(_ DB) error { - return nil -} - -func (hookStubs) BeforeUpdate(_ DB) error { - return nil -} - -func (hookStubs) AfterUpdate(_ DB) error { - return nil -} - -func (hookStubs) BeforeDelete(_ DB) error { - return nil -} - -func (hookStubs) AfterDelete(_ DB) error { - return nil -} - -func callHookSlice(slice reflect.Value, ptr bool, db DB, hook func(reflect.Value, DB) error) error { - var firstErr error - for i := 0; i < slice.Len(); i++ { - var err error - if ptr { - err = hook(slice.Index(i), db) - } else { - err = hook(slice.Index(i).Addr(), db) - } - if err != nil && firstErr == nil { - firstErr = err - } - } - return firstErr -} - -//------------------------------------------------------------------------------ - -type afterQueryHook interface { - AfterQuery(db DB) error -} - -var afterQueryHookType = reflect.TypeOf((*afterQueryHook)(nil)).Elem() - -func callAfterQueryHook(v reflect.Value, db DB) error { - return v.Interface().(afterQueryHook).AfterQuery(db) -} - -func callAfterQueryHookSlice(slice reflect.Value, ptr bool, db DB) error { - return callHookSlice(slice, ptr, db, callAfterQueryHook) -} - -//------------------------------------------------------------------------------ - -type beforeSelectQueryHook interface { - BeforeSelectQuery(db DB, q *Query) (*Query, error) -} - -var beforeSelectQueryHookType = reflect.TypeOf((*beforeSelectQueryHook)(nil)).Elem() - -func callBeforeSelectQueryHook(v reflect.Value, db DB, q *Query) (*Query, error) { - return v.Interface().(beforeSelectQueryHook).BeforeSelectQuery(db, q) -} - 
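// A minimal usage sketch of these hooks, assuming go-pg v6 import paths;
// the Book model is hypothetical. A method with this exact signature
// satisfies the beforeInsertHook interface above and is invoked by
// callBeforeInsertHook before the INSERT is written:
//
//	package models
//
//	import (
//		"time"
//
//		"github.com/go-pg/pg/orm"
//	)
//
//	type Book struct {
//		ID        int
//		CreatedAt time.Time
//	}
//
//	func (b *Book) BeforeInsert(db orm.DB) error {
//		// Fill CreatedAt so the column is never inserted as a zero time.
//		if b.CreatedAt.IsZero() {
//			b.CreatedAt = time.Now()
//		}
//		return nil
//	}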
-//------------------------------------------------------------------------------ - -type afterSelectHook interface { - AfterSelect(db DB) error -} - -var afterSelectHookType = reflect.TypeOf((*afterSelectHook)(nil)).Elem() - -func callAfterSelectHook(v reflect.Value, db DB) error { - return v.Interface().(afterSelectHook).AfterSelect(db) -} - -func callAfterSelectHookSlice(slice reflect.Value, ptr bool, db DB) error { - return callHookSlice(slice, ptr, db, callAfterSelectHook) -} - -//------------------------------------------------------------------------------ - -type beforeInsertHook interface { - BeforeInsert(db DB) error -} - -var beforeInsertHookType = reflect.TypeOf((*beforeInsertHook)(nil)).Elem() - -func callBeforeInsertHook(v reflect.Value, db DB) error { - return v.Interface().(beforeInsertHook).BeforeInsert(db) -} - -func callBeforeInsertHookSlice(slice reflect.Value, ptr bool, db DB) error { - return callHookSlice(slice, ptr, db, callBeforeInsertHook) -} - -//------------------------------------------------------------------------------ - -type afterInsertHook interface { - AfterInsert(db DB) error -} - -var afterInsertHookType = reflect.TypeOf((*afterInsertHook)(nil)).Elem() - -func callAfterInsertHook(v reflect.Value, db DB) error { - return v.Interface().(afterInsertHook).AfterInsert(db) -} - -func callAfterInsertHookSlice(slice reflect.Value, ptr bool, db DB) error { - return callHookSlice(slice, ptr, db, callAfterInsertHook) -} - -//------------------------------------------------------------------------------ - -type beforeUpdateHook interface { - BeforeUpdate(db DB) error -} - -var beforeUpdateHookType = reflect.TypeOf((*beforeUpdateHook)(nil)).Elem() - -func callBeforeUpdateHook(v reflect.Value, db DB) error { - return v.Interface().(beforeUpdateHook).BeforeUpdate(db) -} - -func callBeforeUpdateHookSlice(slice reflect.Value, ptr bool, db DB) error { - return callHookSlice(slice, ptr, db, callBeforeUpdateHook) -} - -//------------------------------------------------------------------------------ - -type afterUpdateHook interface { - AfterUpdate(db DB) error -} - -var afterUpdateHookType = reflect.TypeOf((*afterUpdateHook)(nil)).Elem() - -func callAfterUpdateHook(v reflect.Value, db DB) error { - return v.Interface().(afterUpdateHook).AfterUpdate(db) -} - -func callAfterUpdateHookSlice(slice reflect.Value, ptr bool, db DB) error { - return callHookSlice(slice, ptr, db, callAfterUpdateHook) -} - -//------------------------------------------------------------------------------ - -type beforeDeleteHook interface { - BeforeDelete(db DB) error -} - -var beforeDeleteHookType = reflect.TypeOf((*beforeDeleteHook)(nil)).Elem() - -func callBeforeDeleteHook(v reflect.Value, db DB) error { - return v.Interface().(beforeDeleteHook).BeforeDelete(db) -} - -func callBeforeDeleteHookSlice(slice reflect.Value, ptr bool, db DB) error { - return callHookSlice(slice, ptr, db, callBeforeDeleteHook) -} - -//------------------------------------------------------------------------------ - -type afterDeleteHook interface { - AfterDelete(db DB) error -} - -var afterDeleteHookType = reflect.TypeOf((*afterDeleteHook)(nil)).Elem() - -func callAfterDeleteHook(v reflect.Value, db DB) error { - return v.Interface().(afterDeleteHook).AfterDelete(db) -} - -func callAfterDeleteHookSlice(slice reflect.Value, ptr bool, db DB) error { - return callHookSlice(slice, ptr, db, callAfterDeleteHook) -} diff --git a/vendor/github.com/go-pg/pg/orm/inflection.go b/vendor/github.com/go-pg/pg/orm/inflection.go deleted 
file mode 100644 index 9ff8bd6..0000000 --- a/vendor/github.com/go-pg/pg/orm/inflection.go +++ /dev/null @@ -1,17 +0,0 @@ -package orm - -import ( - "github.com/jinzhu/inflection" -) - -var tableNameInflector func(string) string - -func init() { - SetTableNameInflector(inflection.Plural) -} - -// SetTableNameInflector overrides the default func that pluralizes -// model name to get table name, e.g. my_article becomes my_articles. -func SetTableNameInflector(fn func(string) string) { - tableNameInflector = fn -} diff --git a/vendor/github.com/go-pg/pg/orm/insert.go b/vendor/github.com/go-pg/pg/orm/insert.go deleted file mode 100644 index 559072f..0000000 --- a/vendor/github.com/go-pg/pg/orm/insert.go +++ /dev/null @@ -1,157 +0,0 @@ -package orm - -import ( - "errors" - "fmt" - "reflect" -) - -func Insert(db DB, model ...interface{}) error { - _, err := NewQuery(db, model...).Insert() - return err -} - -type insertQuery struct { - q *Query - returningFields []*Field -} - -var _ QueryAppender = (*insertQuery)(nil) - -func (q *insertQuery) Copy() QueryAppender { - return &insertQuery{ - q: q.q.Copy(), - } -} - -func (q *insertQuery) Query() *Query { - return q.q -} - -func (q *insertQuery) AppendQuery(b []byte) ([]byte, error) { - if q.q.stickyErr != nil { - return nil, q.q.stickyErr - } - if q.q.model == nil { - return nil, errors.New("pg: Model(nil)") - } - - table := q.q.model.Table() - value := q.q.model.Value() - var err error - - if len(q.q.with) > 0 { - b, err = q.q.appendWith(b) - if err != nil { - return nil, err - } - } - - b = append(b, "INSERT INTO "...) - if q.q.onConflict != nil { - b = q.q.appendFirstTableWithAlias(b) - } else { - b = q.q.appendFirstTable(b) - } - - if q.q.hasMultiTables() { - if q.q.columns != nil { - b = append(b, " ("...) - b = q.q.appendColumns(b) - b = append(b, ")"...) - } - b = append(b, " SELECT * FROM "...) - b = q.q.appendOtherTables(b) - } else { - fields, err := q.q.getFields() - if err != nil { - return nil, err - } - - if len(fields) == 0 { - fields = table.Fields - } - - b = append(b, " ("...) - b = appendColumns(b, "", fields) - b = append(b, ") VALUES ("...) - if value.Kind() == reflect.Struct { - b = q.appendValues(b, fields, value) - } else { - if value.Len() == 0 { - err = fmt.Errorf("pg: can't bulk-insert empty slice %s", value.Type()) - return nil, err - } - - for i := 0; i < value.Len(); i++ { - el := indirect(value.Index(i)) - b = q.appendValues(b, fields, el) - if i != value.Len()-1 { - b = append(b, "), ("...) - } - } - } - b = append(b, ")"...) - } - - if q.q.onConflict != nil { - b = append(b, " ON CONFLICT "...) - b = q.q.onConflict.AppendFormat(b, q.q) - - if q.q.onConflictDoUpdate() { - if len(q.q.set) > 0 { - b = q.q.appendSet(b) - } - - if len(q.q.updWhere) > 0 { - b = append(b, " WHERE "...) - b = q.q.appendUpdWhere(b) - } - } - } - - if len(q.q.returning) > 0 { - b = q.q.appendReturning(b) - } else if len(q.returningFields) > 0 { - b = appendReturningFields(b, q.returningFields) - } - - return b, nil -} - -func (q *insertQuery) appendValues(b []byte, fields []*Field, v reflect.Value) []byte { - for i, f := range fields { - if i > 0 { - b = append(b, ", "...) - } - - app, ok := q.q.modelValues[f.SQLName] - if ok { - b = app.AppendFormat(b, q.q) - continue - } - - if (f.Default != "" || f.OmitZero()) && f.IsZero(v) { - b = append(b, "DEFAULT"...) 
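// Zero-valued fields with a SQL default (or fields that omit zero values)
// are written as DEFAULT; queueing the field below lets a trailing
// RETURNING clause scan the server-generated value back into the struct.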
- q.addReturningField(f)
- } else {
- b = f.AppendValue(b, v, 1)
- }
- }
- return b
-}
-
-func (ins *insertQuery) addReturningField(field *Field) {
- for _, f := range ins.returningFields {
- if f == field {
- return
- }
- }
- ins.returningFields = append(ins.returningFields, field)
-}
-
-func appendReturningFields(b []byte, fields []*Field) []byte {
- b = append(b, " RETURNING "...)
- b = appendColumns(b, "", fields)
- return b
-}
diff --git a/vendor/github.com/go-pg/pg/orm/join.go b/vendor/github.com/go-pg/pg/orm/join.go
deleted file mode 100644
index f7c3a99..0000000
--- a/vendor/github.com/go-pg/pg/orm/join.go
+++ /dev/null
@@ -1,345 +0,0 @@
-package orm
-
-import (
- "reflect"
-
- "github.com/go-pg/pg/internal"
- "github.com/go-pg/pg/types"
-)
-
-type join struct {
- Parent *join
- BaseModel tableModel
- JoinModel tableModel
- Rel *Relation
-
- ApplyQuery func(*Query) (*Query, error)
- Columns []string
- on []*condAppender
-}
-
-func (j *join) AppendOn(app *condAppender) {
- j.on = append(j.on, app)
-}
-
-func (j *join) Select(q *Query) error {
- switch j.Rel.Type {
- case HasManyRelation:
- return j.selectMany(q)
- case Many2ManyRelation:
- return j.selectM2M(q)
- }
- panic("not reached")
-}
-
-func (j *join) selectMany(q *Query) error {
- q, err := j.manyQuery(q)
- if err != nil {
- return err
- }
- if q == nil {
- return nil
- }
- return q.Select()
-}
-
-func (j *join) manyQuery(q *Query) (*Query, error) {
- manyModel := newManyModel(j)
- if manyModel == nil {
- return nil, nil
- }
-
- q = q.Model(manyModel)
- if j.ApplyQuery != nil {
- var err error
- q, err = j.ApplyQuery(q)
- if err != nil {
- return nil, err
- }
- }
-
- if len(q.columns) == 0 {
- q.columns = append(q.columns, hasManyColumnsAppender{j})
- }
-
- baseTable := j.BaseModel.Table()
- var where []byte
- if len(j.Rel.FKs) > 1 {
- where = append(where, '(')
- }
- where = appendColumns(where, j.JoinModel.Table().Alias, j.Rel.FKs)
- if len(j.Rel.FKs) > 1 {
- where = append(where, ')')
- }
- where = append(where, " IN ("...)
- where = appendChildValues(
- where, j.JoinModel.Root(), j.JoinModel.ParentIndex(), j.Rel.FKValues)
- where = append(where, ")"...)
- q = q.Where(internal.BytesToString(where))
-
- if j.Rel.Polymorphic != nil {
- q = q.Where(`? IN (?, ?)`,
- j.Rel.Polymorphic.Column,
- baseTable.ModelName, baseTable.TypeName)
- }
-
- return q, nil
-}
-
-func (j *join) selectM2M(q *Query) error {
- q, err := j.m2mQuery(q)
- if err != nil {
- return err
- }
- if q == nil {
- return nil
- }
- return q.Select()
-}
-
-func (j *join) m2mQuery(q *Query) (*Query, error) {
- m2mModel := newM2MModel(j)
- if m2mModel == nil {
- return nil, nil
- }
-
- q = q.Model(m2mModel)
- if j.ApplyQuery != nil {
- var err error
- q, err = j.ApplyQuery(q)
- if err != nil {
- return nil, err
- }
- }
-
- if len(q.columns) == 0 {
- q.columns = append(q.columns, hasManyColumnsAppender{j})
- }
-
- index := j.JoinModel.ParentIndex()
- baseTable := j.BaseModel.Table()
- var join []byte
- join = append(join, "JOIN "...)
- join = q.FormatQuery(join, string(j.Rel.M2MTableName))
- join = append(join, " AS "...)
- join = append(join, j.Rel.M2MTableAlias...)
- join = append(join, " ON ("...)
- for i, col := range j.Rel.BaseFKs {
- if i > 0 {
- join = append(join, ", "...)
- }
- join = append(join, j.Rel.M2MTableAlias...)
- join = append(join, '.')
- join = types.AppendField(join, col, 1)
- }
- join = append(join, ") IN ("...)
- join = appendChildValues(join, j.BaseModel.Root(), index, baseTable.PKs)
- join = append(join, ")"...)
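// With a single-column primary key and hypothetical names, the join built
// above renders roughly as:
//
//	JOIN book_authors AS book_authors ON (book_authors."book_id") IN (1, 2)
//
// i.e. the m2m table is restricted to the primary-key values collected from
// the base models that are already loaded.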
- q = q.Join(internal.BytesToString(join)) - - joinTable := j.JoinModel.Table() - for i, col := range j.Rel.JoinFKs { - if i >= len(joinTable.PKs) { - break - } - pk := joinTable.PKs[i] - q = q.Where("?.? = ?.?", - joinTable.Alias, pk.Column, - j.Rel.M2MTableAlias, types.F(col)) - } - - return q, nil -} - -func (j *join) hasParent() bool { - if j.Parent != nil { - switch j.Parent.Rel.Type { - case HasOneRelation, BelongsToRelation: - return true - } - } - return false -} - -func (j *join) appendAlias(b []byte) []byte { - b = append(b, '"') - b = appendAlias(b, j, true) - b = append(b, '"') - return b -} - -func (j *join) appendAliasColumn(b []byte, column string) []byte { - b = append(b, '"') - b = appendAlias(b, j, true) - b = append(b, "__"...) - b = types.AppendField(b, column, 2) - b = append(b, '"') - return b -} - -func (j *join) appendBaseAlias(b []byte) []byte { - if j.hasParent() { - b = append(b, '"') - b = appendAlias(b, j.Parent, true) - b = append(b, '"') - return b - } - return append(b, j.BaseModel.Table().Alias...) -} - -func appendAlias(b []byte, j *join, topLevel bool) []byte { - if j.hasParent() { - b = appendAlias(b, j.Parent, topLevel) - topLevel = false - } - if !topLevel { - b = append(b, "__"...) - } - b = append(b, j.Rel.Field.SQLName...) - return b -} - -func (j *join) appendHasOneColumns(b []byte) []byte { - if j.Columns == nil { - for i, f := range j.JoinModel.Table().Fields { - if i > 0 { - b = append(b, ", "...) - } - b = j.appendAlias(b) - b = append(b, '.') - b = append(b, f.Column...) - b = append(b, " AS "...) - b = j.appendAliasColumn(b, f.SQLName) - } - return b - } - - for i, column := range j.Columns { - if i > 0 { - b = append(b, ", "...) - } - b = j.appendAlias(b) - b = append(b, '.') - b = types.AppendField(b, column, 1) - b = append(b, " AS "...) - b = j.appendAliasColumn(b, column) - } - - return b -} - -func (j *join) appendHasOneJoin(q *Query, b []byte) []byte { - b = append(b, "LEFT JOIN "...) - b = q.FormatQuery(b, string(j.JoinModel.Table().NameForSelects)) - b = append(b, " AS "...) - b = j.appendAlias(b) - - b = append(b, " ON "...) - - if len(j.Rel.FKs) > 1 { - b = append(b, '(') - } - if j.Rel.Type == HasOneRelation { - joinTable := j.Rel.JoinTable - for i, fk := range j.Rel.FKs { - if i > 0 { - b = append(b, " AND "...) - } - b = j.appendAlias(b) - b = append(b, '.') - b = append(b, joinTable.PKs[i].Column...) - b = append(b, " = "...) - b = j.appendBaseAlias(b) - b = append(b, '.') - b = append(b, fk.Column...) - } - } else { - baseTable := j.BaseModel.Table() - for i, fk := range j.Rel.FKs { - if i > 0 { - b = append(b, " AND "...) - } - b = j.appendAlias(b) - b = append(b, '.') - b = append(b, fk.Column...) - b = append(b, " = "...) - b = j.appendBaseAlias(b) - b = append(b, '.') - b = append(b, baseTable.PKs[i].Column...) - } - } - if len(j.Rel.FKs) > 1 { - b = append(b, ')') - } - - for _, on := range j.on { - b = on.AppendSep(b) - b = on.AppendFormat(b, q) - } - - if q.softDelete() { - b = append(b, " AND "...) - b = j.appendBaseAlias(b) - b = q.appendSoftDelete(b) - } - - return b -} - -type hasManyColumnsAppender struct { - *join -} - -func (q hasManyColumnsAppender) AppendFormat(b []byte, f QueryFormatter) []byte { - if q.Rel.M2MTableAlias != "" { - b = append(b, q.Rel.M2MTableAlias...) - b = append(b, ".*, "...) - } - - joinTable := q.JoinModel.Table() - - if q.Columns != nil { - for i, column := range q.Columns { - if i > 0 { - b = append(b, ", "...) - } - b = append(b, joinTable.Alias...) 
- b = append(b, '.') - b = types.AppendField(b, column, 1) - } - return b - } - - return appendColumns(b, joinTable.Alias, joinTable.Fields) -} - -func appendChildValues(b []byte, v reflect.Value, index []int, fields []*Field) []byte { - seen := make(map[string]struct{}) - walk(v, index, func(v reflect.Value) { - start := len(b) - - if len(fields) > 1 { - b = append(b, '(') - } - for i, f := range fields { - if i > 0 { - b = append(b, ", "...) - } - b = f.AppendValue(b, v, 1) - } - if len(fields) > 1 { - b = append(b, ')') - } - b = append(b, ", "...) - - if _, ok := seen[string(b[start:])]; ok { - b = b[:start] - } else { - seen[string(b[start:])] = struct{}{} - } - }) - if len(seen) > 0 { - b = b[:len(b)-2] // trim ", " - } - return b -} diff --git a/vendor/github.com/go-pg/pg/orm/model.go b/vendor/github.com/go-pg/pg/orm/model.go deleted file mode 100644 index 1ecb3c9..0000000 --- a/vendor/github.com/go-pg/pg/orm/model.go +++ /dev/null @@ -1,110 +0,0 @@ -package orm - -import ( - "database/sql" - "errors" - "fmt" - "reflect" - - "github.com/go-pg/pg/types" -) - -type useQueryOne interface { - useQueryOne() bool -} - -type HooklessModel interface { - // Init is responsible to initialize/reset model state. - // It is called only once no matter how many rows - // were returned by database. - Init() error - - // NewModel returns ColumnScanner that is used to scan columns - // from the current row. It is called once for every row. - NewModel() ColumnScanner - - // AddModel adds ColumnScanner created by NewModel to the Collection. - AddModel(ColumnScanner) error - - ColumnScanner -} - -type Model interface { - HooklessModel - - AfterQuery(DB) error - - BeforeSelectQuery(DB, *Query) (*Query, error) - AfterSelect(DB) error - - BeforeInsert(DB) error - AfterInsert(DB) error - - BeforeUpdate(DB) error - AfterUpdate(DB) error - - BeforeDelete(DB) error - AfterDelete(DB) error -} - -func NewModel(values ...interface{}) (Model, error) { - if len(values) > 1 { - return Scan(values...), nil - } - - v0 := values[0] - switch v0 := v0.(type) { - case Model: - return v0, nil - case HooklessModel: - return newModelWithHookStubs(v0), nil - case sql.Scanner: - return Scan(v0), nil - } - - v := reflect.ValueOf(v0) - if !v.IsValid() { - return nil, errors.New("pg: Model(nil)") - } - if v.Kind() != reflect.Ptr { - return nil, fmt.Errorf("pg: Model(non-pointer %T)", v0) - } - v = v.Elem() - - switch v.Kind() { - case reflect.Struct: - return newStructTableModelValue(v), nil - case reflect.Slice: - typ := v.Type() - structType := indirectType(typ.Elem()) - if structType.Kind() == reflect.Struct && structType != timeType { - m := sliceTableModel{ - structTableModel: structTableModel{ - table: GetTable(structType), - root: v, - }, - slice: v, - } - m.init(typ) - return &m, nil - } else { - return &sliceModel{ - slice: v, - scan: types.Scanner(structType), - }, nil - } - } - - return Scan(v0), nil -} - -type modelWithHookStubs struct { - hookStubs - HooklessModel -} - -func newModelWithHookStubs(m HooklessModel) Model { - return modelWithHookStubs{ - HooklessModel: m, - } -} diff --git a/vendor/github.com/go-pg/pg/orm/model_discard.go b/vendor/github.com/go-pg/pg/orm/model_discard.go deleted file mode 100644 index 3b7b438..0000000 --- a/vendor/github.com/go-pg/pg/orm/model_discard.go +++ /dev/null @@ -1,23 +0,0 @@ -package orm - -type Discard struct { - hookStubs -} - -var _ Model = (*Discard)(nil) - -func (Discard) Init() error { - return nil -} - -func (m Discard) NewModel() ColumnScanner { - return m -} - -func 
(m Discard) AddModel(ColumnScanner) error { - return nil -} - -func (m Discard) ScanColumn(colIdx int, colName string, b []byte) error { - return nil -} diff --git a/vendor/github.com/go-pg/pg/orm/model_func.go b/vendor/github.com/go-pg/pg/orm/model_func.go deleted file mode 100644 index 64b300b..0000000 --- a/vendor/github.com/go-pg/pg/orm/model_func.go +++ /dev/null @@ -1,89 +0,0 @@ -package orm - -import ( - "fmt" - "reflect" -) - -var errorType = reflect.TypeOf((*error)(nil)).Elem() - -type funcModel struct { - Model - fnv reflect.Value - fnIn []reflect.Value -} - -var _ Model = (*funcModel)(nil) - -func newFuncModel(fn interface{}) *funcModel { - m := &funcModel{ - fnv: reflect.ValueOf(fn), - } - - fnt := m.fnv.Type() - if fnt.Kind() != reflect.Func { - panic(fmt.Errorf("ForEach expects a %s, got a %s", - reflect.Func, fnt.Kind())) - } - - if fnt.NumIn() < 1 { - panic(fmt.Errorf("ForEach expects at least 1 arg, got %d", fnt.NumIn())) - } - - if fnt.NumOut() != 1 { - panic(fmt.Errorf("ForEach must return 1 value, got %d", fnt.NumOut())) - } - if fnt.Out(0) != errorType { - panic(fmt.Errorf("ForEach must return an error, got %T", fnt.Out(0))) - } - - if fnt.NumIn() > 1 { - initFuncModelScan(m, fnt) - return m - } - - t0 := fnt.In(0) - var v0 reflect.Value - if t0.Kind() == reflect.Ptr { - t0 = t0.Elem() - v0 = reflect.New(t0) - } else { - v0 = reflect.New(t0).Elem() - } - - m.fnIn = []reflect.Value{v0} - - model, ok := v0.Interface().(Model) - if ok { - m.Model = model - return m - } - - if v0.Kind() == reflect.Ptr { - v0 = v0.Elem() - } - if v0.Kind() != reflect.Struct { - panic(fmt.Errorf("ForEach accepts a %s, got %s", - reflect.Struct, v0.Kind())) - } - m.Model = newStructTableModelValue(v0) - - return m -} - -func initFuncModelScan(m *funcModel, fnt reflect.Type) { - m.fnIn = make([]reflect.Value, fnt.NumIn()) - for i := 0; i < fnt.NumIn(); i++ { - m.fnIn[i] = reflect.New(fnt.In(i)).Elem() - } - m.Model = scanReflectValues(m.fnIn) -} - -func (m *funcModel) AddModel(_ ColumnScanner) error { - out := m.fnv.Call(m.fnIn) - errv := out[0] - if !errv.IsNil() { - return errv.Interface().(error) - } - return nil -} diff --git a/vendor/github.com/go-pg/pg/orm/model_scan.go b/vendor/github.com/go-pg/pg/orm/model_scan.go deleted file mode 100644 index 6874b73..0000000 --- a/vendor/github.com/go-pg/pg/orm/model_scan.go +++ /dev/null @@ -1,68 +0,0 @@ -package orm - -import ( - "fmt" - "reflect" - - "github.com/go-pg/pg/types" -) - -type scanValuesModel struct { - Discard - values []interface{} -} - -var _ Model = scanValuesModel{} - -func Scan(values ...interface{}) scanValuesModel { - return scanValuesModel{ - values: values, - } -} - -func (scanValuesModel) useQueryOne() bool { - return true -} - -func (m scanValuesModel) NewModel() ColumnScanner { - return m -} - -func (m scanValuesModel) ScanColumn(colIdx int, colName string, b []byte) error { - if colIdx >= len(m.values) { - return fmt.Errorf("pg: no Scan var for column index=%d name=%q", - colIdx, colName) - } - return types.Scan(m.values[colIdx], b) -} - -//------------------------------------------------------------------------------ - -type scanReflectValuesModel struct { - Discard - values []reflect.Value -} - -var _ Model = scanReflectValuesModel{} - -func scanReflectValues(values []reflect.Value) scanReflectValuesModel { - return scanReflectValuesModel{ - values: values, - } -} - -func (scanReflectValuesModel) useQueryOne() bool { - return true -} - -func (m scanReflectValuesModel) NewModel() ColumnScanner { - return m -} - -func (m 
scanReflectValuesModel) ScanColumn(colIdx int, colName string, b []byte) error { - if colIdx >= len(m.values) { - return fmt.Errorf("pg: no Scan var for column index=%d name=%q", - colIdx, colName) - } - return types.ScanValue(m.values[colIdx], b) -} diff --git a/vendor/github.com/go-pg/pg/orm/model_slice.go b/vendor/github.com/go-pg/pg/orm/model_slice.go deleted file mode 100644 index 3e79be2..0000000 --- a/vendor/github.com/go-pg/pg/orm/model_slice.go +++ /dev/null @@ -1,35 +0,0 @@ -package orm - -import ( - "reflect" - - "github.com/go-pg/pg/internal" -) - -type sliceModel struct { - Discard - slice reflect.Value - nextElem func() reflect.Value - scan func(reflect.Value, []byte) error -} - -var _ Model = (*sliceModel)(nil) - -func (m *sliceModel) Init() error { - if m.slice.IsValid() && m.slice.Len() > 0 { - m.slice.Set(m.slice.Slice(0, 0)) - } - return nil -} - -func (m *sliceModel) NewModel() ColumnScanner { - return m -} - -func (m *sliceModel) ScanColumn(colIdx int, _ string, b []byte) error { - if m.nextElem == nil { - m.nextElem = internal.MakeSliceNextElemFunc(m.slice) - } - v := m.nextElem() - return m.scan(v, b) -} diff --git a/vendor/github.com/go-pg/pg/orm/model_table.go b/vendor/github.com/go-pg/pg/orm/model_table.go deleted file mode 100644 index f67b9fb..0000000 --- a/vendor/github.com/go-pg/pg/orm/model_table.go +++ /dev/null @@ -1,109 +0,0 @@ -package orm - -import ( - "errors" - "fmt" - "reflect" -) - -type tableModel interface { - Model - - Table() *Table - Relation() *Relation - AppendParam([]byte, QueryFormatter, string) ([]byte, bool) - - Join(string, func(*Query) (*Query, error)) *join - GetJoin(string) *join - GetJoins() []join - AddJoin(join) *join - - Root() reflect.Value - Index() []int - ParentIndex() []int - Mount(reflect.Value) - Kind() reflect.Kind - Value() reflect.Value - - setDeletedAt() - scanColumn(int, string, []byte) (bool, error) -} - -func newTableModel(value interface{}) (tableModel, error) { - if value, ok := value.(tableModel); ok { - return value, nil - } - - v := reflect.ValueOf(value) - if !v.IsValid() { - return nil, errors.New("pg: Model(nil)") - } - if v.Kind() != reflect.Ptr { - return nil, fmt.Errorf("pg: Model(non-pointer %T)", value) - } - - if v.IsNil() { - typ := v.Type().Elem() - if typ.Kind() == reflect.Struct { - return newStructTableModelType(typ), nil - } - return nil, errors.New("pg: Model(nil)") - } - - return newTableModelValue(v.Elem()) -} - -func newTableModelValue(v reflect.Value) (tableModel, error) { - switch v.Kind() { - case reflect.Struct: - return newStructTableModelValue(v), nil - case reflect.Slice: - structType := sliceElemType(v) - if structType.Kind() == reflect.Struct { - m := sliceTableModel{ - structTableModel: structTableModel{ - table: GetTable(structType), - root: v, - }, - slice: v, - } - m.init(v.Type()) - return &m, nil - } - } - - return nil, fmt.Errorf("pg: Model(unsupported %s)", v.Type()) -} - -func newTableModelIndex(root reflect.Value, index []int, rel *Relation) (tableModel, error) { - typ := typeByIndex(root.Type(), index) - - if typ.Kind() == reflect.Struct { - return &structTableModel{ - table: GetTable(typ), - rel: rel, - - root: root, - index: index, - }, nil - } - - if typ.Kind() == reflect.Slice { - structType := indirectType(typ.Elem()) - if structType.Kind() == reflect.Struct { - m := sliceTableModel{ - structTableModel: structTableModel{ - table: GetTable(structType), - rel: rel, - - root: root, - index: index, - }, - } - m.init(typ) - return &m, nil - } - } - - return nil, 
fmt.Errorf("pg: NewModel(%s)", typ) -} diff --git a/vendor/github.com/go-pg/pg/orm/model_table_m2m.go b/vendor/github.com/go-pg/pg/orm/model_table_m2m.go deleted file mode 100644 index 98bb686..0000000 --- a/vendor/github.com/go-pg/pg/orm/model_table_m2m.go +++ /dev/null @@ -1,135 +0,0 @@ -package orm - -import ( - "fmt" - "reflect" -) - -type m2mModel struct { - *sliceTableModel - baseTable *Table - rel *Relation - - buf []byte - dstValues map[string][]reflect.Value - columns map[string]string -} - -var _ tableModel = (*m2mModel)(nil) - -func newM2MModel(j *join) *m2mModel { - baseTable := j.BaseModel.Table() - joinModel := j.JoinModel.(*sliceTableModel) - dstValues := dstValues(joinModel, baseTable.PKs) - if len(dstValues) == 0 { - return nil - } - m := &m2mModel{ - sliceTableModel: joinModel, - baseTable: baseTable, - rel: j.Rel, - - dstValues: dstValues, - columns: make(map[string]string), - } - if !m.sliceOfPtr { - m.strct = reflect.New(m.table.Type).Elem() - } - return m -} - -func (m *m2mModel) NewModel() ColumnScanner { - if m.sliceOfPtr { - m.strct = reflect.New(m.table.Type).Elem() - } else { - m.strct.Set(m.table.zeroStruct) - } - m.structInited = false - m.structTableModel.NewModel() - return m -} - -func (m *m2mModel) AddModel(model ColumnScanner) error { - m.buf = modelIdMap(m.buf[:0], m.columns, m.rel.BaseFKs) - dstValues, ok := m.dstValues[string(m.buf)] - if !ok { - return fmt.Errorf( - "pg: relation=%q has no base %s with id=%q (check join conditions)", - m.rel.Field.GoName, m.baseTable, m.buf) - } - - for _, v := range dstValues { - if m.sliceOfPtr { - v.Set(reflect.Append(v, m.strct.Addr())) - } else { - v.Set(reflect.Append(v, m.strct)) - } - } - - return nil -} - -func modelIdMap(b []byte, m map[string]string, columns []string) []byte { - for i, col := range columns { - if i > 0 { - b = append(b, ',') - } - b = append(b, m[col]...) 
- } - return b -} - -func (m *m2mModel) AfterQuery(db DB) error { - if !m.rel.JoinTable.HasFlag(AfterQueryHookFlag) { - return nil - } - - var retErr error - for _, slices := range m.dstValues { - for _, slice := range slices { - err := callAfterQueryHookSlice(slice, m.sliceOfPtr, db) - if err != nil && retErr == nil { - retErr = err - } - } - } - return retErr -} - -func (m *m2mModel) AfterSelect(db DB) error { - return nil -} - -func (m *m2mModel) BeforeInsert(db DB) error { - return nil -} - -func (m *m2mModel) AfterInsert(db DB) error { - return nil -} - -func (m *m2mModel) BeforeUpdate(db DB) error { - return nil -} - -func (m *m2mModel) AfterUpdate(db DB) error { - return nil -} - -func (m *m2mModel) BeforeDelete(db DB) error { - return nil -} - -func (m *m2mModel) AfterDelete(db DB) error { - return nil -} - -func (m *m2mModel) ScanColumn(colIdx int, colName string, b []byte) error { - ok, err := m.sliceTableModel.scanColumn(colIdx, colName, b) - if ok { - return err - } - - m.columns[colName] = string(b) - return nil -} diff --git a/vendor/github.com/go-pg/pg/orm/model_table_many.go b/vendor/github.com/go-pg/pg/orm/model_table_many.go deleted file mode 100644 index 41af7ee..0000000 --- a/vendor/github.com/go-pg/pg/orm/model_table_many.go +++ /dev/null @@ -1,113 +0,0 @@ -package orm - -import ( - "fmt" - "reflect" -) - -type manyModel struct { - *sliceTableModel - baseTable *Table - rel *Relation - - buf []byte - dstValues map[string][]reflect.Value -} - -var _ tableModel = (*manyModel)(nil) - -func newManyModel(j *join) *manyModel { - baseTable := j.BaseModel.Table() - joinModel := j.JoinModel.(*sliceTableModel) - dstValues := dstValues(joinModel, j.Rel.FKValues) - if len(dstValues) == 0 { - return nil - } - m := manyModel{ - sliceTableModel: joinModel, - baseTable: baseTable, - rel: j.Rel, - - dstValues: dstValues, - } - if !m.sliceOfPtr { - m.strct = reflect.New(m.table.Type).Elem() - } - return &m -} - -func (m *manyModel) NewModel() ColumnScanner { - if m.sliceOfPtr { - m.strct = reflect.New(m.table.Type).Elem() - } else { - m.strct.Set(m.table.zeroStruct) - } - m.structInited = false - m.structTableModel.NewModel() - return m -} - -func (m *manyModel) AddModel(model ColumnScanner) error { - m.buf = modelId(m.buf[:0], m.strct, m.rel.FKs) - dstValues, ok := m.dstValues[string(m.buf)] - if !ok { - return fmt.Errorf( - "pg: relation=%q has no base model=%q with id=%q (check join conditions)", - m.rel.Field.GoName, m.baseTable.TypeName, m.buf) - } - - for _, v := range dstValues { - if m.sliceOfPtr { - v.Set(reflect.Append(v, m.strct.Addr())) - } else { - v.Set(reflect.Append(v, m.strct)) - } - } - - return nil -} - -func (m *manyModel) AfterQuery(db DB) error { - if !m.rel.JoinTable.HasFlag(AfterQueryHookFlag) { - return nil - } - - var retErr error - for _, slices := range m.dstValues { - for _, slice := range slices { - err := callAfterQueryHookSlice(slice, m.sliceOfPtr, db) - if err != nil && retErr == nil { - retErr = err - } - } - } - return retErr -} - -func (m *manyModel) AfterSelect(db DB) error { - return nil -} - -func (m *manyModel) BeforeInsert(db DB) error { - return nil -} - -func (m *manyModel) AfterInsert(db DB) error { - return nil -} - -func (m *manyModel) BeforeUpdate(db DB) error { - return nil -} - -func (m *manyModel) AfterUpdate(db DB) error { - return nil -} - -func (m *manyModel) BeforeDelete(db DB) error { - return nil -} - -func (m *manyModel) AfterDelete(db DB) error { - return nil -} diff --git a/vendor/github.com/go-pg/pg/orm/model_table_slice.go 
b/vendor/github.com/go-pg/pg/orm/model_table_slice.go deleted file mode 100644 index 8b63ac6..0000000 --- a/vendor/github.com/go-pg/pg/orm/model_table_slice.go +++ /dev/null @@ -1,152 +0,0 @@ -package orm - -import ( - "reflect" - "time" -) - -type sliceTableModel struct { - structTableModel - - slice reflect.Value - sliceOfPtr bool -} - -var _ tableModel = (*sliceTableModel)(nil) - -func (m *sliceTableModel) init(sliceType reflect.Type) { - switch sliceType.Elem().Kind() { - case reflect.Ptr, reflect.Interface: - m.sliceOfPtr = true - } -} - -func (sliceTableModel) useQueryOne() {} - -func (m *sliceTableModel) AppendParam(b []byte, f QueryFormatter, name string) ([]byte, bool) { - if field, ok := m.table.FieldsMap[name]; ok { - b = append(b, "_data."...) - b = append(b, field.Column...) - return b, true - } - - return m.structTableModel.AppendParam(b, f, name) -} - -func (m *sliceTableModel) Join(name string, apply func(*Query) (*Query, error)) *join { - return m.join(m.Value(), name, apply) -} - -func (m *sliceTableModel) Bind(bind reflect.Value) { - m.slice = bind.Field(m.index[len(m.index)-1]) -} - -func (m *sliceTableModel) Kind() reflect.Kind { - return reflect.Slice -} - -func (m *sliceTableModel) Value() reflect.Value { - return m.slice -} - -func (m *sliceTableModel) Init() error { - if m.slice.IsValid() && m.slice.Len() > 0 { - m.slice.Set(m.slice.Slice(0, 0)) - } - return nil -} - -func (m *sliceTableModel) NewModel() ColumnScanner { - m.strct = m.nextElem() - m.structInited = false - return m -} - -func (m *sliceTableModel) AfterQuery(db DB) error { - if !m.table.HasFlag(AfterQueryHookFlag) { - return nil - } - return callAfterQueryHookSlice(m.slice, m.sliceOfPtr, db) -} - -func (m *sliceTableModel) AfterSelect(db DB) error { - if !m.table.HasFlag(AfterSelectHookFlag) { - return nil - } - return callAfterSelectHookSlice(m.slice, m.sliceOfPtr, db) -} - -func (m *sliceTableModel) BeforeInsert(db DB) error { - if !m.table.HasFlag(BeforeInsertHookFlag) { - return nil - } - return callBeforeInsertHookSlice(m.slice, m.sliceOfPtr, db) -} - -func (m *sliceTableModel) AfterInsert(db DB) error { - if !m.table.HasFlag(AfterInsertHookFlag) { - return nil - } - return callAfterInsertHookSlice(m.slice, m.sliceOfPtr, db) -} - -func (m *sliceTableModel) BeforeUpdate(db DB) error { - if !m.table.HasFlag(BeforeUpdateHookFlag) { - return nil - } - return callBeforeUpdateHookSlice(m.slice, m.sliceOfPtr, db) -} - -func (m *sliceTableModel) AfterUpdate(db DB) error { - if !m.table.HasFlag(AfterUpdateHookFlag) { - return nil - } - return callAfterUpdateHookSlice(m.slice, m.sliceOfPtr, db) -} - -func (m *sliceTableModel) BeforeDelete(db DB) error { - if !m.table.HasFlag(BeforeDeleteHookFlag) { - return nil - } - return callBeforeDeleteHookSlice(m.slice, m.sliceOfPtr, db) -} - -func (m *sliceTableModel) AfterDelete(db DB) error { - if !m.table.HasFlag(AfterDeleteHookFlag) { - return nil - } - return callAfterDeleteHookSlice(m.slice, m.sliceOfPtr, db) -} - -func (m *sliceTableModel) nextElem() reflect.Value { - if m.slice.Len() < m.slice.Cap() { - m.slice.Set(m.slice.Slice(0, m.slice.Len()+1)) - elem := m.slice.Index(m.slice.Len() - 1) - if m.sliceOfPtr { - if elem.IsNil() { - elem.Set(reflect.New(elem.Type().Elem())) - } - return elem.Elem() - } - return elem - } - - if m.sliceOfPtr { - elem := reflect.New(m.table.Type) - m.slice.Set(reflect.Append(m.slice, elem)) - return elem.Elem() - } - - m.slice.Set(reflect.Append(m.slice, m.table.zeroStruct)) - return m.slice.Index(m.slice.Len() - 1) -} - -func 
(m *sliceTableModel) setDeletedAt() { - field := m.table.FieldsMap["deleted_at"] - now := time.Now() - for i := 0; i < m.slice.Len(); i++ { - strct := indirect(m.slice.Index(i)) - value := field.Value(strct) - value.Set(reflect.ValueOf(now)) - } -} diff --git a/vendor/github.com/go-pg/pg/orm/model_table_struct.go b/vendor/github.com/go-pg/pg/orm/model_table_struct.go deleted file mode 100644 index 44e77eb..0000000 --- a/vendor/github.com/go-pg/pg/orm/model_table_struct.go +++ /dev/null @@ -1,363 +0,0 @@ -package orm - -import ( - "errors" - "fmt" - "reflect" - "strings" - "time" -) - -type structTableModel struct { - table *Table - rel *Relation - joins []join - - root reflect.Value - index []int - - strct reflect.Value - structInited bool - structInitErr error -} - -var _ tableModel = (*structTableModel)(nil) - -func newStructTableModelValue(v reflect.Value) *structTableModel { - return &structTableModel{ - table: GetTable(v.Type()), - root: v, - strct: v, - } -} - -func newStructTableModelType(typ reflect.Type) *structTableModel { - return &structTableModel{ - table: GetTable(typ), - } -} - -func (structTableModel) useQueryOne() bool { - return true -} - -func (m *structTableModel) Table() *Table { - return m.table -} - -func (m *structTableModel) Relation() *Relation { - return m.rel -} - -func (m *structTableModel) AppendParam(b []byte, f QueryFormatter, name string) ([]byte, bool) { - b, ok := m.table.AppendParam(b, m.strct, name) - if ok { - return b, true - } - - switch name { - case "TableName": - b = f.FormatQuery(b, string(m.table.Name)) - return b, true - case "TableAlias": - b = append(b, m.table.Alias...) - return b, true - case "TableColumns": - b = appendColumns(b, m.table.Alias, m.table.Fields) - return b, true - case "Columns": - b = appendColumns(b, "", m.table.Fields) - return b, true - } - - return b, false -} - -func (m *structTableModel) Root() reflect.Value { - return m.root -} - -func (m *structTableModel) Index() []int { - return m.index -} - -func (m *structTableModel) ParentIndex() []int { - return m.index[:len(m.index)-len(m.rel.Field.Index)] -} - -func (m *structTableModel) Kind() reflect.Kind { - return reflect.Struct -} - -func (m *structTableModel) Value() reflect.Value { - return m.strct -} - -func (m *structTableModel) Mount(host reflect.Value) { - m.strct = host.FieldByIndex(m.rel.Field.Index) - m.structInited = false -} - -func (m *structTableModel) initStruct() error { - if m.structInited { - return m.structInitErr - } - m.structInited = true - - switch m.strct.Kind() { - case reflect.Invalid: - m.structInitErr = errors.New("pg: Model(nil)") - return m.structInitErr - case reflect.Interface: - m.strct = m.strct.Elem() - } - - if m.strct.Kind() == reflect.Ptr { - if m.strct.IsNil() { - m.strct.Set(reflect.New(m.strct.Type().Elem())) - m.strct = m.strct.Elem() - } else { - m.strct = m.strct.Elem() - } - } - - m.mountJoins() - - return nil -} - -func (m *structTableModel) mountJoins() { - for i := range m.joins { - j := &m.joins[i] - switch j.Rel.Type { - case HasOneRelation, BelongsToRelation: - j.JoinModel.Mount(m.strct) - } - } -} - -func (structTableModel) Init() error { - return nil -} - -func (m *structTableModel) NewModel() ColumnScanner { - return m -} - -func (m *structTableModel) AddModel(_ ColumnScanner) error { - return nil -} - -func (m *structTableModel) AfterQuery(db DB) error { - if !m.table.HasFlag(AfterQueryHookFlag) { - return nil - } - return callAfterQueryHook(m.strct.Addr(), db) -} - -func (m *structTableModel) BeforeSelectQuery(db 
DB, q *Query) (*Query, error) { - if !m.table.HasFlag(BeforeSelectQueryHookFlag) { - return q, nil - } - return callBeforeSelectQueryHook(m.table.zeroStruct.Addr(), db, q) -} - -func (m *structTableModel) AfterSelect(db DB) error { - if !m.table.HasFlag(AfterSelectHookFlag) { - return nil - } - return callAfterSelectHook(m.strct.Addr(), db) -} - -func (m *structTableModel) BeforeInsert(db DB) error { - if !m.table.HasFlag(BeforeInsertHookFlag) { - return nil - } - return callBeforeInsertHook(m.strct.Addr(), db) -} - -func (m *structTableModel) AfterInsert(db DB) error { - if !m.table.HasFlag(AfterInsertHookFlag) { - return nil - } - return callAfterInsertHook(m.strct.Addr(), db) -} - -func (m *structTableModel) BeforeUpdate(db DB) error { - if !m.table.HasFlag(BeforeUpdateHookFlag) { - return nil - } - return callBeforeUpdateHook(m.strct.Addr(), db) -} - -func (m *structTableModel) AfterUpdate(db DB) error { - if !m.table.HasFlag(AfterUpdateHookFlag) { - return nil - } - return callAfterUpdateHook(m.strct.Addr(), db) -} - -func (m *structTableModel) BeforeDelete(db DB) error { - if !m.table.HasFlag(BeforeDeleteHookFlag) { - return nil - } - return callBeforeDeleteHook(m.strct.Addr(), db) -} - -func (m *structTableModel) AfterDelete(db DB) error { - if !m.table.HasFlag(AfterDeleteHookFlag) { - return nil - } - return callAfterDeleteHook(m.strct.Addr(), db) -} - -func (m *structTableModel) ScanColumn(colIdx int, colName string, b []byte) error { - ok, err := m.scanColumn(colIdx, colName, b) - if ok { - return err - } - if m.table.HasFlag(discardUnknownColumns) { - return nil - } - return fmt.Errorf("pg: can't find column=%s in %s (try discard_unknown_columns)", - colName, m.table) -} - -func (m *structTableModel) scanColumn( - colIdx int, colName string, b []byte, -) (bool, error) { - // Don't init nil struct when value is NULL. - if b == nil && - !m.structInited && - m.strct.Kind() == reflect.Ptr && - m.strct.IsNil() { - return true, nil - } - - err := m.initStruct() - if err != nil { - return true, err - } - - joinName, fieldName := splitColumn(colName) - if joinName != "" { - if join := m.GetJoin(joinName); join != nil { - return join.JoinModel.scanColumn(colIdx, fieldName, b) - } - if m.table.ModelName == joinName { - return m.scanColumn(colIdx, fieldName, b) - } - } - - field, ok := m.table.FieldsMap[colName] - if !ok { - return false, nil - } - - return true, field.ScanValue(m.strct, b) -} - -func (m *structTableModel) GetJoin(name string) *join { - for i := range m.joins { - j := &m.joins[i] - if j.Rel.Field.GoName == name || j.Rel.Field.SQLName == name { - return j - } - } - return nil -} - -func (m *structTableModel) GetJoins() []join { - return m.joins -} - -func (m *structTableModel) AddJoin(j join) *join { - m.joins = append(m.joins, j) - return &m.joins[len(m.joins)-1] -} - -func (m *structTableModel) Join(name string, apply func(*Query) (*Query, error)) *join { - return m.join(m.Value(), name, apply) -} - -func (m *structTableModel) join( - bind reflect.Value, name string, apply func(*Query) (*Query, error), -) *join { - path := strings.Split(name, ".") - index := make([]int, 0, len(path)) - - currJoin := join{ - BaseModel: m, - JoinModel: m, - } - var lastJoin *join - var hasColumnName bool - - for _, name := range path { - rel, ok := currJoin.JoinModel.Table().Relations[name] - if !ok { - hasColumnName = true - break - } - currJoin.Rel = rel - index = append(index, rel.Field.Index...) 
- - if j := currJoin.JoinModel.GetJoin(name); j != nil { - currJoin.BaseModel = j.BaseModel - currJoin.JoinModel = j.JoinModel - - lastJoin = j - } else { - model, err := newTableModelIndex(bind, index, rel) - if err != nil { - return nil - } - - currJoin.Parent = lastJoin - currJoin.BaseModel = currJoin.JoinModel - currJoin.JoinModel = model - - lastJoin = currJoin.BaseModel.AddJoin(currJoin) - } - } - - // No joins with such name. - if lastJoin == nil { - return nil - } - if apply != nil { - lastJoin.ApplyQuery = apply - } - - if hasColumnName { - column := path[len(path)-1] - if column == "_" { - if lastJoin.Columns == nil { - lastJoin.Columns = make([]string, 0) - } - } else { - lastJoin.Columns = append(lastJoin.Columns, column) - } - } - - return lastJoin -} - -func (m *structTableModel) setDeletedAt() { - field := m.table.FieldsMap["deleted_at"] - value := field.Value(m.strct) - if value.Kind() == reflect.Ptr { - now := time.Now() - value.Set(reflect.ValueOf(&now)) - } else { - value.Set(reflect.ValueOf(time.Now())) - } -} - -func splitColumn(s string) (string, string) { - ind := strings.Index(s, "__") - if ind == -1 { - return "", s - } - return s[:ind], s[ind+2:] -} diff --git a/vendor/github.com/go-pg/pg/orm/orm.go b/vendor/github.com/go-pg/pg/orm/orm.go deleted file mode 100644 index 02d2d3a..0000000 --- a/vendor/github.com/go-pg/pg/orm/orm.go +++ /dev/null @@ -1,46 +0,0 @@ -package orm - -import ( - "context" - "io" -) - -// ColumnScanner is used to scan column values. -type ColumnScanner interface { - // Scan assigns a column value from a row. - // - // An error should be returned if the value can not be stored - // without loss of information. - ScanColumn(colIdx int, colName string, b []byte) error -} - -type QueryAppender interface { - Copy() QueryAppender - Query() *Query - AppendQuery(dst []byte) ([]byte, error) -} - -type QueryFormatter interface { - FormatQuery(b []byte, query string, params ...interface{}) []byte -} - -// DB is a common interface for pg.DB and pg.Tx types. -type DB interface { - Model(model ...interface{}) *Query - Select(model interface{}) error - Insert(model ...interface{}) error - Update(model interface{}) error - Delete(model interface{}) error - ForceDelete(model interface{}) error - - Exec(query interface{}, params ...interface{}) (Result, error) - ExecOne(query interface{}, params ...interface{}) (Result, error) - Query(coll, query interface{}, params ...interface{}) (Result, error) - QueryOne(model, query interface{}, params ...interface{}) (Result, error) - - CopyFrom(r io.Reader, query interface{}, params ...interface{}) (Result, error) - CopyTo(w io.Writer, query interface{}, params ...interface{}) (Result, error) - - Context() context.Context - QueryFormatter -} diff --git a/vendor/github.com/go-pg/pg/orm/pager.go b/vendor/github.com/go-pg/pg/orm/pager.go deleted file mode 100644 index 10ec727..0000000 --- a/vendor/github.com/go-pg/pg/orm/pager.go +++ /dev/null @@ -1,111 +0,0 @@ -package orm - -import ( - "net/url" -) - -type Pager struct { - Limit int - Offset int - - // Default max limit is 1000. - MaxLimit int - // Default max offset is 1000000. 
- MaxOffset int - - stickyErr error -} - -func NewPager(values url.Values) *Pager { - p := new(Pager) - p.SetURLValues(values) - return p -} - -func (p *Pager) SetURLValues(urlValues url.Values) { - values := URLValues(urlValues) - - if values.Has("limit") { - limit, err := values.Int("limit") - if err != nil { - p.stickyErr = err - return - } - p.Limit = int(limit) - } - - if values.Has("page") { - page, err := values.Int("page") - if err != nil { - p.stickyErr = err - return - } - p.SetPage(int(page)) - } -} - -func (p *Pager) maxLimit() int { - if p.MaxLimit > 0 { - return p.MaxLimit - } - return 1000 -} - -func (p *Pager) maxOffset() int { - if p.MaxOffset > 0 { - return p.MaxOffset - } - return 1000000 -} - -func (p *Pager) GetLimit() int { - const defaultLimit = 100 - - if p.Limit < 0 { - return p.Limit - } - if p.Limit == 0 { - return defaultLimit - } - if p.Limit > p.maxLimit() { - return p.maxLimit() - } - return p.Limit -} - -func (p *Pager) GetOffset() int { - if p.Offset > p.maxOffset() { - return p.maxOffset() - } - return p.Offset -} - -func (p *Pager) SetPage(page int) { - if page < 1 { - page = 1 - } - p.Offset = (page - 1) * p.GetLimit() -} - -func (p *Pager) GetPage() int { - return (p.GetOffset() / p.GetLimit()) + 1 -} - -func (p *Pager) Paginate(q *Query) (*Query, error) { - if p == nil { - return q, nil - } - if p.stickyErr != nil { - return nil, p.stickyErr - } - - q = q.Limit(p.GetLimit()).Offset(p.GetOffset()) - return q, nil -} - -// Pagination is used with Query.Apply to set LIMIT and OFFSET from the URL values: -// - ?limit=10 - sets q.Limit(10), max limit is 1000. -// - ?page=5 - sets q.Offset((page - 1) * limit), max offset is 1000000. -func Pagination(values url.Values) func(*Query) (*Query, error) { - return NewPager(values).Paginate -} diff --git a/vendor/github.com/go-pg/pg/orm/query.go b/vendor/github.com/go-pg/pg/orm/query.go deleted file mode 100644 index b44bc7e..0000000 --- a/vendor/github.com/go-pg/pg/orm/query.go +++ /dev/null @@ -1,1302 +0,0 @@ -package orm - -import ( - "errors" - "fmt" - "io" - "reflect" - "strings" - "sync" - "time" - - "github.com/go-pg/pg/internal" - "github.com/go-pg/pg/types" -) - -type withQuery struct { - name string - query *Query -} - -type joinQuery struct { - join *queryParamsAppender - on []*condAppender -} - -func (q *joinQuery) AppendOn(app *condAppender) { - q.on = append(q.on, app) -} - -type Query struct { - db DB - stickyErr error - - model tableModel - ignoreModel bool - deleted bool - - with []withQuery - tables []FormatAppender - columns []FormatAppender - set []FormatAppender - modelValues map[string]*queryParamsAppender - where []sepFormatAppender - updWhere []sepFormatAppender - joins []*joinQuery - joinAppendOn func(app *condAppender) - group []FormatAppender - having []*queryParamsAppender - order []FormatAppender - onConflict *queryParamsAppender - returning []*queryParamsAppender - limit int - offset int - selFor *queryParamsAppender -} - -var _ queryAppender = (*Query)(nil) - -func NewQuery(db DB, model ...interface{}) *Query { - return (&Query{}).DB(db).Model(model...) -} - -// New returns a new zero Query bound to the current db and model. -func (q *Query) New() *Query { - return &Query{ - db: q.db, - model: q.model, - ignoreModel: true, - deleted: q.deleted, - } -} - -func (q *Query) AppendQuery(b []byte) ([]byte, error) { - return selectQuery{q: q}.AppendQuery(b) -} - -// Copy returns a copy of the Query.
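As a quick orientation to the query builder this diff removes from vendor/, here is a minimal, hedged usage sketch. The User model, its table mapping, and the open *pg.DB handle are illustrative assumptions, not part of this change:

    // Assumed: db is a connected *pg.DB; User maps to the "users" table.
    var users []User
    err := db.Model(&users).
        Where("active = ?", true). // parameters are escaped by the formatter
        Order("id ASC").           // column quoted, direction validated
        Limit(10).
        Select()
    if err != nil {
        panic(err)
    }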
-func (q *Query) Copy() *Query { - var modelValues map[string]*queryParamsAppender - if len(q.modelValues) > 0 { - modelValues = make(map[string]*queryParamsAppender, len(q.modelValues)) - for k, v := range q.modelValues { - modelValues[k] = v - } - } - - copy := &Query{ - db: q.db, - stickyErr: q.stickyErr, - - model: q.model, - ignoreModel: q.ignoreModel, - deleted: q.deleted, - - tables: q.tables[:len(q.tables):len(q.tables)], - columns: q.columns[:len(q.columns):len(q.columns)], - set: q.set[:len(q.set):len(q.set)], - modelValues: modelValues, - where: q.where[:len(q.where):len(q.where)], - updWhere: q.updWhere[:len(q.updWhere):len(q.updWhere)], - joins: q.joins[:len(q.joins):len(q.joins)], - group: q.group[:len(q.group):len(q.group)], - having: q.having[:len(q.having):len(q.having)], - order: q.order[:len(q.order):len(q.order)], - onConflict: q.onConflict, - returning: q.returning[:len(q.returning):len(q.returning)], - limit: q.limit, - offset: q.offset, - selFor: q.selFor, - } - for _, with := range q.with { - copy = copy.With(with.name, with.query.Copy()) - } - return copy -} - -func (q *Query) err(err error) *Query { - if q.stickyErr == nil { - q.stickyErr = err - } - return q -} - -func (q *Query) DB(db DB) *Query { - q.db = db - for _, with := range q.with { - with.query.db = db - } - return q -} - -func (q *Query) Model(model ...interface{}) *Query { - var err error - switch l := len(model); { - case l == 0: - q.model = nil - case l == 1: - q.model, err = newTableModel(model[0]) - case l > 1: - q.model, err = newTableModel(&model) - } - if err != nil { - q = q.err(err) - } - if q.ignoreModel { - q.ignoreModel = false - } - return q -} - -func (q *Query) softDelete() bool { - if q.model != nil { - return q.model.Table().HasFlag(softDelete) - } - return false -} - -// Deleted adds `WHERE deleted_at IS NOT NULL` clause for soft deleted models. -func (q *Query) Deleted() *Query { - if q.model != nil { - if err := q.model.Table().mustSoftDelete(); err != nil { - return q.err(err) - } - } - q.deleted = true - return q -} - -// With adds subq as a common table expression with the given name. -func (q *Query) With(name string, subq *Query) *Query { - q.with = append(q.with, withQuery{name, subq}) - return q -} - -// WrapWith creates a new Query and adds the current query to it as a -// common table expression with the given name. -func (q *Query) WrapWith(name string) *Query { - wrapper := q.New() - wrapper.with = q.with - q.with = nil - wrapper = wrapper.With(name, q) - return wrapper -} - -func (q *Query) Table(tables ...string) *Query { - for _, table := range tables { - q.tables = append(q.tables, fieldAppender{table}) - } - return q -} - -func (q *Query) TableExpr(expr string, params ...interface{}) *Query { - q.tables = append(q.tables, &queryParamsAppender{expr, params}) - return q -} - -// Column adds a column to the Query quoting it according to PostgreSQL rules. Does not expand params like ?TableAlias etc. -// ColumnExpr can be used to bypass the quoting restriction or for params expansion. Column name can be: -// - column_name, -// - table_alias.column_name, -// - table_alias.*. -func (q *Query) Column(columns ...string) *Query { - for _, column := range columns { - if column == "_" { - if q.columns == nil { - q.columns = make([]FormatAppender, 0) - } - continue - } - - if q.model != nil { - if j := q.model.Join(column, nil); j != nil { - continue - } - } - - q.columns = append(q.columns, fieldAppender{column}) - } - return q -} - -// ColumnExpr adds a column expression to the Query.
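Column quotes plain identifiers, while ColumnExpr passes a raw SQL expression through with params expanded. A small sketch under the same illustrative assumptions (a Book model mapped to a "books" table, db a connected *pg.DB):

    var count int
    err := db.Model((*Book)(nil)).
        ColumnExpr("count(*)"). // raw expression, bypasses identifier quoting
        Select(&count)          // scans the single result column into count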
-func (q *Query) ColumnExpr(expr string, params ...interface{}) *Query { - q.columns = append(q.columns, &queryParamsAppender{expr, params}) - return q -} - -// ExcludeColumn excludes a column from the list of columns to be selected. -func (q *Query) ExcludeColumn(columns ...string) *Query { - if q.columns == nil { - for _, f := range q.model.Table().Fields { - q.columns = append(q.columns, fieldAppender{f.SQLName}) - } - } - - for _, col := range columns { - if !q.excludeColumn(col) { - return q.err(fmt.Errorf("pg: can't find column=%q", col)) - } - } - return q -} - -func (q *Query) excludeColumn(column string) bool { - for i := 0; i < len(q.columns); i++ { - app, ok := q.columns[i].(fieldAppender) - if ok && app.field == column { - q.columns = append(q.columns[:i], q.columns[i+1:]...) - return true - } - } - return false -} - -func (q *Query) getFields() ([]*Field, error) { - return q._getFields(false) -} - -func (q *Query) getDataFields() ([]*Field, error) { - return q._getFields(true) -} - -func (q *Query) _getFields(omitPKs bool) ([]*Field, error) { - table := q.model.Table() - var columns []*Field - for _, col := range q.columns { - f, ok := col.(fieldAppender) - if !ok { - continue - } - - field, err := table.GetField(f.field) - if err != nil { - return nil, err - } - - if omitPKs && field.HasFlag(PrimaryKeyFlag) { - continue - } - - columns = append(columns, field) - } - return columns, nil -} - -// Relation adds a relation to the query. Relation name can be: -// - RelationName to select all columns, -// - RelationName.column_name, -// - RelationName._ to join relation without selecting relation columns. -func (q *Query) Relation(name string, apply ...func(*Query) (*Query, error)) *Query { - var fn func(*Query) (*Query, error) - if len(apply) == 1 { - fn = apply[0] - } else if len(apply) > 1 { - panic("only one apply function is supported") - } - - join := q.model.Join(name, fn) - if join == nil { - return q.err(fmt.Errorf("%s does not have relation=%q", - q.model.Table(), name)) - } - - if fn == nil { - return q - } - - switch join.Rel.Type { - case HasOneRelation, BelongsToRelation: - q.joinAppendOn = join.AppendOn - return q.Apply(fn) - default: - q.joinAppendOn = nil - return q - } -} - -func (q *Query) Set(set string, params ...interface{}) *Query { - q.set = append(q.set, &queryParamsAppender{set, params}) - return q -} - -// Value overwrites the model value for the column in INSERT and UPDATE queries. -func (q *Query) Value(column string, value string, params ...interface{}) *Query { - if !q.hasModel() { - q.err(errors.New("pg: Model(nil)")) - return q - } - - table := q.model.Table() - if _, ok := table.FieldsMap[column]; !ok { - q.err(fmt.Errorf("%s does not have column=%q", table, column)) - return q - } - - if q.modelValues == nil { - q.modelValues = make(map[string]*queryParamsAppender) - } - q.modelValues[column] = &queryParamsAppender{value, params} - return q -} - -func (q *Query) Where(condition string, params ...interface{}) *Query { - q.addWhere(&condAppender{ - sep: " AND ", - cond: condition, - params: params, - }) - return q -} - -func (q *Query) WhereOr(condition string, params ...interface{}) *Query { - q.addWhere(&condAppender{ - sep: " OR ", - cond: condition, - params: params, - }) - return q -} - -// WhereGroup encloses conditions added in the function in parentheses. -// -// q.Where("TRUE"). -// WhereGroup(func(q *orm.Query) (*orm.Query, error) { -// q = q.WhereOr("FALSE").WhereOr("TRUE")
-// return q, nil -// }) -// -// generates -// -// WHERE TRUE AND (FALSE OR TRUE) -func (q *Query) WhereGroup(fn func(*Query) (*Query, error)) *Query { - return q.whereGroup(" AND ", fn) -} - -// WhereOrGroup encloses conditions added in the function in parentheses. -// -// q.Where("TRUE"). -// WhereOrGroup(func(q *orm.Query) (*orm.Query, error) { -// q = q.Where("FALSE").Where("TRUE") -// return q, nil -// }) -// -// generates -// -// WHERE TRUE OR (FALSE AND TRUE) -func (q *Query) WhereOrGroup(fn func(*Query) (*Query, error)) *Query { - return q.whereGroup(" OR ", fn) -} - -func (q *Query) whereGroup(conj string, fn func(*Query) (*Query, error)) *Query { - saved := q.where - q.where = nil - - newq, err := fn(q) - if err != nil { - q.err(err) - return q - } - - if len(newq.where) == 0 { - newq.where = saved - return newq - } - - f := &condGroupAppender{ - sep: conj, - cond: newq.where, - } - newq.where = saved - newq.addWhere(f) - - return newq -} - -// WhereIn is a shortcut for Where and pg.In to work with the IN operator: -// -// WhereIn("id IN (?)", 1, 2, 3) -func (q *Query) WhereIn(where string, values ...interface{}) *Query { - return q.Where(where, types.InSlice(values)) -} - -func (q *Query) addWhere(f sepFormatAppender) { - if q.onConflictDoUpdate() { - q.updWhere = append(q.updWhere, f) - } else { - q.where = append(q.where, f) - } -} - -// WherePK adds a condition based on the model primary key. -// Typically it is the same as: -// -// Where("id = ?id") -func (q *Query) WherePK() *Query { - if !q.hasModel() { - q.err(errors.New("pg: Model(nil)")) - return q - } - if q.model.Kind() == reflect.Slice { - q.err(errors.New("pg: WherePK requires struct Model")) - return q - } - if err := q.model.Table().checkPKs(); err != nil { - q.err(err) - return q - } - q.where = append(q.where, wherePKQuery{q}) - return q -} - -func (q *Query) Join(join string, params ...interface{}) *Query { - j := &joinQuery{ - join: &queryParamsAppender{join, params}, - } - q.joins = append(q.joins, j) - q.joinAppendOn = j.AppendOn - return q -} - -// JoinOn appends a join condition to the last join. -func (q *Query) JoinOn(condition string, params ...interface{}) *Query { - if q.joinAppendOn == nil { - q.err(errors.New("pg: no joins to apply JoinOn")) - return q - } - q.joinAppendOn(&condAppender{ - sep: " AND ", - cond: condition, - params: params, - }) - return q -} - -func (q *Query) JoinOnOr(condition string, params ...interface{}) *Query { - if q.joinAppendOn == nil { - q.err(errors.New("pg: no joins to apply JoinOn")) - return q - } - q.joinAppendOn(&condAppender{ - sep: " OR ", - cond: condition, - params: params, - }) - return q -} - -func (q *Query) Group(columns ...string) *Query { - for _, column := range columns { - q.group = append(q.group, fieldAppender{column}) - } - return q -} - -func (q *Query) GroupExpr(group string, params ...interface{}) *Query { - q.group = append(q.group, &queryParamsAppender{group, params}) - return q -} - -func (q *Query) Having(having string, params ...interface{}) *Query { - q.having = append(q.having, &queryParamsAppender{having, params}) - return q -} - -// Order adds sort order to the Query quoting the column name. Does not expand params like ?TableAlias etc. -// OrderExpr can be used to bypass the quoting restriction or for params expansion.
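In other words, Order is for plain column names with an optional ASC/DESC suffix, and OrderExpr is for everything else. A brief sketch, again with the assumed Book model and db handle:

    q := db.Model(&books)
    q = q.Order("title ASC")             // quoted: ORDER BY "title" ASC
    q = q.OrderExpr("lower(title) DESC") // raw:    ORDER BY lower(title) DESC
    err := q.Select()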
-func (q *Query) Order(orders ...string) *Query { -loop: - for _, order := range orders { - if order == "" { - continue - } - ind := strings.Index(order, " ") - if ind != -1 { - field := order[:ind] - sort := order[ind+1:] - switch internal.UpperString(sort) { - case "ASC", "DESC", "ASC NULLS FIRST", "DESC NULLS FIRST", - "ASC NULLS LAST", "DESC NULLS LAST": - q = q.OrderExpr("? ?", types.F(field), types.Q(sort)) - continue loop - } - } - - q.order = append(q.order, fieldAppender{order}) - } - return q -} - -// Order adds sort order to the Query. -func (q *Query) OrderExpr(order string, params ...interface{}) *Query { - if order != "" { - q.order = append(q.order, &queryParamsAppender{order, params}) - } - return q -} - -func (q *Query) Limit(n int) *Query { - q.limit = n - return q -} - -func (q *Query) Offset(n int) *Query { - q.offset = n - return q -} - -func (q *Query) OnConflict(s string, params ...interface{}) *Query { - q.onConflict = &queryParamsAppender{s, params} - return q -} - -func (q *Query) onConflictDoUpdate() bool { - return q.onConflict != nil && - strings.HasSuffix(internal.UpperString(q.onConflict.query), "DO UPDATE") -} - -func (q *Query) Returning(s string, params ...interface{}) *Query { - q.returning = append(q.returning, &queryParamsAppender{s, params}) - return q -} - -func (q *Query) For(s string, params ...interface{}) *Query { - q.selFor = &queryParamsAppender{s, params} - return q -} - -// Apply calls the fn passing the Query as an argument. -func (q *Query) Apply(fn func(*Query) (*Query, error)) *Query { - qq, err := fn(q) - if err != nil { - q.err(err) - return q - } - return qq -} - -// Count returns number of rows matching the query using count aggregate function. -func (q *Query) Count() (int, error) { - if q.stickyErr != nil { - return 0, q.stickyErr - } - - var count int - _, err := q.db.QueryOne( - Scan(&count), - q.countSelectQuery("count(*)"), - q.model, - ) - return count, err -} - -func (q *Query) countSelectQuery(column string) selectQuery { - return selectQuery{ - q: q, - count: column, - } -} - -// First sorts rows by primary key and selects the first row. -// It is a shortcut for: -// -// q.OrderExpr("id ASC").Limit(1) -func (q *Query) First() error { - err := q.model.Table().checkPKs() - if err != nil { - return err - } - - b := appendColumns(nil, q.model.Table().Alias, q.model.Table().PKs) - return q.OrderExpr(internal.BytesToString(b)).Limit(1).Select() -} - -// Last sorts rows by primary key and selects the last row. -// It is a shortcut for: -// -// q.OrderExpr("id DESC").Limit(1) -func (q *Query) Last() error { - err := q.model.Table().checkPKs() - if err != nil { - return err - } - - // TODO: fix for multi columns - b := appendColumns(nil, q.model.Table().Alias, q.model.Table().PKs) - b = append(b, " DESC"...) - return q.OrderExpr(internal.BytesToString(b)).Limit(1).Select() -} - -// Select selects the model. -func (q *Query) Select(values ...interface{}) error { - if q.stickyErr != nil { - return q.stickyErr - } - - model, err := q.newModel(values...) 
- if err != nil { - return err - } - - q, err = model.BeforeSelectQuery(q.db, q) - if err != nil { - return err - } - - res, err := q.query(model, selectQuery{q: q}) - if err != nil { - return err - } - - if res.RowsReturned() > 0 { - if q.model != nil { - if err := q.selectJoins(q.model.GetJoins()); err != nil { - return err - } - } - if err := model.AfterSelect(q.db); err != nil { - return err - } - } - - return nil -} - -func (q *Query) newModel(values ...interface{}) (Model, error) { - if len(values) > 0 { - return NewModel(values...) - } - return q.model, nil -} - -func (q *Query) query(model Model, query interface{}) (Result, error) { - if _, ok := model.(useQueryOne); ok { - return q.db.QueryOne(model, query, q.model) - } - return q.db.Query(model, query, q.model) -} - -// SelectAndCount runs Select and Count in two goroutines, -// waits for them to finish and returns the result. If query limit is -1 -// it does not select any data and only counts the results. -func (q *Query) SelectAndCount(values ...interface{}) (count int, firstErr error) { - if q.stickyErr != nil { - return 0, q.stickyErr - } - - var wg sync.WaitGroup - var mu sync.Mutex - - if q.limit >= 0 { - wg.Add(1) - go func() { - defer wg.Done() - err := q.Select(values...) - if err != nil { - mu.Lock() - if firstErr == nil { - firstErr = err - } - mu.Unlock() - } - }() - } - - wg.Add(1) - go func() { - defer wg.Done() - var err error - count, err = q.Count() - if err != nil { - mu.Lock() - if firstErr == nil { - firstErr = err - } - mu.Unlock() - } - }() - - wg.Wait() - return count, firstErr -} - -// SelectAndCountEstimate runs Select and CountEstimate in two goroutines, -// waits for them to finish and returns the result. If query limit is -1 -// it does not select any data and only counts the results. -func (q *Query) SelectAndCountEstimate(threshold int, values ...interface{}) (count int, firstErr error) { - if q.stickyErr != nil { - return 0, q.stickyErr - } - - var wg sync.WaitGroup - var mu sync.Mutex - - if q.limit >= 0 { - wg.Add(1) - go func() { - defer wg.Done() - err := q.Select(values...) - if err != nil { - mu.Lock() - if firstErr == nil { - firstErr = err - } - mu.Unlock() - } - }() - } - - wg.Add(1) - go func() { - defer wg.Done() - var err error - count, err = q.CountEstimate(threshold) - if err != nil { - mu.Lock() - if firstErr == nil { - firstErr = err - } - mu.Unlock() - } - }() - - wg.Wait() - return count, firstErr -} - -// ForEach calls the function for each row returned by the query -// without loading all rows into the memory. -// Function accepts a struct, pointer to a struct, orm.Model, -// or values for columns in a row. Function must return an error. -func (q *Query) ForEach(fn interface{}) error { - m := newFuncModel(fn) - return q.Select(m) -} - -func (q *Query) forEachHasOneJoin(fn func(*join)) { - if q.model == nil { - return - } - q._forEachHasOneJoin(fn, q.model.GetJoins()) -} - -func (q *Query) _forEachHasOneJoin(fn func(*join), joins []join) { - for i := range joins { - j := &joins[i] - switch j.Rel.Type { - case HasOneRelation, BelongsToRelation: - fn(j) - q._forEachHasOneJoin(fn, j.JoinModel.GetJoins()) - } - } -} - -func (q *Query) selectJoins(joins []join) error { - var err error - for i := range joins { - j := &joins[i] - if j.Rel.Type == HasOneRelation || j.Rel.Type == BelongsToRelation { - err = q.selectJoins(j.JoinModel.GetJoins()) - } else { - err = j.Select(q.New()) - } - if err != nil { - return err - } - } - return nil -} - -// Insert inserts the model. 
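Insert wraps the INSERT with the model's BeforeInsert/AfterInsert hooks, and Returning can be used to scan generated columns back into the model. A hedged sketch; Book and db remain illustrative assumptions:

    book := &Book{Title: "Ulysses"}
    _, err := db.Model(book).
        Returning("id"). // scan the generated id back into the model
        Insert()
    // On success, the model's primary key field is populated.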
-func (q *Query) Insert(values ...interface{}) (Result, error) { - if q.stickyErr != nil { - return nil, q.stickyErr - } - - model, err := q.newModel(values...) - if err != nil { - return nil, err - } - - if q.model != nil && q.model.Table().HasFlag(BeforeInsertHookFlag) { - err = q.model.BeforeInsert(q.db) - if err != nil { - return nil, err - } - } - - query := &insertQuery{q: q} - res, err := q.returningQuery(model, query) - if err != nil { - return nil, err - } - - if q.model != nil { - err = q.model.AfterInsert(q.db) - if err != nil { - return nil, err - } - } - - return res, nil -} - -// SelectOrInsert selects the model, inserting one if it does not exist. -// It returns true when the model was inserted. -func (q *Query) SelectOrInsert(values ...interface{}) (inserted bool, _ error) { - if q.stickyErr != nil { - return false, q.stickyErr - } - - insertq := q - if len(insertq.columns) > 0 { - insertq = insertq.Copy() - insertq.columns = nil - } - - var insertErr error - for i := 0; i < 5; i++ { - if i >= 2 { - time.Sleep(internal.RetryBackoff(i-2, 250*time.Millisecond, 5*time.Second)) - } - - err := q.Select(values...) - if err == nil { - return false, nil - } - if err != internal.ErrNoRows { - return false, err - } - - res, err := insertq.Insert(values...) - if err != nil { - insertErr = err - if err == internal.ErrNoRows { - continue - } - if pgErr, ok := err.(internal.PGError); ok { - if pgErr.IntegrityViolation() { - continue - } - if pgErr.Field('C') == "55000" { - // Retry on "#55000 attempted to delete invisible tuple". - continue - } - } - return false, err - } - if res.RowsAffected() == 1 { - return true, nil - } - } - - err := fmt.Errorf( - "pg: SelectOrInsert: select returns no rows (insert fails with err=%q)", - insertErr, - ) - return false, err -} - -// Update updates the model. -func (q *Query) Update(scan ...interface{}) (Result, error) { - return q.update(scan, false) -} - -// UpdateNotNull updates the model omitting null (zero) columns. -func (q *Query) UpdateNotNull(scan ...interface{}) (Result, error) { - return q.update(scan, true) -} - -func (q *Query) update(scan []interface{}, omitZero bool) (Result, error) { - if q.stickyErr != nil { - return nil, q.stickyErr - } - - model, err := q.newModel(scan...) - if err != nil { - return nil, err - } - - if q.model != nil { - err = q.model.BeforeUpdate(q.db) - if err != nil { - return nil, err - } - } - - query := updateQuery{q: q, omitZero: omitZero} - res, err := q.returningQuery(model, query) - if err != nil { - return nil, err - } - - if q.model != nil { - err = q.model.AfterUpdate(q.db) - if err != nil { - return nil, err - } - } - - return res, nil -} - -func (q *Query) returningQuery(model Model, query interface{}) (Result, error) { - if len(q.returning) == 0 { - return q.db.Query(model, query, q.model) - } - if _, ok := model.(useQueryOne); ok { - return q.db.QueryOne(model, query, q.model) - } - return q.db.Query(model, query, q.model) -} - -// Delete deletes the model. When the model has a deleted_at column the row -// is soft deleted instead. -func (q *Query) Delete(values ...interface{}) (Result, error) { - if q.softDelete() { - q.model.setDeletedAt() - columns := q.columns - q.columns = nil - res, err := q.Column("deleted_at").Update(values...) - q.columns = columns - return res, err - } - return q.ForceDelete(values...) -} - -// ForceDelete forces deletion of the model even when it has a deleted_at column.
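The split between the two is: Delete soft deletes when the table has a recognised deleted_at column, while ForceDelete always issues a real DELETE. A sketch assuming the Book model carries such a column:

    _, err := db.Model(book).WherePK().Delete() // soft delete: sets deleted_at
    if err == nil {
        // Hard delete: actually removes the row, deleted_at or not.
        _, err = db.Model(book).WherePK().ForceDelete()
    }
    // Deleted() flips the filter to fetch only soft-deleted rows:
    err = db.Model(&books).Deleted().Select()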
-func (q *Query) ForceDelete(values ...interface{}) (Result, error) { - if q.stickyErr != nil { - return nil, q.stickyErr - } - if q.model == nil { - return nil, errors.New("pg: Model(nil)") - } - q.deleted = true - - model, err := q.newModel(values...) - if err != nil { - return nil, err - } - - if q.model != nil { - err = q.model.BeforeDelete(q.db) - if err != nil { - return nil, err - } - } - - res, err := q.returningQuery(model, deleteQuery{q}) - if err != nil { - return nil, err - } - - if q.model != nil { - err = q.model.AfterDelete(q.db) - if err != nil { - return nil, err - } - } - - return res, nil -} - -// Exec is an alias for DB.Exec. -func (q *Query) Exec(query interface{}, params ...interface{}) (Result, error) { - params = append(params, q.model) - return q.db.Exec(query, params...) -} - -// ExecOne is an alias for DB.ExecOne. -func (q *Query) ExecOne(query interface{}, params ...interface{}) (Result, error) { - params = append(params, q.model) - return q.db.ExecOne(query, params...) -} - -// Query is an alias for DB.Query. -func (q *Query) Query(model, query interface{}, params ...interface{}) (Result, error) { - params = append(params, q.model) - return q.db.Query(model, query, params...) -} - -// QueryOne is an alias for DB.QueryOne. -func (q *Query) QueryOne(model, query interface{}, params ...interface{}) (Result, error) { - params = append(params, q.model) - return q.db.QueryOne(model, query, params...) -} - -// CopyFrom is an alias for DB.CopyFrom. -func (q *Query) CopyFrom(r io.Reader, query interface{}, params ...interface{}) (Result, error) { - params = append(params, q.model) - return q.db.CopyFrom(r, query, params...) -} - -// CopyTo is an alias for DB.CopyTo. -func (q *Query) CopyTo(w io.Writer, query interface{}, params ...interface{}) (Result, error) { - params = append(params, q.model) - return q.db.CopyTo(w, query, params...) -} - -func (q *Query) FormatQuery(b []byte, query string, params ...interface{}) []byte { - params = append(params, q.model) - if q.db != nil { - return q.db.FormatQuery(b, query, params...) - } - return formatter.Append(b, query, params...) -} - -// Exists reports whether there are any rows matching the query. -func (q *Query) Exists() (bool, error) { - cp := q.Copy() // copy to not change original query - cp.columns = []FormatAppender{fieldAppender{"1"}} - cp.order = nil - cp.limit = 1 - res, err := q.db.Exec(selectQuery{q: cp}) - if err != nil { - return false, err - } - return res.RowsAffected() > 0, nil -} - -func (q *Query) hasModel() bool { - return !q.ignoreModel && q.model != nil -} - -func (q *Query) hasTables() bool { - return q.hasModel() || len(q.tables) > 0 -} - -func (q *Query) appendTableName(b []byte) []byte { - return q.FormatQuery(b, string(q.model.Table().Name)) -} - -func (q *Query) appendTableNameWithAlias(b []byte) []byte { - b = q.appendTableName(b) - b = append(b, " AS "...) - b = append(b, q.model.Table().Alias...)
- return b -} - -func (q *Query) appendFirstTable(b []byte) []byte { - if q.hasModel() { - return q.appendTableName(b) - } - if len(q.tables) > 0 { - b = q.tables[0].AppendFormat(b, q) - } - return b -} - -func (q *Query) appendFirstTableWithAlias(b []byte) []byte { - if q.hasModel() { - return q.appendTableNameWithAlias(b) - } - if len(q.tables) > 0 { - b = q.tables[0].AppendFormat(b, q) - } - return b -} - -func (q *Query) hasMultiTables() bool { - if q.hasModel() { - return len(q.tables) > 0 - } - return len(q.tables) > 1 -} - -func (q *Query) appendOtherTables(b []byte) []byte { - tables := q.tables - if !q.hasModel() { - tables = tables[1:] - } - for i, f := range tables { - if i > 0 { - b = append(b, ", "...) - } - b = f.AppendFormat(b, q) - } - return b -} - -func (q *Query) appendColumns(b []byte) []byte { - for i, f := range q.columns { - if i > 0 { - b = append(b, ", "...) - } - b = f.AppendFormat(b, q) - } - return b -} - -func (q *Query) hasWhere() bool { - return len(q.where) > 0 || q.softDelete() -} - -func (q *Query) mustAppendWhere(b []byte) ([]byte, error) { - if q.hasWhere() { - b = q.appendWhere(b) - return b, nil - } - - if q.model == nil { - return nil, errors.New("pg: Model(nil)") - } - err := errors.New( - "pg: Update and Delete queries require Where clause (try WherePK)") - return nil, err -} - -func (q *Query) appendWhere(b []byte) []byte { - b = q._appendWhere(b, q.where) - if q.softDelete() { - if len(q.where) > 0 { - b = append(b, " AND "...) - } - b = append(b, q.model.Table().Alias...) - b = q.appendSoftDelete(b) - } - return b -} - -func (q *Query) appendSoftDelete(b []byte) []byte { - if q.deleted { - b = append(b, ".deleted_at IS NOT NULL"...) - } else { - b = append(b, ".deleted_at IS NULL"...) - } - return b -} - -func (q *Query) appendUpdWhere(b []byte) []byte { - return q._appendWhere(b, q.updWhere) -} - -func (q *Query) _appendWhere(b []byte, where []sepFormatAppender) []byte { - for i, f := range where { - if i > 0 { - b = f.AppendSep(b) - } - b = f.AppendFormat(b, q) - } - return b -} - -func (q *Query) appendSet(b []byte) []byte { - b = append(b, " SET "...) - for i, f := range q.set { - if i > 0 { - b = append(b, ", "...) - } - b = f.AppendFormat(b, q) - } - return b -} - -func (q *Query) appendReturning(b []byte) []byte { - b = append(b, " RETURNING "...) - for i, f := range q.returning { - if i > 0 { - b = append(b, ", "...) - } - b = f.AppendFormat(b, q) - } - return b -} - -func (q *Query) appendWith(b []byte) ([]byte, error) { - var err error - b = append(b, "WITH "...) - for i, with := range q.with { - if i > 0 { - b = append(b, ", "...) - } - b = types.AppendField(b, with.name, 1) - b = append(b, " AS ("...) - - b, err = selectQuery{q: with.query}.AppendQuery(b) - if err != nil { - return nil, err - } - - b = append(b, ')') - } - b = append(b, ' ') - return b, nil -} - -func (q *Query) isSliceModel() bool { - if !q.hasModel() { - return false - } - return q.model.Kind() == reflect.Slice && q.model.Value().Len() > 0 -} - -//------------------------------------------------------------------------------ - -type wherePKQuery struct { - *Query -} - -func (wherePKQuery) AppendSep(b []byte) []byte { - return append(b, " AND "...) 
-} - -func (q wherePKQuery) AppendFormat(b []byte, f QueryFormatter) []byte { - table := q.model.Table() - value := q.model.Value() - return appendColumnAndValue(b, value, table.Alias, table.PKs) -} - -func appendColumnAndValue(b []byte, v reflect.Value, alias types.Q, fields []*Field) []byte { - for i, f := range fields { - if i > 0 { - b = append(b, " AND "...) - } - b = append(b, alias...) - b = append(b, '.') - b = append(b, f.Column...) - b = append(b, " = "...) - b = f.AppendValue(b, v, 1) - } - return b -} - -func appendColumnAndSliceValue(b []byte, slice reflect.Value, alias types.Q, fields []*Field) []byte { - if slice.Len() == 0 { - return append(b, "1 = 2"...) - } - - if len(fields) > 1 { - b = append(b, '(') - } - b = appendColumns(b, alias, fields) - if len(fields) > 1 { - b = append(b, ')') - } - - b = append(b, " IN ("...) - - for i := 0; i < slice.Len(); i++ { - if i > 0 { - b = append(b, ", "...) - } - - el := indirect(slice.Index(i)) - - if len(fields) > 1 { - b = append(b, '(') - } - for i, f := range fields { - if i > 0 { - b = append(b, ", "...) - } - b = f.AppendValue(b, el, 1) - } - if len(fields) > 1 { - b = append(b, ')') - } - } - b = append(b, ')') - - return b -} diff --git a/vendor/github.com/go-pg/pg/orm/relation.go b/vendor/github.com/go-pg/pg/orm/relation.go deleted file mode 100644 index 720e6ad..0000000 --- a/vendor/github.com/go-pg/pg/orm/relation.go +++ /dev/null @@ -1,32 +0,0 @@ -package orm - -import ( - "fmt" - - "github.com/go-pg/pg/types" -) - -const ( - HasOneRelation = 1 << iota - BelongsToRelation - HasManyRelation - Many2ManyRelation -) - -type Relation struct { - Type int - Field *Field - JoinTable *Table - FKs []*Field - Polymorphic *Field - FKValues []*Field - - M2MTableName types.Q - M2MTableAlias types.Q - BaseFKs []string - JoinFKs []string -} - -func (r *Relation) String() string { - return fmt.Sprintf("relation=%s", r.Field.GoName) -} diff --git a/vendor/github.com/go-pg/pg/orm/result.go b/vendor/github.com/go-pg/pg/orm/result.go deleted file mode 100644 index 6f81245..0000000 --- a/vendor/github.com/go-pg/pg/orm/result.go +++ /dev/null @@ -1,14 +0,0 @@ -package orm - -// A Result summarizes an executed SQL command. -type Result interface { - Model() Model - - // RowsAffected returns the number of rows affected by SELECT, INSERT, UPDATE, - // or DELETE queries. It returns -1 if query can't possibly affect any rows, - // e.g. in case of CREATE or SHOW queries. - RowsAffected() int - - // RowsReturned returns the number of rows returned by the query. - RowsReturned() int -} diff --git a/vendor/github.com/go-pg/pg/orm/select.go b/vendor/github.com/go-pg/pg/orm/select.go deleted file mode 100644 index faa15e3..0000000 --- a/vendor/github.com/go-pg/pg/orm/select.go +++ /dev/null @@ -1,201 +0,0 @@ -package orm - -import ( - "strconv" - "strings" -) - -func Select(db DB, model interface{}) error { - return NewQuery(db, model).WherePK().Select() -} - -type selectQuery struct { - q *Query - - count string -} - -var _ QueryAppender = (*selectQuery)(nil) - -func (q selectQuery) Copy() QueryAppender { - return selectQuery{ - q: q.q.Copy(), - count: q.count, - } -} - -func (q selectQuery) Query() *Query { - return q.q -} - -func (q selectQuery) AppendQuery(b []byte) ([]byte, error) { - if q.q.stickyErr != nil { - return nil, q.q.stickyErr - } - - var err error - - cteCount := q.count != "" && (len(q.q.group) > 0 || q.isDistinct()) - if cteCount { - b = append(b, `WITH "_count_wrapper" AS (`...) 
- } - - if len(q.q.with) > 0 { - b, err = q.q.appendWith(b) - if err != nil { - return nil, err - } - } - - b = append(b, "SELECT "...) - if q.count != "" && !cteCount { - b = append(b, q.count...) - } else { - b = q.appendColumns(b) - } - - if q.q.hasTables() { - b = append(b, " FROM "...) - b = q.appendTables(b) - } - - q.q.forEachHasOneJoin(func(j *join) { - b = append(b, ' ') - b = j.appendHasOneJoin(q.q, b) - }) - if len(q.q.joins) > 0 { - for _, j := range q.q.joins { - b = append(b, ' ') - b = j.join.AppendFormat(b, q.q) - if len(j.on) > 0 { - b = append(b, " ON "...) - } - for i, on := range j.on { - if i > 0 { - b = on.AppendSep(b) - } - b = on.AppendFormat(b, q.q) - } - } - } - - if q.q.hasWhere() { - b = append(b, " WHERE "...) - b = q.q.appendWhere(b) - } - - if len(q.q.group) > 0 { - b = append(b, " GROUP BY "...) - for i, f := range q.q.group { - if i > 0 { - b = append(b, ", "...) - } - b = f.AppendFormat(b, q.q) - } - } - - if len(q.q.having) > 0 { - b = append(b, " HAVING "...) - for i, f := range q.q.having { - if i > 0 { - b = append(b, " AND "...) - } - b = append(b, '(') - b = f.AppendFormat(b, q.q) - b = append(b, ')') - } - } - - if q.count == "" { - if len(q.q.order) > 0 { - b = append(b, " ORDER BY "...) - for i, f := range q.q.order { - if i > 0 { - b = append(b, ", "...) - } - b = f.AppendFormat(b, q.q) - } - } - - if q.q.limit != 0 { - b = append(b, " LIMIT "...) - b = strconv.AppendInt(b, int64(q.q.limit), 10) - } - - if q.q.offset != 0 { - b = append(b, " OFFSET "...) - b = strconv.AppendInt(b, int64(q.q.offset), 10) - } - - if q.q.selFor != nil { - b = append(b, " FOR "...) - b = q.q.selFor.AppendFormat(b, q.q) - } - } else if cteCount { - b = append(b, `) SELECT `...) - b = append(b, q.count...) - b = append(b, ` FROM "_count_wrapper"`...) - } - - return b, nil -} - -func (q selectQuery) appendColumns(b []byte) []byte { - start := len(b) - - if q.q.columns != nil { - b = q.q.appendColumns(b) - } else if q.q.hasModel() { - table := q.q.model.Table() - b = appendColumns(b, table.Alias, table.Fields) - } else { - b = append(b, '*') - } - - q.q.forEachHasOneJoin(func(j *join) { - if len(b) != start { - b = append(b, ", "...) - start = len(b) - } - - b = j.appendHasOneColumns(b) - - if len(b) == start { - b = b[:len(b)-2] - } - }) - - return b -} - -func (q selectQuery) isDistinct() bool { - for _, column := range q.q.columns { - column, ok := column.(*queryParamsAppender) - if ok { - if strings.Contains(column.query, "DISTINCT") || - strings.Contains(column.query, "distinct") { - return true - } - } - } - return false -} - -func (q selectQuery) appendTables(b []byte) []byte { - if q.q.hasModel() { - b = q.q.FormatQuery(b, string(q.q.model.Table().NameForSelects)) - b = append(b, " AS "...) - b = append(b, q.q.model.Table().Alias...) - - if len(q.q.tables) > 0 { - b = append(b, ", "...) - } - } - for i, f := range q.q.tables { - if i > 0 { - b = append(b, ", "...) 
- } - b = f.AppendFormat(b, q.q) - } - return b -} diff --git a/vendor/github.com/go-pg/pg/orm/table.go b/vendor/github.com/go-pg/pg/orm/table.go deleted file mode 100644 index cef7825..0000000 --- a/vendor/github.com/go-pg/pg/orm/table.go +++ /dev/null @@ -1,891 +0,0 @@ -package orm - -import ( - "bytes" - "database/sql" - "encoding/json" - "fmt" - "net" - "reflect" - "strings" - "time" - - "github.com/go-pg/pg/internal" - "github.com/go-pg/pg/types" -) - -const ( - AfterQueryHookFlag = uint16(1) << iota - BeforeSelectQueryHookFlag - AfterSelectHookFlag - BeforeInsertHookFlag - AfterInsertHookFlag - BeforeUpdateHookFlag - AfterUpdateHookFlag - BeforeDeleteHookFlag - AfterDeleteHookFlag - discardUnknownColumns - softDelete -) - -var timeType = reflect.TypeOf((*time.Time)(nil)).Elem() -var ipType = reflect.TypeOf((*net.IP)(nil)).Elem() -var ipNetType = reflect.TypeOf((*net.IPNet)(nil)).Elem() -var scannerType = reflect.TypeOf((*sql.Scanner)(nil)).Elem() -var nullBoolType = reflect.TypeOf((*sql.NullBool)(nil)).Elem() -var nullFloatType = reflect.TypeOf((*sql.NullFloat64)(nil)).Elem() -var nullIntType = reflect.TypeOf((*sql.NullInt64)(nil)).Elem() -var nullStringType = reflect.TypeOf((*sql.NullString)(nil)).Elem() - -// Table represents a SQL table created from Go struct. -type Table struct { - Type reflect.Type - zeroStruct reflect.Value - - TypeName string - Name types.Q - NameForSelects types.Q - Alias types.Q - ModelName string - - allFields []*Field // read only - skippedFields []*Field - - Fields []*Field // PKs + DataFields - PKs []*Field - DataFields []*Field - FieldsMap map[string]*Field - - Methods map[string]*Method - Relations map[string]*Relation - Unique map[string][]*Field - - flags uint16 -} - -func (t *Table) setName(name types.Q) { - t.Name = name - t.NameForSelects = name -} - -func newTable(typ reflect.Type) *Table { - t := new(Table) - t.Type = typ - t.zeroStruct = reflect.New(t.Type).Elem() - t.TypeName = internal.ToExported(t.Type.Name()) - t.ModelName = internal.Underscore(t.Type.Name()) - tableName := quoteTableName(tableNameInflector(t.ModelName)) - t.setName(types.Q(types.AppendField(nil, tableName, 1))) - t.Alias = types.Q(types.AppendField(nil, t.ModelName, 1)) - - typ = reflect.PtrTo(t.Type) - if typ.Implements(afterQueryHookType) { - t.SetFlag(AfterQueryHookFlag) - } - if typ.Implements(beforeSelectQueryHookType) { - t.SetFlag(BeforeSelectQueryHookFlag) - } - if typ.Implements(afterSelectHookType) { - t.SetFlag(AfterSelectHookFlag) - } - if typ.Implements(beforeInsertHookType) { - t.SetFlag(BeforeInsertHookFlag) - } - if typ.Implements(afterInsertHookType) { - t.SetFlag(AfterInsertHookFlag) - } - if typ.Implements(beforeUpdateHookType) { - t.SetFlag(BeforeUpdateHookFlag) - } - if typ.Implements(afterUpdateHookType) { - t.SetFlag(AfterUpdateHookFlag) - } - if typ.Implements(beforeDeleteHookType) { - t.SetFlag(BeforeDeleteHookFlag) - } - if typ.Implements(afterDeleteHookType) { - t.SetFlag(AfterDeleteHookFlag) - } - - t.initFields() - t.initMethods() - - return t -} - -func (t *Table) String() string { - return "model=" + t.TypeName -} - -func (t *Table) SetFlag(flag uint16) { - t.flags |= flag -} - -func (t *Table) HasFlag(flag uint16) bool { - if t == nil { - return false - } - return t.flags&flag != 0 -} - -func (t *Table) HasField(field string) bool { - _, err := t.GetField(field) - return err == nil -} - -func (t *Table) checkPKs() error { - if len(t.PKs) == 0 { - return fmt.Errorf("pg: %s does not have primary keys", t) - } - return nil -} - -func (t *Table) 
mustSoftDelete() error { - if !t.HasFlag(softDelete) { - return fmt.Errorf("pg: %s does not support soft deletes", t) - } - return nil -} - -func (t *Table) AddField(field *Field) { - t.allFields = append(t.allFields, field) - t.Fields = append(t.Fields, field) - if field.HasFlag(PrimaryKeyFlag) { - t.PKs = append(t.PKs, field) - } else { - t.DataFields = append(t.DataFields, field) - } - t.FieldsMap[field.SQLName] = field -} - -func (t *Table) RemoveField(field *Field) { - t.Fields = removeField(t.Fields, field) - if field.HasFlag(PrimaryKeyFlag) { - t.PKs = removeField(t.PKs, field) - } else { - t.DataFields = removeField(t.DataFields, field) - } - delete(t.FieldsMap, field.SQLName) -} - -func removeField(fields []*Field, field *Field) []*Field { - for i, f := range fields { - if f == field { - fields = append(fields[:i], fields[i+1:]...) - } - } - return fields -} - -func (t *Table) GetField(fieldName string) (*Field, error) { - field, ok := t.FieldsMap[fieldName] - if !ok { - return nil, fmt.Errorf("can't find column=%s in %s", fieldName, t) - } - return field, nil -} - -func (t *Table) AppendParam(b []byte, strct reflect.Value, name string) ([]byte, bool) { - field, ok := t.FieldsMap[name] - if ok { - b = field.AppendValue(b, strct, 1) - return b, true - } - - method, ok := t.Methods[name] - if ok { - b = method.AppendValue(b, strct.Addr(), 1) - return b, true - } - - return b, false -} - -func (t *Table) initFields() { - t.Fields = make([]*Field, 0, t.Type.NumField()) - t.FieldsMap = make(map[string]*Field, t.Type.NumField()) - t.addFields(t.Type, nil) -} - -func (t *Table) addFields(typ reflect.Type, baseIndex []int) { - for i := 0; i < typ.NumField(); i++ { - f := typ.Field(i) - - // Make a copy so slice is not shared between fields. - index := make([]int, len(baseIndex)) - copy(index, baseIndex) - - if f.Anonymous { - sqlTag := f.Tag.Get("sql") - if sqlTag == "-" { - continue - } - - fieldType := indirectType(f.Type) - t.addFields(fieldType, append(index, f.Index...)) - - pgTag := parseTag(f.Tag.Get("pg")) - _, inherit := pgTag.Options["inherit"] - _, override := pgTag.Options["override"] - if inherit || override { - embeddedTable := newTable(fieldType) - t.TypeName = embeddedTable.TypeName - t.Name = embeddedTable.Name - t.NameForSelects = embeddedTable.NameForSelects - t.Alias = embeddedTable.Alias - t.ModelName = embeddedTable.ModelName - } - - continue - } - - field := t.newField(f, index) - if field != nil { - t.AddField(field) - } - } -} - -func (t *Table) newField(f reflect.StructField, index []int) *Field { - sqlTag := parseTag(f.Tag.Get("sql")) - - switch f.Name { - case "tableName", "TableName": - if len(index) > 0 { - return nil - } - - if sqlTag.Name != "" { - s, _ := unquoteTagValue(sqlTag.Name) - t.setName(types.Q(quoteTableName(s))) - } - - if v, ok := sqlTag.Options["select"]; ok { - v, _ = unquoteTagValue(v) - t.NameForSelects = types.Q(quoteTableName(v)) - } - - if v, ok := sqlTag.Options["alias"]; ok { - v, _ = unquoteTagValue(v) - t.Alias = types.Q(quoteTableName(v)) - } - - pgTag := parseTag(f.Tag.Get("pg")) - if _, ok := pgTag.Options["discard_unknown_columns"]; ok { - t.SetFlag(discardUnknownColumns) - } - - return nil - } - - if f.PkgPath != "" { - return nil - } - - skip := sqlTag.Name == "-" - if skip || sqlTag.Name == "" { - sqlTag.Name = internal.Underscore(f.Name) - } - - index = append(index, f.Index...) 
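//------------------------------------------------------------------------------
// Editor's note: illustrative sketch, not part of the diff. newField above is
// driven entirely by struct tags; a hypothetical model exercising the options
// it parses (table name/alias, pk detection, notnull/unique/default, array,
// hstore):
//
//	type Item struct {
//		tableName struct{} `sql:"items,alias:it"`
//
//		ID     int64             // "id" with no pk declared yet => PrimaryKeyFlag
//		Name   string            `sql:",notnull,unique"`
//		Kind   string            `sql:"type:varchar(32),default:'misc'"`
//		Labels []string          `sql:",array"`  // mapped to text[]
//		Attrs  map[string]string `sql:",hstore"` // requires the hstore extension
//	}
//------------------------------------------------------------------------------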
- if field, ok := t.FieldsMap[sqlTag.Name]; ok { - if indexEqual(field.Index, index) { - return field - } - t.RemoveField(field) - } - - field := &Field{ - Field: f, - Type: indirectType(f.Type), - - GoName: f.Name, - SQLName: sqlTag.Name, - Column: types.Q(types.AppendField(nil, sqlTag.Name, 1)), - - Index: index, - } - - if _, ok := sqlTag.Options["notnull"]; ok { - field.SetFlag(NotNullFlag) - } - if v, ok := sqlTag.Options["unique"]; ok { - if v == "" { - field.SetFlag(UniqueFlag) - } else { - if t.Unique == nil { - t.Unique = make(map[string][]*Field) - } - t.Unique[v] = append(t.Unique[v], field) - } - } - if v, ok := sqlTag.Options["default"]; ok { - v, ok = unquoteTagValue(v) - if ok { - field.Default = types.Q(types.AppendString(nil, v, 1)) - } else { - field.Default = types.Q(v) - } - } - - if _, ok := sqlTag.Options["pk"]; ok { - field.SetFlag(PrimaryKeyFlag) - } else if strings.HasSuffix(field.SQLName, "_id") || - strings.HasSuffix(field.SQLName, "_uuid") { - field.SetFlag(ForeignKeyFlag) - } else if strings.HasPrefix(field.SQLName, "fk_") { - field.SetFlag(ForeignKeyFlag) - } else if len(t.PKs) == 0 { - if field.SQLName == "id" || - field.SQLName == "uuid" || - field.SQLName == "pk_"+t.ModelName { - field.SetFlag(PrimaryKeyFlag) - } - } - - pgTag := parseTag(f.Tag.Get("pg")) - - if _, ok := sqlTag.Options["array"]; ok { - field.SetFlag(ArrayFlag) - } else if _, ok := pgTag.Options["array"]; ok { - field.SetFlag(ArrayFlag) - } - - field.SQLType = fieldSQLType(field, pgTag, sqlTag) - if strings.HasSuffix(field.SQLType, "[]") { - field.SetFlag(ArrayFlag) - } - - if v, ok := sqlTag.Options["on_delete"]; ok { - field.OnDelete = v - } - - if v, ok := sqlTag.Options["composite"]; ok { - field.SQLType = v - field.append = compositeAppender(f.Type) - field.scan = compositeScanner(f.Type) - } else if _, ok := pgTag.Options["json_use_number"]; ok { - field.append = types.Appender(f.Type) - field.scan = scanJSONValue - } else if field.HasFlag(ArrayFlag) { - field.append = types.ArrayAppender(f.Type) - field.scan = types.ArrayScanner(f.Type) - } else if _, ok := sqlTag.Options["hstore"]; ok { - field.append = types.HstoreAppender(f.Type) - field.scan = types.HstoreScanner(f.Type) - } else if _, ok := pgTag.Options["hstore"]; ok { - field.append = types.HstoreAppender(f.Type) - field.scan = types.HstoreScanner(f.Type) - } else { - field.append = types.Appender(f.Type) - field.scan = types.Scanner(f.Type) - } - field.isZero = isZeroFunc(f.Type) - - if skip { - t.skippedFields = append(t.skippedFields, field) - t.FieldsMap[field.SQLName] = field - return nil - } - - switch field.SQLName { - case "deleted_at": - if _, ok := pgTag.Options["soft_delete"]; ok && field.Type == timeType { - t.SetFlag(softDelete) - } - } - - return field -} - -func (t *Table) initMethods() { - t.Methods = make(map[string]*Method) - typ := reflect.PtrTo(t.Type) - for i := 0; i < typ.NumMethod(); i++ { - m := typ.Method(i) - if m.PkgPath != "" { - continue - } - if m.Type.NumIn() > 1 { - continue - } - if m.Type.NumOut() != 1 { - continue - } - - retType := m.Type.Out(0) - t.Methods[m.Name] = &Method{ - Index: m.Index, - - appender: types.Appender(retType), - } - } -} - -func (t *Table) init() { - t.initRelations() - t.initInlines() -} - -func (t *Table) initRelations() { - for i := 0; i < len(t.Fields); { - f := t.Fields[i] - if t.tryRelation(f) { - t.Fields = removeField(t.Fields, f) - t.DataFields = removeField(t.DataFields, f) - } else { - i++ - } - } -} - -func (t *Table) initInlines() { - for _, f := range 
t.skippedFields { - if f.Type.Kind() == reflect.Struct { - joinTable := _tables.get(f.Type, true) - t.inlineFields(f, joinTable.allFields) - } - } -} - -func (t *Table) tryRelation(field *Field) bool { - if isColumn(field.Type) { - return false - } - - switch field.Type.Kind() { - case reflect.Slice: - return t.tryRelationSlice(field) - case reflect.Struct: - return t.tryRelationStruct(field) - } - return false -} - -func (t *Table) tryRelationSlice(field *Field) bool { - elemType := indirectType(field.Type.Elem()) - if elemType.Kind() != reflect.Struct { - return false - } - - pgTag := parseTag(field.Field.Tag.Get("pg")) - joinTable := _tables.get(elemType, true) - - fk, fkOK := pgTag.Options["fk"] - if fkOK { - if fk == "-" { - return false - } - fk = tryUnderscorePrefix(fk) - } - - if m2mTableName, _ := pgTag.Options["many2many"]; m2mTableName != "" { - m2mTable := _tables.getByName(m2mTableName) - - var m2mTableAlias types.Q - if m2mTable != nil { - m2mTableAlias = m2mTable.Alias - } else if ind := strings.IndexByte(m2mTableName, '.'); ind >= 0 { - m2mTableAlias = types.Q(m2mTableName[ind+1:]) - } else { - m2mTableAlias = types.Q(m2mTableName) - } - - var fks []string - if !fkOK { - fk = t.ModelName + "_" - } - if m2mTable != nil { - keys := foreignKeys(t, m2mTable, fk, fkOK) - if len(keys) == 0 { - return false - } - for _, fk := range keys { - fks = append(fks, fk.SQLName) - } - } else { - if fkOK && len(t.PKs) == 1 { - fks = append(fks, fk) - } else { - for _, pk := range t.PKs { - fks = append(fks, fk+pk.SQLName) - } - } - } - - joinFK, joinFKOK := pgTag.Options["joinFK"] - if joinFKOK { - joinFK = tryUnderscorePrefix(joinFK) - } else { - joinFK = joinTable.ModelName + "_" - } - var joinFKs []string - if m2mTable != nil { - keys := foreignKeys(joinTable, m2mTable, joinFK, joinFKOK) - if len(keys) == 0 { - return false - } - for _, fk := range keys { - joinFKs = append(joinFKs, fk.SQLName) - } - } else { - if joinFKOK && len(joinTable.PKs) == 1 { - joinFKs = append(joinFKs, joinFK) - } else { - for _, pk := range joinTable.PKs { - joinFKs = append(joinFKs, joinFK+pk.SQLName) - } - } - } - - t.addRelation(&Relation{ - Type: Many2ManyRelation, - Field: field, - JoinTable: joinTable, - M2MTableName: types.Q(m2mTableName), - M2MTableAlias: m2mTableAlias, - BaseFKs: fks, - JoinFKs: joinFKs, - }) - return true - } - - s, polymorphic := pgTag.Options["polymorphic"] - var typeField *Field - if polymorphic { - fk = tryUnderscorePrefix(s) - - typeField = joinTable.getField(fk + "type") - if typeField == nil { - return false - } - } else if !fkOK { - fk = t.ModelName + "_" - } - - fks := foreignKeys(t, joinTable, fk, fkOK || polymorphic) - if len(fks) == 0 { - return false - } - - var fkValues []*Field - fkValue, ok := pgTag.Options["fk_value"] - if ok { - if len(fks) > 1 { - panic(fmt.Errorf("got fk_value, but there are %d fks", len(fks))) - } - - f := t.getField(fkValue) - if f == nil { - panic(fmt.Errorf("fk_value=%q not found in %s", fkValue, t)) - } - fkValues = append(fkValues, f) - } else { - fkValues = t.PKs - } - - if len(fks) > 0 { - t.addRelation(&Relation{ - Type: HasManyRelation, - Field: field, - JoinTable: joinTable, - FKs: fks, - Polymorphic: typeField, - FKValues: fkValues, - }) - return true - } - - return false -} - -func (t *Table) tryRelationStruct(field *Field) bool { - pgTag := parseTag(field.Field.Tag.Get("pg")) - joinTable := _tables.get(field.Type, true) - if len(joinTable.allFields) == 0 { - return false - } - - res := t.tryHasOne(joinTable, field, pgTag) || - 
t.tryBelongsToOne(joinTable, field, pgTag) - t.inlineFields(field, joinTable.allFields) - return res -} - -func (t *Table) inlineFields(strct *Field, structFields []*Field) { - for _, f := range structFields { - f = f.Copy() - f.GoName = strct.GoName + "_" + f.GoName - f.SQLName = strct.SQLName + "__" + f.SQLName - f.Column = types.Q(types.AppendField(nil, f.SQLName, 1)) - f.Index = appendNew(strct.Index, f.Index...) - if _, ok := t.FieldsMap[f.SQLName]; !ok { - t.FieldsMap[f.SQLName] = f - } - } -} - -func appendNew(dst []int, src ...int) []int { - cp := make([]int, len(dst)+len(src)) - copy(cp, dst) - copy(cp[len(dst):], src) - return cp -} - -func isPostgresKeyword(s string) bool { - switch strings.ToLower(s) { - case "user", "group", "constraint", "limit", - "member", "placing", "references", "table": - return true - default: - return false - } -} - -func isColumn(typ reflect.Type) bool { - return typ.Implements(scannerType) || reflect.PtrTo(typ).Implements(scannerType) -} - -func fieldSQLType(field *Field, pgTag, sqlTag *tag) string { - if typ, ok := sqlTag.Options["type"]; ok { - field.SetFlag(customTypeFlag) - typ, _ = unquoteTagValue(typ) - return typ - } - - if _, ok := sqlTag.Options["hstore"]; ok { - field.SetFlag(customTypeFlag) - return "hstore" - } else if _, ok := pgTag.Options["hstore"]; ok { - field.SetFlag(customTypeFlag) - return "hstore" - } - - if field.HasFlag(ArrayFlag) { - sqlType := sqlType(field.Type.Elem()) - return sqlType + "[]" - } - - sqlType := sqlType(field.Type) - if field.HasFlag(PrimaryKeyFlag) { - return pkSQLType(sqlType) - } - - switch sqlType { - case "timestamptz": - field.SetFlag(customTypeFlag) - } - - return sqlType -} - -func sqlType(typ reflect.Type) string { - switch typ { - case timeType: - return "timestamptz" - case ipType: - return "inet" - case ipNetType: - return "cidr" - case nullBoolType: - return "boolean" - case nullFloatType: - return "double precision" - case nullIntType: - return "bigint" - case nullStringType: - return "text" - } - - switch typ.Kind() { - case reflect.Int8, reflect.Uint8, reflect.Int16: - return "smallint" - case reflect.Uint16, reflect.Int32: - return "integer" - case reflect.Uint32, reflect.Int64, reflect.Int: - return "bigint" - case reflect.Uint, reflect.Uint64: - // The lesser of two evils. 
- return "bigint" - case reflect.Float32: - return "real" - case reflect.Float64: - return "double precision" - case reflect.Bool: - return "boolean" - case reflect.String: - return "text" - case reflect.Map, reflect.Struct: - return "jsonb" - case reflect.Array, reflect.Slice: - if typ.Elem().Kind() == reflect.Uint8 { - return "bytea" - } - return "jsonb" - default: - return typ.Kind().String() - } -} - -func pkSQLType(s string) string { - switch s { - case "smallint": - return "smallserial" - case "integer": - return "serial" - case "bigint": - return "bigserial" - } - return s -} - -func sqlTypeEqual(a, b string) bool { - if a == b { - return true - } - return pkSQLType(a) == pkSQLType(b) -} - -func (t *Table) tryHasOne(joinTable *Table, field *Field, tag *tag) bool { - fk, fkOK := tag.Options["fk"] - if fkOK { - if fk == "-" { - return false - } - fk = tryUnderscorePrefix(fk) - } else { - fk = internal.Underscore(field.GoName) + "_" - } - - fks := foreignKeys(joinTable, t, fk, fkOK) - if len(fks) > 0 { - t.addRelation(&Relation{ - Type: HasOneRelation, - Field: field, - FKs: fks, - JoinTable: joinTable, - }) - return true - } - return false -} - -func (t *Table) tryBelongsToOne(joinTable *Table, field *Field, tag *tag) bool { - fk, fkOK := tag.Options["fk"] - if fkOK { - if fk == "-" { - return false - } - fk = tryUnderscorePrefix(fk) - } else { - fk = internal.Underscore(t.TypeName) + "_" - } - - fks := foreignKeys(t, joinTable, fk, fkOK) - if len(fks) > 0 { - t.addRelation(&Relation{ - Type: BelongsToRelation, - Field: field, - FKs: fks, - JoinTable: joinTable, - }) - return true - } - return false -} - -func (t *Table) addRelation(rel *Relation) { - if t.Relations == nil { - t.Relations = make(map[string]*Relation) - } - _, ok := t.Relations[rel.Field.GoName] - if ok { - panic(fmt.Errorf("%s already has %s", t, rel)) - } - t.Relations[rel.Field.GoName] = rel -} - -func foreignKeys(base, join *Table, fk string, tryFK bool) []*Field { - var fks []*Field - - for _, pk := range base.PKs { - fkName := fk + pk.SQLName - f := join.getField(fkName) - if f != nil && sqlTypeEqual(pk.SQLType, f.SQLType) { - fks = append(fks, f) - } - } - if len(fks) > 0 { - return fks - } - - for _, pk := range base.PKs { - if !strings.HasPrefix(pk.SQLName, "pk_") { - continue - } - fkName := "fk_" + pk.SQLName[3:] - f := join.getField(fkName) - if f != nil && sqlTypeEqual(pk.SQLType, f.SQLType) { - fks = append(fks, f) - } - } - if len(fks) > 0 { - return fks - } - - if fk == "" || len(base.PKs) != 1 { - return nil - } - - if tryFK { - f := join.getField(fk) - if f != nil && sqlTypeEqual(base.PKs[0].SQLType, f.SQLType) { - fks = append(fks, f) - return fks - } - } - - for _, suffix := range []string{"id", "uuid"} { - f := join.getField(fk + suffix) - if f != nil && sqlTypeEqual(base.PKs[0].SQLType, f.SQLType) { - fks = append(fks, f) - return fks - } - } - - return nil -} - -func (t *Table) getField(name string) *Field { - return t.FieldsMap[name] -} - -func scanJSONValue(v reflect.Value, b []byte) error { - if !v.CanSet() { - return fmt.Errorf("pg: Scan(non-pointer %s)", v.Type()) - } - if b == nil { - v.Set(reflect.New(v.Type()).Elem()) - return nil - } - dec := json.NewDecoder(bytes.NewReader(b)) - dec.UseNumber() - return dec.Decode(v.Addr().Interface()) -} - -func tryUnderscorePrefix(s string) string { - if s == "" { - return s - } - if c := s[0]; internal.IsUpper(c) { - return internal.Underscore(s) + "_" - } - return s -} - -func quoteTableName(s string) string { - if isPostgresKeyword(s) { - return 
`"` + s + `"` - } - return s -} diff --git a/vendor/github.com/go-pg/pg/orm/table_create.go b/vendor/github.com/go-pg/pg/orm/table_create.go deleted file mode 100644 index c3bdda3..0000000 --- a/vendor/github.com/go-pg/pg/orm/table_create.go +++ /dev/null @@ -1,155 +0,0 @@ -package orm - -import ( - "errors" - "strconv" -) - -type CreateTableOptions struct { - Temp bool - IfNotExists bool - Varchar int // replaces PostgreSQL data type `text` with `varchar(n)` - - // FKConstraints causes CreateTable to create foreign key constraints - // for has one relations. ON DELETE hook can be added using tag - // `sql:"on_delete:RESTRICT"` on foreign key field. - FKConstraints bool -} - -func CreateTable(db DB, model interface{}, opt *CreateTableOptions) error { - q := NewQuery(db, model) - _, err := q.db.Exec(createTableQuery{ - q: q, - opt: opt, - }) - return err -} - -type createTableQuery struct { - q *Query - opt *CreateTableOptions -} - -func (q createTableQuery) Copy() QueryAppender { - return q -} - -func (q createTableQuery) Query() *Query { - return q.q -} - -func (q createTableQuery) AppendQuery(b []byte) ([]byte, error) { - if q.q.stickyErr != nil { - return nil, q.q.stickyErr - } - if q.q.model == nil { - return nil, errors.New("pg: Model(nil)") - } - - table := q.q.model.Table() - - b = append(b, "CREATE "...) - if q.opt != nil && q.opt.Temp { - b = append(b, "TEMP "...) - } - b = append(b, "TABLE "...) - if q.opt != nil && q.opt.IfNotExists { - b = append(b, "IF NOT EXISTS "...) - } - b = q.q.appendTableName(b) - b = append(b, " ("...) - - for i, field := range table.Fields { - if i > 0 { - b = append(b, ", "...) - } - - b = append(b, field.Column...) - b = append(b, " "...) - if q.opt != nil && q.opt.Varchar > 0 && - field.SQLType == "text" && !field.HasFlag(customTypeFlag) { - b = append(b, "varchar("...) - b = strconv.AppendInt(b, int64(q.opt.Varchar), 10) - b = append(b, ")"...) - } else { - b = append(b, field.SQLType...) - } - if field.HasFlag(NotNullFlag) { - b = append(b, " NOT NULL"...) - } - if field.HasFlag(UniqueFlag) { - b = append(b, " UNIQUE"...) - } - if field.Default != "" { - b = append(b, " DEFAULT "...) - b = append(b, field.Default...) - } - } - - b = appendPKConstraint(b, table.PKs) - for _, fields := range table.Unique { - b = appendUnique(b, fields) - } - - if q.opt != nil && q.opt.FKConstraints { - for _, rel := range table.Relations { - b = q.appendFKConstraint(b, table, rel) - } - } - - b = append(b, ")"...) - - return b, nil -} - -func appendPKConstraint(b []byte, pks []*Field) []byte { - if len(pks) == 0 { - return b - } - - b = append(b, ", PRIMARY KEY ("...) - b = appendColumns(b, "", pks) - b = append(b, ")"...) - return b -} - -func appendUnique(b []byte, fields []*Field) []byte { - b = append(b, ", UNIQUE ("...) - b = appendColumns(b, "", fields) - b = append(b, ")"...) - return b -} - -func (q createTableQuery) appendFKConstraint(b []byte, table *Table, rel *Relation) []byte { - if rel.Type != HasOneRelation { - return b - } - - b = append(b, ", FOREIGN KEY ("...) - b = appendColumns(b, "", rel.FKs) - b = append(b, ")"...) - - b = append(b, " REFERENCES "...) - b = q.q.FormatQuery(b, string(rel.JoinTable.Name)) - b = append(b, " ("...) - b = appendColumns(b, "", rel.JoinTable.PKs) - b = append(b, ")"...) - - if s := onDelete(rel.FKs); s != "" { - b = append(b, " ON DELETE "...) - b = append(b, s...) 
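//------------------------------------------------------------------------------
// Editor's note: illustrative sketch, not part of the diff. orm.CreateTable
// above generates DDL from the model's Table metadata; a hedged usage sketch
// for the options it honors (Temp, IfNotExists, Varchar, FKConstraints):
//
//	err := orm.CreateTable(db, (*Item)(nil), &orm.CreateTableOptions{
//		IfNotExists:   true,
//		Varchar:       255,  // render text columns as varchar(255)
//		FKConstraints: true, // FOREIGN KEY ... REFERENCES for has-one relations
//	})
//------------------------------------------------------------------------------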
- } - - return b -} - -func onDelete(fks []*Field) string { - var onDelete string - for _, f := range fks { - if f.OnDelete != "" { - onDelete = f.OnDelete - break - } - } - return onDelete -} diff --git a/vendor/github.com/go-pg/pg/orm/table_drop.go b/vendor/github.com/go-pg/pg/orm/table_drop.go deleted file mode 100644 index de509fd..0000000 --- a/vendor/github.com/go-pg/pg/orm/table_drop.go +++ /dev/null @@ -1,50 +0,0 @@ -package orm - -import "errors" - -type DropTableOptions struct { - IfExists bool - Cascade bool -} - -func DropTable(db DB, model interface{}, opt *DropTableOptions) error { - q := NewQuery(db, model) - _, err := q.db.Exec(dropTableQuery{ - q: q, - opt: opt, - }) - return err -} - -type dropTableQuery struct { - q *Query - opt *DropTableOptions -} - -func (q dropTableQuery) Copy() QueryAppender { - return q -} - -func (q dropTableQuery) Query() *Query { - return q.q -} - -func (q dropTableQuery) AppendQuery(b []byte) ([]byte, error) { - if q.q.stickyErr != nil { - return nil, q.q.stickyErr - } - if q.q.model == nil { - return nil, errors.New("pg: Model(nil)") - } - - b = append(b, "DROP TABLE "...) - if q.opt != nil && q.opt.IfExists { - b = append(b, "IF EXISTS "...) - } - b = q.q.appendTableName(b) - if q.opt != nil && q.opt.Cascade { - b = append(b, " CASCADE"...) - } - - return b, nil -} diff --git a/vendor/github.com/go-pg/pg/orm/table_params.go b/vendor/github.com/go-pg/pg/orm/table_params.go deleted file mode 100644 index db3437a..0000000 --- a/vendor/github.com/go-pg/pg/orm/table_params.go +++ /dev/null @@ -1,29 +0,0 @@ -package orm - -import "reflect" - -type tableParams struct { - table *Table - strct reflect.Value -} - -func newTableParams(strct interface{}) (*tableParams, bool) { - v := reflect.ValueOf(strct) - if !v.IsValid() { - return nil, false - } - - v = reflect.Indirect(v) - if v.Kind() != reflect.Struct { - return nil, false - } - - return &tableParams{ - table: GetTable(v.Type()), - strct: v, - }, true -} - -func (m tableParams) AppendParam(b []byte, f QueryFormatter, name string) ([]byte, bool) { - return m.table.AppendParam(b, m.strct, name) -} diff --git a/vendor/github.com/go-pg/pg/orm/tables.go b/vendor/github.com/go-pg/pg/orm/tables.go deleted file mode 100644 index 0a01b03..0000000 --- a/vendor/github.com/go-pg/pg/orm/tables.go +++ /dev/null @@ -1,112 +0,0 @@ -package orm - -import ( - "fmt" - "reflect" - "sync" -) - -var _tables = newTables() - -type tableInProgress struct { - table *Table - wg sync.WaitGroup -} - -// GetTable returns a Table for a struct type. -func GetTable(typ reflect.Type) *Table { - return _tables.Get(typ) -} - -// RegisterTable registers a struct as SQL table. -// It is usually used to register intermediate table -// in many to many relationship. 
-func RegisterTable(strct interface{}) { - _tables.Register(strct) -} - -type tables struct { - mu sync.RWMutex - inProgress map[reflect.Type]*tableInProgress - tables map[reflect.Type]*Table -} - -func newTables() *tables { - return &tables{ - inProgress: make(map[reflect.Type]*tableInProgress), - tables: make(map[reflect.Type]*Table), - } -} - -func (t *tables) Register(strct interface{}) { - typ := reflect.TypeOf(strct) - if typ.Kind() == reflect.Ptr { - typ = typ.Elem() - } - _ = t.Get(typ) -} - -func (t *tables) get(typ reflect.Type, allowInProgress bool) *Table { - if typ.Kind() != reflect.Struct { - panic(fmt.Errorf("got %s, wanted %s", typ.Kind(), reflect.Struct)) - } - - t.mu.RLock() - table, ok := t.tables[typ] - t.mu.RUnlock() - if ok { - return table - } - - t.mu.Lock() - - table, ok = t.tables[typ] - if ok { - t.mu.Unlock() - return table - } - - inProgress := t.inProgress[typ] - if inProgress != nil { - t.mu.Unlock() - if !allowInProgress { - inProgress.wg.Wait() - } - return inProgress.table - } - - table = newTable(typ) - inProgress = &tableInProgress{ - table: table, - } - inProgress.wg.Add(1) - t.inProgress[typ] = inProgress - - t.mu.Unlock() - table.init() - inProgress.wg.Done() - t.mu.Lock() - - delete(t.inProgress, typ) - t.tables[typ] = table - - t.mu.Unlock() - return table -} - -func (t *tables) Get(typ reflect.Type) *Table { - return t.get(typ, false) -} - -func (t *tables) getByName(name string) *Table { - t.mu.RLock() - defer t.mu.RUnlock() - - for _, t := range t.tables { - if string(t.Name) == name || t.ModelName == name { - return t - } - } - - return nil -} diff --git a/vendor/github.com/go-pg/pg/orm/tag.go b/vendor/github.com/go-pg/pg/orm/tag.go deleted file mode 100644 index 8d630d6..0000000 --- a/vendor/github.com/go-pg/pg/orm/tag.go +++ /dev/null @@ -1,145 +0,0 @@ -package orm - -import ( - "github.com/go-pg/pg/internal/parser" -) - -type tag struct { - Name string - Options map[string]string -} - -func parseTag(s string) *tag { - p := &tagParser{ - Parser: parser.NewString(s), - } - p.parseKey() - return &p.tag -} - -type tagParser struct { - *parser.Parser - - tag tag - hasName bool - key string -} - -func (p *tagParser) setTagOption(key, value string) { - if !p.hasName { - p.hasName = true - if key == "" { - p.tag.Name = value - return - } - } - if p.tag.Options == nil { - p.tag.Options = make(map[string]string) - } - if key == "" { - p.tag.Options[value] = "" - } else { - p.tag.Options[key] = value - } -} - -func (p *tagParser) parseKey() { - p.key = "" - - var b []byte - for p.Valid() { - c := p.Read() - switch c { - case ',': - p.Skip(' ') - p.setTagOption("", string(b)) - p.parseKey() - return - case ':': - p.key = string(b) - p.parseValue() - return - case '\'': - p.parseQuotedValue() - return - default: - b = append(b, c) - } - } - - if len(b) > 0 { - p.setTagOption("", string(b)) - } -} - -func (p *tagParser) parseValue() { - const quote = '\'' - - c := p.Peek() - if c == quote { - p.Skip(quote) - p.parseQuotedValue() - return - } - - var b []byte - for p.Valid() { - c = p.Read() - switch c { - case '\\': - c = p.Read() - b = append(b, c) - case ',': - p.Skip(' ') - p.setTagOption(p.key, string(b)) - p.parseKey() - return - default: - b = append(b, c) - } - } - p.setTagOption(p.key, string(b)) -} - -func (p *tagParser) parseQuotedValue() { - const quote = '\'' - - var b []byte - b = append(b, quote) - - for p.Valid() { - bb, ok := p.ReadSep(quote) - if !ok { - b = append(b, bb...) 
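//------------------------------------------------------------------------------
// Editor's note: illustrative sketch, not part of the diff. Per the doc
// comment above, RegisterTable exists mainly for many-to-many join tables,
// which are never queried directly as models. A hypothetical wiring (Author
// defined elsewhere):
//
//	type BookAuthor struct {
//		BookID   int64
//		AuthorID int64
//	}
//
//	type Book struct {
//		ID      int64
//		Authors []Author `pg:",many2many:book_authors"`
//	}
//
//	func init() { orm.RegisterTable((*BookAuthor)(nil)) }
//------------------------------------------------------------------------------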
- break - } - - if len(bb) > 0 && bb[len(bb)-1] == '\\' { - b = append(b, bb[:len(bb)-1]...) - b = append(b, quote) - continue - } - - b = append(b, bb...) - b = append(b, quote) - break - } - - p.setTagOption(p.key, string(b)) - if p.Skip(',') { - p.Skip(' ') - } - p.parseKey() -} - -func unquoteTagValue(s string) (string, bool) { - const quote = '\'' - - if len(s) < 2 { - return s, false - } - if s[0] == quote && s[len(s)-1] == quote { - return s[1 : len(s)-1], true - } - return s, false -} diff --git a/vendor/github.com/go-pg/pg/orm/update.go b/vendor/github.com/go-pg/pg/orm/update.go deleted file mode 100644 index 492262f..0000000 --- a/vendor/github.com/go-pg/pg/orm/update.go +++ /dev/null @@ -1,257 +0,0 @@ -package orm - -import ( - "errors" - "fmt" - "reflect" - - "github.com/go-pg/pg/internal" - "github.com/go-pg/pg/types" -) - -func Update(db DB, model interface{}) error { - res, err := NewQuery(db, model).WherePK().Update() - if err != nil { - return err - } - return internal.AssertOneRow(res.RowsAffected()) -} - -type updateQuery struct { - q *Query - omitZero bool -} - -var _ QueryAppender = (*updateQuery)(nil) - -func (q updateQuery) Copy() QueryAppender { - return updateQuery{ - q: q.q.Copy(), - } -} - -func (q updateQuery) Query() *Query { - return q.q -} - -func (q updateQuery) AppendQuery(b []byte) ([]byte, error) { - if q.q.stickyErr != nil { - return nil, q.q.stickyErr - } - - var err error - - if len(q.q.with) > 0 { - b, err = q.q.appendWith(b) - if err != nil { - return nil, err - } - } - - b = append(b, "UPDATE "...) - b = q.q.appendFirstTableWithAlias(b) - - b, err = q.mustAppendSet(b) - if err != nil { - return nil, err - } - - isSliceModel := q.q.isSliceModel() - if q.q.hasMultiTables() || isSliceModel { - b = append(b, " FROM "...) - b = q.q.appendOtherTables(b) - - if isSliceModel { - b, err = q.appendSliceModelData(b) - if err != nil { - return nil, err - } - } - } - - b = append(b, " WHERE "...) - if isSliceModel { - table := q.q.model.Table() - b = appendWhereColumnAndColumn(b, table.Alias, table.PKs) - - if q.q.hasWhere() { - b = append(b, " AND "...) - b = q.q.appendWhere(b) - } - } else { - b, err = q.q.mustAppendWhere(b) - if err != nil { - return nil, err - } - } - - if len(q.q.returning) > 0 { - b = q.q.appendReturning(b) - } - - return b, nil -} - -func (q updateQuery) mustAppendSet(b []byte) ([]byte, error) { - if len(q.q.set) > 0 { - b = q.q.appendSet(b) - return b, nil - } - - if q.q.model == nil { - return nil, errors.New("pg: Model(nil)") - } - - b = append(b, " SET "...) - - value := q.q.model.Value() - var err error - if value.Kind() == reflect.Struct { - b, err = q.appendSetStruct(b, value) - } else { - if value.Len() > 0 { - b, err = q.appendSetSlice(b, value) - } else { - err = fmt.Errorf("pg: can't bulk-update empty slice %s", value.Type()) - } - } - if err != nil { - return nil, err - } - - return b, nil -} - -func (q updateQuery) appendSetStruct(b []byte, strct reflect.Value) ([]byte, error) { - fields, err := q.q.getFields() - if err != nil { - return nil, err - } - - if len(fields) == 0 { - fields = q.q.model.Table().DataFields - } - - pos := len(b) - for _, f := range fields { - omitZero := f.OmitZero() && f.IsZero(strct) - if omitZero && q.omitZero { - continue - } - - if len(b) != pos { - b = append(b, ", "...) - pos = len(b) - } - - b = append(b, f.Column...) - b = append(b, " = "...) 
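//------------------------------------------------------------------------------
// Editor's note: illustrative sketch, not part of the diff. orm.Update above
// is the WherePK-based single-row update, and it asserts that exactly one row
// was affected, so a missing row surfaces as an error (pg.ErrNoRows) rather
// than silently updating nothing. Caller-side sketch for a hypothetical Item:
//
//	item.Name = "renamed"
//	if err := orm.Update(db, item); err != nil {
//		log.Fatal(err) // UPDATE "items" AS "it" SET ... WHERE "it"."id" = ?
//	}
//------------------------------------------------------------------------------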
- - app, ok := q.q.modelValues[f.SQLName] - if ok { - b = app.AppendFormat(b, q.q) - continue - } - - if f.OmitZero() && f.IsZero(strct) { - b = append(b, "NULL"...) - } else { - b = f.AppendValue(b, strct, 1) - } - } - - return b, nil -} - -func (q updateQuery) appendSetSlice(b []byte, slice reflect.Value) ([]byte, error) { - fields, err := q.q.getFields() - if err != nil { - return nil, err - } - - if len(fields) == 0 { - fields = q.q.model.Table().DataFields - } - - for i, f := range fields { - if i > 0 { - b = append(b, ", "...) - } - - b = append(b, f.Column...) - b = append(b, " = "...) - b = append(b, "_data."...) - b = append(b, f.Column...) - } - - return b, nil -} - -func (q updateQuery) appendSliceModelData(b []byte) ([]byte, error) { - columns, err := q.q.getDataFields() - if err != nil { - return nil, err - } - - if len(columns) > 0 { - columns = append(columns, q.q.model.Table().PKs...) - } else { - columns = q.q.model.Table().Fields - } - - return q.appendSliceValues(b, columns, q.q.model.Value()), nil -} - -func (q updateQuery) appendSliceValues(b []byte, fields []*Field, slice reflect.Value) []byte { - b = append(b, "(VALUES ("...) - for i := 0; i < slice.Len(); i++ { - el := indirect(slice.Index(i)) - b = q.appendValues(b, fields, el) - if i != slice.Len()-1 { - b = append(b, "), ("...) - } - } - b = append(b, ")) AS _data("...) - b = appendColumns(b, "", fields) - b = append(b, ")"...) - return b -} - -func (q updateQuery) appendValues(b []byte, fields []*Field, strct reflect.Value) []byte { - for i, f := range fields { - if i > 0 { - b = append(b, ", "...) - } - - app, ok := q.q.modelValues[f.SQLName] - if ok { - b = app.AppendFormat(b, q.q) - continue - } - - if f.OmitZero() && f.IsZero(strct) { - b = append(b, "NULL"...) - } else { - b = f.AppendValue(b, strct, 1) - } - if f.HasFlag(customTypeFlag) { - b = append(b, "::"...) - b = append(b, f.SQLType...) - } - } - return b -} - -func appendWhereColumnAndColumn(b []byte, alias types.Q, fields []*Field) []byte { - for i, f := range fields { - if i > 0 { - b = append(b, " AND "...) - } - b = append(b, alias...) - b = append(b, '.') - b = append(b, f.Column...) - b = append(b, " = _data."...) - b = append(b, f.Column...) - } - return b -} diff --git a/vendor/github.com/go-pg/pg/orm/url_filter.go b/vendor/github.com/go-pg/pg/orm/url_filter.go deleted file mode 100644 index c3da83b..0000000 --- a/vendor/github.com/go-pg/pg/orm/url_filter.go +++ /dev/null @@ -1,114 +0,0 @@ -package orm - -import ( - "net/url" - "strings" - - "github.com/go-pg/pg/types" -) - -// URLFilter is used with Query.Apply to add WHERE clauses from the URL values: -// - ?foo=bar - Where(`"foo" = 'bar'`) -// - ?foo=hello&foo=world - Where(`"foo" IN ('hello','world')`) -// - ?foo__exclude=bar - Where(`"foo" != 'bar'`) -// - ?foo__ieq=bar - Where(`"foo" ILIKE 'bar'`) -// - ?foo__match=bar - Where(`"foo" SIMILAR TO 'bar'`) -// - ?foo__gt=42 - Where(`"foo" > 42`) -// - ?foo__gte=42 - Where(`"foo" >= 42`) -// - ?foo__lt=42 - Where(`"foo" < 42`) -// - ?foo__lte=42 - Where(`"foo" <= 42`) -type URLFilter struct { - values URLValues - allowed map[string]struct{} -} - -func NewURLFilter(values url.Values) *URLFilter { - return &URLFilter{ - values: URLValues(values), - } -} - -// Values returns URL values. 
-func (f *URLFilter) Values() URLValues { - return f.values -} - -func (f *URLFilter) Allow(filter string) { - if f.allowed == nil { - f.allowed = make(map[string]struct{}) - } - f.allowed[filter] = struct{}{} -} - -func (f *URLFilter) isAllowed(filter string) bool { - if len(f.allowed) == 0 { - return true - } - _, ok := f.allowed[filter] - return ok -} - -func (f *URLFilter) Filters(q *Query) (*Query, error) { - if f == nil { - return q, nil - } - - for filter, values := range f.values { - if !f.isAllowed(filter) { - continue - } - - var operation string - if i := strings.Index(filter, "__"); i != -1 { - filter, operation = filter[:i], filter[i+2:] - } - - if q.model.Table().HasField(filter) { - q = addOperator(q, filter, operation, values) - } - } - return q, nil -} - -// URLFilters is a shortcut for NewURLFilter(urlValues).Filters. -func URLFilters(urlValues url.Values) func(*Query) (*Query, error) { - return NewURLFilter(urlValues).Filters -} - -func addOperator(q *Query, field, operator string, values []string) *Query { - switch operator { - case "gt": - q = forEachValue(q, field, values, "? > ?") - case "gte": - q = forEachValue(q, field, values, "? >= ?") - case "lt": - q = forEachValue(q, field, values, "? < ?") - case "lte": - q = forEachValue(q, field, values, "? <= ?") - case "ieq": - q = forEachValue(q, field, values, "? ILIKE ?") - case "match": - q = forEachValue(q, field, values, "? SIMILAR TO ?") - case "exclude": - q = forAllValues(q, field, values, "? != ?", "? NOT IN (?)") - case "", "include": - q = forAllValues(q, field, values, "? = ?", "? IN (?)") - } - return q -} - -func forEachValue(q *Query, field string, values []string, queryTemplate string) *Query { - for _, value := range values { - q = q.Where(queryTemplate, types.F(field), value) - } - return q -} - -func forAllValues(q *Query, field string, values []string, queryTemplate, queryArrayTemplate string) *Query { - if len(values) > 1 { - q = q.Where(queryArrayTemplate, types.F(field), types.InSlice(values)) - } else { - q = q.Where(queryTemplate, types.F(field), values[0]) - } - return q -} diff --git a/vendor/github.com/go-pg/pg/orm/url_values.go b/vendor/github.com/go-pg/pg/orm/url_values.go deleted file mode 100644 index 9ba6000..0000000 --- a/vendor/github.com/go-pg/pg/orm/url_values.go +++ /dev/null @@ -1,106 +0,0 @@ -package orm - -import ( - "net/url" - "strconv" - "time" - - "github.com/go-pg/pg/types" -) - -type URLValues map[string][]string - -func (v URLValues) Has(name string) bool { - _, ok := v[name] - return ok -} - -func (v URLValues) SetDefault(name string, values ...string) { - if !v.Has(name) { - v[name] = values - } -} - -func (v URLValues) Strings(name string) []string { - return v[name] -} - -func (v URLValues) String(name string) string { - values := v.Strings(name) - if len(values) == 0 { - return "" - } - return values[0] -} - -func (v URLValues) Bool(name string) (bool, error) { - if !v.Has(name) { - return false, nil - } - s := v.String(name) - if s == "" { - return true, nil - } - return strconv.ParseBool(s) -} - -func (v URLValues) Int(name string) (int, error) { - s := v.String(name) - if s == "" { - return 0, nil - } - return strconv.Atoi(s) -} - -func (v URLValues) MaybeInt(name string) int { - n, _ := v.Int(name) - return n -} - -func (v URLValues) Int64(name string) (int64, error) { - s := v.String(name) - if s == "" { - return 0, nil - } - return strconv.ParseInt(s, 10, 64) -} - -func (v URLValues) MaybeInt64(name string) int64 { - n, _ := v.Int64(name) - return n -} - -func 
(v URLValues) Time(name string) (time.Time, error) { - s := v.String(name) - if s == "" { - return time.Time{}, nil - } - - n, err := strconv.ParseInt(s, 10, 64) - if err == nil { - return time.Unix(n, 0), nil - } - return types.ParseTimeString(s) -} - -func (v URLValues) MaybeTime(name string) time.Time { - tm, _ := v.Time(name) - return tm -} - -func (v URLValues) Duration(name string) (time.Duration, error) { - s := v.String(name) - if s == "" { - return 0, nil - } - return time.ParseDuration(s) -} - -func (v URLValues) MaybeDuration(name string) time.Duration { - dur, _ := v.Duration(name) - return dur -} - -func (v URLValues) Pager() *Pager { - return NewPager(url.Values(v)) -} diff --git a/vendor/github.com/go-pg/pg/orm/util.go b/vendor/github.com/go-pg/pg/orm/util.go deleted file mode 100644 index 73b1a7f..0000000 --- a/vendor/github.com/go-pg/pg/orm/util.go +++ /dev/null @@ -1,127 +0,0 @@ -package orm - -import ( - "reflect" - - "github.com/go-pg/pg/types" -) - -func indirect(v reflect.Value) reflect.Value { - switch v.Kind() { - case reflect.Interface: - return indirect(v.Elem()) - case reflect.Ptr: - return v.Elem() - default: - return v - } -} - -func indirectType(t reflect.Type) reflect.Type { - if t.Kind() == reflect.Ptr { - t = t.Elem() - } - return t -} - -func sliceElemType(v reflect.Value) reflect.Type { - elemType := v.Type().Elem() - if elemType.Kind() == reflect.Interface && v.Len() > 0 { - return indirect(v.Index(0).Elem()).Type() - } else { - return indirectType(elemType) - } -} - -func typeByIndex(t reflect.Type, index []int) reflect.Type { - for _, x := range index { - switch t.Kind() { - case reflect.Ptr: - t = t.Elem() - case reflect.Slice: - t = indirectType(t.Elem()) - } - t = t.Field(x).Type - } - return indirectType(t) -} - -func fieldByIndex(v reflect.Value, index []int) reflect.Value { - for i, x := range index { - if i > 0 { - v = indirectNew(v) - } - v = v.Field(x) - } - return v -} - -func indirectNew(v reflect.Value) reflect.Value { - if v.Kind() == reflect.Ptr { - if v.IsNil() { - v.Set(reflect.New(v.Type().Elem())) - } - v = v.Elem() - } - return v -} - -func walk(v reflect.Value, index []int, fn func(reflect.Value)) { - v = reflect.Indirect(v) - switch v.Kind() { - case reflect.Slice: - for i := 0; i < v.Len(); i++ { - visitField(v.Index(i), index, fn) - } - default: - visitField(v, index, fn) - } -} - -func visitField(v reflect.Value, index []int, fn func(reflect.Value)) { - v = reflect.Indirect(v) - if len(index) > 0 { - v = v.Field(index[0]) - if v.Kind() == reflect.Ptr && v.IsNil() { - return - } - walk(v, index[1:], fn) - } else { - fn(v) - } -} - -func dstValues(model tableModel, fields []*Field) map[string][]reflect.Value { - mp := make(map[string][]reflect.Value) - var id []byte - walk(model.Root(), model.ParentIndex(), func(v reflect.Value) { - id = modelId(id[:0], v, fields) - mp[string(id)] = append(mp[string(id)], v.FieldByIndex(model.Relation().Field.Index)) - }) - return mp -} - -func modelId(b []byte, v reflect.Value, fields []*Field) []byte { - for i, f := range fields { - if i > 0 { - b = append(b, ',') - } - b = f.AppendValue(b, v, 0) - } - return b -} - -func appendColumns(b []byte, table types.Q, fields []*Field) []byte { - for i, f := range fields { - if i > 0 { - b = append(b, ", "...) - } - - if len(table) > 0 { - b = append(b, table...) 
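//------------------------------------------------------------------------------
// Editor's note: illustrative sketch, not part of the diff. The URLFilter /
// URLValues helpers above turn request query strings into WHERE clauses; a
// hypothetical HTTP handler would apply them like so:
//
//	var items []Item
//	err := db.Model(&items).
//		Apply(orm.URLFilters(req.URL.Query())). // e.g. ?name__ieq=foo&kind__exclude=misc
//		Select()
//------------------------------------------------------------------------------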
- b = append(b, '.') - } - b = types.AppendField(b, f.SQLName, 1) - } - return b -} diff --git a/vendor/github.com/go-pg/pg/orm/zero.go b/vendor/github.com/go-pg/pg/orm/zero.go deleted file mode 100644 index 659293f..0000000 --- a/vendor/github.com/go-pg/pg/orm/zero.go +++ /dev/null @@ -1,119 +0,0 @@ -package orm - -import ( - "database/sql/driver" - "reflect" - - "github.com/go-pg/pg/types" -) - -var driverValuerType = reflect.TypeOf((*driver.Valuer)(nil)).Elem() -var appenderType = reflect.TypeOf((*types.ValueAppender)(nil)).Elem() -var isZeroerType = reflect.TypeOf((*isZeroer)(nil)).Elem() - -type isZeroer interface { - IsZero() bool -} - -func isZeroFunc(typ reflect.Type) func(reflect.Value) bool { - if typ.Implements(isZeroerType) { - return isZero - } - - switch typ.Kind() { - case reflect.Array: - if typ.Elem().Kind() == reflect.Uint8 { - return isZeroBytes - } - return isZeroLen - case reflect.Map, reflect.Slice, reflect.String: - return isZeroLen - case reflect.Bool: - return isZeroBool - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return isZeroInt - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return isZeroUint - case reflect.Float32, reflect.Float64: - return isZeroFloat - case reflect.Interface, reflect.Ptr: - return isZeroNil - } - - if typ.Implements(appenderType) { - return isZeroAppenderValue - } - if typ.Implements(driverValuerType) { - return isZeroDriverValue - } - - return isZeroFalse -} - -func isZero(v reflect.Value) bool { - if v.Kind() == reflect.Ptr { - return v.IsNil() - } - return v.Interface().(isZeroer).IsZero() -} - -func isZeroAppenderValue(v reflect.Value) bool { - if v.Kind() == reflect.Ptr { - return v.IsNil() - } - - appender := v.Interface().(types.ValueAppender) - value := appender.AppendValue(nil, 0) - return value == nil -} - -func isZeroDriverValue(v reflect.Value) bool { - if v.Kind() == reflect.Ptr { - return v.IsNil() - } - - valuer := v.Interface().(driver.Valuer) - value, err := valuer.Value() - if err != nil { - return false - } - return value == nil -} - -func isZeroLen(v reflect.Value) bool { - return v.Len() == 0 -} - -func isZeroNil(v reflect.Value) bool { - return v.IsNil() -} - -func isZeroBool(v reflect.Value) bool { - return !v.Bool() -} - -func isZeroInt(v reflect.Value) bool { - return v.Int() == 0 -} - -func isZeroUint(v reflect.Value) bool { - return v.Uint() == 0 -} - -func isZeroFloat(v reflect.Value) bool { - return v.Float() == 0 -} - -func isZeroBytes(v reflect.Value) bool { - b := v.Slice(0, v.Len()).Bytes() - for _, c := range b { - if c != 0 { - return false - } - } - return true -} - -func isZeroFalse(v reflect.Value) bool { - return false -} diff --git a/vendor/github.com/go-pg/pg/pg.go b/vendor/github.com/go-pg/pg/pg.go deleted file mode 100644 index adae486..0000000 --- a/vendor/github.com/go-pg/pg/pg.go +++ /dev/null @@ -1,209 +0,0 @@ -package pg - -import ( - "log" - "os" - "strconv" - - "github.com/go-pg/pg/internal" - "github.com/go-pg/pg/orm" - "github.com/go-pg/pg/types" -) - -// Discard is used with Query and QueryOne to discard rows. -var Discard orm.Discard - -func init() { - SetLogger(log.New(os.Stderr, "pg: ", log.LstdFlags|log.Lshortfile)) -} - -// Model returns new query for the optional model. -func Model(model ...interface{}) *orm.Query { - return orm.NewQuery(nil, model...) -} - -// Scan returns ColumnScanner that copies the columns in the -// row into the values. 
-func Scan(values ...interface{}) orm.ColumnScanner { - return orm.Scan(values...) -} - -// Q replaces any placeholders found in the query. -func Q(query string, params ...interface{}) types.ValueAppender { - return orm.Q(query, params...) -} - -// F quotes a SQL identifier such as a table or column name replacing any -// placeholders found in the field. -func F(field string) types.ValueAppender { - return types.F(field) -} - -// In accepts a slice and returns a wrapper that can be used with PostgreSQL -// IN operator: -// -// Where("id IN (?)", pg.In([]int{1, 2, 3, 4})) -// -// produces -// -// WHERE id IN (1, 2, 3, 4) -func In(slice interface{}) types.ValueAppender { - return types.InSlice(slice) -} - -// InMulti accepts multiple values and returns a wrapper that can be used -// with PostgreSQL IN operator: -// -// Where("(id1, id2) IN (?)", pg.InMulti([]int{1, 2}, []int{3, 4})) -// -// produces -// -// WHERE (id1, id2) IN ((1, 2), (3, 4)) -func InMulti(values ...interface{}) types.ValueAppender { - return types.In(values...) -} - -// Array accepts a slice and returns a wrapper for working with PostgreSQL -// array data type. -// -// For struct fields you can use array tag: -// -// Emails []string `sql:",array"` -func Array(v interface{}) *types.Array { - return types.NewArray(v) -} - -// Hstore accepts a map and returns a wrapper for working with hstore data type. -// Supported map types are: -// - map[string]string -// -// For struct fields you can use hstore tag: -// -// Attrs map[string]string `sql:",hstore"` -func Hstore(v interface{}) *types.Hstore { - return types.NewHstore(v) -} - -func SetLogger(logger *log.Logger) { - internal.Logger = logger -} - -//------------------------------------------------------------------------------ - -type Strings []string - -var _ orm.HooklessModel = (*Strings)(nil) -var _ types.ValueAppender = (*Strings)(nil) - -func (strings *Strings) Init() error { - if s := *strings; len(s) > 0 { - *strings = s[:0] - } - return nil -} - -func (strings *Strings) NewModel() orm.ColumnScanner { - return strings -} - -func (Strings) AddModel(_ orm.ColumnScanner) error { - return nil -} - -func (strings *Strings) ScanColumn(colIdx int, _ string, b []byte) error { - *strings = append(*strings, string(b)) - return nil -} - -func (strings Strings) AppendValue(dst []byte, quote int) []byte { - if len(strings) <= 0 { - return dst - } - - for _, s := range strings { - dst = types.AppendString(dst, s, 1) - dst = append(dst, ',') - } - dst = dst[:len(dst)-1] - return dst -} - -//------------------------------------------------------------------------------ - -type Ints []int64 - -var _ orm.HooklessModel = (*Ints)(nil) -var _ types.ValueAppender = (*Ints)(nil) - -func (ints *Ints) Init() error { - if s := *ints; len(s) > 0 { - *ints = s[:0] - } - return nil -} - -func (ints *Ints) NewModel() orm.ColumnScanner { - return ints -} - -func (Ints) AddModel(_ orm.ColumnScanner) error { - return nil -} - -func (ints *Ints) ScanColumn(colIdx int, colName string, b []byte) error { - n, err := strconv.ParseInt(internal.BytesToString(b), 10, 64) - if err != nil { - return err - } - *ints = append(*ints, n) - return nil -} - -func (ints Ints) AppendValue(dst []byte, quote int) []byte { - if len(ints) <= 0 { - return dst - } - - for _, v := range ints { - dst = strconv.AppendInt(dst, v, 10) - dst = append(dst, ',') - } - dst = dst[:len(dst)-1] - return dst -} - -//------------------------------------------------------------------------------ - -type IntSet map[int64]struct{} - -var _ 
orm.HooklessModel = (*IntSet)(nil) - -func (set *IntSet) Init() error { - if len(*set) > 0 { - *set = make(map[int64]struct{}) - } - return nil -} - -func (set *IntSet) NewModel() orm.ColumnScanner { - return set -} - -func (IntSet) AddModel(_ orm.ColumnScanner) error { - return nil -} - -func (setptr *IntSet) ScanColumn(colIdx int, colName string, b []byte) error { - set := *setptr - if set == nil { - *setptr = make(IntSet) - set = *setptr - } - - n, err := strconv.ParseInt(internal.BytesToString(b), 10, 64) - if err != nil { - return err - } - set[n] = struct{}{} - return nil -} diff --git a/vendor/github.com/go-pg/pg/result.go b/vendor/github.com/go-pg/pg/result.go deleted file mode 100644 index 3a61b3b..0000000 --- a/vendor/github.com/go-pg/pg/result.go +++ /dev/null @@ -1,49 +0,0 @@ -package pg - -import ( - "bytes" - "strconv" - - "github.com/go-pg/pg/internal" - "github.com/go-pg/pg/orm" -) - -// A result summarizes an executed SQL command. -type result struct { - model orm.Model - - affected int - returned int -} - -var _ orm.Result = (*result)(nil) - -func (res *result) parse(b []byte) error { - res.affected = -1 - - ind := bytes.LastIndexByte(b, ' ') - if ind == -1 { - return nil - } - - s := internal.BytesToString(b[ind+1 : len(b)-1]) - - affected, err := strconv.Atoi(s) - if err == nil { - res.affected = affected - } - - return nil -} - -func (res *result) Model() orm.Model { - return res.model -} - -func (res *result) RowsAffected() int { - return res.affected -} - -func (res *result) RowsReturned() int { - return res.returned -} diff --git a/vendor/github.com/go-pg/pg/stmt.go b/vendor/github.com/go-pg/pg/stmt.go deleted file mode 100644 index af96df7..0000000 --- a/vendor/github.com/go-pg/pg/stmt.go +++ /dev/null @@ -1,278 +0,0 @@ -package pg - -import ( - "errors" - "sync" - "time" - - "github.com/go-pg/pg/internal" - "github.com/go-pg/pg/internal/pool" - "github.com/go-pg/pg/orm" -) - -var errStmtClosed = errors.New("pg: statement is closed") - -// Stmt is a prepared statement. Stmt is safe for concurrent use by -// multiple goroutines. -type Stmt struct { - db *DB - - mu sync.Mutex - _cn *pool.Conn - inTx bool - - q string - name string - columns [][]byte - - stickyErr error -} - -// Prepare creates a prepared statement for later queries or -// executions. Multiple queries or executions may be run concurrently -// from the returned statement. -func (db *DB) Prepare(q string) (*Stmt, error) { - cn, err := db.conn() - if err != nil { - return nil, err - } - - stmt, err := prepare(db, cn, q) - if err != nil { - db.freeConn(cn, err) - return nil, err - } - - return stmt, nil -} - -func (stmt *Stmt) conn() (*pool.Conn, error) { - if stmt._cn == nil { - if stmt.stickyErr != nil { - return nil, stmt.stickyErr - } - return nil, errStmtClosed - } - return stmt._cn, nil -} - -func (stmt *Stmt) exec(params ...interface{}) (orm.Result, error) { - stmt.mu.Lock() - defer stmt.mu.Unlock() - - cn, err := stmt.conn() - if err != nil { - return nil, err - } - return stmt.extQuery(cn, stmt.name, params...) -} - -// Exec executes a prepared statement with the given parameters. -func (stmt *Stmt) Exec(params ...interface{}) (res orm.Result, err error) { - for attempt := 0; attempt <= stmt.db.opt.MaxRetries; attempt++ { - if attempt >= 1 { - time.Sleep(stmt.db.retryBackoff(attempt - 1)) - } - - start := time.Now() - res, err = stmt.exec(params...) 
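//------------------------------------------------------------------------------
// Editor's note: illustrative sketch, not part of the diff. A caller-side
// sketch of the prepared-statement API above; note that Prepare sends the
// query through the extended protocol, so placeholders are assumed to be
// PostgreSQL's positional $1..$n rather than go-pg's ?:
//
//	stmt, err := db.Prepare(`SELECT id, name FROM items WHERE kind = $1`)
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer stmt.Close()
//
//	var items []Item
//	_, err = stmt.Query(&items, "misc") // retried on transient errors, per MaxRetries
//------------------------------------------------------------------------------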
- stmt.db.queryProcessed(stmt.db, start, stmt.q, params, attempt, res, err) - - if !stmt.db.shouldRetry(err) { - break - } - } - if err != nil { - stmt.setErr(err) - } - return -} - -// ExecOne acts like Exec, but query must affect only one row. It -// returns ErrNoRows error when query returns zero rows or -// ErrMultiRows when query returns multiple rows. -func (stmt *Stmt) ExecOne(params ...interface{}) (orm.Result, error) { - res, err := stmt.Exec(params...) - if err != nil { - return nil, err - } - - if err := internal.AssertOneRow(res.RowsAffected()); err != nil { - return nil, err - } - return res, nil -} - -func (stmt *Stmt) query(model interface{}, params ...interface{}) (orm.Result, error) { - stmt.mu.Lock() - defer stmt.mu.Unlock() - - cn, err := stmt.conn() - if err != nil { - return nil, err - } - - res, err := stmt.extQueryData(cn, stmt.name, model, stmt.columns, params...) - if err != nil { - return nil, err - } - - if mod := res.Model(); mod != nil && res.RowsReturned() > 0 { - if err = mod.AfterQuery(stmt.db); err != nil { - return res, err - } - } - - return res, nil -} - -// Query executes a prepared query statement with the given parameters. -func (stmt *Stmt) Query(model interface{}, params ...interface{}) (res orm.Result, err error) { - for attempt := 0; attempt <= stmt.db.opt.MaxRetries; attempt++ { - if attempt >= 1 { - time.Sleep(stmt.db.retryBackoff(attempt - 1)) - } - - start := time.Now() - res, err = stmt.query(model, params...) - stmt.db.queryProcessed(stmt.db, start, stmt.q, params, attempt, res, err) - - if !stmt.db.shouldRetry(err) { - break - } - } - if err != nil { - stmt.setErr(err) - } - return -} - -// QueryOne acts like Query, but query must return only one row. It -// returns ErrNoRows error when query returns zero rows or -// ErrMultiRows when query returns multiple rows. -func (stmt *Stmt) QueryOne(model interface{}, params ...interface{}) (orm.Result, error) { - mod, err := orm.NewModel(model) - if err != nil { - return nil, err - } - - res, err := stmt.Query(mod, params...) - if err != nil { - return nil, err - } - - if err := internal.AssertOneRow(res.RowsAffected()); err != nil { - return nil, err - } - return res, nil -} - -func (stmt *Stmt) setErr(e error) { - if stmt.stickyErr == nil { - stmt.stickyErr = e - } -} - -// Close closes the statement. -func (stmt *Stmt) Close() error { - stmt.mu.Lock() - defer stmt.mu.Unlock() - - if stmt._cn == nil { - return errStmtClosed - } - - err := stmt.closeStmt(stmt._cn, stmt.name) - if !stmt.inTx { - stmt.db.freeConn(stmt._cn, err) - } - stmt._cn = nil - return err -} - -func prepare(db *DB, cn *pool.Conn, q string) (*Stmt, error) { - name := cn.NextId() - err := cn.WithWriter(db.opt.WriteTimeout, func(wb *pool.WriteBuffer) error { - writeParseDescribeSyncMsg(wb, name, q) - return nil - }) - if err != nil { - return nil, err - } - - var columns [][]byte - cn.WithReader(db.opt.ReadTimeout, func(rd *pool.Reader) error { - columns, err = readParseDescribeSync(rd) - return err - }) - if err != nil { - return nil, err - } - - stmt := &Stmt{ - db: db, - _cn: cn, - q: q, - name: name, - columns: columns, - } - return stmt, nil -} - -func (stmt *Stmt) extQuery(cn *pool.Conn, name string, params ...interface{}) (orm.Result, error) { - err := cn.WithWriter(stmt.db.opt.WriteTimeout, func(wb *pool.WriteBuffer) error { - return writeBindExecuteMsg(wb, name, params...) 
- }) - if err != nil { - return nil, err - } - - var res orm.Result - err = cn.WithReader(stmt.db.opt.ReadTimeout, func(rd *pool.Reader) error { - res, err = readExtQuery(rd) - return err - }) - if err != nil { - return nil, err - } - - return res, nil -} - -func (stmt *Stmt) extQueryData( - cn *pool.Conn, name string, model interface{}, columns [][]byte, params ...interface{}, -) (orm.Result, error) { - err := cn.WithWriter(stmt.db.opt.WriteTimeout, func(wb *pool.WriteBuffer) error { - return writeBindExecuteMsg(wb, name, params...) - }) - if err != nil { - return nil, err - } - - var res orm.Result - err = cn.WithReader(stmt.db.opt.ReadTimeout, func(rd *pool.Reader) error { - res, err = readExtQueryData(rd, model, columns) - return err - }) - if err != nil { - return nil, err - } - - return res, nil -} - -func (stmt *Stmt) closeStmt(cn *pool.Conn, name string) error { - err := cn.WithWriter(stmt.db.opt.WriteTimeout, func(wb *pool.WriteBuffer) error { - writeCloseMsg(wb, name) - writeFlushMsg(wb) - return nil - }) - if err != nil { - return err - } - - err = cn.WithReader(stmt.db.opt.ReadTimeout, func(rd *pool.Reader) error { - return readCloseCompleteMsg(rd) - }) - return err -} diff --git a/vendor/github.com/go-pg/pg/time.go b/vendor/github.com/go-pg/pg/time.go deleted file mode 100644 index 66964fa..0000000 --- a/vendor/github.com/go-pg/pg/time.go +++ /dev/null @@ -1,58 +0,0 @@ -package pg - -import ( - "bytes" - "database/sql" - "encoding/json" - "time" - - "github.com/go-pg/pg/types" -) - -var jsonNull = []byte("null") - -// NullTime is a time.Time wrapper that marshals zero time as JSON null and -// PostgreSQL NULL. -type NullTime struct { - time.Time -} - -var _ json.Marshaler = (*NullTime)(nil) -var _ json.Unmarshaler = (*NullTime)(nil) -var _ sql.Scanner = (*NullTime)(nil) -var _ types.ValueAppender = (*NullTime)(nil) - -func (tm NullTime) MarshalJSON() ([]byte, error) { - if tm.IsZero() { - return jsonNull, nil - } - return tm.Time.MarshalJSON() -} - -func (tm *NullTime) UnmarshalJSON(b []byte) error { - if bytes.Equal(b, jsonNull) { - tm.Time = time.Time{} - return nil - } - return tm.Time.UnmarshalJSON(b) -} - -func (tm NullTime) AppendValue(b []byte, quote int) []byte { - if tm.IsZero() { - return types.AppendNull(b, quote) - } - return types.AppendTime(b, tm.Time, quote) -} - -func (tm *NullTime) Scan(b interface{}) error { - if b == nil { - tm.Time = time.Time{} - return nil - } - newtm, err := types.ParseTime(b.([]byte)) - if err != nil { - return err - } - tm.Time = newtm - return nil -} diff --git a/vendor/github.com/go-pg/pg/tx.go b/vendor/github.com/go-pg/pg/tx.go deleted file mode 100644 index de2024c..0000000 --- a/vendor/github.com/go-pg/pg/tx.go +++ /dev/null @@ -1,338 +0,0 @@ -package pg - -import ( - "context" - "errors" - "io" - "sync" - "time" - - "github.com/go-pg/pg/internal" - "github.com/go-pg/pg/internal/pool" - "github.com/go-pg/pg/orm" -) - -var errTxDone = errors.New("pg: transaction has already been committed or rolled back") - -// Tx is an in-progress database transaction. It is safe for concurrent use -// by multiple goroutines. -// -// A transaction must end with a call to Commit or Rollback. -// -// After a call to Commit or Rollback, all operations on the transaction fail -// with ErrTxDone. -// -// The statements prepared for a transaction by calling the transaction's -// Prepare or Stmt methods are closed by the call to Commit or Rollback. 
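The Tx contract spelled out in the comment above (every transaction must end in Commit or Rollback, after which further operations fail with ErrTxDone, and statements prepared on the transaction are closed with it) is exactly what RunInTransaction packages up for callers. As an editorial aside, a minimal sketch of driving the v6 API being un-vendored here; the *pg.DB, the transfer helper, and the accounts table are hypothetical stand-ins, not part of this diff:

package main

import "github.com/go-pg/pg"

// transfer moves an amount between two accounts atomically.
// RunInTransaction begins a transaction, runs the closure on it,
// rolls back when the closure returns an error (or panics), and
// commits otherwise, matching the semantics documented above.
func transfer(db *pg.DB, from, to int64, amount int) error {
	return db.RunInTransaction(func(tx *pg.Tx) error {
		if _, err := tx.Exec(`UPDATE accounts SET balance = balance - ? WHERE id = ?`, amount, from); err != nil {
			return err
		}
		_, err := tx.Exec(`UPDATE accounts SET balance = balance + ? WHERE id = ?`, amount, to)
		return err
	})
}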
-type Tx struct { - db *DB - - mu sync.Mutex - cn *pool.Conn - stmts []*Stmt -} - -var _ orm.DB = (*Tx)(nil) - -// Begin starts a transaction. Most callers should use RunInTransaction instead. -func (db *DB) Begin() (*Tx, error) { - tx := &Tx{ - db: db, - } - - cn, err := db.conn() - if err != nil { - return nil, err - } - tx.cn = cn - - if err := tx.begin(); err != nil { - return nil, err - } - - return tx, nil -} - -// RunInTransaction runs a function in a transaction. If function -// returns an error transaction is rollbacked, otherwise transaction -// is committed. -func (db *DB) RunInTransaction(fn func(*Tx) error) error { - tx, err := db.Begin() - if err != nil { - return err - } - return tx.RunInTransaction(fn) -} - -// DB returns a DB which started the Tx. -func (tx *Tx) DB() *DB { - return tx.db -} - -// Begin returns the transaction. -func (tx *Tx) Begin() (*Tx, error) { - return tx, nil -} - -// RunInTransaction runs a function in the transaction. If function -// returns an error transaction is rollbacked, otherwise transaction -// is committed. -func (tx *Tx) RunInTransaction(fn func(*Tx) error) error { - defer func() { - if err := recover(); err != nil { - _ = tx.Rollback() - panic(err) - } - }() - if err := fn(tx); err != nil { - _ = tx.Rollback() - return err - } - return tx.Commit() -} - -func (tx *Tx) conn() (*pool.Conn, error) { - if tx.cn == nil { - return nil, errTxDone - } - return tx.cn, nil -} - -func (tx *Tx) freeConn(cn *pool.Conn, err error) {} - -// Stmt returns a transaction-specific prepared statement -// from an existing statement. -func (tx *Tx) Stmt(stmt *Stmt) *Stmt { - stmt, err := tx.Prepare(stmt.q) - if err != nil { - return &Stmt{stickyErr: err} - } - return stmt -} - -// Prepare creates a prepared statement for use within a transaction. -// -// The returned statement operates within the transaction and can no longer -// be used once the transaction has been committed or rolled back. -// -// To use an existing prepared statement on this transaction, see Tx.Stmt. -func (tx *Tx) Prepare(q string) (*Stmt, error) { - tx.mu.Lock() - defer tx.mu.Unlock() - - cn, err := tx.conn() - if err != nil { - return nil, err - } - - stmt, err := prepare(tx.db, cn, q) - tx.freeConn(cn, err) - if err != nil { - return nil, err - } - - stmt.inTx = true - tx.stmts = append(tx.stmts, stmt) - - return stmt, nil -} - -// Exec is an alias for DB.Exec. -func (tx *Tx) Exec(query interface{}, params ...interface{}) (orm.Result, error) { - tx.mu.Lock() - defer tx.mu.Unlock() - return tx.exec(query, params...) -} - -func (tx *Tx) exec(query interface{}, params ...interface{}) (orm.Result, error) { - cn, err := tx.conn() - if err != nil { - return nil, err - } - - start := time.Now() - res, err := tx.db.simpleQuery(cn, query, params...) - tx.freeConn(cn, err) - tx.db.queryProcessed(tx, start, query, params, 0, res, err) - - return res, err -} - -// ExecOne is an alias for DB.ExecOne. -func (tx *Tx) ExecOne(query interface{}, params ...interface{}) (orm.Result, error) { - res, err := tx.Exec(query, params...) - if err != nil { - return nil, err - } - - if err := internal.AssertOneRow(res.RowsAffected()); err != nil { - return nil, err - } - return res, nil -} - -// Query is an alias for DB.Query. 
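ExecOne above layers a one-row assertion on top of Exec: anything other than exactly one affected row surfaces as pg.ErrNoRows or pg.ErrMultiRows, the same contract Stmt.ExecOne documents earlier in this diff. A hedged sketch of a caller; the jobs table and markDone helper are illustrative only:

package main

import "github.com/go-pg/pg"

// markDone flips one row. ExecOne asserts exactly one row was
// affected, failing with pg.ErrNoRows when the id is absent and
// pg.ErrMultiRows if the predicate matches more than one row.
func markDone(db *pg.DB, id int64) error {
	_, err := db.ExecOne(`UPDATE jobs SET done = TRUE WHERE id = ?`, id)
	return err
}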
-func (tx *Tx) Query(model interface{}, query interface{}, params ...interface{}) (orm.Result, error) { - tx.mu.Lock() - defer tx.mu.Unlock() - - cn, err := tx.conn() - if err != nil { - return nil, err - } - - start := time.Now() - res, err := tx.db.simpleQueryData(cn, model, query, params...) - tx.freeConn(cn, err) - tx.db.queryProcessed(tx, start, query, params, 0, res, err) - - if err != nil { - return nil, err - } - - if mod := res.Model(); mod != nil && res.RowsReturned() > 0 { - if err = mod.AfterQuery(tx); err != nil { - return res, err - } - } - - return res, err -} - -// QueryOne is an alias for DB.QueryOne. -func (tx *Tx) QueryOne(model interface{}, query interface{}, params ...interface{}) (orm.Result, error) { - mod, err := orm.NewModel(model) - if err != nil { - return nil, err - } - - res, err := tx.Query(mod, query, params...) - if err != nil { - return nil, err - } - - if err := internal.AssertOneRow(res.RowsAffected()); err != nil { - return nil, err - } - return res, nil -} - -// Model is an alias for DB.Model. -func (tx *Tx) Model(model ...interface{}) *orm.Query { - return orm.NewQuery(tx, model...) -} - -// Select is an alias for DB.Select. -func (tx *Tx) Select(model interface{}) error { - return orm.Select(tx, model) -} - -// Insert is an alias for DB.Insert. -func (tx *Tx) Insert(model ...interface{}) error { - return orm.Insert(tx, model...) -} - -// Update is an alias for DB.Update. -func (tx *Tx) Update(model interface{}) error { - return orm.Update(tx, model) -} - -// Delete is an alias for DB.Delete. -func (tx *Tx) Delete(model interface{}) error { - return orm.Delete(tx, model) -} - -// Delete forces delete of the model with deleted_at column. -func (tx *Tx) ForceDelete(model interface{}) error { - return orm.ForceDelete(tx, model) -} - -// CreateTable is an alias for DB.CreateTable. -func (tx *Tx) CreateTable(model interface{}, opt *orm.CreateTableOptions) error { - return orm.CreateTable(tx, model, opt) -} - -// DropTable is an alias for DB.DropTable. -func (tx *Tx) DropTable(model interface{}, opt *orm.DropTableOptions) error { - return orm.DropTable(tx, model, opt) -} - -// CopyFrom is an alias for DB.CopyFrom. -func (tx *Tx) CopyFrom(r io.Reader, query interface{}, params ...interface{}) (orm.Result, error) { - tx.mu.Lock() - defer tx.mu.Unlock() - - cn, err := tx.conn() - if err != nil { - return nil, err - } - - res, err := tx.db.copyFrom(cn, r, query, params...) - tx.freeConn(cn, err) - return res, err -} - -// CopyTo is an alias for DB.CopyTo. -func (tx *Tx) CopyTo(w io.Writer, query interface{}, params ...interface{}) (orm.Result, error) { - tx.mu.Lock() - defer tx.mu.Unlock() - - cn, err := tx.conn() - if err != nil { - return nil, err - } - - res, err := tx.db.copyTo(cn, w, query, params...) - tx.freeConn(cn, err) - return res, err -} - -func (tx *Tx) FormatQuery(dst []byte, query string, params ...interface{}) []byte { - return tx.db.FormatQuery(dst, query, params...) -} - -func (tx *Tx) begin() error { - _, err := tx.Exec("BEGIN") - if err != nil { - tx.close(err) - } - return err -} - -// Commit commits the transaction. -func (tx *Tx) Commit() error { - tx.mu.Lock() - defer tx.mu.Unlock() - - _, err := tx.exec("COMMIT") - tx.close(err) - return err -} - -// Rollback aborts the transaction. 
-func (tx *Tx) Rollback() error { - tx.mu.Lock() - defer tx.mu.Unlock() - - _, err := tx.exec("ROLLBACK") - tx.close(err) - return err -} - -func (tx *Tx) close(lastErr error) { - if tx.cn == nil { - return - } - - for _, stmt := range tx.stmts { - _ = stmt.Close() - } - tx.stmts = nil - - tx.db.freeConn(tx.cn, lastErr) - tx.cn = nil -} - -func (tx *Tx) Context() context.Context { - return tx.db.Context() -} diff --git a/vendor/github.com/go-pg/pg/types/append.go b/vendor/github.com/go-pg/pg/types/append.go deleted file mode 100644 index e80d124..0000000 --- a/vendor/github.com/go-pg/pg/types/append.go +++ /dev/null @@ -1,202 +0,0 @@ -package types - -import ( - "database/sql/driver" - "encoding/hex" - "math" - "reflect" - "strconv" - "time" -) - -func Append(b []byte, v interface{}, quote int) []byte { - switch v := v.(type) { - case nil: - return AppendNull(b, quote) - case bool: - return appendBool(b, v) - case int8: - return strconv.AppendInt(b, int64(v), 10) - case int16: - return strconv.AppendInt(b, int64(v), 10) - case int32: - return strconv.AppendInt(b, int64(v), 10) - case int64: - return strconv.AppendInt(b, int64(v), 10) - case int: - return strconv.AppendInt(b, int64(v), 10) - case uint8: - return strconv.AppendUint(b, uint64(v), 10) - case uint16: - return strconv.AppendUint(b, uint64(v), 10) - case uint32: - return strconv.AppendUint(b, uint64(v), 10) - case uint64: - return strconv.AppendUint(b, v, 10) - case uint: - return strconv.AppendUint(b, uint64(v), 10) - case float32: - return appendFloat(b, float64(v), quote) - case float64: - return appendFloat(b, v, quote) - case string: - return AppendString(b, v, quote) - case time.Time: - return AppendTime(b, v, quote) - case []byte: - return AppendBytes(b, v, quote) - case ValueAppender: - return appendAppender(b, v, quote) - case driver.Valuer: - return appendDriverValuer(b, v, quote) - default: - return appendValue(b, reflect.ValueOf(v), quote) - } -} - -func AppendError(b []byte, err error) []byte { - b = append(b, "?!("...) - b = append(b, err.Error()...) - b = append(b, ')') - return b -} - -func AppendNull(b []byte, quote int) []byte { - if quote == 1 { - return append(b, "NULL"...) - } else { - return nil - } -} - -func appendBool(dst []byte, v bool) []byte { - if v { - return append(dst, "TRUE"...) - } - return append(dst, "FALSE"...) -} - -func appendFloat(dst []byte, v float64, quote int) []byte { - switch { - case math.IsNaN(v): - if quote == 1 { - return append(dst, "'NaN'"...) - } - return append(dst, "NaN"...) - case math.IsInf(v, 1): - if quote == 1 { - return append(dst, "'Infinity'"...) - } - return append(dst, "Infinity"...) - case math.IsInf(v, -1): - if quote == 1 { - return append(dst, "'-Infinity'"...) - } - return append(dst, "-Infinity"...) 
- default: - return strconv.AppendFloat(dst, v, 'f', -1, 64) - } -} - -func AppendString(b []byte, s string, quote int) []byte { - if quote == 2 { - b = append(b, '"') - } else if quote == 1 { - b = append(b, '\'') - } - - for i := 0; i < len(s); i++ { - c := s[i] - - if c == '\000' { - continue - } - - if quote >= 1 { - if c == '\'' { - b = append(b, '\'', '\'') - continue - } - } - - if quote == 2 { - if c == '"' { - b = append(b, '\\', '"') - continue - } - if c == '\\' { - b = append(b, '\\', '\\') - continue - } - } - - b = append(b, c) - } - - if quote >= 2 { - b = append(b, '"') - } else if quote == 1 { - b = append(b, '\'') - } - - return b -} - -func AppendBytes(b []byte, bytes []byte, quote int) []byte { - if bytes == nil { - return AppendNull(b, quote) - } - - if quote == 1 { - b = append(b, '\'') - } - - tmp := make([]byte, hex.EncodedLen(len(bytes))) - hex.Encode(tmp, bytes) - b = append(b, "\\x"...) - b = append(b, tmp...) - - if quote == 1 { - b = append(b, '\'') - } - - return b -} - -func AppendStringStringMap(b []byte, m map[string]string, quote int) []byte { - if m == nil { - return AppendNull(b, quote) - } - - if quote == 1 { - b = append(b, '\'') - } - - for key, value := range m { - b = AppendString(b, key, 2) - b = append(b, '=', '>') - b = AppendString(b, value, 2) - b = append(b, ',') - } - if len(m) > 0 { - b = b[:len(b)-1] // Strip trailing comma. - } - - if quote == 1 { - b = append(b, '\'') - } - - return b -} - -func appendDriverValuer(b []byte, v driver.Valuer, quote int) []byte { - value, err := v.Value() - if err != nil { - return AppendError(b, err) - } - return Append(b, value, quote) -} - -func appendAppender(b []byte, v ValueAppender, quote int) []byte { - return v.AppendValue(b, quote) -} diff --git a/vendor/github.com/go-pg/pg/types/append_array.go b/vendor/github.com/go-pg/pg/types/append_array.go deleted file mode 100644 index f554eef..0000000 --- a/vendor/github.com/go-pg/pg/types/append_array.go +++ /dev/null @@ -1,190 +0,0 @@ -package types - -import ( - "reflect" - "strconv" -) - -var stringType = reflect.TypeOf((*string)(nil)).Elem() -var sliceStringType = reflect.TypeOf([]string(nil)) - -var intType = reflect.TypeOf((*int)(nil)).Elem() -var sliceIntType = reflect.TypeOf([]int(nil)) - -var int64Type = reflect.TypeOf((*int64)(nil)).Elem() -var sliceInt64Type = reflect.TypeOf([]int64(nil)) - -var float64Type = reflect.TypeOf((*float64)(nil)).Elem() -var sliceFloat64Type = reflect.TypeOf([]float64(nil)) - -func ArrayAppender(typ reflect.Type) AppenderFunc { - elemType := typ.Elem() - - switch elemType { - case stringType: - return appendSliceStringValue - case intType: - return appendSliceIntValue - case int64Type: - return appendSliceInt64Value - case float64Type: - return appendSliceFloat64Value - } - - appendElem := appender(elemType, true) - return func(b []byte, v reflect.Value, quote int) []byte { - if v.IsNil() { - return AppendNull(b, quote) - } - - if quote == 1 { - b = append(b, '\'') - } - - b = append(b, '{') - for i := 0; i < v.Len(); i++ { - elem := v.Index(i) - b = appendElem(b, elem, 2) - b = append(b, ',') - } - if v.Len() > 0 { - b[len(b)-1] = '}' // Replace trailing comma. 
- } else { - b = append(b, '}') - } - - if quote == 1 { - b = append(b, '\'') - } - - return b - } -} - -func appendSliceStringValue(b []byte, v reflect.Value, quote int) []byte { - ss := v.Convert(sliceStringType).Interface().([]string) - return appendSliceString(b, ss, quote) -} - -func appendSliceString(b []byte, ss []string, quote int) []byte { - if ss == nil { - return AppendNull(b, quote) - } - - if quote == 1 { - b = append(b, '\'') - } - - b = append(b, '{') - for _, s := range ss { - b = AppendString(b, s, 2) - b = append(b, ',') - } - if len(ss) > 0 { - b[len(b)-1] = '}' // Replace trailing comma. - } else { - b = append(b, '}') - } - - if quote == 1 { - b = append(b, '\'') - } - - return b -} - -func appendSliceIntValue(b []byte, v reflect.Value, quote int) []byte { - ints := v.Convert(sliceIntType).Interface().([]int) - return appendSliceInt(b, ints, quote) -} - -func appendSliceInt(b []byte, ints []int, quote int) []byte { - if ints == nil { - return AppendNull(b, quote) - } - - if quote == 1 { - b = append(b, '\'') - } - - b = append(b, '{') - for _, n := range ints { - b = strconv.AppendInt(b, int64(n), 10) - b = append(b, ',') - } - if len(ints) > 0 { - b[len(b)-1] = '}' // Replace trailing comma. - } else { - b = append(b, '}') - } - - if quote == 1 { - b = append(b, '\'') - } - - return b -} - -func appendSliceInt64Value(b []byte, v reflect.Value, quote int) []byte { - ints := v.Convert(sliceInt64Type).Interface().([]int64) - return appendSliceInt64(b, ints, quote) -} - -func appendSliceInt64(b []byte, ints []int64, quote int) []byte { - if ints == nil { - return AppendNull(b, quote) - } - - if quote == 1 { - b = append(b, '\'') - } - - b = append(b, '{') - for _, n := range ints { - b = strconv.AppendInt(b, n, 10) - b = append(b, ',') - } - if len(ints) > 0 { - b[len(b)-1] = '}' // Replace trailing comma. - } else { - b = append(b, '}') - } - - if quote == 1 { - b = append(b, '\'') - } - - return b -} - -func appendSliceFloat64Value(b []byte, v reflect.Value, quote int) []byte { - floats := v.Convert(sliceFloat64Type).Interface().([]float64) - return appendSliceFloat64(b, floats, quote) -} - -func appendSliceFloat64(b []byte, floats []float64, quote int) []byte { - if floats == nil { - return AppendNull(b, quote) - } - - if quote == 1 { - b = append(b, '\'') - } - - b = append(b, '{') - for _, n := range floats { - b = appendFloat(b, n, 2) - b = append(b, ',') - } - if len(floats) > 0 { - b[len(b)-1] = '}' // Replace trailing comma. 
- } else { - b = append(b, '}') - } - - if quote == 1 { - b = append(b, '\'') - } - - return b -} diff --git a/vendor/github.com/go-pg/pg/types/append_field.go b/vendor/github.com/go-pg/pg/types/append_field.go deleted file mode 100644 index 2fa86dc..0000000 --- a/vendor/github.com/go-pg/pg/types/append_field.go +++ /dev/null @@ -1,52 +0,0 @@ -package types - -import "github.com/go-pg/pg/internal/parser" - -func AppendField(b []byte, field string, quote int) []byte { - return appendField(b, parser.NewString(field), quote) -} - -func AppendFieldBytes(b []byte, field []byte, quote int) []byte { - return appendField(b, parser.New(field), quote) -} - -func appendField(b []byte, p *parser.Parser, quote int) []byte { - var quoted bool - for p.Valid() { - c := p.Read() - switch c { - case '*': - if !quoted { - b = append(b, '*') - continue - } - case '.': - if quoted && quote == 1 { - b = append(b, '"') - quoted = false - } - b = append(b, '.') - if p.Skip('*') { - b = append(b, '*') - } else if quote == 1 { - b = append(b, '"') - quoted = true - } - continue - } - - if !quoted && quote == 1 { - b = append(b, '"') - quoted = true - } - if c == '"' { - b = append(b, '"', '"') - } else { - b = append(b, c) - } - } - if quoted && quote == 1 { - b = append(b, '"') - } - return b -} diff --git a/vendor/github.com/go-pg/pg/types/append_hstore.go b/vendor/github.com/go-pg/pg/types/append_hstore.go deleted file mode 100644 index 9549074..0000000 --- a/vendor/github.com/go-pg/pg/types/append_hstore.go +++ /dev/null @@ -1,49 +0,0 @@ -package types - -import ( - "fmt" - "reflect" -) - -var mapStringStringType = reflect.TypeOf(map[string]string(nil)) - -func HstoreAppender(typ reflect.Type) AppenderFunc { - if typ.Key() == stringType && typ.Elem() == stringType { - return appendMapStringStringValue - } - return func(b []byte, v reflect.Value, quote int) []byte { - err := fmt.Errorf("pg.Hstore(unsupported %s)", v.Type()) - return AppendError(b, err) - } -} - -func appendMapStringString(b []byte, m map[string]string, quote int) []byte { - if m == nil { - return AppendNull(b, quote) - } - - if quote == 1 { - b = append(b, '\'') - } - - for key, value := range m { - b = AppendString(b, key, 2) - b = append(b, '=', '>') - b = AppendString(b, value, 2) - b = append(b, ',') - } - if len(m) > 0 { - b = b[:len(b)-1] // Strip trailing comma. - } - - if quote == 1 { - b = append(b, '\'') - } - - return b -} - -func appendMapStringStringValue(b []byte, v reflect.Value, quote int) []byte { - m := v.Convert(mapStringStringType).Interface().(map[string]string) - return appendMapStringString(b, m, quote) -} diff --git a/vendor/github.com/go-pg/pg/types/append_jsonb.go b/vendor/github.com/go-pg/pg/types/append_jsonb.go deleted file mode 100644 index 3bede71..0000000 --- a/vendor/github.com/go-pg/pg/types/append_jsonb.go +++ /dev/null @@ -1,41 +0,0 @@ -package types - -import "github.com/go-pg/pg/internal/parser" - -func AppendJSONB(b, jsonb []byte, quote int) []byte { - if quote == 1 { - b = append(b, '\'') - } - - p := parser.New(jsonb) - for p.Valid() { - c := p.Read() - switch c { - case '\'': - if quote == 1 { - b = append(b, '\'', '\'') - } else { - b = append(b, '\'') - } - case '\000': - continue - case '\\': - if p.SkipBytes([]byte("u0000")) { - b = append(b, "\\\\u0000"...) 
- } else { - b = append(b, '\\') - if p.Valid() { - b = append(b, p.Read()) - } - } - default: - b = append(b, c) - } - } - - if quote == 1 { - b = append(b, '\'') - } - - return b -} diff --git a/vendor/github.com/go-pg/pg/types/append_value.go b/vendor/github.com/go-pg/pg/types/append_value.go deleted file mode 100644 index bd1d543..0000000 --- a/vendor/github.com/go-pg/pg/types/append_value.go +++ /dev/null @@ -1,181 +0,0 @@ -package types - -import ( - "database/sql/driver" - "encoding/json" - "net" - "reflect" - "strconv" - "time" -) - -var driverValuerType = reflect.TypeOf((*driver.Valuer)(nil)).Elem() -var appenderType = reflect.TypeOf((*ValueAppender)(nil)).Elem() - -type AppenderFunc func([]byte, reflect.Value, int) []byte - -var valueAppenders []AppenderFunc - -func init() { - valueAppenders = []AppenderFunc{ - reflect.Bool: appendBoolValue, - reflect.Int: appendIntValue, - reflect.Int8: appendIntValue, - reflect.Int16: appendIntValue, - reflect.Int32: appendIntValue, - reflect.Int64: appendIntValue, - reflect.Uint: appendUintValue, - reflect.Uint8: appendUintValue, - reflect.Uint16: appendUintValue, - reflect.Uint32: appendUintValue, - reflect.Uint64: appendUintValue, - reflect.Uintptr: nil, - reflect.Float32: appendFloatValue, - reflect.Float64: appendFloatValue, - reflect.Complex64: nil, - reflect.Complex128: nil, - reflect.Array: nil, - reflect.Chan: nil, - reflect.Func: nil, - reflect.Interface: appendIfaceValue, - reflect.Map: appendJSONValue, - reflect.Ptr: nil, - reflect.Slice: appendJSONValue, - reflect.String: appendStringValue, - reflect.Struct: appendStructValue, - reflect.UnsafePointer: nil, - } -} - -func Appender(typ reflect.Type) AppenderFunc { - return appender(typ, false) -} - -func appender(typ reflect.Type, pgArray bool) AppenderFunc { - switch typ { - case timeType: - return appendTimeValue - case ipType: - return appendIPValue - case ipNetType: - return appendIPNetValue - } - - if typ.Implements(appenderType) { - return appendAppenderValue - } - - if typ.Implements(driverValuerType) { - return appendDriverValuerValue - } - - kind := typ.Kind() - switch kind { - case reflect.Ptr: - return ptrAppenderFunc(typ) - case reflect.Slice: - if typ.Elem().Kind() == reflect.Uint8 { - return appendBytesValue - } - if pgArray { - return ArrayAppender(typ) - } - case reflect.Array: - if typ.Elem().Kind() == reflect.Uint8 { - return appendArrayBytesValue - } - } - return valueAppenders[kind] -} - -func ptrAppenderFunc(typ reflect.Type) AppenderFunc { - appender := Appender(typ.Elem()) - return func(b []byte, v reflect.Value, quote int) []byte { - if v.IsNil() { - return AppendNull(b, quote) - } - return appender(b, v.Elem(), quote) - } -} - -func appendValue(b []byte, v reflect.Value, quote int) []byte { - if v.Kind() == reflect.Ptr { - if v.IsNil() { - return AppendNull(b, quote) - } - return appendValue(b, v.Elem(), quote) - } - - appender := Appender(v.Type()) - return appender(b, v, quote) -} - -func appendIfaceValue(b []byte, v reflect.Value, quote int) []byte { - return Append(b, v.Interface(), quote) -} - -func appendBoolValue(b []byte, v reflect.Value, _ int) []byte { - return appendBool(b, v.Bool()) -} - -func appendIntValue(b []byte, v reflect.Value, _ int) []byte { - return strconv.AppendInt(b, v.Int(), 10) -} - -func appendUintValue(b []byte, v reflect.Value, _ int) []byte { - return strconv.AppendUint(b, v.Uint(), 10) -} - -func appendFloatValue(b []byte, v reflect.Value, quote int) []byte { - return appendFloat(b, v.Float(), quote) -} - -func 
appendBytesValue(b []byte, v reflect.Value, quote int) []byte { - return AppendBytes(b, v.Bytes(), quote) -} - -func appendArrayBytesValue(b []byte, v reflect.Value, quote int) []byte { - return AppendBytes(b, v.Slice(0, v.Len()).Bytes(), quote) -} - -func appendStringValue(b []byte, v reflect.Value, quote int) []byte { - return AppendString(b, v.String(), quote) -} - -func appendStructValue(b []byte, v reflect.Value, quote int) []byte { - if v.Type() == timeType { - return appendTimeValue(b, v, quote) - } - return appendJSONValue(b, v, quote) -} - -func appendJSONValue(b []byte, v reflect.Value, quote int) []byte { - bytes, err := json.Marshal(v.Interface()) - if err != nil { - return AppendError(b, err) - } - return AppendJSONB(b, bytes, quote) -} - -func appendTimeValue(b []byte, v reflect.Value, quote int) []byte { - tm := v.Interface().(time.Time) - return AppendTime(b, tm, quote) -} - -func appendIPValue(b []byte, v reflect.Value, quote int) []byte { - ip := v.Interface().(net.IP) - return AppendString(b, ip.String(), quote) -} - -func appendIPNetValue(b []byte, v reflect.Value, quote int) []byte { - ipnet := v.Interface().(net.IPNet) - return AppendString(b, ipnet.String(), quote) -} - -func appendAppenderValue(b []byte, v reflect.Value, quote int) []byte { - return appendAppender(b, v.Interface().(ValueAppender), quote) -} - -func appendDriverValuerValue(b []byte, v reflect.Value, quote int) []byte { - return appendDriverValuer(b, v.Interface().(driver.Valuer), quote) -} diff --git a/vendor/github.com/go-pg/pg/types/array.go b/vendor/github.com/go-pg/pg/types/array.go deleted file mode 100644 index 613b6c9..0000000 --- a/vendor/github.com/go-pg/pg/types/array.go +++ /dev/null @@ -1,52 +0,0 @@ -package types - -import ( - "database/sql" - "fmt" - "reflect" -) - -type Array struct { - v reflect.Value - - append AppenderFunc - scan ScannerFunc -} - -var _ ValueAppender = (*Array)(nil) -var _ sql.Scanner = (*Array)(nil) - -func NewArray(vi interface{}) *Array { - v := reflect.ValueOf(vi) - if !v.IsValid() { - panic(fmt.Errorf("pg.Array(nil)")) - } - v = reflect.Indirect(v) - if v.Kind() != reflect.Slice { - panic(fmt.Errorf("pg.Array(unsupported %s)", v.Type())) - } - return &Array{ - v: v, - - append: ArrayAppender(v.Type()), - scan: ArrayScanner(v.Type()), - } -} - -func (a *Array) Value() interface{} { - if a.v.IsValid() { - return a.v.Interface() - } - return nil -} - -func (a *Array) AppendValue(b []byte, quote int) []byte { - return a.append(b, a.v, quote) -} - -func (a *Array) Scan(b interface{}) error { - if b == nil { - return a.scan(a.v, nil) - } - return a.scan(a.v, b.([]byte)) -} diff --git a/vendor/github.com/go-pg/pg/types/hstore.go b/vendor/github.com/go-pg/pg/types/hstore.go deleted file mode 100644 index b6241e6..0000000 --- a/vendor/github.com/go-pg/pg/types/hstore.go +++ /dev/null @@ -1,52 +0,0 @@ -package types - -import ( - "database/sql" - "fmt" - "reflect" -) - -type Hstore struct { - v reflect.Value - - append AppenderFunc - scan ScannerFunc -} - -var _ ValueAppender = (*Hstore)(nil) -var _ sql.Scanner = (*Hstore)(nil) - -func NewHstore(vi interface{}) *Hstore { - v := reflect.ValueOf(vi) - if !v.IsValid() { - panic(fmt.Errorf("pg.Hstore(nil)")) - } - v = reflect.Indirect(v) - if v.Kind() != reflect.Map { - panic(fmt.Errorf("pg.Hstore(unsupported %s)", v.Type())) - } - return &Hstore{ - v: v, - - append: HstoreAppender(v.Type()), - scan: HstoreScanner(v.Type()), - } -} - -func (h *Hstore) Value() interface{} { - if h.v.IsValid() { - return h.v.Interface() - } - 
return nil -} - -func (h *Hstore) AppendValue(b []byte, quote int) []byte { - return h.append(b, h.v, quote) -} - -func (h *Hstore) Scan(b interface{}) error { - if b == nil { - return h.scan(h.v, nil) - } - return h.scan(h.v, b.([]byte)) -} diff --git a/vendor/github.com/go-pg/pg/types/in_op.go b/vendor/github.com/go-pg/pg/types/in_op.go deleted file mode 100644 index bfe395c..0000000 --- a/vendor/github.com/go-pg/pg/types/in_op.go +++ /dev/null @@ -1,49 +0,0 @@ -package types - -import ( - "reflect" -) - -type inOp struct { - slice reflect.Value -} - -var _ ValueAppender = (*inOp)(nil) - -func In(values ...interface{}) ValueAppender { - return &inOp{ - slice: reflect.ValueOf(values), - } -} - -func InSlice(slice interface{}) ValueAppender { - return &inOp{ - slice: reflect.ValueOf(slice), - } -} - -func (in *inOp) AppendValue(b []byte, quote int) []byte { - return appendIn(b, in.slice, quote) -} - -func appendIn(b []byte, slice reflect.Value, quote int) []byte { - for i := 0; i < slice.Len(); i++ { - if i > 0 { - b = append(b, ',') - } - - elem := slice.Index(i) - if elem.Kind() == reflect.Interface { - elem = elem.Elem() - } - - if elem.Kind() == reflect.Slice { - b = append(b, '(') - b = appendIn(b, elem, quote) - b = append(b, ')') - } else { - b = appendValue(b, elem, quote) - } - } - return b -} diff --git a/vendor/github.com/go-pg/pg/types/interface.go b/vendor/github.com/go-pg/pg/types/interface.go deleted file mode 100644 index b62ad7e..0000000 --- a/vendor/github.com/go-pg/pg/types/interface.go +++ /dev/null @@ -1,27 +0,0 @@ -package types - -type ValueAppender interface { - AppendValue(b []byte, quote int) []byte -} - -//------------------------------------------------------------------------------ - -// Q represents safe SQL query. -type Q string - -var _ ValueAppender = Q("") - -func (q Q) AppendValue(b []byte, quote int) []byte { - return append(b, q...) -} - -//------------------------------------------------------------------------------ - -// F represents a SQL field, e.g. table or column name. 
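The Q and F helpers in interface.go are deliberately thin: both are string types whose AppendValue hooks into the Append dispatcher deleted earlier in this diff, so a query builder can splice in trusted SQL (Q, passed through verbatim) or an identifier (F, escaped and double-quoted via AppendField). A small sketch using only what the deleted code itself defines; the quote argument of 1 requests the single-quoting mode these appenders use for top-level values:

package main

import (
	"fmt"

	"github.com/go-pg/pg/types"
)

func main() {
	// F escapes a dotted field path as quoted identifiers.
	b := types.Append(nil, types.F("payload.user_id"), 1)
	fmt.Println(string(b)) // "payload"."user_id"

	// Q is appended verbatim, so it must already be trusted SQL.
	b = types.Append(nil, types.Q("count(*) AS n"), 1)
	fmt.Println(string(b)) // count(*) AS n
}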
-type F string - -var _ ValueAppender = F("") - -func (f F) AppendValue(b []byte, quote int) []byte { - return AppendField(b, string(f), quote) -} diff --git a/vendor/github.com/go-pg/pg/types/scan.go b/vendor/github.com/go-pg/pg/types/scan.go deleted file mode 100644 index 602f848..0000000 --- a/vendor/github.com/go-pg/pg/types/scan.go +++ /dev/null @@ -1,83 +0,0 @@ -package types - -import ( - "database/sql" - "encoding/hex" - "errors" - "fmt" - "reflect" - "time" - - "github.com/go-pg/pg/internal" -) - -func Scan(v interface{}, b []byte) error { - switch v := v.(type) { - case *string: - *v = string(b) - return nil - case *[]byte: - if b == nil { - *v = nil - return nil - } - var err error - *v, err = ScanBytes(b) - return err - case *int: - if b == nil { - *v = 0 - return nil - } - var err error - *v, err = internal.Atoi(b) - return err - case *int64: - if b == nil { - *v = 0 - return nil - } - var err error - *v, err = internal.ParseInt(b, 10, 64) - return err - case *time.Time: - if b == nil { - *v = time.Time{} - return nil - } - var err error - *v, err = ParseTime(b) - return err - } - - vv := reflect.ValueOf(v) - if !vv.IsValid() { - return errors.New("pg: Scan(nil)") - } - if vv.Kind() != reflect.Ptr { - return fmt.Errorf("pg: Scan(nonsettable %T)", v) - } - vv = vv.Elem() - if !vv.IsValid() { - return fmt.Errorf("pg: Scan(nonsettable %T)", v) - } - return ScanValue(vv, b) -} - -func scanSQLScanner(scanner sql.Scanner, b []byte) error { - if b == nil { - return scanner.Scan(nil) - } - return scanner.Scan(b) -} - -func ScanBytes(b []byte) ([]byte, error) { - if len(b) < 2 { - return nil, fmt.Errorf("pg: can't parse bytes: %q", b) - } - - b = b[2:] // Trim off "\\x". - tmp := make([]byte, hex.DecodedLen(len(b))) - _, err := hex.Decode(tmp, b) - return tmp, err -} diff --git a/vendor/github.com/go-pg/pg/types/scan_array.go b/vendor/github.com/go-pg/pg/types/scan_array.go deleted file mode 100644 index 2f000a4..0000000 --- a/vendor/github.com/go-pg/pg/types/scan_array.go +++ /dev/null @@ -1,197 +0,0 @@ -package types - -import ( - "fmt" - "reflect" - - "github.com/go-pg/pg/internal" - "github.com/go-pg/pg/internal/parser" -) - -func ArrayScanner(typ reflect.Type) ScannerFunc { - elemType := typ.Elem() - - switch elemType { - case stringType: - return scanSliceStringValue - case intType: - return scanSliceIntValue - case int64Type: - return scanSliceInt64Value - case float64Type: - return scanSliceFloat64Value - } - - scanElem := scanner(elemType, true) - return func(v reflect.Value, b []byte) error { - if !v.CanSet() { - return fmt.Errorf("pg: Scan(nonsettable %s)", v.Type()) - } - - if b == nil { - if !v.IsNil() { - v.Set(reflect.Zero(v.Type())) - } - return nil - } - - if v.IsNil() { - v.Set(reflect.MakeSlice(v.Type(), 0, 0)) - } else if v.Len() > 0 { - v.Set(v.Slice(0, 0)) - } - - p := parser.NewArrayParser(b) - nextValue := internal.MakeSliceNextElemFunc(v) - for p.Valid() { - elem, err := p.NextElem() - if err != nil { - return err - } - - elemValue := nextValue() - err = scanElem(elemValue, elem) - if err != nil { - return err - } - } - - return nil - } -} - -func scanSliceStringValue(v reflect.Value, b []byte) error { - if !v.CanSet() { - return fmt.Errorf("pg: Scan(nonsettable %s)", v.Type()) - } - strings, err := decodeSliceString(b) - if err != nil { - return err - } - v.Set(reflect.ValueOf(strings)) - return nil -} - -func decodeSliceString(b []byte) ([]string, error) { - if b == nil { - return nil, nil - } - p := parser.NewArrayParser(b) - s := make([]string, 0) - for 
p.Valid() { - elem, err := p.NextElem() - if err != nil { - return nil, err - } - s = append(s, string(elem)) - } - return s, nil -} - -func scanSliceIntValue(v reflect.Value, b []byte) error { - if !v.CanSet() { - return fmt.Errorf("pg: Scan(nonsettable %s)", v.Type()) - } - ints, err := decodeSliceInt(b) - if err != nil { - return err - } - v.Set(reflect.ValueOf(ints)) - return nil -} - -func decodeSliceInt(b []byte) ([]int, error) { - if b == nil { - return nil, nil - } - p := parser.NewArrayParser(b) - slice := make([]int, 0) - for p.Valid() { - elem, err := p.NextElem() - if err != nil { - return nil, err - } - if elem == nil { - slice = append(slice, 0) - continue - } - n, err := internal.Atoi(elem) - if err != nil { - return nil, err - } - slice = append(slice, n) - } - return slice, nil -} - -func scanSliceInt64Value(v reflect.Value, b []byte) error { - if !v.CanSet() { - return fmt.Errorf("pg: Scan(nonsettable %s)", v.Type()) - } - ints, err := decodeSliceInt64(b) - if err != nil { - return err - } - v.Set(reflect.ValueOf(ints)) - return nil -} - -func decodeSliceInt64(b []byte) ([]int64, error) { - if b == nil { - return nil, nil - } - p := parser.NewArrayParser(b) - slice := make([]int64, 0) - for p.Valid() { - elem, err := p.NextElem() - if err != nil { - return nil, err - } - if elem == nil { - slice = append(slice, 0) - continue - } - n, err := internal.ParseInt(elem, 10, 64) - if err != nil { - return nil, err - } - slice = append(slice, n) - } - return slice, nil -} - -func scanSliceFloat64Value(v reflect.Value, b []byte) error { - if !v.CanSet() { - return fmt.Errorf("pg: Scan(nonsettable %s)", v.Type()) - } - floats, err := decodeSliceFloat64(b) - if err != nil { - return err - } - v.Set(reflect.ValueOf(floats)) - return nil -} - -func decodeSliceFloat64(b []byte) ([]float64, error) { - if b == nil { - return nil, nil - } - p := parser.NewArrayParser(b) - slice := make([]float64, 0) - for p.Valid() { - elem, err := p.NextElem() - if err != nil { - return nil, err - } - if elem == nil { - slice = append(slice, 0) - continue - } - n, err := internal.ParseFloat(elem, 64) - if err != nil { - return nil, err - } - slice = append(slice, n) - } - return slice, nil -} diff --git a/vendor/github.com/go-pg/pg/types/scan_hstore.go b/vendor/github.com/go-pg/pg/types/scan_hstore.go deleted file mode 100644 index ee5b628..0000000 --- a/vendor/github.com/go-pg/pg/types/scan_hstore.go +++ /dev/null @@ -1,58 +0,0 @@ -package types - -import ( - "fmt" - "reflect" - - "github.com/go-pg/pg/internal/parser" -) - -func HstoreScanner(typ reflect.Type) ScannerFunc { - if typ.Key() == stringType && typ.Elem() == stringType { - return scanMapStringStringValue - } - return func(v reflect.Value, b []byte) error { - return fmt.Errorf("pg.Hstore(unsupported %s)", v.Type()) - } -} - -func scanMapStringString(b []byte) (map[string]string, error) { - if b == nil { - return nil, nil - } - - p := parser.NewHstoreParser(b) - m := make(map[string]string) - for p.Valid() { - key, err := p.NextKey() - if err != nil { - return nil, err - } - if key == nil { - return nil, fmt.Errorf("pg: unexpected NULL: %q", b) - } - - value, err := p.NextValue() - if err != nil { - return nil, err - } - if value == nil { - return nil, fmt.Errorf("pg: unexpected NULL: %q", b) - } - - m[string(key)] = string(value) - } - return m, nil -} - -func scanMapStringStringValue(v reflect.Value, b []byte) error { - if !v.CanSet() { - return fmt.Errorf("pg: Scan(nonsettable %s)", v.Type()) - } - m, err := scanMapStringString(b) - if err 
!= nil { - return err - } - v.Set(reflect.ValueOf(m)) - return nil -} diff --git a/vendor/github.com/go-pg/pg/types/scan_value.go b/vendor/github.com/go-pg/pg/types/scan_value.go deleted file mode 100644 index 0ff35f4..0000000 --- a/vendor/github.com/go-pg/pg/types/scan_value.go +++ /dev/null @@ -1,304 +0,0 @@ -package types - -import ( - "database/sql" - "encoding/json" - "errors" - "fmt" - "net" - "reflect" - "time" - - "github.com/go-pg/pg/internal" -) - -var scannerType = reflect.TypeOf((*sql.Scanner)(nil)).Elem() -var timeType = reflect.TypeOf((*time.Time)(nil)).Elem() -var ipType = reflect.TypeOf((*net.IP)(nil)).Elem() -var ipNetType = reflect.TypeOf((*net.IPNet)(nil)).Elem() - -type ScannerFunc func(reflect.Value, []byte) error - -var valueScanners []ScannerFunc - -func init() { - valueScanners = []ScannerFunc{ - reflect.Bool: scanBoolValue, - reflect.Int: scanIntValue, - reflect.Int8: scanIntValue, - reflect.Int16: scanIntValue, - reflect.Int32: scanIntValue, - reflect.Int64: scanIntValue, - reflect.Uint: scanUintValue, - reflect.Uint8: scanUintValue, - reflect.Uint16: scanUintValue, - reflect.Uint32: scanUintValue, - reflect.Uint64: scanUintValue, - reflect.Uintptr: nil, - reflect.Float32: scanFloatValue, - reflect.Float64: scanFloatValue, - reflect.Complex64: nil, - reflect.Complex128: nil, - reflect.Array: nil, - reflect.Chan: nil, - reflect.Func: nil, - reflect.Interface: scanIfaceValue, - reflect.Map: scanJSONValue, - reflect.Ptr: nil, - reflect.Slice: scanJSONValue, - reflect.String: scanStringValue, - reflect.Struct: scanJSONValue, - reflect.UnsafePointer: nil, - } -} - -func Scanner(typ reflect.Type) ScannerFunc { - return scanner(typ, false) -} - -func scanner(typ reflect.Type, pgArray bool) ScannerFunc { - switch typ { - case timeType: - return scanTimeValue - case ipType: - return scanIPValue - case ipNetType: - return scanIPNetValue - } - - if typ.Implements(scannerType) { - return scanSQLScannerValue - } - if reflect.PtrTo(typ).Implements(scannerType) { - return scanSQLScannerAddrValue - } - - kind := typ.Kind() - switch kind { - case reflect.Ptr: - return ptrScannerFunc(typ) - case reflect.Slice: - if typ.Elem().Kind() == reflect.Uint8 { - return scanBytesValue - } - if pgArray { - return ArrayScanner(typ) - } - } - return valueScanners[kind] -} - -func ptrScannerFunc(typ reflect.Type) ScannerFunc { - scanner := Scanner(typ.Elem()) - return func(v reflect.Value, b []byte) error { - if scanner == nil { - return fmt.Errorf("pg: Scan(unsupported %s)", v.Type()) - } - if b == nil { - if v.IsNil() { - return nil - } - if !v.CanSet() { - return fmt.Errorf("pg: Scan(nonsettable %s)", v.Type()) - } - v.Set(reflect.Zero(v.Type())) - return nil - } - if v.IsNil() { - if !v.CanSet() { - return fmt.Errorf("pg: Scan(nonsettable %s)", v.Type()) - } - v.Set(reflect.New(v.Type().Elem())) - } - return scanner(v.Elem(), b) - } -} - -func scanIfaceValue(v reflect.Value, b []byte) error { - if v.IsNil() { - return scanJSONValue(v, b) - } - return ScanValue(v.Elem(), b) -} - -func ScanValue(v reflect.Value, b []byte) error { - if !v.IsValid() { - return errors.New("pg: Scan(nil)") - } - - scanner := Scanner(v.Type()) - if scanner != nil { - return scanner(v, b) - } - - if v.Kind() == reflect.Interface { - return errors.New("pg: Scan(nil)") - } - return fmt.Errorf("pg: Scan(unsupported %s)", v.Type()) -} - -func scanBoolValue(v reflect.Value, b []byte) error { - if !v.CanSet() { - return fmt.Errorf("pg: Scan(nonsettable %s)", v.Type()) - } - if b == nil { - v.SetBool(false) - return nil - 
} - v.SetBool(len(b) == 1 && (b[0] == 't' || b[0] == '1')) - return nil -} - -func scanIntValue(v reflect.Value, b []byte) error { - if !v.CanSet() { - return fmt.Errorf("pg: Scan(nonsettable %s)", v.Type()) - } - if b == nil { - v.SetInt(0) - return nil - } - n, err := internal.ParseInt(b, 10, 64) - if err != nil { - return err - } - v.SetInt(n) - return nil -} - -func scanUintValue(v reflect.Value, b []byte) error { - if !v.CanSet() { - return fmt.Errorf("pg: Scan(nonsettable %s)", v.Type()) - } - if b == nil { - v.SetUint(0) - return nil - } - n, err := internal.ParseUint(b, 10, 64) - if err != nil { - return err - } - v.SetUint(n) - return nil -} - -func scanFloatValue(v reflect.Value, b []byte) error { - if !v.CanSet() { - return fmt.Errorf("pg: Scan(nonsettable %s)", v.Type()) - } - if b == nil { - v.SetFloat(0) - return nil - } - n, err := internal.ParseFloat(b, 64) - if err != nil { - return err - } - v.SetFloat(n) - return nil -} - -func scanStringValue(v reflect.Value, b []byte) error { - if !v.CanSet() { - return fmt.Errorf("pg: Scan(nonsettable %s)", v.Type()) - } - v.SetString(string(b)) - return nil -} - -func scanJSONValue(v reflect.Value, b []byte) error { - if !v.CanSet() { - return fmt.Errorf("pg: Scan(nonsettable %s)", v.Type()) - } - if b == nil { - v.Set(reflect.New(v.Type()).Elem()) - return nil - } - return json.Unmarshal(b, v.Addr().Interface()) -} - -var zeroTimeValue = reflect.ValueOf(time.Time{}) - -func scanTimeValue(v reflect.Value, b []byte) error { - if !v.CanSet() { - return fmt.Errorf("pg: Scan(nonsettable %s)", v.Type()) - } - if b == nil { - v.Set(zeroTimeValue) - return nil - } - tm, err := ParseTime(b) - if err != nil { - return err - } - v.Set(reflect.ValueOf(tm)) - return nil -} - -func scanIPValue(v reflect.Value, b []byte) error { - if !v.CanSet() { - return fmt.Errorf("pg: Scan(nonsettable %s)", v.Type()) - } - if b == nil { - return nil - } - ip := net.ParseIP(internal.BytesToString(b)) - if ip == nil { - return fmt.Errorf("pg: invalid ip=%q", b) - } - v.Set(reflect.ValueOf(ip)) - return nil -} - -var zeroIPNetValue = reflect.ValueOf(net.IPNet{}) - -func scanIPNetValue(v reflect.Value, b []byte) error { - if !v.CanSet() { - return fmt.Errorf("pg: Scan(nonsettable %s)", v.Type()) - } - if b == nil { - v.Set(zeroIPNetValue) - return nil - } - _, ipnet, err := net.ParseCIDR(internal.BytesToString(b)) - if err != nil { - return err - } - v.Set(reflect.ValueOf(*ipnet)) - return nil -} - -func scanBytesValue(v reflect.Value, b []byte) error { - if !v.CanSet() { - return fmt.Errorf("pg: Scan(nonsettable %s)", v.Type()) - } - if b == nil { - v.SetBytes(nil) - return nil - } - bs, err := ScanBytes(b) - if err != nil { - return err - } - v.SetBytes(bs) - return nil -} - -func scanSQLScannerValue(v reflect.Value, b []byte) error { - if b == nil { - if v.IsNil() { - return nil - } - return scanSQLScanner(v.Interface().(sql.Scanner), nil) - } - if v.IsNil() { - v.Set(reflect.New(v.Type().Elem())) - } - return scanSQLScanner(v.Interface().(sql.Scanner), b) -} - -func scanSQLScannerAddrValue(v reflect.Value, b []byte) error { - if !v.CanAddr() { - return fmt.Errorf("pg: Scan(nonsettable %s)", v.Type()) - } - return scanSQLScanner(v.Addr().Interface().(sql.Scanner), b) -} diff --git a/vendor/github.com/go-pg/pg/types/time.go b/vendor/github.com/go-pg/pg/types/time.go deleted file mode 100644 index 30c3ed2..0000000 --- a/vendor/github.com/go-pg/pg/types/time.go +++ /dev/null @@ -1,56 +0,0 @@ -package types - -import ( - "time" - - "github.com/go-pg/pg/internal" -) 
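scan_value.go above is the read-side twin of append_value.go: Scanner picks a ScannerFunc by reflect.Kind, with special cases for time.Time, net.IP/net.IPNet, and sql.Scanner implementations, and a nil byte slice (SQL NULL) resets the destination to its zero value. A minimal sketch of the exported entry point, using only behaviour visible in the deleted scan.go:

package main

import (
	"fmt"

	"github.com/go-pg/pg/types"
)

func main() {
	// Non-nil bytes are parsed into the pointed-to Go value.
	var n int64
	if err := types.Scan(&n, []byte("42")); err != nil {
		panic(err)
	}

	// nil bytes represent SQL NULL and zero the target.
	var s string
	if err := types.Scan(&s, nil); err != nil {
		panic(err)
	}
	fmt.Printf("%d %q\n", n, s) // 42 ""
}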
- -const ( - dateFormat = "2006-01-02" - timeFormat = "15:04:05.999999999" - timestampFormat = "2006-01-02 15:04:05.999999999" - timestamptzFormat = "2006-01-02 15:04:05.999999999-07:00:00" - timestamptzFormat2 = "2006-01-02 15:04:05.999999999-07:00" - timestamptzFormat3 = "2006-01-02 15:04:05.999999999-07" -) - -func ParseTime(b []byte) (time.Time, error) { - s := internal.BytesToString(b) - return ParseTimeString(s) -} - -func ParseTimeString(s string) (time.Time, error) { - switch l := len(s); { - case l <= len(timeFormat): - if s[2] == ':' { - return time.ParseInLocation(timeFormat, s, time.UTC) - } - return time.ParseInLocation(dateFormat, s, time.UTC) - default: - if s[10] == 'T' { - return time.Parse(time.RFC3339Nano, s) - } - if c := s[l-9]; c == '+' || c == '-' { - return time.Parse(timestamptzFormat, s) - } - if c := s[l-6]; c == '+' || c == '-' { - return time.Parse(timestamptzFormat2, s) - } - if c := s[l-3]; c == '+' || c == '-' { - return time.Parse(timestamptzFormat3, s) - } - return time.ParseInLocation(timestampFormat, s, time.UTC) - } -} - -func AppendTime(b []byte, tm time.Time, quote int) []byte { - if quote == 1 { - b = append(b, '\'') - } - b = tm.UTC().AppendFormat(b, timestamptzFormat) - if quote == 1 { - b = append(b, '\'') - } - return b -} diff --git a/vendor/github.com/golang/glog/LICENSE b/vendor/github.com/golang/glog/LICENSE deleted file mode 100644 index 37ec93a..0000000 --- a/vendor/github.com/golang/glog/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ -Apache License -Version 2.0, January 2004 -http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - -"License" shall mean the terms and conditions for use, reproduction, and -distribution as defined by Sections 1 through 9 of this document. - -"Licensor" shall mean the copyright owner or entity authorized by the copyright -owner that is granting the License. - -"Legal Entity" shall mean the union of the acting entity and all other entities -that control, are controlled by, or are under common control with that entity. -For the purposes of this definition, "control" means (i) the power, direct or -indirect, to cause the direction or management of such entity, whether by -contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the -outstanding shares, or (iii) beneficial ownership of such entity. - -"You" (or "Your") shall mean an individual or Legal Entity exercising -permissions granted by this License. - -"Source" form shall mean the preferred form for making modifications, including -but not limited to software source code, documentation source, and configuration -files. - -"Object" form shall mean any form resulting from mechanical transformation or -translation of a Source form, including but not limited to compiled object code, -generated documentation, and conversions to other media types. - -"Work" shall mean the work of authorship, whether in Source or Object form, made -available under the License, as indicated by a copyright notice that is included -in or attached to the work (an example is provided in the Appendix below). - -"Derivative Works" shall mean any work, whether in Source or Object form, that -is based on (or derived from) the Work and for which the editorial revisions, -annotations, elaborations, or other modifications represent, as a whole, an -original work of authorship. 
For the purposes of this License, Derivative Works -shall not include works that remain separable from, or merely link (or bind by -name) to the interfaces of, the Work and Derivative Works thereof. - -"Contribution" shall mean any work of authorship, including the original version -of the Work and any modifications or additions to that Work or Derivative Works -thereof, that is intentionally submitted to Licensor for inclusion in the Work -by the copyright owner or by an individual or Legal Entity authorized to submit -on behalf of the copyright owner. For the purposes of this definition, -"submitted" means any form of electronic, verbal, or written communication sent -to the Licensor or its representatives, including but not limited to -communication on electronic mailing lists, source code control systems, and -issue tracking systems that are managed by, or on behalf of, the Licensor for -the purpose of discussing and improving the Work, but excluding communication -that is conspicuously marked or otherwise designated in writing by the copyright -owner as "Not a Contribution." - -"Contributor" shall mean Licensor and any individual or Legal Entity on behalf -of whom a Contribution has been received by Licensor and subsequently -incorporated within the Work. - -2. Grant of Copyright License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable copyright license to reproduce, prepare Derivative Works of, -publicly display, publicly perform, sublicense, and distribute the Work and such -Derivative Works in Source or Object form. - -3. Grant of Patent License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable (except as stated in this section) patent license to make, have -made, use, offer to sell, sell, import, and otherwise transfer the Work, where -such license applies only to those patent claims licensable by such Contributor -that are necessarily infringed by their Contribution(s) alone or by combination -of their Contribution(s) with the Work to which such Contribution(s) was -submitted. If You institute patent litigation against any entity (including a -cross-claim or counterclaim in a lawsuit) alleging that the Work or a -Contribution incorporated within the Work constitutes direct or contributory -patent infringement, then any patent licenses granted to You under this License -for that Work shall terminate as of the date such litigation is filed. - -4. Redistribution. 
- -You may reproduce and distribute copies of the Work or Derivative Works thereof -in any medium, with or without modifications, and in Source or Object form, -provided that You meet the following conditions: - -You must give any other recipients of the Work or Derivative Works a copy of -this License; and -You must cause any modified files to carry prominent notices stating that You -changed the files; and -You must retain, in the Source form of any Derivative Works that You distribute, -all copyright, patent, trademark, and attribution notices from the Source form -of the Work, excluding those notices that do not pertain to any part of the -Derivative Works; and -If the Work includes a "NOTICE" text file as part of its distribution, then any -Derivative Works that You distribute must include a readable copy of the -attribution notices contained within such NOTICE file, excluding those notices -that do not pertain to any part of the Derivative Works, in at least one of the -following places: within a NOTICE text file distributed as part of the -Derivative Works; within the Source form or documentation, if provided along -with the Derivative Works; or, within a display generated by the Derivative -Works, if and wherever such third-party notices normally appear. The contents of -the NOTICE file are for informational purposes only and do not modify the -License. You may add Your own attribution notices within Derivative Works that -You distribute, alongside or as an addendum to the NOTICE text from the Work, -provided that such additional attribution notices cannot be construed as -modifying the License. -You may add Your own copyright statement to Your modifications and may provide -additional or different license terms and conditions for use, reproduction, or -distribution of Your modifications, or for any such Derivative Works as a whole, -provided Your use, reproduction, and distribution of the Work otherwise complies -with the conditions stated in this License. - -5. Submission of Contributions. - -Unless You explicitly state otherwise, any Contribution intentionally submitted -for inclusion in the Work by You to the Licensor shall be under the terms and -conditions of this License, without any additional terms or conditions. -Notwithstanding the above, nothing herein shall supersede or modify the terms of -any separate license agreement you may have executed with Licensor regarding -such Contributions. - -6. Trademarks. - -This License does not grant permission to use the trade names, trademarks, -service marks, or product names of the Licensor, except as required for -reasonable and customary use in describing the origin of the Work and -reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. - -Unless required by applicable law or agreed to in writing, Licensor provides the -Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, -including, without limitation, any warranties or conditions of TITLE, -NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are -solely responsible for determining the appropriateness of using or -redistributing the Work and assume any risks associated with Your exercise of -permissions under this License. - -8. Limitation of Liability. 
- -In no event and under no legal theory, whether in tort (including negligence), -contract, or otherwise, unless required by applicable law (such as deliberate -and grossly negligent acts) or agreed to in writing, shall any Contributor be -liable to You for damages, including any direct, indirect, special, incidental, -or consequential damages of any character arising as a result of this License or -out of the use or inability to use the Work (including but not limited to -damages for loss of goodwill, work stoppage, computer failure or malfunction, or -any and all other commercial damages or losses), even if such Contributor has -been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. - -While redistributing the Work or Derivative Works thereof, You may choose to -offer, and charge a fee for, acceptance of support, warranty, indemnity, or -other liability obligations and/or rights consistent with this License. However, -in accepting such obligations, You may act only on Your own behalf and on Your -sole responsibility, not on behalf of any other Contributor, and only if You -agree to indemnify, defend, and hold each Contributor harmless for any liability -incurred by, or claims asserted against, such Contributor by reason of your -accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work - -To apply the Apache License to your work, attach the following boilerplate -notice, with the fields enclosed by brackets "[]" replaced with your own -identifying information. (Don't include the brackets!) The text should be -enclosed in the appropriate comment syntax for the file format. We also -recommend that a file or class name and description of purpose be included on -the same "printed page" as the copyright notice for easier identification within -third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/golang/glog/README b/vendor/github.com/golang/glog/README deleted file mode 100644 index 387b4eb..0000000 --- a/vendor/github.com/golang/glog/README +++ /dev/null @@ -1,44 +0,0 @@ -glog -==== - -Leveled execution logs for Go. - -This is an efficient pure Go implementation of leveled logs in the -manner of the open source C++ package - https://github.com/google/glog - -By binding methods to booleans it is possible to use the log package -without paying the expense of evaluating the arguments to the log. -Through the -vmodule flag, the package also provides fine-grained -control over logging at the file level. - -The comment from glog.go introduces the ideas: - - Package glog implements logging analogous to the Google-internal - C++ INFO/ERROR/V setup. It provides functions Info, Warning, - Error, Fatal, plus formatting variants such as Infof. It - also provides V-style logging controlled by the -v and - -vmodule=file=2 flags. 
- - Basic examples: - - glog.Info("Prepare to repel boarders") - - glog.Fatalf("Initialization failed: %s", err) - - See the documentation for the V function for an explanation - of these examples: - - if glog.V(2) { - glog.Info("Starting transaction...") - } - - glog.V(2).Infoln("Processed", nItems, "elements") - - -The repository contains an open source version of the log package -used inside Google. The master copy of the source lives inside -Google, not here. The code in this repo is for export only and is not itself -under development. Feature requests will be ignored. - -Send bug reports to golang-nuts@googlegroups.com. diff --git a/vendor/github.com/golang/glog/glog.go b/vendor/github.com/golang/glog/glog.go deleted file mode 100644 index 54bd7af..0000000 --- a/vendor/github.com/golang/glog/glog.go +++ /dev/null @@ -1,1180 +0,0 @@ -// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/ -// -// Copyright 2013 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package glog implements logging analogous to the Google-internal C++ INFO/ERROR/V setup. -// It provides functions Info, Warning, Error, Fatal, plus formatting variants such as -// Infof. It also provides V-style logging controlled by the -v and -vmodule=file=2 flags. -// -// Basic examples: -// -// glog.Info("Prepare to repel boarders") -// -// glog.Fatalf("Initialization failed: %s", err) -// -// See the documentation for the V function for an explanation of these examples: -// -// if glog.V(2) { -// glog.Info("Starting transaction...") -// } -// -// glog.V(2).Infoln("Processed", nItems, "elements") -// -// Log output is buffered and written periodically using Flush. Programs -// should call Flush before exiting to guarantee all log output is written. -// -// By default, all log statements write to files in a temporary directory. -// This package provides several flags that modify this behavior. -// As a result, flag.Parse must be called before any logging is done. -// -// -logtostderr=false -// Logs are written to standard error instead of to files. -// -alsologtostderr=false -// Logs are written to standard error as well as to files. -// -stderrthreshold=ERROR -// Log events at or above this severity are logged to standard -// error as well as to files. -// -log_dir="" -// Log files will be written to this directory instead of the -// default temporary directory. -// -// Other flags provide aids to debugging. -// -// -log_backtrace_at="" -// When set to a file and line number holding a logging statement, -// such as -// -log_backtrace_at=gopherflakes.go:234 -// a stack trace will be written to the Info log whenever execution -// hits that statement. (Unlike with -vmodule, the ".go" must be -// present.) -// -v=0 -// Enable V-leveled logging at the specified level. 
-// -vmodule="" -// The syntax of the argument is a comma-separated list of pattern=N, -// where pattern is a literal file name (minus the ".go" suffix) or -// "glob" pattern and N is a V level. For instance, -// -vmodule=gopher*=3 -// sets the V level to 3 in all Go files whose names begin "gopher". -// -package glog - -import ( - "bufio" - "bytes" - "errors" - "flag" - "fmt" - "io" - stdLog "log" - "os" - "path/filepath" - "runtime" - "strconv" - "strings" - "sync" - "sync/atomic" - "time" -) - -// severity identifies the sort of log: info, warning etc. It also implements -// the flag.Value interface. The -stderrthreshold flag is of type severity and -// should be modified only through the flag.Value interface. The values match -// the corresponding constants in C++. -type severity int32 // sync/atomic int32 - -// These constants identify the log levels in order of increasing severity. -// A message written to a high-severity log file is also written to each -// lower-severity log file. -const ( - infoLog severity = iota - warningLog - errorLog - fatalLog - numSeverity = 4 -) - -const severityChar = "IWEF" - -var severityName = []string{ - infoLog: "INFO", - warningLog: "WARNING", - errorLog: "ERROR", - fatalLog: "FATAL", -} - -// get returns the value of the severity. -func (s *severity) get() severity { - return severity(atomic.LoadInt32((*int32)(s))) -} - -// set sets the value of the severity. -func (s *severity) set(val severity) { - atomic.StoreInt32((*int32)(s), int32(val)) -} - -// String is part of the flag.Value interface. -func (s *severity) String() string { - return strconv.FormatInt(int64(*s), 10) -} - -// Get is part of the flag.Value interface. -func (s *severity) Get() interface{} { - return *s -} - -// Set is part of the flag.Value interface. -func (s *severity) Set(value string) error { - var threshold severity - // Is it a known name? - if v, ok := severityByName(value); ok { - threshold = v - } else { - v, err := strconv.Atoi(value) - if err != nil { - return err - } - threshold = severity(v) - } - logging.stderrThreshold.set(threshold) - return nil -} - -func severityByName(s string) (severity, bool) { - s = strings.ToUpper(s) - for i, name := range severityName { - if name == s { - return severity(i), true - } - } - return 0, false -} - -// OutputStats tracks the number of output lines and bytes written. -type OutputStats struct { - lines int64 - bytes int64 -} - -// Lines returns the number of lines written. -func (s *OutputStats) Lines() int64 { - return atomic.LoadInt64(&s.lines) -} - -// Bytes returns the number of bytes written. -func (s *OutputStats) Bytes() int64 { - return atomic.LoadInt64(&s.bytes) -} - -// Stats tracks the number of lines of output and number of bytes -// per severity level. Values must be read with atomic.LoadInt64. -var Stats struct { - Info, Warning, Error OutputStats -} - -var severityStats = [numSeverity]*OutputStats{ - infoLog: &Stats.Info, - warningLog: &Stats.Warning, - errorLog: &Stats.Error, -} - -// Level is exported because it appears in the arguments to V and is -// the type of the v flag, which can be set programmatically. -// It's a distinct type because we want to discriminate it from logType. -// Variables of type level are only changed under logging.mu. -// The -v flag is read only with atomic ops, so the state of the logging -// module is consistent. - -// Level is treated as a sync/atomic int32. - -// Level specifies a level of verbosity for V logs. 
*Level implements -// flag.Value; the -v flag is of type Level and should be modified -// only through the flag.Value interface. -type Level int32 - -// get returns the value of the Level. -func (l *Level) get() Level { - return Level(atomic.LoadInt32((*int32)(l))) -} - -// set sets the value of the Level. -func (l *Level) set(val Level) { - atomic.StoreInt32((*int32)(l), int32(val)) -} - -// String is part of the flag.Value interface. -func (l *Level) String() string { - return strconv.FormatInt(int64(*l), 10) -} - -// Get is part of the flag.Value interface. -func (l *Level) Get() interface{} { - return *l -} - -// Set is part of the flag.Value interface. -func (l *Level) Set(value string) error { - v, err := strconv.Atoi(value) - if err != nil { - return err - } - logging.mu.Lock() - defer logging.mu.Unlock() - logging.setVState(Level(v), logging.vmodule.filter, false) - return nil -} - -// moduleSpec represents the setting of the -vmodule flag. -type moduleSpec struct { - filter []modulePat -} - -// modulePat contains a filter for the -vmodule flag. -// It holds a verbosity level and a file pattern to match. -type modulePat struct { - pattern string - literal bool // The pattern is a literal string - level Level -} - -// match reports whether the file matches the pattern. It uses a string -// comparison if the pattern contains no metacharacters. -func (m *modulePat) match(file string) bool { - if m.literal { - return file == m.pattern - } - match, _ := filepath.Match(m.pattern, file) - return match -} - -func (m *moduleSpec) String() string { - // Lock because the type is not atomic. TODO: clean this up. - logging.mu.Lock() - defer logging.mu.Unlock() - var b bytes.Buffer - for i, f := range m.filter { - if i > 0 { - b.WriteRune(',') - } - fmt.Fprintf(&b, "%s=%d", f.pattern, f.level) - } - return b.String() -} - -// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the -// struct is not exported. -func (m *moduleSpec) Get() interface{} { - return nil -} - -var errVmoduleSyntax = errors.New("syntax error: expect comma-separated list of filename=N") - -// Syntax: -vmodule=recordio=2,file=1,gfs*=3 -func (m *moduleSpec) Set(value string) error { - var filter []modulePat - for _, pat := range strings.Split(value, ",") { - if len(pat) == 0 { - // Empty strings such as from a trailing comma can be ignored. - continue - } - patLev := strings.Split(pat, "=") - if len(patLev) != 2 || len(patLev[0]) == 0 || len(patLev[1]) == 0 { - return errVmoduleSyntax - } - pattern := patLev[0] - v, err := strconv.Atoi(patLev[1]) - if err != nil { - return errors.New("syntax error: expect comma-separated list of filename=N") - } - if v < 0 { - return errors.New("negative value for vmodule level") - } - if v == 0 { - continue // Ignore. It's harmless but no point in paying the overhead. - } - // TODO: check syntax of filter? - filter = append(filter, modulePat{pattern, isLiteral(pattern), Level(v)}) - } - logging.mu.Lock() - defer logging.mu.Unlock() - logging.setVState(logging.verbosity, filter, true) - return nil -} - -// isLiteral reports whether the pattern is a literal string, that is, has no metacharacters -// that require filepath.Match to be called to match the pattern. -func isLiteral(pattern string) bool { - return !strings.ContainsAny(pattern, `\*?[]`) -} - -// traceLocation represents the setting of the -log_backtrace_at flag. -type traceLocation struct { - file string - line int -} - -// isSet reports whether the trace location has been specified. 
-// logging.mu is held. -func (t *traceLocation) isSet() bool { - return t.line > 0 -} - -// match reports whether the specified file and line matches the trace location. -// The argument file name is the full path, not the basename specified in the flag. -// logging.mu is held. -func (t *traceLocation) match(file string, line int) bool { - if t.line != line { - return false - } - if i := strings.LastIndex(file, "/"); i >= 0 { - file = file[i+1:] - } - return t.file == file -} - -func (t *traceLocation) String() string { - // Lock because the type is not atomic. TODO: clean this up. - logging.mu.Lock() - defer logging.mu.Unlock() - return fmt.Sprintf("%s:%d", t.file, t.line) -} - -// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the -// struct is not exported -func (t *traceLocation) Get() interface{} { - return nil -} - -var errTraceSyntax = errors.New("syntax error: expect file.go:234") - -// Syntax: -log_backtrace_at=gopherflakes.go:234 -// Note that unlike vmodule the file extension is included here. -func (t *traceLocation) Set(value string) error { - if value == "" { - // Unset. - t.line = 0 - t.file = "" - } - fields := strings.Split(value, ":") - if len(fields) != 2 { - return errTraceSyntax - } - file, line := fields[0], fields[1] - if !strings.Contains(file, ".") { - return errTraceSyntax - } - v, err := strconv.Atoi(line) - if err != nil { - return errTraceSyntax - } - if v <= 0 { - return errors.New("negative or zero value for level") - } - logging.mu.Lock() - defer logging.mu.Unlock() - t.line = v - t.file = file - return nil -} - -// flushSyncWriter is the interface satisfied by logging destinations. -type flushSyncWriter interface { - Flush() error - Sync() error - io.Writer -} - -func init() { - flag.BoolVar(&logging.toStderr, "logtostderr", false, "log to standard error instead of files") - flag.BoolVar(&logging.alsoToStderr, "alsologtostderr", false, "log to standard error as well as files") - flag.Var(&logging.verbosity, "v", "log level for V logs") - flag.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr") - flag.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging") - flag.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace") - - // Default stderrThreshold is ERROR. - logging.stderrThreshold = errorLog - - logging.setVState(0, nil, false) - go logging.flushDaemon() -} - -// Flush flushes all pending log I/O. -func Flush() { - logging.lockAndFlushAll() -} - -// loggingT collects all the global state of the logging setup. -type loggingT struct { - // Boolean flags. Not handled atomically because the flag.Value interface - // does not let us avoid the =true, and that shorthand is necessary for - // compatibility. TODO: does this matter enough to fix? Seems unlikely. - toStderr bool // The -logtostderr flag. - alsoToStderr bool // The -alsologtostderr flag. - - // Level flag. Handled atomically. - stderrThreshold severity // The -stderrthreshold flag. - - // freeList is a list of byte buffers, maintained under freeListMu. - freeList *buffer - // freeListMu maintains the free list. It is separate from the main mutex - // so buffers can be grabbed and printed to without holding the main lock, - // for better parallelization. - freeListMu sync.Mutex - - // mu protects the remaining elements of this structure and is - // used to synchronize logging. 
- mu sync.Mutex - // file holds writer for each of the log types. - file [numSeverity]flushSyncWriter - // pcs is used in V to avoid an allocation when computing the caller's PC. - pcs [1]uintptr - // vmap is a cache of the V Level for each V() call site, identified by PC. - // It is wiped whenever the vmodule flag changes state. - vmap map[uintptr]Level - // filterLength stores the length of the vmodule filter chain. If greater - // than zero, it means vmodule is enabled. It may be read safely - // using sync.LoadInt32, but is only modified under mu. - filterLength int32 - // traceLocation is the state of the -log_backtrace_at flag. - traceLocation traceLocation - // These flags are modified only under lock, although verbosity may be fetched - // safely using atomic.LoadInt32. - vmodule moduleSpec // The state of the -vmodule flag. - verbosity Level // V logging level, the value of the -v flag/ -} - -// buffer holds a byte Buffer for reuse. The zero value is ready for use. -type buffer struct { - bytes.Buffer - tmp [64]byte // temporary byte array for creating headers. - next *buffer -} - -var logging loggingT - -// setVState sets a consistent state for V logging. -// l.mu is held. -func (l *loggingT) setVState(verbosity Level, filter []modulePat, setFilter bool) { - // Turn verbosity off so V will not fire while we are in transition. - logging.verbosity.set(0) - // Ditto for filter length. - atomic.StoreInt32(&logging.filterLength, 0) - - // Set the new filters and wipe the pc->Level map if the filter has changed. - if setFilter { - logging.vmodule.filter = filter - logging.vmap = make(map[uintptr]Level) - } - - // Things are consistent now, so enable filtering and verbosity. - // They are enabled in order opposite to that in V. - atomic.StoreInt32(&logging.filterLength, int32(len(filter))) - logging.verbosity.set(verbosity) -} - -// getBuffer returns a new, ready-to-use buffer. -func (l *loggingT) getBuffer() *buffer { - l.freeListMu.Lock() - b := l.freeList - if b != nil { - l.freeList = b.next - } - l.freeListMu.Unlock() - if b == nil { - b = new(buffer) - } else { - b.next = nil - b.Reset() - } - return b -} - -// putBuffer returns a buffer to the free list. -func (l *loggingT) putBuffer(b *buffer) { - if b.Len() >= 256 { - // Let big buffers die a natural death. - return - } - l.freeListMu.Lock() - b.next = l.freeList - l.freeList = b - l.freeListMu.Unlock() -} - -var timeNow = time.Now // Stubbed out for testing. - -/* -header formats a log header as defined by the C++ implementation. -It returns a buffer containing the formatted header and the user's file and line number. -The depth specifies how many stack frames above lives the source line to be identified in the log message. - -Log lines have this form: - Lmmdd hh:mm:ss.uuuuuu threadid file:line] msg... -where the fields are defined as follows: - L A single character, representing the log level (eg 'I' for INFO) - mm The month (zero padded; ie May is '05') - dd The day (zero padded) - hh:mm:ss.uuuuuu Time in hours, minutes and fractional seconds - threadid The space-padded thread ID as returned by GetTID() - file The file name - line The line number - msg The user-supplied message -*/ -func (l *loggingT) header(s severity, depth int) (*buffer, string, int) { - _, file, line, ok := runtime.Caller(3 + depth) - if !ok { - file = "???" 
- line = 1 - } else { - slash := strings.LastIndex(file, "/") - if slash >= 0 { - file = file[slash+1:] - } - } - return l.formatHeader(s, file, line), file, line -} - -// formatHeader formats a log header using the provided file name and line number. -func (l *loggingT) formatHeader(s severity, file string, line int) *buffer { - now := timeNow() - if line < 0 { - line = 0 // not a real line number, but acceptable to someDigits - } - if s > fatalLog { - s = infoLog // for safety. - } - buf := l.getBuffer() - - // Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand. - // It's worth about 3X. Fprintf is hard. - _, month, day := now.Date() - hour, minute, second := now.Clock() - // Lmmdd hh:mm:ss.uuuuuu threadid file:line] - buf.tmp[0] = severityChar[s] - buf.twoDigits(1, int(month)) - buf.twoDigits(3, day) - buf.tmp[5] = ' ' - buf.twoDigits(6, hour) - buf.tmp[8] = ':' - buf.twoDigits(9, minute) - buf.tmp[11] = ':' - buf.twoDigits(12, second) - buf.tmp[14] = '.' - buf.nDigits(6, 15, now.Nanosecond()/1000, '0') - buf.tmp[21] = ' ' - buf.nDigits(7, 22, pid, ' ') // TODO: should be TID - buf.tmp[29] = ' ' - buf.Write(buf.tmp[:30]) - buf.WriteString(file) - buf.tmp[0] = ':' - n := buf.someDigits(1, line) - buf.tmp[n+1] = ']' - buf.tmp[n+2] = ' ' - buf.Write(buf.tmp[:n+3]) - return buf -} - -// Some custom tiny helper functions to print the log header efficiently. - -const digits = "0123456789" - -// twoDigits formats a zero-prefixed two-digit integer at buf.tmp[i]. -func (buf *buffer) twoDigits(i, d int) { - buf.tmp[i+1] = digits[d%10] - d /= 10 - buf.tmp[i] = digits[d%10] -} - -// nDigits formats an n-digit integer at buf.tmp[i], -// padding with pad on the left. -// It assumes d >= 0. -func (buf *buffer) nDigits(n, i, d int, pad byte) { - j := n - 1 - for ; j >= 0 && d > 0; j-- { - buf.tmp[i+j] = digits[d%10] - d /= 10 - } - for ; j >= 0; j-- { - buf.tmp[i+j] = pad - } -} - -// someDigits formats a zero-prefixed variable-width integer at buf.tmp[i]. -func (buf *buffer) someDigits(i, d int) int { - // Print into the top, then copy down. We know there's space for at least - // a 10-digit number. - j := len(buf.tmp) - for { - j-- - buf.tmp[j] = digits[d%10] - d /= 10 - if d == 0 { - break - } - } - return copy(buf.tmp[i:], buf.tmp[j:]) -} - -func (l *loggingT) println(s severity, args ...interface{}) { - buf, file, line := l.header(s, 0) - fmt.Fprintln(buf, args...) - l.output(s, buf, file, line, false) -} - -func (l *loggingT) print(s severity, args ...interface{}) { - l.printDepth(s, 1, args...) -} - -func (l *loggingT) printDepth(s severity, depth int, args ...interface{}) { - buf, file, line := l.header(s, depth) - fmt.Fprint(buf, args...) - if buf.Bytes()[buf.Len()-1] != '\n' { - buf.WriteByte('\n') - } - l.output(s, buf, file, line, false) -} - -func (l *loggingT) printf(s severity, format string, args ...interface{}) { - buf, file, line := l.header(s, 0) - fmt.Fprintf(buf, format, args...) - if buf.Bytes()[buf.Len()-1] != '\n' { - buf.WriteByte('\n') - } - l.output(s, buf, file, line, false) -} - -// printWithFileLine behaves like print but uses the provided file and line number. If -// alsoLogToStderr is true, the log message always appears on standard error; it -// will also appear in the log file unless --logtostderr is set. -func (l *loggingT) printWithFileLine(s severity, file string, line int, alsoToStderr bool, args ...interface{}) { - buf := l.formatHeader(s, file, line) - fmt.Fprint(buf, args...) 
- if buf.Bytes()[buf.Len()-1] != '\n' { - buf.WriteByte('\n') - } - l.output(s, buf, file, line, alsoToStderr) -} - -// output writes the data to the log files and releases the buffer. -func (l *loggingT) output(s severity, buf *buffer, file string, line int, alsoToStderr bool) { - l.mu.Lock() - if l.traceLocation.isSet() { - if l.traceLocation.match(file, line) { - buf.Write(stacks(false)) - } - } - data := buf.Bytes() - if !flag.Parsed() { - os.Stderr.Write([]byte("ERROR: logging before flag.Parse: ")) - os.Stderr.Write(data) - } else if l.toStderr { - os.Stderr.Write(data) - } else { - if alsoToStderr || l.alsoToStderr || s >= l.stderrThreshold.get() { - os.Stderr.Write(data) - } - if l.file[s] == nil { - if err := l.createFiles(s); err != nil { - os.Stderr.Write(data) // Make sure the message appears somewhere. - l.exit(err) - } - } - switch s { - case fatalLog: - l.file[fatalLog].Write(data) - fallthrough - case errorLog: - l.file[errorLog].Write(data) - fallthrough - case warningLog: - l.file[warningLog].Write(data) - fallthrough - case infoLog: - l.file[infoLog].Write(data) - } - } - if s == fatalLog { - // If we got here via Exit rather than Fatal, print no stacks. - if atomic.LoadUint32(&fatalNoStacks) > 0 { - l.mu.Unlock() - timeoutFlush(10 * time.Second) - os.Exit(1) - } - // Dump all goroutine stacks before exiting. - // First, make sure we see the trace for the current goroutine on standard error. - // If -logtostderr has been specified, the loop below will do that anyway - // as the first stack in the full dump. - if !l.toStderr { - os.Stderr.Write(stacks(false)) - } - // Write the stack trace for all goroutines to the files. - trace := stacks(true) - logExitFunc = func(error) {} // If we get a write error, we'll still exit below. - for log := fatalLog; log >= infoLog; log-- { - if f := l.file[log]; f != nil { // Can be nil if -logtostderr is set. - f.Write(trace) - } - } - l.mu.Unlock() - timeoutFlush(10 * time.Second) - os.Exit(255) // C++ uses -1, which is silly because it's anded with 255 anyway. - } - l.putBuffer(buf) - l.mu.Unlock() - if stats := severityStats[s]; stats != nil { - atomic.AddInt64(&stats.lines, 1) - atomic.AddInt64(&stats.bytes, int64(len(data))) - } -} - -// timeoutFlush calls Flush and returns when it completes or after timeout -// elapses, whichever happens first. This is needed because the hooks invoked -// by Flush may deadlock when glog.Fatal is called from a hook that holds -// a lock. -func timeoutFlush(timeout time.Duration) { - done := make(chan bool, 1) - go func() { - Flush() // calls logging.lockAndFlushAll() - done <- true - }() - select { - case <-done: - case <-time.After(timeout): - fmt.Fprintln(os.Stderr, "glog: Flush took longer than", timeout) - } -} - -// stacks is a wrapper for runtime.Stack that attempts to recover the data for all goroutines. -func stacks(all bool) []byte { - // We don't know how big the traces are, so grow a few times if they don't fit. Start large, though. - n := 10000 - if all { - n = 100000 - } - var trace []byte - for i := 0; i < 5; i++ { - trace = make([]byte, n) - nbytes := runtime.Stack(trace, all) - if nbytes < len(trace) { - return trace[:nbytes] - } - n *= 2 - } - return trace -} - -// logExitFunc provides a simple mechanism to override the default behavior -// of exiting on error. Used in testing and to guarantee we reach a required exit -// for fatal logs. Instead, exit could be a function rather than a method but that -// would make its use clumsier. 
-var logExitFunc func(error) - -// exit is called if there is trouble creating or writing log files. -// It flushes the logs and exits the program; there's no point in hanging around. -// l.mu is held. -func (l *loggingT) exit(err error) { - fmt.Fprintf(os.Stderr, "log: exiting because of error: %s\n", err) - // If logExitFunc is set, we do that instead of exiting. - if logExitFunc != nil { - logExitFunc(err) - return - } - l.flushAll() - os.Exit(2) -} - -// syncBuffer joins a bufio.Writer to its underlying file, providing access to the -// file's Sync method and providing a wrapper for the Write method that provides log -// file rotation. There are conflicting methods, so the file cannot be embedded. -// l.mu is held for all its methods. -type syncBuffer struct { - logger *loggingT - *bufio.Writer - file *os.File - sev severity - nbytes uint64 // The number of bytes written to this file -} - -func (sb *syncBuffer) Sync() error { - return sb.file.Sync() -} - -func (sb *syncBuffer) Write(p []byte) (n int, err error) { - if sb.nbytes+uint64(len(p)) >= MaxSize { - if err := sb.rotateFile(time.Now()); err != nil { - sb.logger.exit(err) - } - } - n, err = sb.Writer.Write(p) - sb.nbytes += uint64(n) - if err != nil { - sb.logger.exit(err) - } - return -} - -// rotateFile closes the syncBuffer's file and starts a new one. -func (sb *syncBuffer) rotateFile(now time.Time) error { - if sb.file != nil { - sb.Flush() - sb.file.Close() - } - var err error - sb.file, _, err = create(severityName[sb.sev], now) - sb.nbytes = 0 - if err != nil { - return err - } - - sb.Writer = bufio.NewWriterSize(sb.file, bufferSize) - - // Write header. - var buf bytes.Buffer - fmt.Fprintf(&buf, "Log file created at: %s\n", now.Format("2006/01/02 15:04:05")) - fmt.Fprintf(&buf, "Running on machine: %s\n", host) - fmt.Fprintf(&buf, "Binary: Built with %s %s for %s/%s\n", runtime.Compiler, runtime.Version(), runtime.GOOS, runtime.GOARCH) - fmt.Fprintf(&buf, "Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg\n") - n, err := sb.file.Write(buf.Bytes()) - sb.nbytes += uint64(n) - return err -} - -// bufferSize sizes the buffer associated with each log file. It's large -// so that log records can accumulate without the logging thread blocking -// on disk I/O. The flushDaemon will block instead. -const bufferSize = 256 * 1024 - -// createFiles creates all the log files for severity from sev down to infoLog. -// l.mu is held. -func (l *loggingT) createFiles(sev severity) error { - now := time.Now() - // Files are created in decreasing severity order, so as soon as we find one - // has already been created, we can stop. - for s := sev; s >= infoLog && l.file[s] == nil; s-- { - sb := &syncBuffer{ - logger: l, - sev: s, - } - if err := sb.rotateFile(now); err != nil { - return err - } - l.file[s] = sb - } - return nil -} - -const flushInterval = 30 * time.Second - -// flushDaemon periodically flushes the log file buffers. -func (l *loggingT) flushDaemon() { - for _ = range time.NewTicker(flushInterval).C { - l.lockAndFlushAll() - } -} - -// lockAndFlushAll is like flushAll but locks l.mu first. -func (l *loggingT) lockAndFlushAll() { - l.mu.Lock() - l.flushAll() - l.mu.Unlock() -} - -// flushAll flushes all the logs and attempts to "sync" their data to disk. -// l.mu is held. -func (l *loggingT) flushAll() { - // Flush from fatal down, in case there's trouble flushing. 
- for s := fatalLog; s >= infoLog; s-- { - file := l.file[s] - if file != nil { - file.Flush() // ignore error - file.Sync() // ignore error - } - } -} - -// CopyStandardLogTo arranges for messages written to the Go "log" package's -// default logs to also appear in the Google logs for the named and lower -// severities. Subsequent changes to the standard log's default output location -// or format may break this behavior. -// -// Valid names are "INFO", "WARNING", "ERROR", and "FATAL". If the name is not -// recognized, CopyStandardLogTo panics. -func CopyStandardLogTo(name string) { - sev, ok := severityByName(name) - if !ok { - panic(fmt.Sprintf("log.CopyStandardLogTo(%q): unrecognized severity name", name)) - } - // Set a log format that captures the user's file and line: - // d.go:23: message - stdLog.SetFlags(stdLog.Lshortfile) - stdLog.SetOutput(logBridge(sev)) -} - -// logBridge provides the Write method that enables CopyStandardLogTo to connect -// Go's standard logs to the logs provided by this package. -type logBridge severity - -// Write parses the standard logging line and passes its components to the -// logger for severity(lb). -func (lb logBridge) Write(b []byte) (n int, err error) { - var ( - file = "???" - line = 1 - text string - ) - // Split "d.go:23: message" into "d.go", "23", and "message". - if parts := bytes.SplitN(b, []byte{':'}, 3); len(parts) != 3 || len(parts[0]) < 1 || len(parts[2]) < 1 { - text = fmt.Sprintf("bad log format: %s", b) - } else { - file = string(parts[0]) - text = string(parts[2][1:]) // skip leading space - line, err = strconv.Atoi(string(parts[1])) - if err != nil { - text = fmt.Sprintf("bad line number: %s", b) - line = 1 - } - } - // printWithFileLine with alsoToStderr=true, so standard log messages - // always appear on standard error. - logging.printWithFileLine(severity(lb), file, line, true, text) - return len(b), nil -} - -// setV computes and remembers the V level for a given PC -// when vmodule is enabled. -// File pattern matching takes the basename of the file, stripped -// of its .go suffix, and uses filepath.Match, which is a little more -// general than the *? matching used in C++. -// l.mu is held. -func (l *loggingT) setV(pc uintptr) Level { - fn := runtime.FuncForPC(pc) - file, _ := fn.FileLine(pc) - // The file is something like /a/b/c/d.go. We want just the d. - if strings.HasSuffix(file, ".go") { - file = file[:len(file)-3] - } - if slash := strings.LastIndex(file, "/"); slash >= 0 { - file = file[slash+1:] - } - for _, filter := range l.vmodule.filter { - if filter.match(file) { - l.vmap[pc] = filter.level - return filter.level - } - } - l.vmap[pc] = 0 - return 0 -} - -// Verbose is a boolean type that implements Infof (like Printf) etc. -// See the documentation of V for more information. -type Verbose bool - -// V reports whether verbosity at the call site is at least the requested level. -// The returned value is a boolean of type Verbose, which implements Info, Infoln -// and Infof. These methods will write to the Info log if called. -// Thus, one may write either -// if glog.V(2) { glog.Info("log this") } -// or -// glog.V(2).Info("log this") -// The second form is shorter but the first is cheaper if logging is off because it does -// not evaluate its arguments. -// -// Whether an individual call to V generates a log record depends on the setting of -// the -v and --vmodule flags; both are off by default. 
If the level in the call to -// V is at least the value of -v, or of -vmodule for the source file containing the -// call, the V call will log. -func V(level Level) Verbose { - // This function tries hard to be cheap unless there's work to do. - // The fast path is two atomic loads and compares. - - // Here is a cheap but safe test to see if V logging is enabled globally. - if logging.verbosity.get() >= level { - return Verbose(true) - } - - // It's off globally but it vmodule may still be set. - // Here is another cheap but safe test to see if vmodule is enabled. - if atomic.LoadInt32(&logging.filterLength) > 0 { - // Now we need a proper lock to use the logging structure. The pcs field - // is shared so we must lock before accessing it. This is fairly expensive, - // but if V logging is enabled we're slow anyway. - logging.mu.Lock() - defer logging.mu.Unlock() - if runtime.Callers(2, logging.pcs[:]) == 0 { - return Verbose(false) - } - v, ok := logging.vmap[logging.pcs[0]] - if !ok { - v = logging.setV(logging.pcs[0]) - } - return Verbose(v >= level) - } - return Verbose(false) -} - -// Info is equivalent to the global Info function, guarded by the value of v. -// See the documentation of V for usage. -func (v Verbose) Info(args ...interface{}) { - if v { - logging.print(infoLog, args...) - } -} - -// Infoln is equivalent to the global Infoln function, guarded by the value of v. -// See the documentation of V for usage. -func (v Verbose) Infoln(args ...interface{}) { - if v { - logging.println(infoLog, args...) - } -} - -// Infof is equivalent to the global Infof function, guarded by the value of v. -// See the documentation of V for usage. -func (v Verbose) Infof(format string, args ...interface{}) { - if v { - logging.printf(infoLog, format, args...) - } -} - -// Info logs to the INFO log. -// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Info(args ...interface{}) { - logging.print(infoLog, args...) -} - -// InfoDepth acts as Info but uses depth to determine which call frame to log. -// InfoDepth(0, "msg") is the same as Info("msg"). -func InfoDepth(depth int, args ...interface{}) { - logging.printDepth(infoLog, depth, args...) -} - -// Infoln logs to the INFO log. -// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. -func Infoln(args ...interface{}) { - logging.println(infoLog, args...) -} - -// Infof logs to the INFO log. -// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. -func Infof(format string, args ...interface{}) { - logging.printf(infoLog, format, args...) -} - -// Warning logs to the WARNING and INFO logs. -// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Warning(args ...interface{}) { - logging.print(warningLog, args...) -} - -// WarningDepth acts as Warning but uses depth to determine which call frame to log. -// WarningDepth(0, "msg") is the same as Warning("msg"). -func WarningDepth(depth int, args ...interface{}) { - logging.printDepth(warningLog, depth, args...) -} - -// Warningln logs to the WARNING and INFO logs. -// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. -func Warningln(args ...interface{}) { - logging.println(warningLog, args...) -} - -// Warningf logs to the WARNING and INFO logs. -// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. 
-func Warningf(format string, args ...interface{}) { - logging.printf(warningLog, format, args...) -} - -// Error logs to the ERROR, WARNING, and INFO logs. -// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Error(args ...interface{}) { - logging.print(errorLog, args...) -} - -// ErrorDepth acts as Error but uses depth to determine which call frame to log. -// ErrorDepth(0, "msg") is the same as Error("msg"). -func ErrorDepth(depth int, args ...interface{}) { - logging.printDepth(errorLog, depth, args...) -} - -// Errorln logs to the ERROR, WARNING, and INFO logs. -// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. -func Errorln(args ...interface{}) { - logging.println(errorLog, args...) -} - -// Errorf logs to the ERROR, WARNING, and INFO logs. -// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. -func Errorf(format string, args ...interface{}) { - logging.printf(errorLog, format, args...) -} - -// Fatal logs to the FATAL, ERROR, WARNING, and INFO logs, -// including a stack trace of all running goroutines, then calls os.Exit(255). -// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Fatal(args ...interface{}) { - logging.print(fatalLog, args...) -} - -// FatalDepth acts as Fatal but uses depth to determine which call frame to log. -// FatalDepth(0, "msg") is the same as Fatal("msg"). -func FatalDepth(depth int, args ...interface{}) { - logging.printDepth(fatalLog, depth, args...) -} - -// Fatalln logs to the FATAL, ERROR, WARNING, and INFO logs, -// including a stack trace of all running goroutines, then calls os.Exit(255). -// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. -func Fatalln(args ...interface{}) { - logging.println(fatalLog, args...) -} - -// Fatalf logs to the FATAL, ERROR, WARNING, and INFO logs, -// including a stack trace of all running goroutines, then calls os.Exit(255). -// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. -func Fatalf(format string, args ...interface{}) { - logging.printf(fatalLog, format, args...) -} - -// fatalNoStacks is non-zero if we are to exit without dumping goroutine stacks. -// It allows Exit and relatives to use the Fatal logs. -var fatalNoStacks uint32 - -// Exit logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). -// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Exit(args ...interface{}) { - atomic.StoreUint32(&fatalNoStacks, 1) - logging.print(fatalLog, args...) -} - -// ExitDepth acts as Exit but uses depth to determine which call frame to log. -// ExitDepth(0, "msg") is the same as Exit("msg"). -func ExitDepth(depth int, args ...interface{}) { - atomic.StoreUint32(&fatalNoStacks, 1) - logging.printDepth(fatalLog, depth, args...) -} - -// Exitln logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). -func Exitln(args ...interface{}) { - atomic.StoreUint32(&fatalNoStacks, 1) - logging.println(fatalLog, args...) -} - -// Exitf logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). -// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. -func Exitf(format string, args ...interface{}) { - atomic.StoreUint32(&fatalNoStacks, 1) - logging.printf(fatalLog, format, args...) 
-} diff --git a/vendor/github.com/golang/glog/glog_file.go b/vendor/github.com/golang/glog/glog_file.go deleted file mode 100644 index 65075d2..0000000 --- a/vendor/github.com/golang/glog/glog_file.go +++ /dev/null @@ -1,124 +0,0 @@ -// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/ -// -// Copyright 2013 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// File I/O for logs. - -package glog - -import ( - "errors" - "flag" - "fmt" - "os" - "os/user" - "path/filepath" - "strings" - "sync" - "time" -) - -// MaxSize is the maximum size of a log file in bytes. -var MaxSize uint64 = 1024 * 1024 * 1800 - -// logDirs lists the candidate directories for new log files. -var logDirs []string - -// If non-empty, overrides the choice of directory in which to write logs. -// See createLogDirs for the full list of possible destinations. -var logDir = flag.String("log_dir", "", "If non-empty, write log files in this directory") - -func createLogDirs() { - if *logDir != "" { - logDirs = append(logDirs, *logDir) - } - logDirs = append(logDirs, os.TempDir()) -} - -var ( - pid = os.Getpid() - program = filepath.Base(os.Args[0]) - host = "unknownhost" - userName = "unknownuser" -) - -func init() { - h, err := os.Hostname() - if err == nil { - host = shortHostname(h) - } - - current, err := user.Current() - if err == nil { - userName = current.Username - } - - // Sanitize userName since it may contain filepath separators on Windows. - userName = strings.Replace(userName, `\`, "_", -1) -} - -// shortHostname returns its argument, truncating at the first period. -// For instance, given "www.google.com" it returns "www". -func shortHostname(hostname string) string { - if i := strings.Index(hostname, "."); i >= 0 { - return hostname[:i] - } - return hostname -} - -// logName returns a new log file name containing tag, with start time t, and -// the name for the symlink for tag. -func logName(tag string, t time.Time) (name, link string) { - name = fmt.Sprintf("%s.%s.%s.log.%s.%04d%02d%02d-%02d%02d%02d.%d", - program, - host, - userName, - tag, - t.Year(), - t.Month(), - t.Day(), - t.Hour(), - t.Minute(), - t.Second(), - pid) - return name, program + "." + tag -} - -var onceLogDirs sync.Once - -// create creates a new log file and returns the file and its filename, which -// contains tag ("INFO", "FATAL", etc.) and t. If the file is created -// successfully, create also attempts to update the symlink for that tag, ignoring -// errors. 
-func create(tag string, t time.Time) (f *os.File, filename string, err error) { - onceLogDirs.Do(createLogDirs) - if len(logDirs) == 0 { - return nil, "", errors.New("log: no log dirs") - } - name, link := logName(tag, t) - var lastErr error - for _, dir := range logDirs { - fname := filepath.Join(dir, name) - f, err := os.Create(fname) - if err == nil { - symlink := filepath.Join(dir, link) - os.Remove(symlink) // ignore err - os.Symlink(name, symlink) // ignore err - return f, fname, nil - } - lastErr = err - } - return nil, "", fmt.Errorf("log: cannot create log: %v", lastErr) -} diff --git a/vendor/github.com/google/uuid/.travis.yml b/vendor/github.com/google/uuid/.travis.yml deleted file mode 100644 index d8156a6..0000000 --- a/vendor/github.com/google/uuid/.travis.yml +++ /dev/null @@ -1,9 +0,0 @@ -language: go - -go: - - 1.4.3 - - 1.5.3 - - tip - -script: - - go test -v ./... diff --git a/vendor/github.com/google/uuid/CONTRIBUTING.md b/vendor/github.com/google/uuid/CONTRIBUTING.md deleted file mode 100644 index 04fdf09..0000000 --- a/vendor/github.com/google/uuid/CONTRIBUTING.md +++ /dev/null @@ -1,10 +0,0 @@ -# How to contribute - -We definitely welcome patches and contribution to this project! - -### Legal requirements - -In order to protect both you and ourselves, you will need to sign the -[Contributor License Agreement](https://cla.developers.google.com/clas). - -You may have already signed it for other Google projects. diff --git a/vendor/github.com/google/uuid/CONTRIBUTORS b/vendor/github.com/google/uuid/CONTRIBUTORS deleted file mode 100644 index b4bb97f..0000000 --- a/vendor/github.com/google/uuid/CONTRIBUTORS +++ /dev/null @@ -1,9 +0,0 @@ -Paul Borman -bmatsuo -shawnps -theory -jboverfelt -dsymonds -cd1 -wallclockbuilder -dansouza diff --git a/vendor/github.com/google/uuid/LICENSE b/vendor/github.com/google/uuid/LICENSE deleted file mode 100644 index 5dc6826..0000000 --- a/vendor/github.com/google/uuid/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009,2014 Google Inc. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
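The hunks above delete the vendored copy of github.com/google/uuid; with the move to go mod the same package is fetched as a module dependency instead of living under vendor/. A minimal sketch of consuming it that way, using only APIs visible in the deleted sources (uuid.NewSHA1, uuid.Parse, uuid.NameSpaceDNS) plus uuid.New; the program and the "sql-runner.example" name are illustrative, not code from this repository:

package main

import (
	"fmt"

	"github.com/google/uuid" // resolved through go.mod rather than vendor/
)

func main() {
	// Random (Version 4) UUID.
	id := uuid.New()
	fmt.Println("random:", id)

	// Name-based (Version 5, SHA-1) UUID: deterministic for a given
	// namespace/name pair, so it can serve as a stable identifier.
	// The "sql-runner.example" name here is a hypothetical input.
	stable := uuid.NewSHA1(uuid.NameSpaceDNS, []byte("sql-runner.example"))
	fmt.Println("stable:", stable)

	// Round-trip through the canonical string form.
	parsed, err := uuid.Parse(stable.String())
	if err != nil {
		panic(err)
	}
	fmt.Println("round-trip ok:", parsed == stable)
}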
diff --git a/vendor/github.com/google/uuid/README.md b/vendor/github.com/google/uuid/README.md deleted file mode 100644 index 9d92c11..0000000 --- a/vendor/github.com/google/uuid/README.md +++ /dev/null @@ -1,19 +0,0 @@ -# uuid ![build status](https://travis-ci.org/google/uuid.svg?branch=master) -The uuid package generates and inspects UUIDs based on -[RFC 4122](http://tools.ietf.org/html/rfc4122) -and DCE 1.1: Authentication and Security Services. - -This package is based on the github.com/pborman/uuid package (previously named -code.google.com/p/go-uuid). It differs from these earlier packages in that -a UUID is a 16 byte array rather than a byte slice. One loss due to this -change is the ability to represent an invalid UUID (vs a NIL UUID). - -###### Install -`go get github.com/google/uuid` - -###### Documentation -[![GoDoc](https://godoc.org/github.com/google/uuid?status.svg)](http://godoc.org/github.com/google/uuid) - -Full `go doc` style documentation for the package can be viewed online without -installing this package by using the GoDoc site here: -http://godoc.org/github.com/google/uuid diff --git a/vendor/github.com/google/uuid/dce.go b/vendor/github.com/google/uuid/dce.go deleted file mode 100644 index fa820b9..0000000 --- a/vendor/github.com/google/uuid/dce.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "encoding/binary" - "fmt" - "os" -) - -// A Domain represents a Version 2 domain -type Domain byte - -// Domain constants for DCE Security (Version 2) UUIDs. -const ( - Person = Domain(0) - Group = Domain(1) - Org = Domain(2) -) - -// NewDCESecurity returns a DCE Security (Version 2) UUID. -// -// The domain should be one of Person, Group or Org. -// On a POSIX system the id should be the users UID for the Person -// domain and the users GID for the Group. The meaning of id for -// the domain Org or on non-POSIX systems is site defined. -// -// For a given domain/id pair the same token may be returned for up to -// 7 minutes and 10 seconds. -func NewDCESecurity(domain Domain, id uint32) (UUID, error) { - uuid, err := NewUUID() - if err == nil { - uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2 - uuid[9] = byte(domain) - binary.BigEndian.PutUint32(uuid[0:], id) - } - return uuid, err -} - -// NewDCEPerson returns a DCE Security (Version 2) UUID in the person -// domain with the id returned by os.Getuid. -// -// NewDCESecurity(Person, uint32(os.Getuid())) -func NewDCEPerson() (UUID, error) { - return NewDCESecurity(Person, uint32(os.Getuid())) -} - -// NewDCEGroup returns a DCE Security (Version 2) UUID in the group -// domain with the id returned by os.Getgid. -// -// NewDCESecurity(Group, uint32(os.Getgid())) -func NewDCEGroup() (UUID, error) { - return NewDCESecurity(Group, uint32(os.Getgid())) -} - -// Domain returns the domain for a Version 2 UUID. Domains are only defined -// for Version 2 UUIDs. -func (uuid UUID) Domain() Domain { - return Domain(uuid[9]) -} - -// ID returns the id for a Version 2 UUID. IDs are only defined for Version 2 -// UUIDs. 
-func (uuid UUID) ID() uint32 { - return binary.BigEndian.Uint32(uuid[0:4]) -} - -func (d Domain) String() string { - switch d { - case Person: - return "Person" - case Group: - return "Group" - case Org: - return "Org" - } - return fmt.Sprintf("Domain%d", int(d)) -} diff --git a/vendor/github.com/google/uuid/doc.go b/vendor/github.com/google/uuid/doc.go deleted file mode 100644 index 5b8a4b9..0000000 --- a/vendor/github.com/google/uuid/doc.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package uuid generates and inspects UUIDs. -// -// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security -// Services. -// -// A UUID is a 16 byte (128 bit) array. UUIDs may be used as keys to -// maps or compared directly. -package uuid diff --git a/vendor/github.com/google/uuid/hash.go b/vendor/github.com/google/uuid/hash.go deleted file mode 100644 index b174616..0000000 --- a/vendor/github.com/google/uuid/hash.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "crypto/md5" - "crypto/sha1" - "hash" -) - -// Well known namespace IDs and UUIDs -var ( - NameSpaceDNS = Must(Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")) - NameSpaceURL = Must(Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8")) - NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8")) - NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8")) - Nil UUID // empty UUID, all zeros -) - -// NewHash returns a new UUID derived from the hash of space concatenated with -// data generated by h. The hash should be at least 16 byte in length. The -// first 16 bytes of the hash are used to form the UUID. The version of the -// UUID will be the lower 4 bits of version. NewHash is used to implement -// NewMD5 and NewSHA1. -func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID { - h.Reset() - h.Write(space[:]) - h.Write(data) - s := h.Sum(nil) - var uuid UUID - copy(uuid[:], s) - uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4) - uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant - return uuid -} - -// NewMD5 returns a new MD5 (Version 3) UUID based on the -// supplied name space and data. It is the same as calling: -// -// NewHash(md5.New(), space, data, 3) -func NewMD5(space UUID, data []byte) UUID { - return NewHash(md5.New(), space, data, 3) -} - -// NewSHA1 returns a new SHA1 (Version 5) UUID based on the -// supplied name space and data. It is the same as calling: -// -// NewHash(sha1.New(), space, data, 5) -func NewSHA1(space UUID, data []byte) UUID { - return NewHash(sha1.New(), space, data, 5) -} diff --git a/vendor/github.com/google/uuid/marshal.go b/vendor/github.com/google/uuid/marshal.go deleted file mode 100644 index 7f9e0c6..0000000 --- a/vendor/github.com/google/uuid/marshal.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import "fmt" - -// MarshalText implements encoding.TextMarshaler. -func (uuid UUID) MarshalText() ([]byte, error) { - var js [36]byte - encodeHex(js[:], uuid) - return js[:], nil -} - -// UnmarshalText implements encoding.TextUnmarshaler. 
-func (uuid *UUID) UnmarshalText(data []byte) error { - id, err := ParseBytes(data) - if err == nil { - *uuid = id - } - return err -} - -// MarshalBinary implements encoding.BinaryMarshaler. -func (uuid UUID) MarshalBinary() ([]byte, error) { - return uuid[:], nil -} - -// UnmarshalBinary implements encoding.BinaryUnmarshaler. -func (uuid *UUID) UnmarshalBinary(data []byte) error { - if len(data) != 16 { - return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) - } - copy(uuid[:], data) - return nil -} diff --git a/vendor/github.com/google/uuid/node.go b/vendor/github.com/google/uuid/node.go deleted file mode 100644 index 3e4e90d..0000000 --- a/vendor/github.com/google/uuid/node.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "sync" -) - -var ( - nodeMu sync.Mutex - ifname string // name of interface being used - nodeID [6]byte // hardware for version 1 UUIDs - zeroID [6]byte // nodeID with only 0's -) - -// NodeInterface returns the name of the interface from which the NodeID was -// derived. The interface "user" is returned if the NodeID was set by -// SetNodeID. -func NodeInterface() string { - defer nodeMu.Unlock() - nodeMu.Lock() - return ifname -} - -// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs. -// If name is "" then the first usable interface found will be used or a random -// Node ID will be generated. If a named interface cannot be found then false -// is returned. -// -// SetNodeInterface never fails when name is "". -func SetNodeInterface(name string) bool { - defer nodeMu.Unlock() - nodeMu.Lock() - return setNodeInterface(name) -} - -func setNodeInterface(name string) bool { - iname, addr := getHardwareInterface(name) // null implementation for js - if iname != "" && addr != nil { - ifname = iname - copy(nodeID[:], addr) - return true - } - - // We found no interfaces with a valid hardware address. If name - // does not specify a specific interface generate a random Node ID - // (section 4.1.6) - if name == "" { - randomBits(nodeID[:]) - return true - } - return false -} - -// NodeID returns a slice of a copy of the current Node ID, setting the Node ID -// if not already set. -func NodeID() []byte { - defer nodeMu.Unlock() - nodeMu.Lock() - if nodeID == zeroID { - setNodeInterface("") - } - nid := nodeID - return nid[:] -} - -// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes -// of id are used. If id is less than 6 bytes then false is returned and the -// Node ID is not set. -func SetNodeID(id []byte) bool { - if len(id) < 6 { - return false - } - defer nodeMu.Unlock() - nodeMu.Lock() - copy(nodeID[:], id) - ifname = "user" - return true -} - -// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is -// not valid. The NodeID is only well defined for version 1 and 2 UUIDs. -func (uuid UUID) NodeID() []byte { - var node [6]byte - copy(node[:], uuid[10:]) - return node[:] -} diff --git a/vendor/github.com/google/uuid/node_js.go b/vendor/github.com/google/uuid/node_js.go deleted file mode 100644 index 24b78ed..0000000 --- a/vendor/github.com/google/uuid/node_js.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2017 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build js - -package uuid - -// getHardwareInterface returns nil values for the JS version of the code. -// This remvoves the "net" dependency, because it is not used in the browser. -// Using the "net" library inflates the size of the transpiled JS code by 673k bytes. -func getHardwareInterface(name string) (string, []byte) { return "", nil } diff --git a/vendor/github.com/google/uuid/node_net.go b/vendor/github.com/google/uuid/node_net.go deleted file mode 100644 index 0cbbcdd..0000000 --- a/vendor/github.com/google/uuid/node_net.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2017 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !js - -package uuid - -import "net" - -var interfaces []net.Interface // cached list of interfaces - -// getHardwareInterface returns the name and hardware address of interface name. -// If name is "" then the name and hardware address of one of the system's -// interfaces is returned. If no interfaces are found (name does not exist or -// there are no interfaces) then "", nil is returned. -// -// Only addresses of at least 6 bytes are returned. -func getHardwareInterface(name string) (string, []byte) { - if interfaces == nil { - var err error - interfaces, err = net.Interfaces() - if err != nil { - return "", nil - } - } - for _, ifs := range interfaces { - if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) { - return ifs.Name, ifs.HardwareAddr - } - } - return "", nil -} diff --git a/vendor/github.com/google/uuid/sql.go b/vendor/github.com/google/uuid/sql.go deleted file mode 100644 index f326b54..0000000 --- a/vendor/github.com/google/uuid/sql.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "database/sql/driver" - "fmt" -) - -// Scan implements sql.Scanner so UUIDs can be read from databases transparently -// Currently, database types that map to string and []byte are supported. Please -// consult database-specific driver documentation for matching types. -func (uuid *UUID) Scan(src interface{}) error { - switch src := src.(type) { - case nil: - return nil - - case string: - // if an empty UUID comes from a table, we return a null UUID - if src == "" { - return nil - } - - // see Parse for required string format - u, err := Parse(src) - if err != nil { - return fmt.Errorf("Scan: %v", err) - } - - *uuid = u - - case []byte: - // if an empty UUID comes from a table, we return a null UUID - if len(src) == 0 { - return nil - } - - // assumes a simple slice of bytes if 16 bytes - // otherwise attempts to parse - if len(src) != 16 { - return uuid.Scan(string(src)) - } - copy((*uuid)[:], src) - - default: - return fmt.Errorf("Scan: unable to scan type %T into UUID", src) - } - - return nil -} - -// Value implements sql.Valuer so that UUIDs can be written to databases -// transparently. Currently, UUIDs map to strings. Please consult -// database-specific driver documentation for matching types. -func (uuid UUID) Value() (driver.Value, error) { - return uuid.String(), nil -} diff --git a/vendor/github.com/google/uuid/time.go b/vendor/github.com/google/uuid/time.go deleted file mode 100644 index e6ef06c..0000000 --- a/vendor/github.com/google/uuid/time.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "encoding/binary" - "sync" - "time" -) - -// A Time represents a time as the number of 100's of nanoseconds since 15 Oct -// 1582. -type Time int64 - -const ( - lillian = 2299160 // Julian day of 15 Oct 1582 - unix = 2440587 // Julian day of 1 Jan 1970 - epoch = unix - lillian // Days between epochs - g1582 = epoch * 86400 // seconds between epochs - g1582ns100 = g1582 * 10000000 // 100s of a nanoseconds between epochs -) - -var ( - timeMu sync.Mutex - lasttime uint64 // last time we returned - clockSeq uint16 // clock sequence for this run - - timeNow = time.Now // for testing -) - -// UnixTime converts t the number of seconds and nanoseconds using the Unix -// epoch of 1 Jan 1970. -func (t Time) UnixTime() (sec, nsec int64) { - sec = int64(t - g1582ns100) - nsec = (sec % 10000000) * 100 - sec /= 10000000 - return sec, nsec -} - -// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and -// clock sequence as well as adjusting the clock sequence as needed. An error -// is returned if the current time cannot be determined. -func GetTime() (Time, uint16, error) { - defer timeMu.Unlock() - timeMu.Lock() - return getTime() -} - -func getTime() (Time, uint16, error) { - t := timeNow() - - // If we don't have a clock sequence already, set one. - if clockSeq == 0 { - setClockSequence(-1) - } - now := uint64(t.UnixNano()/100) + g1582ns100 - - // If time has gone backwards with this clock sequence then we - // increment the clock sequence - if now <= lasttime { - clockSeq = ((clockSeq + 1) & 0x3fff) | 0x8000 - } - lasttime = now - return Time(now), clockSeq, nil -} - -// ClockSequence returns the current clock sequence, generating one if not -// already set. The clock sequence is only used for Version 1 UUIDs. -// -// The uuid package does not use global static storage for the clock sequence or -// the last time a UUID was generated. Unless SetClockSequence is used, a new -// random clock sequence is generated the first time a clock sequence is -// requested by ClockSequence, GetTime, or NewUUID. (section 4.2.1.1) -func ClockSequence() int { - defer timeMu.Unlock() - timeMu.Lock() - return clockSequence() -} - -func clockSequence() int { - if clockSeq == 0 { - setClockSequence(-1) - } - return int(clockSeq & 0x3fff) -} - -// SetClockSequence sets the clock sequence to the lower 14 bits of seq. Setting to -// -1 causes a new sequence to be generated. -func SetClockSequence(seq int) { - defer timeMu.Unlock() - timeMu.Lock() - setClockSequence(seq) -} - -func setClockSequence(seq int) { - if seq == -1 { - var b [2]byte - randomBits(b[:]) // clock sequence - seq = int(b[0])<<8 | int(b[1]) - } - oldSeq := clockSeq - clockSeq = uint16(seq&0x3fff) | 0x8000 // Set our variant - if oldSeq != clockSeq { - lasttime = 0 - } -} - -// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in -// uuid. The time is only defined for version 1 and 2 UUIDs. -func (uuid UUID) Time() Time { - time := int64(binary.BigEndian.Uint32(uuid[0:4])) - time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32 - time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48 - return Time(time) -} - -// ClockSequence returns the clock sequence encoded in uuid. -// The clock sequence is only well defined for version 1 and 2 UUIDs. 
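The Time helpers being un-vendored above expose the timestamp embedded in time-based UUIDs: `Time` counts 100ns ticks since 15 Oct 1582, and `UnixTime` converts that back to the Unix epoch. A minimal sketch of that documented contract, using the same `github.com/google/uuid` module now resolved through go mod rather than `vendor/` (nothing here is specific to sql-runner):

```go
package main

import (
	"fmt"
	"time"

	"github.com/google/uuid"
)

func main() {
	// NewUUID embeds the current time (100ns ticks since 15 Oct 1582)
	// and the clock sequence into a Version 1 UUID.
	u, err := uuid.NewUUID()
	if err != nil {
		panic(err)
	}

	// Recover the embedded timestamp and convert it to Unix time.
	sec, nsec := u.Time().UnixTime()
	fmt.Println(time.Unix(sec, nsec).UTC(), u.ClockSequence())
}
```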
-func (uuid UUID) ClockSequence() int { - return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff -} diff --git a/vendor/github.com/google/uuid/util.go b/vendor/github.com/google/uuid/util.go deleted file mode 100644 index 5ea6c73..0000000 --- a/vendor/github.com/google/uuid/util.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "io" -) - -// randomBits completely fills slice b with random data. -func randomBits(b []byte) { - if _, err := io.ReadFull(rander, b); err != nil { - panic(err.Error()) // rand should never fail - } -} - -// xvalues returns the value of a byte as a hexadecimal digit or 255. -var xvalues = [256]byte{ - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255, - 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, -} - -// xtob converts hex characters x1 and x2 into a byte. -func xtob(x1, x2 byte) (byte, bool) { - b1 := xvalues[x1] - b2 := xvalues[x2] - return (b1 << 4) | b2, b1 != 255 && b2 != 255 -} diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go deleted file mode 100644 index 7f3643f..0000000 --- a/vendor/github.com/google/uuid/uuid.go +++ /dev/null @@ -1,198 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "bytes" - "crypto/rand" - "encoding/hex" - "errors" - "fmt" - "io" - "strings" -) - -// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC -// 4122. -type UUID [16]byte - -// A Version represents a UUID's version. -type Version byte - -// A Variant represents a UUID's variant. -type Variant byte - -// Constants returned by Variant. -const ( - Invalid = Variant(iota) // Invalid UUID - RFC4122 // The variant specified in RFC4122 - Reserved // Reserved, NCS backward compatibility. - Microsoft // Reserved, Microsoft Corporation backward compatibility. - Future // Reserved for future definition. -) - -var rander = rand.Reader // random function - -// Parse decodes s into a UUID or returns an error. Both the UUID form of -// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and -// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded. 
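Parse, per its doc comment above, accepts both the bare `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx` form and the `urn:uuid:`-prefixed form. A short sketch of that behavior; the UUID value is an arbitrary example, not one taken from this project:

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// Both accepted forms decode to the same 16-byte value, so the
	// comparison below prints true (UUID is a comparable [16]byte).
	plain, err := uuid.Parse("123e4567-e89b-12d3-a456-426614174000")
	if err != nil {
		panic(err)
	}
	urn, err := uuid.Parse("urn:uuid:123e4567-e89b-12d3-a456-426614174000")
	if err != nil {
		panic(err)
	}
	fmt.Println(plain == urn)
}
```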
-func Parse(s string) (UUID, error) { - var uuid UUID - if len(s) != 36 { - if len(s) != 36+9 { - return uuid, fmt.Errorf("invalid UUID length: %d", len(s)) - } - if strings.ToLower(s[:9]) != "urn:uuid:" { - return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9]) - } - s = s[9:] - } - if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { - return uuid, errors.New("invalid UUID format") - } - for i, x := range [16]int{ - 0, 2, 4, 6, - 9, 11, - 14, 16, - 19, 21, - 24, 26, 28, 30, 32, 34} { - v, ok := xtob(s[x], s[x+1]) - if !ok { - return uuid, errors.New("invalid UUID format") - } - uuid[i] = v - } - return uuid, nil -} - -// ParseBytes is like Parse, except it parses a byte slice instead of a string. -func ParseBytes(b []byte) (UUID, error) { - var uuid UUID - if len(b) != 36 { - if len(b) != 36+9 { - return uuid, fmt.Errorf("invalid UUID length: %d", len(b)) - } - if !bytes.Equal(bytes.ToLower(b[:9]), []byte("urn:uuid:")) { - return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9]) - } - b = b[9:] - } - if b[8] != '-' || b[13] != '-' || b[18] != '-' || b[23] != '-' { - return uuid, errors.New("invalid UUID format") - } - for i, x := range [16]int{ - 0, 2, 4, 6, - 9, 11, - 14, 16, - 19, 21, - 24, 26, 28, 30, 32, 34} { - v, ok := xtob(b[x], b[x+1]) - if !ok { - return uuid, errors.New("invalid UUID format") - } - uuid[i] = v - } - return uuid, nil -} - -// FromBytes creates a new UUID from a byte slice. Returns an error if the slice -// does not have a length of 16. The bytes are copied from the slice. -func FromBytes(b []byte) (uuid UUID, err error) { - err = uuid.UnmarshalBinary(b) - return uuid, err -} - -// Must returns uuid if err is nil and panics otherwise. -func Must(uuid UUID, err error) UUID { - if err != nil { - panic(err) - } - return uuid -} - -// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx -// , or "" if uuid is invalid. -func (uuid UUID) String() string { - var buf [36]byte - encodeHex(buf[:], uuid) - return string(buf[:]) -} - -// URN returns the RFC 2141 URN form of uuid, -// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid. -func (uuid UUID) URN() string { - var buf [36 + 9]byte - copy(buf[:], "urn:uuid:") - encodeHex(buf[9:], uuid) - return string(buf[:]) -} - -func encodeHex(dst []byte, uuid UUID) { - hex.Encode(dst[:], uuid[:4]) - dst[8] = '-' - hex.Encode(dst[9:13], uuid[4:6]) - dst[13] = '-' - hex.Encode(dst[14:18], uuid[6:8]) - dst[18] = '-' - hex.Encode(dst[19:23], uuid[8:10]) - dst[23] = '-' - hex.Encode(dst[24:], uuid[10:]) -} - -// Variant returns the variant encoded in uuid. -func (uuid UUID) Variant() Variant { - switch { - case (uuid[8] & 0xc0) == 0x80: - return RFC4122 - case (uuid[8] & 0xe0) == 0xc0: - return Microsoft - case (uuid[8] & 0xe0) == 0xe0: - return Future - default: - return Reserved - } -} - -// Version returns the version of uuid. -func (uuid UUID) Version() Version { - return Version(uuid[6] >> 4) -} - -func (v Version) String() string { - if v > 15 { - return fmt.Sprintf("BAD_VERSION_%d", v) - } - return fmt.Sprintf("VERSION_%d", v) -} - -func (v Variant) String() string { - switch v { - case RFC4122: - return "RFC4122" - case Reserved: - return "Reserved" - case Microsoft: - return "Microsoft" - case Future: - return "Future" - case Invalid: - return "Invalid" - } - return fmt.Sprintf("BadVariant%d", int(v)) -} - -// SetRand sets the random number generator to r, which implements io.Reader. 
-// If r.Read returns an error when the package requests random data then -// a panic will be issued. -// -// Calling SetRand with nil sets the random number generator to the default -// generator. -func SetRand(r io.Reader) { - if r == nil { - rander = rand.Reader - return - } - rander = r -} diff --git a/vendor/github.com/google/uuid/version1.go b/vendor/github.com/google/uuid/version1.go deleted file mode 100644 index 199a1ac..0000000 --- a/vendor/github.com/google/uuid/version1.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "encoding/binary" -) - -// NewUUID returns a Version 1 UUID based on the current NodeID and clock -// sequence, and the current time. If the NodeID has not been set by SetNodeID -// or SetNodeInterface then it will be set automatically. If the NodeID cannot -// be set NewUUID returns nil. If clock sequence has not been set by -// SetClockSequence then it will be set automatically. If GetTime fails to -// return the current NewUUID returns nil and an error. -// -// In most cases, New should be used. -func NewUUID() (UUID, error) { - nodeMu.Lock() - if nodeID == zeroID { - setNodeInterface("") - } - nodeMu.Unlock() - - var uuid UUID - now, seq, err := GetTime() - if err != nil { - return uuid, err - } - - timeLow := uint32(now & 0xffffffff) - timeMid := uint16((now >> 32) & 0xffff) - timeHi := uint16((now >> 48) & 0x0fff) - timeHi |= 0x1000 // Version 1 - - binary.BigEndian.PutUint32(uuid[0:], timeLow) - binary.BigEndian.PutUint16(uuid[4:], timeMid) - binary.BigEndian.PutUint16(uuid[6:], timeHi) - binary.BigEndian.PutUint16(uuid[8:], seq) - copy(uuid[10:], nodeID[:]) - - return uuid, nil -} diff --git a/vendor/github.com/google/uuid/version4.go b/vendor/github.com/google/uuid/version4.go deleted file mode 100644 index 84af91c..0000000 --- a/vendor/github.com/google/uuid/version4.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import "io" - -// New creates a new random UUID or panics. New is equivalent to -// the expression -// -// uuid.Must(uuid.NewRandom()) -func New() UUID { - return Must(NewRandom()) -} - -// NewRandom returns a Random (Version 4) UUID. -// -// The strength of the UUIDs is based on the strength of the crypto/rand -// package. -// -// A note about uniqueness derived from the UUID Wikipedia entry: -// -// Randomly generated UUIDs have 122 random bits. One's annual risk of being -// hit by a meteorite is estimated to be one chance in 17 billion, that -// means the probability is about 0.00000000006 (6 × 10−11), -// equivalent to the odds of creating a few tens of trillions of UUIDs in a -// year and having one duplicate. -func NewRandom() (UUID, error) { - var uuid UUID - _, err := io.ReadFull(rander, uuid[:]) - if err != nil { - return Nil, err - } - uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4 - uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10 - return uuid, nil -} diff --git a/vendor/github.com/hashicorp/consul/LICENSE b/vendor/github.com/hashicorp/consul/LICENSE deleted file mode 100644 index c33dcc7..0000000 --- a/vendor/github.com/hashicorp/consul/LICENSE +++ /dev/null @@ -1,354 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. 
“Contributor” - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. “Contributor Version” - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor’s Contribution. - -1.3. “Contribution” - - means Covered Software of a particular Contributor. - -1.4. “Covered Software” - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. “Incompatible With Secondary Licenses” - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of version - 1.1 or earlier of the License, but not also under the terms of a - Secondary License. - -1.6. “Executable Form” - - means any form of the work other than Source Code Form. - -1.7. “Larger Work” - - means a work that combines Covered Software with other material, in a separate - file or files, that is not Covered Software. - -1.8. “License” - - means this document. - -1.9. “Licensable” - - means having the right to grant, to the maximum extent possible, whether at the - time of the initial grant or subsequently, any and all of the rights conveyed by - this License. - -1.10. “Modifications” - - means any of the following: - - a. any file in Source Code Form that results from an addition to, deletion - from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. “Patent Claims” of a Contributor - - means any patent claim(s), including without limitation, method, process, - and apparatus claims, in any patent Licensable by such Contributor that - would be infringed, but for the grant of the License, by the making, - using, selling, offering for sale, having made, import, or transfer of - either its Contributions or its Contributor Version. - -1.12. “Secondary License” - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. “Source Code Form” - - means the form of the work preferred for making modifications. - -1.14. “You” (or “Your”) - - means an individual or a legal entity exercising rights under this - License. For legal entities, “You” includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, “control” means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or as - part of a Larger Work; and - - b. 
under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its Contributions - or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution become - effective for each Contribution on the date the Contributor first distributes - such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under this - License. No additional rights or licenses will be implied from the distribution - or licensing of Covered Software under this License. Notwithstanding Section - 2.1(b) above, no patent license is granted by a Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party’s - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of its - Contributions. - - This License does not grant any rights in the trademarks, service marks, or - logos of any Contributor (except as may be necessary to comply with the - notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this License - (see Section 10.2) or under the terms of a Secondary License (if permitted - under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its Contributions - are its original creation(s) or it has sufficient rights to grant the - rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under applicable - copyright doctrines of fair use, fair dealing, or other equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under the - terms of this License. You must inform recipients that the Source Code Form - of the Covered Software is governed by the terms of this License, and how - they can obtain a copy of this License. You may not attempt to alter or - restrict the recipients’ rights in the Source Code Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this License, - or sublicense it under different terms, provided that the license for - the Executable Form does not attempt to limit or alter the recipients’ - rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for the - Covered Software. 
If the Larger Work is a combination of Covered Software - with a work governed by one or more Secondary Licenses, and the Covered - Software is not Incompatible With Secondary Licenses, this License permits - You to additionally distribute such Covered Software under the terms of - such Secondary License(s), so that the recipient of the Larger Work may, at - their option, further distribute the Covered Software under the terms of - either this License or such Secondary License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices (including - copyright notices, patent notices, disclaimers of warranty, or limitations - of liability) contained within the Source Code Form of the Covered - Software, except that You may alter any license notices to the extent - required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on behalf - of any Contributor. You must make it absolutely clear that any such - warranty, support, indemnity, or liability obligation is offered by You - alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, judicial - order, or regulation then You must: (a) comply with the terms of this License - to the maximum extent possible; and (b) describe the limitations and the code - they affect. Such description must be placed in a text file included with all - distributions of the Covered Software under this License. Except to the - extent prohibited by statute or regulation, such description must be - sufficiently detailed for a recipient of ordinary skill to be able to - understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing basis, - if such Contributor fails to notify You of the non-compliance by some - reasonable means prior to 60 days after You have come back into compliance. - Moreover, Your grants from a particular Contributor are reinstated on an - ongoing basis if such Contributor notifies You of the non-compliance by - some reasonable means, this is the first time You have received notice of - non-compliance with this License from such Contributor, and You become - compliant prior to 30 days after Your receipt of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, counter-claims, - and cross-claims) alleging that a Contributor Version directly or - indirectly infringes any patent, then the rights granted to You by any and - all Contributors for the Covered Software under Section 2.1 of this License - shall terminate. 
- -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an “as is” basis, without - warranty of any kind, either expressed, implied, or statutory, including, - without limitation, warranties that the Covered Software is free of defects, - merchantable, fit for a particular purpose or non-infringing. The entire - risk as to the quality and performance of the Covered Software is with You. - Should any Covered Software prove defective in any respect, You (not any - Contributor) assume the cost of any necessary servicing, repair, or - correction. This disclaimer of warranty constitutes an essential part of this - License. No use of any Covered Software is authorized under this License - except under this disclaimer. - -7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from such - party’s negligence to the extent applicable law prohibits such limitation. - Some jurisdictions do not allow the exclusion or limitation of incidental or - consequential damages, so this exclusion and limitation may not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts of - a jurisdiction where the defendant maintains its principal place of business - and such litigation shall be governed by laws of that jurisdiction, without - reference to its conflict-of-law provisions. Nothing in this Section shall - prevent a party’s ability to bring cross-claims or counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject matter - hereof. If any provision of this License is held to be unenforceable, such - provision shall be reformed only to the extent necessary to make it - enforceable. Any law or regulation which provides that the language of a - contract shall be construed against the drafter shall not be used to construe - this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version of - the License under which You originally received the Covered Software, or - under the terms of any subsequent version published by the license - steward. - -10.3. 
Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a modified - version of this License if you rename the license and remove any - references to the name of the license steward (except to note that such - modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses - If You choose to distribute Source Code Form that is Incompatible With - Secondary Licenses under the terms of this version of the License, the - notice described in Exhibit B of this License must be attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, then -You may include the notice in a location (such as a LICENSE file in a relevant -directory) where a recipient would be likely to look for such a notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - “Incompatible With Secondary Licenses” Notice - - This Source Code Form is “Incompatible - With Secondary Licenses”, as defined by - the Mozilla Public License, v. 2.0. - diff --git a/vendor/github.com/hashicorp/consul/NOTICE.md b/vendor/github.com/hashicorp/consul/NOTICE.md deleted file mode 100644 index fe34b5e..0000000 --- a/vendor/github.com/hashicorp/consul/NOTICE.md +++ /dev/null @@ -1,3 +0,0 @@ -Copyright © 2014-2018 HashiCorp, Inc. - -This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with this project, you can obtain one at http://mozilla.org/MPL/2.0/. diff --git a/vendor/github.com/hashicorp/consul/api/README.md b/vendor/github.com/hashicorp/consul/api/README.md deleted file mode 100644 index 3255cbb..0000000 --- a/vendor/github.com/hashicorp/consul/api/README.md +++ /dev/null @@ -1,67 +0,0 @@ -Consul API client -================= - -This package provides the `api` package which attempts to -provide programmatic access to the full Consul API. - -Currently, all of the Consul APIs included in version 0.6.0 are supported. - -Documentation -============= - -The full documentation is available on [Godoc](https://godoc.org/github.com/hashicorp/consul/api) - -Usage -===== - -Below is an example of using the Consul client: - -```go -package main - -import "github.com/hashicorp/consul/api" -import "fmt" - -func main() { - // Get a new client - client, err := api.NewClient(api.DefaultConfig()) - if err != nil { - panic(err) - } - - // Get a handle to the KV API - kv := client.KV() - - // PUT a new KV pair - p := &api.KVPair{Key: "REDIS_MAXCLIENTS", Value: []byte("1000")} - _, err = kv.Put(p, nil) - if err != nil { - panic(err) - } - - // Lookup the pair - pair, _, err := kv.Get("REDIS_MAXCLIENTS", nil) - if err != nil { - panic(err) - } - fmt.Printf("KV: %v %s\n", pair.Key, pair.Value) -} -``` - -To run this example, start a Consul server: - -```bash -consul agent -dev -``` - -Copy the code above into a file such as `main.go`. - -Install and run. You'll see a key (`REDIS_MAXCLIENTS`) and value (`1000`) printed. 
- -```bash -$ go get -$ go run main.go -KV: REDIS_MAXCLIENTS 1000 -``` - -After running the code, you can also view the values in the Consul UI on your local machine at http://localhost:8500/ui/dc1/kv diff --git a/vendor/github.com/hashicorp/consul/api/acl.go b/vendor/github.com/hashicorp/consul/api/acl.go deleted file mode 100644 index 8ec9aa5..0000000 --- a/vendor/github.com/hashicorp/consul/api/acl.go +++ /dev/null @@ -1,193 +0,0 @@ -package api - -import ( - "time" -) - -const ( - // ACLClientType is the client type token - ACLClientType = "client" - - // ACLManagementType is the management type token - ACLManagementType = "management" -) - -// ACLEntry is used to represent an ACL entry -type ACLEntry struct { - CreateIndex uint64 - ModifyIndex uint64 - ID string - Name string - Type string - Rules string -} - -// ACLReplicationStatus is used to represent the status of ACL replication. -type ACLReplicationStatus struct { - Enabled bool - Running bool - SourceDatacenter string - ReplicatedIndex uint64 - LastSuccess time.Time - LastError time.Time -} - -// ACL can be used to query the ACL endpoints -type ACL struct { - c *Client -} - -// ACL returns a handle to the ACL endpoints -func (c *Client) ACL() *ACL { - return &ACL{c} -} - -// Bootstrap is used to perform a one-time ACL bootstrap operation on a cluster -// to get the first management token. -func (a *ACL) Bootstrap() (string, *WriteMeta, error) { - r := a.c.newRequest("PUT", "/v1/acl/bootstrap") - rtt, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return "", nil, err - } - defer resp.Body.Close() - - wm := &WriteMeta{RequestTime: rtt} - var out struct{ ID string } - if err := decodeBody(resp, &out); err != nil { - return "", nil, err - } - return out.ID, wm, nil -} - -// Create is used to generate a new token with the given parameters -func (a *ACL) Create(acl *ACLEntry, q *WriteOptions) (string, *WriteMeta, error) { - r := a.c.newRequest("PUT", "/v1/acl/create") - r.setWriteOptions(q) - r.obj = acl - rtt, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return "", nil, err - } - defer resp.Body.Close() - - wm := &WriteMeta{RequestTime: rtt} - var out struct{ ID string } - if err := decodeBody(resp, &out); err != nil { - return "", nil, err - } - return out.ID, wm, nil -} - -// Update is used to update the rules of an existing token -func (a *ACL) Update(acl *ACLEntry, q *WriteOptions) (*WriteMeta, error) { - r := a.c.newRequest("PUT", "/v1/acl/update") - r.setWriteOptions(q) - r.obj = acl - rtt, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - wm := &WriteMeta{RequestTime: rtt} - return wm, nil -} - -// Destroy is used to destroy a given ACL token ID -func (a *ACL) Destroy(id string, q *WriteOptions) (*WriteMeta, error) { - r := a.c.newRequest("PUT", "/v1/acl/destroy/"+id) - r.setWriteOptions(q) - rtt, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return nil, err - } - resp.Body.Close() - - wm := &WriteMeta{RequestTime: rtt} - return wm, nil -} - -// Clone is used to return a new token cloned from an existing one -func (a *ACL) Clone(id string, q *WriteOptions) (string, *WriteMeta, error) { - r := a.c.newRequest("PUT", "/v1/acl/clone/"+id) - r.setWriteOptions(q) - rtt, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return "", nil, err - } - defer resp.Body.Close() - - wm := &WriteMeta{RequestTime: rtt} - var out struct{ ID string } - if err := decodeBody(resp, &out); err != nil { - return "", nil, err - } - 
return out.ID, wm, nil -} - -// Info is used to query for information about an ACL token -func (a *ACL) Info(id string, q *QueryOptions) (*ACLEntry, *QueryMeta, error) { - r := a.c.newRequest("GET", "/v1/acl/info/"+id) - r.setQueryOptions(q) - rtt, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var entries []*ACLEntry - if err := decodeBody(resp, &entries); err != nil { - return nil, nil, err - } - if len(entries) > 0 { - return entries[0], qm, nil - } - return nil, qm, nil -} - -// List is used to get all the ACL tokens -func (a *ACL) List(q *QueryOptions) ([]*ACLEntry, *QueryMeta, error) { - r := a.c.newRequest("GET", "/v1/acl/list") - r.setQueryOptions(q) - rtt, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var entries []*ACLEntry - if err := decodeBody(resp, &entries); err != nil { - return nil, nil, err - } - return entries, qm, nil -} - -// Replication returns the status of the ACL replication process in the datacenter -func (a *ACL) Replication(q *QueryOptions) (*ACLReplicationStatus, *QueryMeta, error) { - r := a.c.newRequest("GET", "/v1/acl/replication") - r.setQueryOptions(q) - rtt, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var entries *ACLReplicationStatus - if err := decodeBody(resp, &entries); err != nil { - return nil, nil, err - } - return entries, qm, nil -} diff --git a/vendor/github.com/hashicorp/consul/api/agent.go b/vendor/github.com/hashicorp/consul/api/agent.go deleted file mode 100644 index 8cb81fc..0000000 --- a/vendor/github.com/hashicorp/consul/api/agent.go +++ /dev/null @@ -1,791 +0,0 @@ -package api - -import ( - "bufio" - "fmt" -) - -// ServiceKind is the kind of service being registered. -type ServiceKind string - -const ( - // ServiceKindTypical is a typical, classic Consul service. This is - // represented by the absence of a value. This was chosen for ease of - // backwards compatibility: existing services in the catalog would - // default to the typical service. - ServiceKindTypical ServiceKind = "" - - // ServiceKindConnectProxy is a proxy for the Connect feature. This - // service proxies another service within Consul and speaks the connect - // protocol. - ServiceKindConnectProxy ServiceKind = "connect-proxy" -) - -// ProxyExecMode is the execution mode for a managed Connect proxy. -type ProxyExecMode string - -const ( - // ProxyExecModeDaemon indicates that the proxy command should be long-running - // and should be started and supervised by the agent until it's target service - // is deregistered. - ProxyExecModeDaemon ProxyExecMode = "daemon" - - // ProxyExecModeScript indicates that the proxy command should be invoke to - // completion on each change to the configuration of lifecycle event. The - // script typically fetches the config and certificates from the agent API and - // then configures an externally managed daemon, perhaps starting and stopping - // it if necessary. 
- ProxyExecModeScript ProxyExecMode = "script" -) - -// AgentCheck represents a check known to the agent -type AgentCheck struct { - Node string - CheckID string - Name string - Status string - Notes string - Output string - ServiceID string - ServiceName string - Definition HealthCheckDefinition -} - -// AgentService represents a service known to the agent -type AgentService struct { - Kind ServiceKind - ID string - Service string - Tags []string - Meta map[string]string - Port int - Address string - EnableTagOverride bool - CreateIndex uint64 - ModifyIndex uint64 - ProxyDestination string - Connect *AgentServiceConnect -} - -// AgentServiceConnect represents the Connect configuration of a service. -type AgentServiceConnect struct { - Native bool - Proxy *AgentServiceConnectProxy -} - -// AgentServiceConnectProxy represents the Connect Proxy configuration of a -// service. -type AgentServiceConnectProxy struct { - ExecMode ProxyExecMode - Command []string - Config map[string]interface{} -} - -// AgentMember represents a cluster member known to the agent -type AgentMember struct { - Name string - Addr string - Port uint16 - Tags map[string]string - Status int - ProtocolMin uint8 - ProtocolMax uint8 - ProtocolCur uint8 - DelegateMin uint8 - DelegateMax uint8 - DelegateCur uint8 -} - -// AllSegments is used to select for all segments in MembersOpts. -const AllSegments = "_all" - -// MembersOpts is used for querying member information. -type MembersOpts struct { - // WAN is whether to show members from the WAN. - WAN bool - - // Segment is the LAN segment to show members for. Setting this to the - // AllSegments value above will show members in all segments. - Segment string -} - -// AgentServiceRegistration is used to register a new service -type AgentServiceRegistration struct { - Kind ServiceKind `json:",omitempty"` - ID string `json:",omitempty"` - Name string `json:",omitempty"` - Tags []string `json:",omitempty"` - Port int `json:",omitempty"` - Address string `json:",omitempty"` - EnableTagOverride bool `json:",omitempty"` - Meta map[string]string `json:",omitempty"` - Check *AgentServiceCheck - Checks AgentServiceChecks - ProxyDestination string `json:",omitempty"` - Connect *AgentServiceConnect `json:",omitempty"` -} - -// AgentCheckRegistration is used to register a new check -type AgentCheckRegistration struct { - ID string `json:",omitempty"` - Name string `json:",omitempty"` - Notes string `json:",omitempty"` - ServiceID string `json:",omitempty"` - AgentServiceCheck -} - -// AgentServiceCheck is used to define a node or service level check -type AgentServiceCheck struct { - CheckID string `json:",omitempty"` - Name string `json:",omitempty"` - Args []string `json:"ScriptArgs,omitempty"` - DockerContainerID string `json:",omitempty"` - Shell string `json:",omitempty"` // Only supported for Docker. - Interval string `json:",omitempty"` - Timeout string `json:",omitempty"` - TTL string `json:",omitempty"` - HTTP string `json:",omitempty"` - Header map[string][]string `json:",omitempty"` - Method string `json:",omitempty"` - TCP string `json:",omitempty"` - Status string `json:",omitempty"` - Notes string `json:",omitempty"` - TLSSkipVerify bool `json:",omitempty"` - GRPC string `json:",omitempty"` - GRPCUseTLS bool `json:",omitempty"` - - // In Consul 0.7 and later, checks that are associated with a service - // may also contain this optional DeregisterCriticalServiceAfter field, - // which is a timeout in the same Go time format as Interval and TTL. 
If - // a check is in the critical state for more than this configured value, - // then its associated service (and all of its associated checks) will - // automatically be deregistered. - DeregisterCriticalServiceAfter string `json:",omitempty"` -} -type AgentServiceChecks []*AgentServiceCheck - -// AgentToken is used when updating ACL tokens for an agent. -type AgentToken struct { - Token string -} - -// Metrics info is used to store different types of metric values from the agent. -type MetricsInfo struct { - Timestamp string - Gauges []GaugeValue - Points []PointValue - Counters []SampledValue - Samples []SampledValue -} - -// GaugeValue stores one value that is updated as time goes on, such as -// the amount of memory allocated. -type GaugeValue struct { - Name string - Value float32 - Labels map[string]string -} - -// PointValue holds a series of points for a metric. -type PointValue struct { - Name string - Points []float32 -} - -// SampledValue stores info about a metric that is incremented over time, -// such as the number of requests to an HTTP endpoint. -type SampledValue struct { - Name string - Count int - Sum float64 - Min float64 - Max float64 - Mean float64 - Stddev float64 - Labels map[string]string -} - -// AgentAuthorizeParams are the request parameters for authorizing a request. -type AgentAuthorizeParams struct { - Target string - ClientCertURI string - ClientCertSerial string -} - -// AgentAuthorize is the response structure for Connect authorization. -type AgentAuthorize struct { - Authorized bool - Reason string -} - -// ConnectProxyConfig is the response structure for agent-local proxy -// configuration. -type ConnectProxyConfig struct { - ProxyServiceID string - TargetServiceID string - TargetServiceName string - ContentHash string - ExecMode ProxyExecMode - Command []string - Config map[string]interface{} -} - -// Agent can be used to query the Agent endpoints -type Agent struct { - c *Client - - // cache the node name - nodeName string -} - -// Agent returns a handle to the agent endpoints -func (c *Client) Agent() *Agent { - return &Agent{c: c} -} - -// Self is used to query the agent we are speaking to for -// information about itself -func (a *Agent) Self() (map[string]map[string]interface{}, error) { - r := a.c.newRequest("GET", "/v1/agent/self") - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var out map[string]map[string]interface{} - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - return out, nil -} - -// Metrics is used to query the agent we are speaking to for -// its current internal metric data -func (a *Agent) Metrics() (*MetricsInfo, error) { - r := a.c.newRequest("GET", "/v1/agent/metrics") - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var out *MetricsInfo - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - return out, nil -} - -// Reload triggers a configuration reload for the agent we are connected to. 
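The registration types above — `AgentServiceRegistration`, `AgentServiceCheck`, and its `DeregisterCriticalServiceAfter` timeout — combine as in the following sketch. The service name, port, and timings are illustrative assumptions, not values this repository registers:

```go
package main

import "github.com/hashicorp/consul/api"

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		panic(err)
	}

	// Hypothetical service with a TTL check; if the check stays
	// critical for 90s, the agent deregisters the service.
	reg := &api.AgentServiceRegistration{
		ID:   "sql-runner-1",
		Name: "sql-runner",
		Port: 8080,
		Check: &api.AgentServiceCheck{
			TTL:                            "15s",
			DeregisterCriticalServiceAfter: "90s",
		},
	}
	if err := client.Agent().ServiceRegister(reg); err != nil {
		panic(err)
	}
}
```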
-func (a *Agent) Reload() error { - r := a.c.newRequest("PUT", "/v1/agent/reload") - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// NodeName is used to get the node name of the agent -func (a *Agent) NodeName() (string, error) { - if a.nodeName != "" { - return a.nodeName, nil - } - info, err := a.Self() - if err != nil { - return "", err - } - name := info["Config"]["NodeName"].(string) - a.nodeName = name - return name, nil -} - -// Checks returns the locally registered checks -func (a *Agent) Checks() (map[string]*AgentCheck, error) { - r := a.c.newRequest("GET", "/v1/agent/checks") - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var out map[string]*AgentCheck - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - return out, nil -} - -// Services returns the locally registered services -func (a *Agent) Services() (map[string]*AgentService, error) { - r := a.c.newRequest("GET", "/v1/agent/services") - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var out map[string]*AgentService - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - - return out, nil -} - -// Members returns the known gossip members. The WAN -// flag can be used to query a server for WAN members. -func (a *Agent) Members(wan bool) ([]*AgentMember, error) { - r := a.c.newRequest("GET", "/v1/agent/members") - if wan { - r.params.Set("wan", "1") - } - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var out []*AgentMember - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - return out, nil -} - -// MembersOpts returns the known gossip members and can be passed -// additional options for WAN/segment filtering. -func (a *Agent) MembersOpts(opts MembersOpts) ([]*AgentMember, error) { - r := a.c.newRequest("GET", "/v1/agent/members") - r.params.Set("segment", opts.Segment) - if opts.WAN { - r.params.Set("wan", "1") - } - - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var out []*AgentMember - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - return out, nil -} - -// ServiceRegister is used to register a new service with -// the local agent -func (a *Agent) ServiceRegister(service *AgentServiceRegistration) error { - r := a.c.newRequest("PUT", "/v1/agent/service/register") - r.obj = service - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// ServiceDeregister is used to deregister a service with -// the local agent -func (a *Agent) ServiceDeregister(serviceID string) error { - r := a.c.newRequest("PUT", "/v1/agent/service/deregister/"+serviceID) - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// PassTTL is used to set a TTL check to the passing state. -// -// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL(). -// The client interface will be removed in 0.8 or changed to use -// UpdateTTL()'s endpoint and the server endpoints will be removed in 0.9. -func (a *Agent) PassTTL(checkID, note string) error { - return a.updateTTL(checkID, note, "pass") -} - -// WarnTTL is used to set a TTL check to the warning state. 
-// -// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL(). -// The client interface will be removed in 0.8 or changed to use -// UpdateTTL()'s endpoint and the server endpoints will be removed in 0.9. -func (a *Agent) WarnTTL(checkID, note string) error { - return a.updateTTL(checkID, note, "warn") -} - -// FailTTL is used to set a TTL check to the failing state. -// -// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL(). -// The client interface will be removed in 0.8 or changed to use -// UpdateTTL()'s endpoint and the server endpoints will be removed in 0.9. -func (a *Agent) FailTTL(checkID, note string) error { - return a.updateTTL(checkID, note, "fail") -} - -// updateTTL is used to update the TTL of a check. This is the internal -// method that uses the old API that's present in Consul versions prior to -// 0.6.4. Since Consul didn't have an analogous "update" API before it seemed -// ok to break this (former) UpdateTTL in favor of the new UpdateTTL below, -// but keep the old Pass/Warn/Fail methods using the old API under the hood. -// -// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL(). -// The client interface will be removed in 0.8 and the server endpoints will -// be removed in 0.9. -func (a *Agent) updateTTL(checkID, note, status string) error { - switch status { - case "pass": - case "warn": - case "fail": - default: - return fmt.Errorf("Invalid status: %s", status) - } - endpoint := fmt.Sprintf("/v1/agent/check/%s/%s", status, checkID) - r := a.c.newRequest("PUT", endpoint) - r.params.Set("note", note) - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// checkUpdate is the payload for a PUT for a check update. -type checkUpdate struct { - // Status is one of the api.Health* states: HealthPassing - // ("passing"), HealthWarning ("warning"), or HealthCritical - // ("critical"). - Status string - - // Output is the information to post to the UI for operators as the - // output of the process that decided to hit the TTL check. This is - // different from the note field that's associated with the check - // itself. - Output string -} - -// UpdateTTL is used to update the TTL of a check. This uses the newer API -// that was introduced in Consul 0.6.4 and later. We translate the old status -// strings for compatibility (though a newer version of Consul will still be -// required to use this API). 
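UpdateTTL, described above, is the supported way to heartbeat a TTL check. A hedged sketch of such a loop; the check ID follows Consul's `service:<id>` convention for checks created from a service registration and is hypothetical here:

```go
package main

import (
	"fmt"
	"time"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		panic(err)
	}
	agent := client.Agent()

	// Heartbeat the (hypothetical) check every 10s, translating the
	// state through the api.Health* constants as documented above.
	for range time.Tick(10 * time.Second) {
		if err := agent.UpdateTTL("service:sql-runner-1", "heartbeat ok", api.HealthPassing); err != nil {
			// Log and keep heartbeating rather than exit.
			fmt.Println("ttl update failed:", err)
		}
	}
}
```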
-func (a *Agent) UpdateTTL(checkID, output, status string) error { - switch status { - case "pass", HealthPassing: - status = HealthPassing - case "warn", HealthWarning: - status = HealthWarning - case "fail", HealthCritical: - status = HealthCritical - default: - return fmt.Errorf("Invalid status: %s", status) - } - - endpoint := fmt.Sprintf("/v1/agent/check/update/%s", checkID) - r := a.c.newRequest("PUT", endpoint) - r.obj = &checkUpdate{ - Status: status, - Output: output, - } - - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// CheckRegister is used to register a new check with -// the local agent -func (a *Agent) CheckRegister(check *AgentCheckRegistration) error { - r := a.c.newRequest("PUT", "/v1/agent/check/register") - r.obj = check - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// CheckDeregister is used to deregister a check with -// the local agent -func (a *Agent) CheckDeregister(checkID string) error { - r := a.c.newRequest("PUT", "/v1/agent/check/deregister/"+checkID) - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// Join is used to instruct the agent to attempt a join to -// another cluster member -func (a *Agent) Join(addr string, wan bool) error { - r := a.c.newRequest("PUT", "/v1/agent/join/"+addr) - if wan { - r.params.Set("wan", "1") - } - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// Leave is used to have the agent gracefully leave the cluster and shutdown -func (a *Agent) Leave() error { - r := a.c.newRequest("PUT", "/v1/agent/leave") - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// ForceLeave is used to have the agent eject a failed node -func (a *Agent) ForceLeave(node string) error { - r := a.c.newRequest("PUT", "/v1/agent/force-leave/"+node) - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// ConnectAuthorize is used to authorize an incoming connection -// to a natively integrated Connect service. -func (a *Agent) ConnectAuthorize(auth *AgentAuthorizeParams) (*AgentAuthorize, error) { - r := a.c.newRequest("POST", "/v1/agent/connect/authorize") - r.obj = auth - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var out AgentAuthorize - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - return &out, nil -} - -// ConnectCARoots returns the list of roots. -func (a *Agent) ConnectCARoots(q *QueryOptions) (*CARootList, *QueryMeta, error) { - r := a.c.newRequest("GET", "/v1/agent/connect/ca/roots") - r.setQueryOptions(q) - rtt, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var out CARootList - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return &out, qm, nil -} - -// ConnectCALeaf gets the leaf certificate for the given service ID. 
-func (a *Agent) ConnectCALeaf(serviceID string, q *QueryOptions) (*LeafCert, *QueryMeta, error) { - r := a.c.newRequest("GET", "/v1/agent/connect/ca/leaf/"+serviceID) - r.setQueryOptions(q) - rtt, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var out LeafCert - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return &out, qm, nil -} - -// ConnectProxyConfig gets the configuration for a local managed proxy instance. -// -// Note that this uses an unconventional blocking mechanism since it's -// agent-local state. That means there is no persistent raft index so we block -// based on object hash instead. -func (a *Agent) ConnectProxyConfig(proxyServiceID string, q *QueryOptions) (*ConnectProxyConfig, *QueryMeta, error) { - r := a.c.newRequest("GET", "/v1/agent/connect/proxy/"+proxyServiceID) - r.setQueryOptions(q) - rtt, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var out ConnectProxyConfig - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return &out, qm, nil -} - -// EnableServiceMaintenance toggles service maintenance mode on -// for the given service ID. -func (a *Agent) EnableServiceMaintenance(serviceID, reason string) error { - r := a.c.newRequest("PUT", "/v1/agent/service/maintenance/"+serviceID) - r.params.Set("enable", "true") - r.params.Set("reason", reason) - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// DisableServiceMaintenance toggles service maintenance mode off -// for the given service ID. -func (a *Agent) DisableServiceMaintenance(serviceID string) error { - r := a.c.newRequest("PUT", "/v1/agent/service/maintenance/"+serviceID) - r.params.Set("enable", "false") - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// EnableNodeMaintenance toggles node maintenance mode on for the -// agent we are connected to. -func (a *Agent) EnableNodeMaintenance(reason string) error { - r := a.c.newRequest("PUT", "/v1/agent/maintenance") - r.params.Set("enable", "true") - r.params.Set("reason", reason) - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// DisableNodeMaintenance toggles node maintenance mode off for the -// agent we are connected to. -func (a *Agent) DisableNodeMaintenance() error { - r := a.c.newRequest("PUT", "/v1/agent/maintenance") - r.params.Set("enable", "false") - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// Monitor returns a channel which will receive streaming logs from the agent -// Providing a non-nil stopCh can be used to close the connection and stop the -// log stream. An empty string will be sent down the given channel when there's -// nothing left to stream, after which the caller should close the stopCh. 
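Monitor's contract above — an empty string signals end-of-stream, after which the caller closes stopCh — looks like this in use; the "info" log level is an arbitrary choice:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		panic(err)
	}

	stopCh := make(chan struct{})
	logCh, err := client.Agent().Monitor("info", stopCh, nil)
	if err != nil {
		panic(err)
	}

	for line := range logCh {
		if line == "" {
			// Per the contract above: an empty string means the
			// stream is done, so the caller closes stopCh.
			close(stopCh)
			break
		}
		fmt.Println(line)
	}
}
```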
-func (a *Agent) Monitor(loglevel string, stopCh <-chan struct{}, q *QueryOptions) (chan string, error) { - r := a.c.newRequest("GET", "/v1/agent/monitor") - r.setQueryOptions(q) - if loglevel != "" { - r.params.Add("loglevel", loglevel) - } - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return nil, err - } - - logCh := make(chan string, 64) - go func() { - defer resp.Body.Close() - - scanner := bufio.NewScanner(resp.Body) - for { - select { - case <-stopCh: - close(logCh) - return - default: - } - if scanner.Scan() { - // An empty string signals to the caller that - // the scan is done, so make sure we only emit - // that when the scanner says it's done, not if - // we happen to ingest an empty line. - if text := scanner.Text(); text != "" { - logCh <- text - } else { - logCh <- " " - } - } else { - logCh <- "" - } - } - }() - - return logCh, nil -} - -// UpdateACLToken updates the agent's "acl_token". See updateToken for more -// details. -func (a *Agent) UpdateACLToken(token string, q *WriteOptions) (*WriteMeta, error) { - return a.updateToken("acl_token", token, q) -} - -// UpdateACLAgentToken updates the agent's "acl_agent_token". See updateToken -// for more details. -func (a *Agent) UpdateACLAgentToken(token string, q *WriteOptions) (*WriteMeta, error) { - return a.updateToken("acl_agent_token", token, q) -} - -// UpdateACLAgentMasterToken updates the agent's "acl_agent_master_token". See -// updateToken for more details. -func (a *Agent) UpdateACLAgentMasterToken(token string, q *WriteOptions) (*WriteMeta, error) { - return a.updateToken("acl_agent_master_token", token, q) -} - -// UpdateACLReplicationToken updates the agent's "acl_replication_token". See -// updateToken for more details. -func (a *Agent) UpdateACLReplicationToken(token string, q *WriteOptions) (*WriteMeta, error) { - return a.updateToken("acl_replication_token", token, q) -} - -// updateToken can be used to update an agent's ACL token after the agent has -// started. The tokens are not persisted, so will need to be updated again if -// the agent is restarted. -func (a *Agent) updateToken(target, token string, q *WriteOptions) (*WriteMeta, error) { - r := a.c.newRequest("PUT", fmt.Sprintf("/v1/agent/token/%s", target)) - r.setWriteOptions(q) - r.obj = &AgentToken{Token: token} - rtt, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return nil, err - } - resp.Body.Close() - - wm := &WriteMeta{RequestTime: rtt} - return wm, nil -} diff --git a/vendor/github.com/hashicorp/consul/api/api.go b/vendor/github.com/hashicorp/consul/api/api.go deleted file mode 100644 index 6492383..0000000 --- a/vendor/github.com/hashicorp/consul/api/api.go +++ /dev/null @@ -1,839 +0,0 @@ -package api - -import ( - "bytes" - "context" - "crypto/tls" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "log" - "net" - "net/http" - "net/url" - "os" - "strconv" - "strings" - "time" - - "github.com/hashicorp/go-cleanhttp" - "github.com/hashicorp/go-rootcerts" -) - -const ( - // HTTPAddrEnvName defines an environment variable name which sets - // the HTTP address if there is no -http-addr specified. - HTTPAddrEnvName = "CONSUL_HTTP_ADDR" - - // HTTPTokenEnvName defines an environment variable name which sets - // the HTTP token. - HTTPTokenEnvName = "CONSUL_HTTP_TOKEN" - - // HTTPAuthEnvName defines an environment variable name which sets - // the HTTP authentication header. - HTTPAuthEnvName = "CONSUL_HTTP_AUTH" - - // HTTPSSLEnvName defines an environment variable name which sets - // whether or not to use HTTPS. 
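Following the contract documented above (an empty string marks the end of the stream, after which the caller closes stopCh), a hedged sketch of consuming Monitor:

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	stopCh := make(chan struct{})
	logCh, err := client.Agent().Monitor("info", stopCh, nil)
	if err != nil {
		log.Fatal(err)
	}
	for line := range logCh {
		if line == "" {
			// Nothing left to stream: close stopCh so the reading
			// goroutine shuts the channel down, then stop.
			close(stopCh)
			break
		}
		fmt.Println(line)
	}
}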
- HTTPSSLEnvName = "CONSUL_HTTP_SSL" - - // HTTPCAFile defines an environment variable name which sets the - // CA file to use for talking to Consul over TLS. - HTTPCAFile = "CONSUL_CACERT" - - // HTTPCAPath defines an environment variable name which sets the - // path to a directory of CA certs to use for talking to Consul over TLS. - HTTPCAPath = "CONSUL_CAPATH" - - // HTTPClientCert defines an environment variable name which sets the - // client cert file to use for talking to Consul over TLS. - HTTPClientCert = "CONSUL_CLIENT_CERT" - - // HTTPClientKey defines an environment variable name which sets the - // client key file to use for talking to Consul over TLS. - HTTPClientKey = "CONSUL_CLIENT_KEY" - - // HTTPTLSServerName defines an environment variable name which sets the - // server name to use as the SNI host when connecting via TLS - HTTPTLSServerName = "CONSUL_TLS_SERVER_NAME" - - // HTTPSSLVerifyEnvName defines an environment variable name which sets - // whether or not to disable certificate checking. - HTTPSSLVerifyEnvName = "CONSUL_HTTP_SSL_VERIFY" -) - -// QueryOptions are used to parameterize a query -type QueryOptions struct { - // Providing a datacenter overwrites the DC provided - // by the Config - Datacenter string - - // AllowStale allows any Consul server (non-leader) to service - // a read. This allows for lower latency and higher throughput - AllowStale bool - - // RequireConsistent forces the read to be fully consistent. - // This is more expensive but prevents ever performing a stale - // read. - RequireConsistent bool - - // WaitIndex is used to enable a blocking query. Waits - // until the timeout or the next index is reached - WaitIndex uint64 - - // WaitHash is used by some endpoints instead of WaitIndex to perform blocking - // on state based on a hash of the response rather than a monotonic index. - // This is required when the state being blocked on is not stored in Raft, for - // example agent-local proxy configuration. - WaitHash string - - // WaitTime is used to bound the duration of a wait. - // Defaults to that of the Config, but can be overridden. - WaitTime time.Duration - - // Token is used to provide a per-request ACL token - // which overrides the agent's default token. - Token string - - // Near is used to provide a node name that will sort the results - // in ascending order based on the estimated round trip time from - // that node. Setting this to "_agent" will use the agent's node - // for the sort. - Near string - - // NodeMeta is used to filter results by nodes with the given - // metadata key/value pairs. Currently, only one key/value pair can - // be provided for filtering. - NodeMeta map[string]string - - // RelayFactor is used in keyring operations to cause responses to be - // relayed back to the sender through N other random nodes. Must be - // a value from 0 to 5 (inclusive). - RelayFactor uint8 - - // Connect filters prepared query execution to only include Connect-capable - // services. This currently affects prepared query execution. - Connect bool - - // ctx is an optional context pass through to the underlying HTTP - // request layer. Use Context() and WithContext() to manage this. 
- ctx context.Context -} - -func (o *QueryOptions) Context() context.Context { - if o != nil && o.ctx != nil { - return o.ctx - } - return context.Background() -} - -func (o *QueryOptions) WithContext(ctx context.Context) *QueryOptions { - o2 := new(QueryOptions) - if o != nil { - *o2 = *o - } - o2.ctx = ctx - return o2 -} - -// WriteOptions are used to parameterize a write -type WriteOptions struct { - // Providing a datacenter overwrites the DC provided - // by the Config - Datacenter string - - // Token is used to provide a per-request ACL token - // which overrides the agent's default token. - Token string - - // RelayFactor is used in keyring operations to cause responses to be - // relayed back to the sender through N other random nodes. Must be - // a value from 0 to 5 (inclusive). - RelayFactor uint8 - - // ctx is an optional context pass through to the underlying HTTP - // request layer. Use Context() and WithContext() to manage this. - ctx context.Context -} - -func (o *WriteOptions) Context() context.Context { - if o != nil && o.ctx != nil { - return o.ctx - } - return context.Background() -} - -func (o *WriteOptions) WithContext(ctx context.Context) *WriteOptions { - o2 := new(WriteOptions) - if o != nil { - *o2 = *o - } - o2.ctx = ctx - return o2 -} - -// QueryMeta is used to return meta data about a query -type QueryMeta struct { - // LastIndex. This can be used as a WaitIndex to perform - // a blocking query - LastIndex uint64 - - // LastContentHash. This can be used as a WaitHash to perform a blocking query - // for endpoints that support hash-based blocking. Endpoints that do not - // support it will return an empty hash. - LastContentHash string - - // Time of last contact from the leader for the - // server servicing the request - LastContact time.Duration - - // Is there a known leader - KnownLeader bool - - // How long did the request take - RequestTime time.Duration - - // Is address translation enabled for HTTP responses on this agent - AddressTranslationEnabled bool -} - -// WriteMeta is used to return meta data about a write -type WriteMeta struct { - // How long did the request take - RequestTime time.Duration -} - -// HttpBasicAuth is used to authenticate http client with HTTP Basic Authentication -type HttpBasicAuth struct { - // Username to use for HTTP Basic Authentication - Username string - - // Password to use for HTTP Basic Authentication - Password string -} - -// Config is used to configure the creation of a client -type Config struct { - // Address is the address of the Consul server - Address string - - // Scheme is the URI scheme for the Consul server - Scheme string - - // Datacenter to use. If not provided, the default agent datacenter is used. - Datacenter string - - // Transport is the Transport to use for the http client. - Transport *http.Transport - - // HttpClient is the client to use. Default will be - // used if not provided. - HttpClient *http.Client - - // HttpAuth is the auth info to use for http access. - HttpAuth *HttpBasicAuth - - // WaitTime limits how long a Watch will block. If not provided, - // the agent default values will be used. - WaitTime time.Duration - - // Token is used to provide a per-request ACL token - // which overrides the agent's default token. - Token string - - TLSConfig TLSConfig -} - -// TLSConfig is used to generate a TLSClientConfig that's useful for talking to -// Consul using TLS. -type TLSConfig struct { - // Address is the optional address of the Consul server. 
The port, if any,
- // will be removed from here and this will be set to the ServerName of the
- // resulting config.
- Address string
-
- // CAFile is the optional path to the CA certificate used for Consul
- // communication, defaults to the system bundle if not specified.
- CAFile string
-
- // CAPath is the optional path to a directory of CA certificates to use for
- // Consul communication, defaults to the system bundle if not specified.
- CAPath string
-
- // CertFile is the optional path to the certificate for Consul
- // communication. If this is set then you need to also set KeyFile.
- CertFile string
-
- // KeyFile is the optional path to the private key for Consul communication.
- // If this is set then you need to also set CertFile.
- KeyFile string
-
- // InsecureSkipVerify if set to true will disable TLS host verification.
- InsecureSkipVerify bool
-}
-
-// DefaultConfig returns a default configuration for the client. By default this
-// will pool and reuse idle connections to Consul. If you have a long-lived
-// client object, this is the desired behavior and should make the most efficient
-// use of the connections to Consul. If you don't reuse a client object, which
-// is not recommended, then you may notice idle connections building up over
-// time. To avoid this, use DefaultNonPooledConfig() instead.
-func DefaultConfig() *Config {
- return defaultConfig(cleanhttp.DefaultPooledTransport)
-}
-
-// DefaultNonPooledConfig returns a default configuration for the client which
-// does not pool connections. This isn't a recommended configuration because it
-// will reconnect to Consul on every request, but this is useful to avoid the
-// accumulation of idle connections if you make many client objects during the
-// lifetime of your application.
-func DefaultNonPooledConfig() *Config {
- return defaultConfig(cleanhttp.DefaultTransport)
-}
-
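The TLSConfig above is typically filled in on a Config before calling NewClient (defined further down in this file); a brief sketch, with a hypothetical address and certificate paths:

package main

import (
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	cfg := api.DefaultConfig()
	cfg.Address = "consul.internal:8501" // hypothetical HTTPS endpoint
	cfg.Scheme = "https"
	cfg.TLSConfig = api.TLSConfig{
		CAFile:   "/etc/consul/ca.pem", // hypothetical paths
		CertFile: "/etc/consul/client.pem",
		KeyFile:  "/etc/consul/client-key.pem",
	}
	if _, err := api.NewClient(cfg); err != nil {
		log.Fatal(err)
	}
}

-// defaultConfig returns the default configuration for the client, using the
-// given function to make the transport.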
-func defaultConfig(transportFn func() *http.Transport) *Config { - config := &Config{ - Address: "127.0.0.1:8500", - Scheme: "http", - Transport: transportFn(), - } - - if addr := os.Getenv(HTTPAddrEnvName); addr != "" { - config.Address = addr - } - - if token := os.Getenv(HTTPTokenEnvName); token != "" { - config.Token = token - } - - if auth := os.Getenv(HTTPAuthEnvName); auth != "" { - var username, password string - if strings.Contains(auth, ":") { - split := strings.SplitN(auth, ":", 2) - username = split[0] - password = split[1] - } else { - username = auth - } - - config.HttpAuth = &HttpBasicAuth{ - Username: username, - Password: password, - } - } - - if ssl := os.Getenv(HTTPSSLEnvName); ssl != "" { - enabled, err := strconv.ParseBool(ssl) - if err != nil { - log.Printf("[WARN] client: could not parse %s: %s", HTTPSSLEnvName, err) - } - - if enabled { - config.Scheme = "https" - } - } - - if v := os.Getenv(HTTPTLSServerName); v != "" { - config.TLSConfig.Address = v - } - if v := os.Getenv(HTTPCAFile); v != "" { - config.TLSConfig.CAFile = v - } - if v := os.Getenv(HTTPCAPath); v != "" { - config.TLSConfig.CAPath = v - } - if v := os.Getenv(HTTPClientCert); v != "" { - config.TLSConfig.CertFile = v - } - if v := os.Getenv(HTTPClientKey); v != "" { - config.TLSConfig.KeyFile = v - } - if v := os.Getenv(HTTPSSLVerifyEnvName); v != "" { - doVerify, err := strconv.ParseBool(v) - if err != nil { - log.Printf("[WARN] client: could not parse %s: %s", HTTPSSLVerifyEnvName, err) - } - if !doVerify { - config.TLSConfig.InsecureSkipVerify = true - } - } - - return config -} - -// TLSConfig is used to generate a TLSClientConfig that's useful for talking to -// Consul using TLS. -func SetupTLSConfig(tlsConfig *TLSConfig) (*tls.Config, error) { - tlsClientConfig := &tls.Config{ - InsecureSkipVerify: tlsConfig.InsecureSkipVerify, - } - - if tlsConfig.Address != "" { - server := tlsConfig.Address - hasPort := strings.LastIndex(server, ":") > strings.LastIndex(server, "]") - if hasPort { - var err error - server, _, err = net.SplitHostPort(server) - if err != nil { - return nil, err - } - } - tlsClientConfig.ServerName = server - } - - if tlsConfig.CertFile != "" && tlsConfig.KeyFile != "" { - tlsCert, err := tls.LoadX509KeyPair(tlsConfig.CertFile, tlsConfig.KeyFile) - if err != nil { - return nil, err - } - tlsClientConfig.Certificates = []tls.Certificate{tlsCert} - } - - if tlsConfig.CAFile != "" || tlsConfig.CAPath != "" { - rootConfig := &rootcerts.Config{ - CAFile: tlsConfig.CAFile, - CAPath: tlsConfig.CAPath, - } - if err := rootcerts.ConfigureTLS(tlsClientConfig, rootConfig); err != nil { - return nil, err - } - } - - return tlsClientConfig, nil -} - -func (c *Config) GenerateEnv() []string { - env := make([]string, 0, 10) - - env = append(env, - fmt.Sprintf("%s=%s", HTTPAddrEnvName, c.Address), - fmt.Sprintf("%s=%s", HTTPTokenEnvName, c.Token), - fmt.Sprintf("%s=%t", HTTPSSLEnvName, c.Scheme == "https"), - fmt.Sprintf("%s=%s", HTTPCAFile, c.TLSConfig.CAFile), - fmt.Sprintf("%s=%s", HTTPCAPath, c.TLSConfig.CAPath), - fmt.Sprintf("%s=%s", HTTPClientCert, c.TLSConfig.CertFile), - fmt.Sprintf("%s=%s", HTTPClientKey, c.TLSConfig.KeyFile), - fmt.Sprintf("%s=%s", HTTPTLSServerName, c.TLSConfig.Address), - fmt.Sprintf("%s=%t", HTTPSSLVerifyEnvName, !c.TLSConfig.InsecureSkipVerify)) - - if c.HttpAuth != nil { - env = append(env, fmt.Sprintf("%s=%s:%s", HTTPAuthEnvName, c.HttpAuth.Username, c.HttpAuth.Password)) - } else { - env = append(env, fmt.Sprintf("%s=", HTTPAuthEnvName)) - } - - return env 
-} - -// Client provides a client to the Consul API -type Client struct { - config Config -} - -// NewClient returns a new client -func NewClient(config *Config) (*Client, error) { - // bootstrap the config - defConfig := DefaultConfig() - - if len(config.Address) == 0 { - config.Address = defConfig.Address - } - - if len(config.Scheme) == 0 { - config.Scheme = defConfig.Scheme - } - - if config.Transport == nil { - config.Transport = defConfig.Transport - } - - if config.TLSConfig.Address == "" { - config.TLSConfig.Address = defConfig.TLSConfig.Address - } - - if config.TLSConfig.CAFile == "" { - config.TLSConfig.CAFile = defConfig.TLSConfig.CAFile - } - - if config.TLSConfig.CAPath == "" { - config.TLSConfig.CAPath = defConfig.TLSConfig.CAPath - } - - if config.TLSConfig.CertFile == "" { - config.TLSConfig.CertFile = defConfig.TLSConfig.CertFile - } - - if config.TLSConfig.KeyFile == "" { - config.TLSConfig.KeyFile = defConfig.TLSConfig.KeyFile - } - - if !config.TLSConfig.InsecureSkipVerify { - config.TLSConfig.InsecureSkipVerify = defConfig.TLSConfig.InsecureSkipVerify - } - - if config.HttpClient == nil { - var err error - config.HttpClient, err = NewHttpClient(config.Transport, config.TLSConfig) - if err != nil { - return nil, err - } - } - - parts := strings.SplitN(config.Address, "://", 2) - if len(parts) == 2 { - switch parts[0] { - case "http": - config.Scheme = "http" - case "https": - config.Scheme = "https" - case "unix": - trans := cleanhttp.DefaultTransport() - trans.DialContext = func(_ context.Context, _, _ string) (net.Conn, error) { - return net.Dial("unix", parts[1]) - } - config.HttpClient = &http.Client{ - Transport: trans, - } - default: - return nil, fmt.Errorf("Unknown protocol scheme: %s", parts[0]) - } - config.Address = parts[1] - } - - if config.Token == "" { - config.Token = defConfig.Token - } - - return &Client{config: *config}, nil -} - -// NewHttpClient returns an http client configured with the given Transport and TLS -// config. -func NewHttpClient(transport *http.Transport, tlsConf TLSConfig) (*http.Client, error) { - client := &http.Client{ - Transport: transport, - } - - // TODO (slackpad) - Once we get some run time on the HTTP/2 support we - // should turn it on by default if TLS is enabled. We would basically - // just need to call http2.ConfigureTransport(transport) here. We also - // don't want to introduce another external dependency on - // golang.org/x/net/http2 at this time. For a complete recipe for how - // to enable HTTP/2 support on a transport suitable for the API client - // library see agent/http_test.go:TestHTTPServer_H2. 
- - if transport.TLSClientConfig == nil { - tlsClientConfig, err := SetupTLSConfig(&tlsConf) - - if err != nil { - return nil, err - } - - transport.TLSClientConfig = tlsClientConfig - } - - return client, nil -} - -// request is used to help build up a request -type request struct { - config *Config - method string - url *url.URL - params url.Values - body io.Reader - header http.Header - obj interface{} - ctx context.Context -} - -// setQueryOptions is used to annotate the request with -// additional query options -func (r *request) setQueryOptions(q *QueryOptions) { - if q == nil { - return - } - if q.Datacenter != "" { - r.params.Set("dc", q.Datacenter) - } - if q.AllowStale { - r.params.Set("stale", "") - } - if q.RequireConsistent { - r.params.Set("consistent", "") - } - if q.WaitIndex != 0 { - r.params.Set("index", strconv.FormatUint(q.WaitIndex, 10)) - } - if q.WaitTime != 0 { - r.params.Set("wait", durToMsec(q.WaitTime)) - } - if q.WaitHash != "" { - r.params.Set("hash", q.WaitHash) - } - if q.Token != "" { - r.header.Set("X-Consul-Token", q.Token) - } - if q.Near != "" { - r.params.Set("near", q.Near) - } - if len(q.NodeMeta) > 0 { - for key, value := range q.NodeMeta { - r.params.Add("node-meta", key+":"+value) - } - } - if q.RelayFactor != 0 { - r.params.Set("relay-factor", strconv.Itoa(int(q.RelayFactor))) - } - if q.Connect { - r.params.Set("connect", "true") - } - r.ctx = q.ctx -} - -// durToMsec converts a duration to a millisecond specified string. If the -// user selected a positive value that rounds to 0 ms, then we will use 1 ms -// so they get a short delay, otherwise Consul will translate the 0 ms into -// a huge default delay. -func durToMsec(dur time.Duration) string { - ms := dur / time.Millisecond - if dur > 0 && ms == 0 { - ms = 1 - } - return fmt.Sprintf("%dms", ms) -} - -// serverError is a string we look for to detect 500 errors. -const serverError = "Unexpected response code: 500" - -// IsRetryableError returns true for 500 errors from the Consul servers, and -// network connection errors. These are usually retryable at a later time. -// This applies to reads but NOT to writes. This may return true for errors -// on writes that may have still gone through, so do not use this to retry -// any write operations. -func IsRetryableError(err error) bool { - if err == nil { - return false - } - - if _, ok := err.(net.Error); ok { - return true - } - - // TODO (slackpad) - Make a real error type here instead of using - // a string check. 
- return strings.Contains(err.Error(), serverError) -} - -// setWriteOptions is used to annotate the request with -// additional write options -func (r *request) setWriteOptions(q *WriteOptions) { - if q == nil { - return - } - if q.Datacenter != "" { - r.params.Set("dc", q.Datacenter) - } - if q.Token != "" { - r.header.Set("X-Consul-Token", q.Token) - } - if q.RelayFactor != 0 { - r.params.Set("relay-factor", strconv.Itoa(int(q.RelayFactor))) - } - r.ctx = q.ctx -} - -// toHTTP converts the request to an HTTP request -func (r *request) toHTTP() (*http.Request, error) { - // Encode the query parameters - r.url.RawQuery = r.params.Encode() - - // Check if we should encode the body - if r.body == nil && r.obj != nil { - b, err := encodeBody(r.obj) - if err != nil { - return nil, err - } - r.body = b - } - - // Create the HTTP request - req, err := http.NewRequest(r.method, r.url.RequestURI(), r.body) - if err != nil { - return nil, err - } - - req.URL.Host = r.url.Host - req.URL.Scheme = r.url.Scheme - req.Host = r.url.Host - req.Header = r.header - - // Setup auth - if r.config.HttpAuth != nil { - req.SetBasicAuth(r.config.HttpAuth.Username, r.config.HttpAuth.Password) - } - if r.ctx != nil { - return req.WithContext(r.ctx), nil - } - - return req, nil -} - -// newRequest is used to create a new request -func (c *Client) newRequest(method, path string) *request { - r := &request{ - config: &c.config, - method: method, - url: &url.URL{ - Scheme: c.config.Scheme, - Host: c.config.Address, - Path: path, - }, - params: make(map[string][]string), - header: make(http.Header), - } - if c.config.Datacenter != "" { - r.params.Set("dc", c.config.Datacenter) - } - if c.config.WaitTime != 0 { - r.params.Set("wait", durToMsec(r.config.WaitTime)) - } - if c.config.Token != "" { - r.header.Set("X-Consul-Token", r.config.Token) - } - return r -} - -// doRequest runs a request with our client -func (c *Client) doRequest(r *request) (time.Duration, *http.Response, error) { - req, err := r.toHTTP() - if err != nil { - return 0, nil, err - } - start := time.Now() - resp, err := c.config.HttpClient.Do(req) - diff := time.Since(start) - return diff, resp, err -} - -// Query is used to do a GET request against an endpoint -// and deserialize the response into an interface using -// standard Consul conventions. -func (c *Client) query(endpoint string, out interface{}, q *QueryOptions) (*QueryMeta, error) { - r := c.newRequest("GET", endpoint) - r.setQueryOptions(q) - rtt, resp, err := requireOK(c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - if err := decodeBody(resp, out); err != nil { - return nil, err - } - return qm, nil -} - -// write is used to do a PUT request against an endpoint -// and serialize/deserialized using the standard Consul conventions. 
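The WaitIndex/WaitTime handling in setQueryOptions above is the client half of Consul's blocking queries: feed the last QueryMeta.LastIndex back in and the call long-polls until something changes. A hedged long-poll sketch against the KV endpoint removed later in this diff (the key name is hypothetical):

package main

import (
	"log"
	"time"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	var lastIndex uint64
	for {
		// Index 0 returns immediately; afterwards each call blocks
		// until the key changes or WaitTime elapses.
		pair, meta, err := client.KV().Get("app/config", &api.QueryOptions{
			WaitIndex: lastIndex,
			WaitTime:  30 * time.Second,
		})
		if err != nil {
			if api.IsRetryableError(err) {
				time.Sleep(time.Second)
				continue
			}
			log.Fatal(err)
		}
		lastIndex = meta.LastIndex
		if pair != nil {
			log.Printf("app/config = %s", pair.Value)
		}
	}
}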
-func (c *Client) write(endpoint string, in, out interface{}, q *WriteOptions) (*WriteMeta, error) { - r := c.newRequest("PUT", endpoint) - r.setWriteOptions(q) - r.obj = in - rtt, resp, err := requireOK(c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - wm := &WriteMeta{RequestTime: rtt} - if out != nil { - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - } else if _, err := ioutil.ReadAll(resp.Body); err != nil { - return nil, err - } - return wm, nil -} - -// parseQueryMeta is used to help parse query meta-data -func parseQueryMeta(resp *http.Response, q *QueryMeta) error { - header := resp.Header - - // Parse the X-Consul-Index (if it's set - hash based blocking queries don't - // set this) - if indexStr := header.Get("X-Consul-Index"); indexStr != "" { - index, err := strconv.ParseUint(indexStr, 10, 64) - if err != nil { - return fmt.Errorf("Failed to parse X-Consul-Index: %v", err) - } - q.LastIndex = index - } - q.LastContentHash = header.Get("X-Consul-ContentHash") - - // Parse the X-Consul-LastContact - last, err := strconv.ParseUint(header.Get("X-Consul-LastContact"), 10, 64) - if err != nil { - return fmt.Errorf("Failed to parse X-Consul-LastContact: %v", err) - } - q.LastContact = time.Duration(last) * time.Millisecond - - // Parse the X-Consul-KnownLeader - switch header.Get("X-Consul-KnownLeader") { - case "true": - q.KnownLeader = true - default: - q.KnownLeader = false - } - - // Parse X-Consul-Translate-Addresses - switch header.Get("X-Consul-Translate-Addresses") { - case "true": - q.AddressTranslationEnabled = true - default: - q.AddressTranslationEnabled = false - } - - return nil -} - -// decodeBody is used to JSON decode a body -func decodeBody(resp *http.Response, out interface{}) error { - dec := json.NewDecoder(resp.Body) - return dec.Decode(out) -} - -// encodeBody is used to encode a request body -func encodeBody(obj interface{}) (io.Reader, error) { - buf := bytes.NewBuffer(nil) - enc := json.NewEncoder(buf) - if err := enc.Encode(obj); err != nil { - return nil, err - } - return buf, nil -} - -// requireOK is used to wrap doRequest and check for a 200 -func requireOK(d time.Duration, resp *http.Response, e error) (time.Duration, *http.Response, error) { - if e != nil { - if resp != nil { - resp.Body.Close() - } - return d, nil, e - } - if resp.StatusCode != 200 { - var buf bytes.Buffer - io.Copy(&buf, resp.Body) - resp.Body.Close() - return d, nil, fmt.Errorf("Unexpected response code: %d (%s)", resp.StatusCode, buf.Bytes()) - } - return d, resp, nil -} diff --git a/vendor/github.com/hashicorp/consul/api/catalog.go b/vendor/github.com/hashicorp/consul/api/catalog.go deleted file mode 100644 index 1a6bbc3..0000000 --- a/vendor/github.com/hashicorp/consul/api/catalog.go +++ /dev/null @@ -1,213 +0,0 @@ -package api - -type Node struct { - ID string - Node string - Address string - Datacenter string - TaggedAddresses map[string]string - Meta map[string]string - CreateIndex uint64 - ModifyIndex uint64 -} - -type CatalogService struct { - ID string - Node string - Address string - Datacenter string - TaggedAddresses map[string]string - NodeMeta map[string]string - ServiceID string - ServiceName string - ServiceAddress string - ServiceTags []string - ServiceMeta map[string]string - ServicePort int - ServiceEnableTagOverride bool - CreateIndex uint64 - ModifyIndex uint64 -} - -type CatalogNode struct { - Node *Node - Services map[string]*AgentService -} - -type CatalogRegistration struct { - ID string - Node 
string - Address string - TaggedAddresses map[string]string - NodeMeta map[string]string - Datacenter string - Service *AgentService - Check *AgentCheck - SkipNodeUpdate bool -} - -type CatalogDeregistration struct { - Node string - Address string // Obsolete. - Datacenter string - ServiceID string - CheckID string -} - -// Catalog can be used to query the Catalog endpoints -type Catalog struct { - c *Client -} - -// Catalog returns a handle to the catalog endpoints -func (c *Client) Catalog() *Catalog { - return &Catalog{c} -} - -func (c *Catalog) Register(reg *CatalogRegistration, q *WriteOptions) (*WriteMeta, error) { - r := c.c.newRequest("PUT", "/v1/catalog/register") - r.setWriteOptions(q) - r.obj = reg - rtt, resp, err := requireOK(c.c.doRequest(r)) - if err != nil { - return nil, err - } - resp.Body.Close() - - wm := &WriteMeta{} - wm.RequestTime = rtt - - return wm, nil -} - -func (c *Catalog) Deregister(dereg *CatalogDeregistration, q *WriteOptions) (*WriteMeta, error) { - r := c.c.newRequest("PUT", "/v1/catalog/deregister") - r.setWriteOptions(q) - r.obj = dereg - rtt, resp, err := requireOK(c.c.doRequest(r)) - if err != nil { - return nil, err - } - resp.Body.Close() - - wm := &WriteMeta{} - wm.RequestTime = rtt - - return wm, nil -} - -// Datacenters is used to query for all the known datacenters -func (c *Catalog) Datacenters() ([]string, error) { - r := c.c.newRequest("GET", "/v1/catalog/datacenters") - _, resp, err := requireOK(c.c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var out []string - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - return out, nil -} - -// Nodes is used to query all the known nodes -func (c *Catalog) Nodes(q *QueryOptions) ([]*Node, *QueryMeta, error) { - r := c.c.newRequest("GET", "/v1/catalog/nodes") - r.setQueryOptions(q) - rtt, resp, err := requireOK(c.c.doRequest(r)) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var out []*Node - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return out, qm, nil -} - -// Services is used to query for all known services -func (c *Catalog) Services(q *QueryOptions) (map[string][]string, *QueryMeta, error) { - r := c.c.newRequest("GET", "/v1/catalog/services") - r.setQueryOptions(q) - rtt, resp, err := requireOK(c.c.doRequest(r)) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var out map[string][]string - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return out, qm, nil -} - -// Service is used to query catalog entries for a given service -func (c *Catalog) Service(service, tag string, q *QueryOptions) ([]*CatalogService, *QueryMeta, error) { - return c.service(service, tag, q, false) -} - -// Connect is used to query catalog entries for a given Connect-enabled service -func (c *Catalog) Connect(service, tag string, q *QueryOptions) ([]*CatalogService, *QueryMeta, error) { - return c.service(service, tag, q, true) -} - -func (c *Catalog) service(service, tag string, q *QueryOptions, connect bool) ([]*CatalogService, *QueryMeta, error) { - path := "/v1/catalog/service/" + service - if connect { - path = "/v1/catalog/connect/" + service - } - r := c.c.newRequest("GET", path) - r.setQueryOptions(q) - if tag != "" { - r.params.Set("tag", tag) - } - rtt, resp, err := requireOK(c.c.doRequest(r)) - if 
err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var out []*CatalogService - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return out, qm, nil -} - -// Node is used to query for service information about a single node -func (c *Catalog) Node(node string, q *QueryOptions) (*CatalogNode, *QueryMeta, error) { - r := c.c.newRequest("GET", "/v1/catalog/node/"+node) - r.setQueryOptions(q) - rtt, resp, err := requireOK(c.c.doRequest(r)) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var out *CatalogNode - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return out, qm, nil -} diff --git a/vendor/github.com/hashicorp/consul/api/connect.go b/vendor/github.com/hashicorp/consul/api/connect.go deleted file mode 100644 index a40d1e2..0000000 --- a/vendor/github.com/hashicorp/consul/api/connect.go +++ /dev/null @@ -1,12 +0,0 @@ -package api - -// Connect can be used to work with endpoints related to Connect, the -// feature for securely connecting services within Consul. -type Connect struct { - c *Client -} - -// Connect returns a handle to the connect-related endpoints -func (c *Client) Connect() *Connect { - return &Connect{c} -} diff --git a/vendor/github.com/hashicorp/consul/api/connect_ca.go b/vendor/github.com/hashicorp/consul/api/connect_ca.go deleted file mode 100644 index a863d21..0000000 --- a/vendor/github.com/hashicorp/consul/api/connect_ca.go +++ /dev/null @@ -1,172 +0,0 @@ -package api - -import ( - "fmt" - "time" - - "github.com/mitchellh/mapstructure" -) - -// CAConfig is the structure for the Connect CA configuration. -type CAConfig struct { - // Provider is the CA provider implementation to use. - Provider string - - // Configuration is arbitrary configuration for the provider. This - // should only contain primitive values and containers (such as lists - // and maps). - Config map[string]interface{} - - CreateIndex uint64 - ModifyIndex uint64 -} - -// CommonCAProviderConfig is the common options available to all CA providers. -type CommonCAProviderConfig struct { - LeafCertTTL time.Duration -} - -// ConsulCAProviderConfig is the config for the built-in Consul CA provider. -type ConsulCAProviderConfig struct { - CommonCAProviderConfig `mapstructure:",squash"` - - PrivateKey string - RootCert string - RotationPeriod time.Duration -} - -// ParseConsulCAConfig takes a raw config map and returns a parsed -// ConsulCAProviderConfig. -func ParseConsulCAConfig(raw map[string]interface{}) (*ConsulCAProviderConfig, error) { - var config ConsulCAProviderConfig - decodeConf := &mapstructure.DecoderConfig{ - DecodeHook: mapstructure.StringToTimeDurationHookFunc(), - ErrorUnused: true, - Result: &config, - WeaklyTypedInput: true, - } - - decoder, err := mapstructure.NewDecoder(decodeConf) - if err != nil { - return nil, err - } - - if err := decoder.Decode(raw); err != nil { - return nil, fmt.Errorf("error decoding config: %s", err) - } - - return &config, nil -} - -// CARootList is the structure for the results of listing roots. -type CARootList struct { - ActiveRootID string - TrustDomain string - Roots []*CARoot -} - -// CARoot represents a root CA certificate that is trusted. -type CARoot struct { - // ID is a globally unique ID (UUID) representing this CA root. - ID string - - // Name is a human-friendly name for this CA root. 
This value is - // opaque to Consul and is not used for anything internally. - Name string - - // RootCertPEM is the PEM-encoded public certificate. - RootCertPEM string `json:"RootCert"` - - // Active is true if this is the current active CA. This must only - // be true for exactly one CA. For any method that modifies roots in the - // state store, tests should be written to verify that multiple roots - // cannot be active. - Active bool - - CreateIndex uint64 - ModifyIndex uint64 -} - -// LeafCert is a certificate that has been issued by a Connect CA. -type LeafCert struct { - // SerialNumber is the unique serial number for this certificate. - // This is encoded in standard hex separated by :. - SerialNumber string - - // CertPEM and PrivateKeyPEM are the PEM-encoded certificate and private - // key for that cert, respectively. This should not be stored in the - // state store, but is present in the sign API response. - CertPEM string `json:",omitempty"` - PrivateKeyPEM string `json:",omitempty"` - - // Service is the name of the service for which the cert was issued. - // ServiceURI is the cert URI value. - Service string - ServiceURI string - - // ValidAfter and ValidBefore are the validity periods for the - // certificate. - ValidAfter time.Time - ValidBefore time.Time - - CreateIndex uint64 - ModifyIndex uint64 -} - -// CARoots queries the list of available roots. -func (h *Connect) CARoots(q *QueryOptions) (*CARootList, *QueryMeta, error) { - r := h.c.newRequest("GET", "/v1/connect/ca/roots") - r.setQueryOptions(q) - rtt, resp, err := requireOK(h.c.doRequest(r)) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var out CARootList - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return &out, qm, nil -} - -// CAGetConfig returns the current CA configuration. -func (h *Connect) CAGetConfig(q *QueryOptions) (*CAConfig, *QueryMeta, error) { - r := h.c.newRequest("GET", "/v1/connect/ca/configuration") - r.setQueryOptions(q) - rtt, resp, err := requireOK(h.c.doRequest(r)) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var out CAConfig - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return &out, qm, nil -} - -// CASetConfig sets the current CA configuration. -func (h *Connect) CASetConfig(conf *CAConfig, q *WriteOptions) (*WriteMeta, error) { - r := h.c.newRequest("PUT", "/v1/connect/ca/configuration") - r.setWriteOptions(q) - r.obj = conf - rtt, resp, err := requireOK(h.c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - wm := &WriteMeta{} - wm.RequestTime = rtt - return wm, nil -} diff --git a/vendor/github.com/hashicorp/consul/api/connect_intention.go b/vendor/github.com/hashicorp/consul/api/connect_intention.go deleted file mode 100644 index a996c03..0000000 --- a/vendor/github.com/hashicorp/consul/api/connect_intention.go +++ /dev/null @@ -1,302 +0,0 @@ -package api - -import ( - "bytes" - "fmt" - "io" - "time" -) - -// Intention defines an intention for the Connect Service Graph. This defines -// the allowed or denied behavior of a connection between two services using -// Connect. -type Intention struct { - // ID is the UUID-based ID for the intention, always generated by Consul. - ID string - - // Description is a human-friendly description of this intention. 
- // It is opaque to Consul and is only stored and transferred in API
- // requests.
- Description string
-
- // SourceNS, SourceName are the namespace and name, respectively, of
- // the source service. Either of these may be the wildcard "*", but only
- // the full value can be a wildcard. Partial wildcards are not allowed.
- // The source may also be a non-Consul service, as specified by SourceType.
- //
- // DestinationNS, DestinationName are the same, but for the destination
- // service. The same rules apply. The destination is always a Consul
- // service.
- SourceNS, SourceName string
- DestinationNS, DestinationName string
-
- // SourceType is the type of the value for the source.
- SourceType IntentionSourceType
-
- // Action is whether this is a whitelist or blacklist intention.
- Action IntentionAction
-
- // DefaultAddr, DefaultPort of the local listening proxy (if any) to
- // make this connection.
- DefaultAddr string
- DefaultPort int
-
- // Meta is arbitrary metadata associated with the intention. This is
- // opaque to Consul but is served in API responses.
- Meta map[string]string
-
- // Precedence is the order that the intention will be applied, with
- // larger numbers being applied first. This is a read-only field; it is
- // updated on any intention update.
- Precedence int
-
- // CreatedAt and UpdatedAt keep track of when this record was created
- // or modified.
- CreatedAt, UpdatedAt time.Time
-
- CreateIndex uint64
- ModifyIndex uint64
-}
-
-// String returns human-friendly output describing this intention.
-func (i *Intention) String() string {
- return fmt.Sprintf("%s => %s (%s)",
- i.SourceString(),
- i.DestinationString(),
- i.Action)
-}
-
-// SourceString returns the namespace/name format for the source, or
-// just "name" if the namespace is the default namespace.
-func (i *Intention) SourceString() string {
- return i.partString(i.SourceNS, i.SourceName)
-}
-
-// DestinationString returns the namespace/name format for the destination, or
-// just "name" if the namespace is the default namespace.
-func (i *Intention) DestinationString() string {
- return i.partString(i.DestinationNS, i.DestinationName)
-}
-
-func (i *Intention) partString(ns, n string) string {
- // For now we omit the default namespace from the output. In the future
- // we might want to look at this and show this in a multi-namespace world.
- if ns != "" && ns != IntentionDefaultNamespace {
- n = ns + "/" + n
- }
-
- return n
-}
-
-// IntentionDefaultNamespace is the default namespace value.
-const IntentionDefaultNamespace = "default"
-
-// IntentionAction is the action that the intention represents. This
-// can be "allow" or "deny" to whitelist or blacklist intentions.
-type IntentionAction string
-
-const (
- IntentionActionAllow IntentionAction = "allow"
- IntentionActionDeny IntentionAction = "deny"
-)
-
-// IntentionSourceType is the type of the source within an intention.
-type IntentionSourceType string
-
-const (
- // IntentionSourceConsul is a service within the Consul catalog.
- IntentionSourceConsul IntentionSourceType = "consul"
-)
-
-// IntentionMatch are the arguments for the intention match API.
-type IntentionMatch struct {
- By IntentionMatchType
- Names []string
-}
-
-// IntentionMatchType is the target for a match request. For example,
-// matching by source will look for all intentions that match the given
-// source value.
-type IntentionMatchType string - -const ( - IntentionMatchSource IntentionMatchType = "source" - IntentionMatchDestination IntentionMatchType = "destination" -) - -// IntentionCheck are the arguments for the intention check API. For -// more documentation see the IntentionCheck function. -type IntentionCheck struct { - // Source and Destination are the source and destination values to - // check. The destination is always a Consul service, but the source - // may be other values as defined by the SourceType. - Source, Destination string - - // SourceType is the type of the value for the source. - SourceType IntentionSourceType -} - -// Intentions returns the list of intentions. -func (h *Connect) Intentions(q *QueryOptions) ([]*Intention, *QueryMeta, error) { - r := h.c.newRequest("GET", "/v1/connect/intentions") - r.setQueryOptions(q) - rtt, resp, err := requireOK(h.c.doRequest(r)) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var out []*Intention - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return out, qm, nil -} - -// IntentionGet retrieves a single intention. -func (h *Connect) IntentionGet(id string, q *QueryOptions) (*Intention, *QueryMeta, error) { - r := h.c.newRequest("GET", "/v1/connect/intentions/"+id) - r.setQueryOptions(q) - rtt, resp, err := h.c.doRequest(r) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - if resp.StatusCode == 404 { - return nil, qm, nil - } else if resp.StatusCode != 200 { - var buf bytes.Buffer - io.Copy(&buf, resp.Body) - return nil, nil, fmt.Errorf( - "Unexpected response %d: %s", resp.StatusCode, buf.String()) - } - - var out Intention - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return &out, qm, nil -} - -// IntentionDelete deletes a single intention. -func (h *Connect) IntentionDelete(id string, q *WriteOptions) (*WriteMeta, error) { - r := h.c.newRequest("DELETE", "/v1/connect/intentions/"+id) - r.setWriteOptions(q) - rtt, resp, err := requireOK(h.c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - qm := &WriteMeta{} - qm.RequestTime = rtt - - return qm, nil -} - -// IntentionMatch returns the list of intentions that match a given source -// or destination. The returned intentions are ordered by precedence where -// result[0] is the highest precedence (if that matches, then that rule overrides -// all other rules). -// -// Matching can be done for multiple names at the same time. The resulting -// map is keyed by the given names. Casing is preserved. -func (h *Connect) IntentionMatch(args *IntentionMatch, q *QueryOptions) (map[string][]*Intention, *QueryMeta, error) { - r := h.c.newRequest("GET", "/v1/connect/intentions/match") - r.setQueryOptions(q) - r.params.Set("by", string(args.By)) - for _, name := range args.Names { - r.params.Add("name", name) - } - rtt, resp, err := requireOK(h.c.doRequest(r)) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var out map[string][]*Intention - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return out, qm, nil -} - -// IntentionCheck returns whether a given source/destination would be allowed -// or not given the current set of intentions and the configuration of Consul. 
-func (h *Connect) IntentionCheck(args *IntentionCheck, q *QueryOptions) (bool, *QueryMeta, error) {
- r := h.c.newRequest("GET", "/v1/connect/intentions/check")
- r.setQueryOptions(q)
- r.params.Set("source", args.Source)
- r.params.Set("destination", args.Destination)
- if args.SourceType != "" {
- r.params.Set("source-type", string(args.SourceType))
- }
- rtt, resp, err := requireOK(h.c.doRequest(r))
- if err != nil {
- return false, nil, err
- }
- defer resp.Body.Close()
-
- qm := &QueryMeta{}
- parseQueryMeta(resp, qm)
- qm.RequestTime = rtt
-
- var out struct{ Allowed bool }
- if err := decodeBody(resp, &out); err != nil {
- return false, nil, err
- }
- return out.Allowed, qm, nil
-}
-
-// IntentionCreate will create a new intention. The ID in the given
-// structure must be empty and a generated ID will be returned on
-// success.
-func (c *Connect) IntentionCreate(ixn *Intention, q *WriteOptions) (string, *WriteMeta, error) {
- r := c.c.newRequest("POST", "/v1/connect/intentions")
- r.setWriteOptions(q)
- r.obj = ixn
- rtt, resp, err := requireOK(c.c.doRequest(r))
- if err != nil {
- return "", nil, err
- }
- defer resp.Body.Close()
-
- wm := &WriteMeta{}
- wm.RequestTime = rtt
-
- var out struct{ ID string }
- if err := decodeBody(resp, &out); err != nil {
- return "", nil, err
- }
- return out.ID, wm, nil
-}
-
-// IntentionUpdate will update an existing intention. The ID in the given
-// structure must be non-empty.
-func (c *Connect) IntentionUpdate(ixn *Intention, q *WriteOptions) (*WriteMeta, error) {
- r := c.c.newRequest("PUT", "/v1/connect/intentions/"+ixn.ID)
- r.setWriteOptions(q)
- r.obj = ixn
- rtt, resp, err := requireOK(c.c.doRequest(r))
- if err != nil {
- return nil, err
- }
- defer resp.Body.Close()
-
- wm := &WriteMeta{}
- wm.RequestTime = rtt
- return wm, nil
-}
diff --git a/vendor/github.com/hashicorp/consul/api/coordinate.go b/vendor/github.com/hashicorp/consul/api/coordinate.go
deleted file mode 100644
index 53318f1..0000000
--- a/vendor/github.com/hashicorp/consul/api/coordinate.go
+++ /dev/null
@@ -1,106 +0,0 @@
-package api
-
-import (
- "github.com/hashicorp/serf/coordinate"
-)
-
-// CoordinateEntry represents a node and its associated network coordinate.
-type CoordinateEntry struct {
- Node string
- Segment string
- Coord *coordinate.Coordinate
-}
-
-// CoordinateDatacenterMap has the coordinates for servers in a given datacenter
-// and area. Network coordinates are only compatible within the same area.
-type CoordinateDatacenterMap struct {
- Datacenter string
- AreaID string
- Coordinates []CoordinateEntry
-}
-
-// Coordinate can be used to query the coordinate endpoints
-type Coordinate struct {
- c *Client
-}
-
-// Coordinate returns a handle to the coordinate endpoints
-func (c *Client) Coordinate() *Coordinate {
- return &Coordinate{c}
-}
-
-// Datacenters is used to return the coordinates of all the servers in the WAN
-// pool.
-func (c *Coordinate) Datacenters() ([]*CoordinateDatacenterMap, error) {
- r := c.c.newRequest("GET", "/v1/coordinate/datacenters")
- _, resp, err := requireOK(c.c.doRequest(r))
- if err != nil {
- return nil, err
- }
- defer resp.Body.Close()
-
- var out []*CoordinateDatacenterMap
- if err := decodeBody(resp, &out); err != nil {
- return nil, err
- }
- return out, nil
-}
-
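Circling back to the intention endpoints above, a hedged sketch that denies web -> db and then checks the decision; the service names are hypothetical, and Connect() is the accessor from connect.go in this same diff:

package main

import (
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	connect := client.Connect()

	// The ID must be left empty; Consul generates it.
	id, _, err := connect.IntentionCreate(&api.Intention{
		SourceName:      "web",
		DestinationName: "db",
		Action:          api.IntentionActionDeny,
	}, nil)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("created intention", id)

	// Would a web -> db connection currently be authorized?
	allowed, _, err := connect.IntentionCheck(&api.IntentionCheck{
		Source:      "web",
		Destination: "db",
	}, nil)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("allowed:", allowed)
}

-// Nodes is used to return the coordinates of all the nodes in the LAN pool.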
-func (c *Coordinate) Nodes(q *QueryOptions) ([]*CoordinateEntry, *QueryMeta, error) {
- r := c.c.newRequest("GET", "/v1/coordinate/nodes")
- r.setQueryOptions(q)
- rtt, resp, err := requireOK(c.c.doRequest(r))
- if err != nil {
- return nil, nil, err
- }
- defer resp.Body.Close()
-
- qm := &QueryMeta{}
- parseQueryMeta(resp, qm)
- qm.RequestTime = rtt
-
- var out []*CoordinateEntry
- if err := decodeBody(resp, &out); err != nil {
- return nil, nil, err
- }
- return out, qm, nil
-}
-
-// Update inserts or updates the LAN coordinate of a node.
-func (c *Coordinate) Update(coord *CoordinateEntry, q *WriteOptions) (*WriteMeta, error) {
- r := c.c.newRequest("PUT", "/v1/coordinate/update")
- r.setWriteOptions(q)
- r.obj = coord
- rtt, resp, err := requireOK(c.c.doRequest(r))
- if err != nil {
- return nil, err
- }
- defer resp.Body.Close()
-
- wm := &WriteMeta{}
- wm.RequestTime = rtt
-
- return wm, nil
-}
-
-// Node is used to return the coordinates of a single node in the LAN pool.
-func (c *Coordinate) Node(node string, q *QueryOptions) ([]*CoordinateEntry, *QueryMeta, error) {
- r := c.c.newRequest("GET", "/v1/coordinate/node/"+node)
- r.setQueryOptions(q)
- rtt, resp, err := requireOK(c.c.doRequest(r))
- if err != nil {
- return nil, nil, err
- }
- defer resp.Body.Close()
-
- qm := &QueryMeta{}
- parseQueryMeta(resp, qm)
- qm.RequestTime = rtt
-
- var out []*CoordinateEntry
- if err := decodeBody(resp, &out); err != nil {
- return nil, nil, err
- }
- return out, qm, nil
-}
diff --git a/vendor/github.com/hashicorp/consul/api/event.go b/vendor/github.com/hashicorp/consul/api/event.go
deleted file mode 100644
index 85b5b06..0000000
--- a/vendor/github.com/hashicorp/consul/api/event.go
+++ /dev/null
@@ -1,104 +0,0 @@
-package api
-
-import (
- "bytes"
- "strconv"
-)
-
-// Event can be used to query the Event endpoints
-type Event struct {
- c *Client
-}
-
-// UserEvent represents an event that was fired by the user
-type UserEvent struct {
- ID string
- Name string
- Payload []byte
- NodeFilter string
- ServiceFilter string
- TagFilter string
- Version int
- LTime uint64
-}
-
-// Event returns a handle to the event endpoints
-func (c *Client) Event() *Event {
- return &Event{c}
-}
-
-// Fire is used to fire a new user event. Only the Name, Payload and Filters
-// are respected. This returns the ID or an associated error. Cross DC requests
-// are supported.
-func (e *Event) Fire(params *UserEvent, q *WriteOptions) (string, *WriteMeta, error) {
- r := e.c.newRequest("PUT", "/v1/event/fire/"+params.Name)
- r.setWriteOptions(q)
- if params.NodeFilter != "" {
- r.params.Set("node", params.NodeFilter)
- }
- if params.ServiceFilter != "" {
- r.params.Set("service", params.ServiceFilter)
- }
- if params.TagFilter != "" {
- r.params.Set("tag", params.TagFilter)
- }
- if params.Payload != nil {
- r.body = bytes.NewReader(params.Payload)
- }
-
- rtt, resp, err := requireOK(e.c.doRequest(r))
- if err != nil {
- return "", nil, err
- }
- defer resp.Body.Close()
-
- wm := &WriteMeta{RequestTime: rtt}
- var out UserEvent
- if err := decodeBody(resp, &out); err != nil {
- return "", nil, err
- }
- return out.ID, wm, nil
-}
-
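A short sketch of Fire as defined above; the event name and payload are made up:

package main

import (
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Only Name, Payload and the filters are respected by Fire.
	id, _, err := client.Event().Fire(&api.UserEvent{
		Name:    "deploy",
		Payload: []byte("v0.9.0"),
	}, nil)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("fired event", id)
}

-// List is used to get the most recent events an agent has received.
-// This list can be optionally filtered by the name. This endpoint supports
-// quasi-blocking queries. The index is not monotonic, nor does it provide
-// LastContact or KnownLeader.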
-func (e *Event) List(name string, q *QueryOptions) ([]*UserEvent, *QueryMeta, error) { - r := e.c.newRequest("GET", "/v1/event/list") - r.setQueryOptions(q) - if name != "" { - r.params.Set("name", name) - } - rtt, resp, err := requireOK(e.c.doRequest(r)) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var entries []*UserEvent - if err := decodeBody(resp, &entries); err != nil { - return nil, nil, err - } - return entries, qm, nil -} - -// IDToIndex is a bit of a hack. This simulates the index generation to -// convert an event ID into a WaitIndex. -func (e *Event) IDToIndex(uuid string) uint64 { - lower := uuid[0:8] + uuid[9:13] + uuid[14:18] - upper := uuid[19:23] + uuid[24:36] - lowVal, err := strconv.ParseUint(lower, 16, 64) - if err != nil { - panic("Failed to convert " + lower) - } - highVal, err := strconv.ParseUint(upper, 16, 64) - if err != nil { - panic("Failed to convert " + upper) - } - return lowVal ^ highVal -} diff --git a/vendor/github.com/hashicorp/consul/api/health.go b/vendor/github.com/hashicorp/consul/api/health.go deleted file mode 100644 index 1835da5..0000000 --- a/vendor/github.com/hashicorp/consul/api/health.go +++ /dev/null @@ -1,232 +0,0 @@ -package api - -import ( - "fmt" - "strings" -) - -const ( - // HealthAny is special, and is used as a wild card, - // not as a specific state. - HealthAny = "any" - HealthPassing = "passing" - HealthWarning = "warning" - HealthCritical = "critical" - HealthMaint = "maintenance" -) - -const ( - // NodeMaint is the special key set by a node in maintenance mode. - NodeMaint = "_node_maintenance" - - // ServiceMaintPrefix is the prefix for a service in maintenance mode. - ServiceMaintPrefix = "_service_maintenance:" -) - -// HealthCheck is used to represent a single check -type HealthCheck struct { - Node string - CheckID string - Name string - Status string - Notes string - Output string - ServiceID string - ServiceName string - ServiceTags []string - - Definition HealthCheckDefinition -} - -// HealthCheckDefinition is used to store the details about -// a health check's execution. -type HealthCheckDefinition struct { - HTTP string - Header map[string][]string - Method string - TLSSkipVerify bool - TCP string - Interval ReadableDuration - Timeout ReadableDuration - DeregisterCriticalServiceAfter ReadableDuration -} - -// HealthChecks is a collection of HealthCheck structs. -type HealthChecks []*HealthCheck - -// AggregatedStatus returns the "best" status for the list of health checks. 
-// Because a given entry may have many service and node-level health checks
-// attached, this function determines the best representative of the status as
-// a single string using the following heuristic:
-//
-// maintenance > critical > warning > passing
-//
-func (c HealthChecks) AggregatedStatus() string {
- var passing, warning, critical, maintenance bool
- for _, check := range c {
- id := string(check.CheckID)
- if id == NodeMaint || strings.HasPrefix(id, ServiceMaintPrefix) {
- maintenance = true
- continue
- }
-
- switch check.Status {
- case HealthPassing:
- passing = true
- case HealthWarning:
- warning = true
- case HealthCritical:
- critical = true
- default:
- return ""
- }
- }
-
- switch {
- case maintenance:
- return HealthMaint
- case critical:
- return HealthCritical
- case warning:
- return HealthWarning
- case passing:
- return HealthPassing
- default:
- return HealthPassing
- }
-}
-
-// ServiceEntry is used for the health service endpoint
-type ServiceEntry struct {
- Node *Node
- Service *AgentService
- Checks HealthChecks
-}
-
-// Health can be used to query the Health endpoints
-type Health struct {
- c *Client
-}
-
-// Health returns a handle to the health endpoints
-func (c *Client) Health() *Health {
- return &Health{c}
-}
-
-// Node is used to query for checks belonging to a given node
-func (h *Health) Node(node string, q *QueryOptions) (HealthChecks, *QueryMeta, error) {
- r := h.c.newRequest("GET", "/v1/health/node/"+node)
- r.setQueryOptions(q)
- rtt, resp, err := requireOK(h.c.doRequest(r))
- if err != nil {
- return nil, nil, err
- }
- defer resp.Body.Close()
-
- qm := &QueryMeta{}
- parseQueryMeta(resp, qm)
- qm.RequestTime = rtt
-
- var out HealthChecks
- if err := decodeBody(resp, &out); err != nil {
- return nil, nil, err
- }
- return out, qm, nil
-}
-
-// Checks is used to return the checks associated with a service
-func (h *Health) Checks(service string, q *QueryOptions) (HealthChecks, *QueryMeta, error) {
- r := h.c.newRequest("GET", "/v1/health/checks/"+service)
- r.setQueryOptions(q)
- rtt, resp, err := requireOK(h.c.doRequest(r))
- if err != nil {
- return nil, nil, err
- }
- defer resp.Body.Close()
-
- qm := &QueryMeta{}
- parseQueryMeta(resp, qm)
- qm.RequestTime = rtt
-
- var out HealthChecks
- if err := decodeBody(resp, &out); err != nil {
- return nil, nil, err
- }
- return out, qm, nil
-}
-
-// Service is used to query health information along with service info
-// for a given service. It can optionally do server-side filtering on a tag
-// or nodes with passing health checks only.
-func (h *Health) Service(service, tag string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) {
- return h.service(service, tag, passingOnly, q, false)
-}
-
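A usage sketch for the health queries above: list the passing instances of a hypothetical "web" service and print each node with its aggregated check status:

package main

import (
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// passingOnly=true filters to instances whose checks all pass.
	entries, _, err := client.Health().Service("web", "", true, nil)
	if err != nil {
		log.Fatal(err)
	}
	for _, e := range entries {
		log.Println(e.Node.Node, e.Checks.AggregatedStatus())
	}
}

-// Connect is equivalent to Service except that it will only return services
-// which are Connect-enabled and will return the connection address for Connect
-// clients to use, which may be a proxy in front of the named service. If
-// passingOnly is true, only instances where both the service and any proxy are
-// healthy will be returned.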
-func (h *Health) Connect(service, tag string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) { - return h.service(service, tag, passingOnly, q, true) -} - -func (h *Health) service(service, tag string, passingOnly bool, q *QueryOptions, connect bool) ([]*ServiceEntry, *QueryMeta, error) { - path := "/v1/health/service/" + service - if connect { - path = "/v1/health/connect/" + service - } - r := h.c.newRequest("GET", path) - r.setQueryOptions(q) - if tag != "" { - r.params.Set("tag", tag) - } - if passingOnly { - r.params.Set(HealthPassing, "1") - } - rtt, resp, err := requireOK(h.c.doRequest(r)) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var out []*ServiceEntry - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return out, qm, nil -} - -// State is used to retrieve all the checks in a given state. -// The wildcard "any" state can also be used for all checks. -func (h *Health) State(state string, q *QueryOptions) (HealthChecks, *QueryMeta, error) { - switch state { - case HealthAny: - case HealthWarning: - case HealthCritical: - case HealthPassing: - default: - return nil, nil, fmt.Errorf("Unsupported state: %v", state) - } - r := h.c.newRequest("GET", "/v1/health/state/"+state) - r.setQueryOptions(q) - rtt, resp, err := requireOK(h.c.doRequest(r)) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var out HealthChecks - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return out, qm, nil -} diff --git a/vendor/github.com/hashicorp/consul/api/kv.go b/vendor/github.com/hashicorp/consul/api/kv.go deleted file mode 100644 index 97f5156..0000000 --- a/vendor/github.com/hashicorp/consul/api/kv.go +++ /dev/null @@ -1,420 +0,0 @@ -package api - -import ( - "bytes" - "fmt" - "io" - "net/http" - "strconv" - "strings" -) - -// KVPair is used to represent a single K/V entry -type KVPair struct { - // Key is the name of the key. It is also part of the URL path when accessed - // via the API. - Key string - - // CreateIndex holds the index corresponding to the creation of this KVPair. This - // is a read-only field. - CreateIndex uint64 - - // ModifyIndex is used for the Check-And-Set operations and can also be fed - // back into the WaitIndex of the QueryOptions in order to perform blocking - // queries. - ModifyIndex uint64 - - // LockIndex holds the index corresponding to a lock on this key, if any. This - // is a read-only field. - LockIndex uint64 - - // Flags are any user-defined flags on the key. It is up to the implementer - // to check these values, since Consul does not treat them specially. - Flags uint64 - - // Value is the value for the key. This can be any value, but it will be - // base64 encoded upon transport. - Value []byte - - // Session is a string representing the ID of the session. Any other - // interactions with this key over the same session must specify the same - // session ID. - Session string -} - -// KVPairs is a list of KVPair objects -type KVPairs []*KVPair - -// KVOp constants give possible operations available in a KVTxn. 
-type KVOp string - -const ( - KVSet KVOp = "set" - KVDelete KVOp = "delete" - KVDeleteCAS KVOp = "delete-cas" - KVDeleteTree KVOp = "delete-tree" - KVCAS KVOp = "cas" - KVLock KVOp = "lock" - KVUnlock KVOp = "unlock" - KVGet KVOp = "get" - KVGetTree KVOp = "get-tree" - KVCheckSession KVOp = "check-session" - KVCheckIndex KVOp = "check-index" - KVCheckNotExists KVOp = "check-not-exists" -) - -// KVTxnOp defines a single operation inside a transaction. -type KVTxnOp struct { - Verb KVOp - Key string - Value []byte - Flags uint64 - Index uint64 - Session string -} - -// KVTxnOps defines a set of operations to be performed inside a single -// transaction. -type KVTxnOps []*KVTxnOp - -// KVTxnResponse has the outcome of a transaction. -type KVTxnResponse struct { - Results []*KVPair - Errors TxnErrors -} - -// KV is used to manipulate the K/V API -type KV struct { - c *Client -} - -// KV is used to return a handle to the K/V APIs -func (c *Client) KV() *KV { - return &KV{c} -} - -// Get is used to lookup a single key. The returned pointer -// to the KVPair will be nil if the key does not exist. -func (k *KV) Get(key string, q *QueryOptions) (*KVPair, *QueryMeta, error) { - resp, qm, err := k.getInternal(key, nil, q) - if err != nil { - return nil, nil, err - } - if resp == nil { - return nil, qm, nil - } - defer resp.Body.Close() - - var entries []*KVPair - if err := decodeBody(resp, &entries); err != nil { - return nil, nil, err - } - if len(entries) > 0 { - return entries[0], qm, nil - } - return nil, qm, nil -} - -// List is used to lookup all keys under a prefix -func (k *KV) List(prefix string, q *QueryOptions) (KVPairs, *QueryMeta, error) { - resp, qm, err := k.getInternal(prefix, map[string]string{"recurse": ""}, q) - if err != nil { - return nil, nil, err - } - if resp == nil { - return nil, qm, nil - } - defer resp.Body.Close() - - var entries []*KVPair - if err := decodeBody(resp, &entries); err != nil { - return nil, nil, err - } - return entries, qm, nil -} - -// Keys is used to list all the keys under a prefix. Optionally, -// a separator can be used to limit the responses. -func (k *KV) Keys(prefix, separator string, q *QueryOptions) ([]string, *QueryMeta, error) { - params := map[string]string{"keys": ""} - if separator != "" { - params["separator"] = separator - } - resp, qm, err := k.getInternal(prefix, params, q) - if err != nil { - return nil, nil, err - } - if resp == nil { - return nil, qm, nil - } - defer resp.Body.Close() - - var entries []string - if err := decodeBody(resp, &entries); err != nil { - return nil, nil, err - } - return entries, qm, nil -} - -func (k *KV) getInternal(key string, params map[string]string, q *QueryOptions) (*http.Response, *QueryMeta, error) { - r := k.c.newRequest("GET", "/v1/kv/"+strings.TrimPrefix(key, "/")) - r.setQueryOptions(q) - for param, val := range params { - r.params.Set(param, val) - } - rtt, resp, err := k.c.doRequest(r) - if err != nil { - return nil, nil, err - } - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - if resp.StatusCode == 404 { - resp.Body.Close() - return nil, qm, nil - } else if resp.StatusCode != 200 { - resp.Body.Close() - return nil, nil, fmt.Errorf("Unexpected response code: %d", resp.StatusCode) - } - return resp, qm, nil -} - -// Put is used to write a new value. Only the -// Key, Flags and Value are respected. 
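As a minimal sketch of the KV read/write cycle these methods expose (illustrative only, not part of this diff; the agent address is the default and the key name is made up):

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	// Assumed setup: a client against the default local agent.
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	kv := client.KV()

	// Write a value; per the Put contract above, only Key, Flags and
	// Value of the KVPair are consulted.
	p := &api.KVPair{Key: "sql-runner/config/mode", Value: []byte("primary")}
	if _, err := kv.Put(p, nil); err != nil {
		log.Fatal(err)
	}

	// Read it back; Get returns a nil pair when the key does not exist.
	pair, _, err := kv.Get("sql-runner/config/mode", nil)
	if err != nil {
		log.Fatal(err)
	}
	if pair != nil {
		fmt.Printf("%s = %s (ModifyIndex %d)\n", pair.Key, pair.Value, pair.ModifyIndex)
	}
}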
-func (k *KV) Put(p *KVPair, q *WriteOptions) (*WriteMeta, error) { - params := make(map[string]string, 1) - if p.Flags != 0 { - params["flags"] = strconv.FormatUint(p.Flags, 10) - } - _, wm, err := k.put(p.Key, params, p.Value, q) - return wm, err -} - -// CAS is used for a Check-And-Set operation. The Key, -// ModifyIndex, Flags and Value are respected. Returns true -// on success or false on failures. -func (k *KV) CAS(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) { - params := make(map[string]string, 2) - if p.Flags != 0 { - params["flags"] = strconv.FormatUint(p.Flags, 10) - } - params["cas"] = strconv.FormatUint(p.ModifyIndex, 10) - return k.put(p.Key, params, p.Value, q) -} - -// Acquire is used for a lock acquisition operation. The Key, -// Flags, Value and Session are respected. Returns true -// on success or false on failures. -func (k *KV) Acquire(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) { - params := make(map[string]string, 2) - if p.Flags != 0 { - params["flags"] = strconv.FormatUint(p.Flags, 10) - } - params["acquire"] = p.Session - return k.put(p.Key, params, p.Value, q) -} - -// Release is used for a lock release operation. The Key, -// Flags, Value and Session are respected. Returns true -// on success or false on failures. -func (k *KV) Release(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) { - params := make(map[string]string, 2) - if p.Flags != 0 { - params["flags"] = strconv.FormatUint(p.Flags, 10) - } - params["release"] = p.Session - return k.put(p.Key, params, p.Value, q) -} - -func (k *KV) put(key string, params map[string]string, body []byte, q *WriteOptions) (bool, *WriteMeta, error) { - if len(key) > 0 && key[0] == '/' { - return false, nil, fmt.Errorf("Invalid key. Key must not begin with a '/': %s", key) - } - - r := k.c.newRequest("PUT", "/v1/kv/"+key) - r.setWriteOptions(q) - for param, val := range params { - r.params.Set(param, val) - } - r.body = bytes.NewReader(body) - rtt, resp, err := requireOK(k.c.doRequest(r)) - if err != nil { - return false, nil, err - } - defer resp.Body.Close() - - qm := &WriteMeta{} - qm.RequestTime = rtt - - var buf bytes.Buffer - if _, err := io.Copy(&buf, resp.Body); err != nil { - return false, nil, fmt.Errorf("Failed to read response: %v", err) - } - res := strings.Contains(buf.String(), "true") - return res, qm, nil -} - -// Delete is used to delete a single key -func (k *KV) Delete(key string, w *WriteOptions) (*WriteMeta, error) { - _, qm, err := k.deleteInternal(key, nil, w) - return qm, err -} - -// DeleteCAS is used for a Delete Check-And-Set operation. The Key -// and ModifyIndex are respected. Returns true on success or false on failures. 
-func (k *KV) DeleteCAS(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) { - params := map[string]string{ - "cas": strconv.FormatUint(p.ModifyIndex, 10), - } - return k.deleteInternal(p.Key, params, q) -} - -// DeleteTree is used to delete all keys under a prefix -func (k *KV) DeleteTree(prefix string, w *WriteOptions) (*WriteMeta, error) { - _, qm, err := k.deleteInternal(prefix, map[string]string{"recurse": ""}, w) - return qm, err -} - -func (k *KV) deleteInternal(key string, params map[string]string, q *WriteOptions) (bool, *WriteMeta, error) { - r := k.c.newRequest("DELETE", "/v1/kv/"+strings.TrimPrefix(key, "/")) - r.setWriteOptions(q) - for param, val := range params { - r.params.Set(param, val) - } - rtt, resp, err := requireOK(k.c.doRequest(r)) - if err != nil { - return false, nil, err - } - defer resp.Body.Close() - - qm := &WriteMeta{} - qm.RequestTime = rtt - - var buf bytes.Buffer - if _, err := io.Copy(&buf, resp.Body); err != nil { - return false, nil, fmt.Errorf("Failed to read response: %v", err) - } - res := strings.Contains(buf.String(), "true") - return res, qm, nil -} - -// TxnOp is the internal format we send to Consul. It's not specific to KV, -// though currently only KV operations are supported. -type TxnOp struct { - KV *KVTxnOp -} - -// TxnOps is a list of transaction operations. -type TxnOps []*TxnOp - -// TxnResult is the internal format we receive from Consul. -type TxnResult struct { - KV *KVPair -} - -// TxnResults is a list of TxnResult objects. -type TxnResults []*TxnResult - -// TxnError is used to return information about an operation in a transaction. -type TxnError struct { - OpIndex int - What string -} - -// TxnErrors is a list of TxnError objects. -type TxnErrors []*TxnError - -// TxnResponse is the internal format we receive from Consul. -type TxnResponse struct { - Results TxnResults - Errors TxnErrors -} - -// Txn is used to apply multiple KV operations in a single, atomic transaction. -// -// Note that Go will perform the required base64 encoding on the values -// automatically because the type is a byte slice. Transactions are defined as a -// list of operations to perform, using the KVOp constants and KVTxnOp structure -// to define operations. If any operation fails, none of the changes are applied -// to the state store. Note that this hides the internal raw transaction interface -// and munges the input and output types into KV-specific ones for ease of use. -// If there are more non-KV operations in the future we may break out a new -// transaction API client, but it will be easy to keep this KV-specific variant -// supported. -// -// Even though this is generally a write operation, we take a QueryOptions input -// and return a QueryMeta output. If the transaction contains only read ops, then -// Consul will fast-path it to a different endpoint internally which supports -// consistency controls, but not blocking. If there are write operations then -// the request will always be routed through raft and any consistency settings -// will be ignored. -// -// Here's an example: -// -// ops := KVTxnOps{ -// &KVTxnOp{ -// Verb: KVLock, -// Key: "test/lock", -// Session: "adf4238a-882b-9ddc-4a9d-5b6758e4159e", -// Value: []byte("hello"), -// }, -// &KVTxnOp{ -// Verb: KVGet, -// Key: "another/key", -// }, -// } -// ok, response, _, err := kv.Txn(ops, nil) -// -// If there is a problem making the transaction request then an error will be -// returned. 
Otherwise, the ok value will be true if the transaction succeeded -// or false if it was rolled back. The response is a structured return value which -// will have the outcome of the transaction. Its Results member will have entries -// for each operation. Deleted keys will have a nil entry in the results, and to save -// space, the Value of each key in the Results will be nil unless the operation -// is a KVGet. If the transaction was rolled back, the Errors member will have -// entries referencing the index of the operation that failed along with an error -// message. -func (k *KV) Txn(txn KVTxnOps, q *QueryOptions) (bool, *KVTxnResponse, *QueryMeta, error) { - r := k.c.newRequest("PUT", "/v1/txn") - r.setQueryOptions(q) - - // Convert into the internal format since this is an all-KV txn. - ops := make(TxnOps, 0, len(txn)) - for _, kvOp := range txn { - ops = append(ops, &TxnOp{KV: kvOp}) - } - r.obj = ops - rtt, resp, err := k.c.doRequest(r) - if err != nil { - return false, nil, nil, err - } - defer resp.Body.Close() - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusConflict { - var txnResp TxnResponse - if err := decodeBody(resp, &txnResp); err != nil { - return false, nil, nil, err - } - - // Convert from the internal format. - kvResp := KVTxnResponse{ - Errors: txnResp.Errors, - } - for _, result := range txnResp.Results { - kvResp.Results = append(kvResp.Results, result.KV) - } - return resp.StatusCode == http.StatusOK, &kvResp, qm, nil - } - - var buf bytes.Buffer - if _, err := io.Copy(&buf, resp.Body); err != nil { - return false, nil, nil, fmt.Errorf("Failed to read response: %v", err) - } - return false, nil, nil, fmt.Errorf("Failed request: %s", buf.String()) -} diff --git a/vendor/github.com/hashicorp/consul/api/lock.go b/vendor/github.com/hashicorp/consul/api/lock.go deleted file mode 100644 index 82339cb..0000000 --- a/vendor/github.com/hashicorp/consul/api/lock.go +++ /dev/null @@ -1,386 +0,0 @@ -package api - -import ( - "fmt" - "sync" - "time" -) - -const ( - // DefaultLockSessionName is the Session Name we assign if none is provided - DefaultLockSessionName = "Consul API Lock" - - // DefaultLockSessionTTL is the default session TTL if no Session is provided - // when creating a new Lock. This is used because we do not have any - // other check to depend upon. - DefaultLockSessionTTL = "15s" - - // DefaultLockWaitTime is how long we block for at a time to check if lock - // acquisition is possible. This affects the minimum time it takes to cancel - // a Lock acquisition. - DefaultLockWaitTime = 15 * time.Second - - // DefaultLockRetryTime is how long we wait after a failed lock acquisition - // before attempting to do the lock again. This is so that once a lock-delay - // is in effect, we do not hot loop retrying the acquisition. - DefaultLockRetryTime = 5 * time.Second - - // DefaultMonitorRetryTime is how long we wait after a failed monitor check - // of a lock (500 response code). This allows the monitor to ride out brief - // periods of unavailability, subject to the MonitorRetries setting in the - // lock options which is by default set to 0, disabling this feature. This - // affects locks and semaphores. - DefaultMonitorRetryTime = 2 * time.Second - - // LockFlagValue is a magic flag we set to indicate a key - // is being used for a lock. It is used to detect a potential - // conflict with a semaphore. 
- LockFlagValue = 0x2ddccbc058a50c18 -) - -var ( - // ErrLockHeld is returned if we attempt to double lock - ErrLockHeld = fmt.Errorf("Lock already held") - - // ErrLockNotHeld is returned if we attempt to unlock a lock - // that we do not hold. - ErrLockNotHeld = fmt.Errorf("Lock not held") - - // ErrLockInUse is returned if we attempt to destroy a lock - // that is in use. - ErrLockInUse = fmt.Errorf("Lock in use") - - // ErrLockConflict is returned if the flags on a key - // used for a lock do not match expectation - ErrLockConflict = fmt.Errorf("Existing key does not match lock use") -) - -// Lock is used to implement client-side leader election. It follows the -// algorithm as described here: https://www.consul.io/docs/guides/leader-election.html. -type Lock struct { - c *Client - opts *LockOptions - - isHeld bool - sessionRenew chan struct{} - lockSession string - l sync.Mutex -} - -// LockOptions is used to parameterize the Lock behavior. -type LockOptions struct { - Key string // Must be set and have write permissions - Value []byte // Optional, value to associate with the lock - Session string // Optional, created if not specified - SessionOpts *SessionEntry // Optional, options to use when creating a session - SessionName string // Optional, defaults to DefaultLockSessionName (ignored if SessionOpts is given) - SessionTTL string // Optional, defaults to DefaultLockSessionTTL (ignored if SessionOpts is given) - MonitorRetries int // Optional, defaults to 0 which means no retries - MonitorRetryTime time.Duration // Optional, defaults to DefaultMonitorRetryTime - LockWaitTime time.Duration // Optional, defaults to DefaultLockWaitTime - LockTryOnce bool // Optional, defaults to false which means try forever -} - -// LockKey returns a handle to a lock struct which can be used -// to acquire and release the mutex. The key used must have -// write permissions. -func (c *Client) LockKey(key string) (*Lock, error) { - opts := &LockOptions{ - Key: key, - } - return c.LockOpts(opts) -} - -// LockOpts returns a handle to a lock struct which can be used -// to acquire and release the mutex. The key used must have -// write permissions. -func (c *Client) LockOpts(opts *LockOptions) (*Lock, error) { - if opts.Key == "" { - return nil, fmt.Errorf("missing key") - } - if opts.SessionName == "" { - opts.SessionName = DefaultLockSessionName - } - if opts.SessionTTL == "" { - opts.SessionTTL = DefaultLockSessionTTL - } else { - if _, err := time.ParseDuration(opts.SessionTTL); err != nil { - return nil, fmt.Errorf("invalid SessionTTL: %v", err) - } - } - if opts.MonitorRetryTime == 0 { - opts.MonitorRetryTime = DefaultMonitorRetryTime - } - if opts.LockWaitTime == 0 { - opts.LockWaitTime = DefaultLockWaitTime - } - l := &Lock{ - c: c, - opts: opts, - } - return l, nil -} - -// Lock attempts to acquire the lock and blocks while doing so. -// Providing a non-nil stopCh can be used to abort the lock attempt. -// Returns a channel that is closed if our lock is lost or an error. -// This channel could be closed at any time due to session invalidation, -// communication errors, operator intervention, etc. It is NOT safe to -// assume that the lock is held until Unlock() unless the Session is specifically -// created without any associated health checks. By default Consul sessions -// prefer liveness over safety and an application must be able to handle -// the lock being lost. 
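A hedged sketch of how the Lock API above is typically driven (not from this diff; the key name is illustrative and the client setup is assumed):

package main

import (
	"log"
	"time"

	"github.com/hashicorp/consul/api"
)

func main() {
	// Assumed setup: a client against the default local agent.
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// LockKey builds a Lock with default session settings; the key is made up.
	lock, err := client.LockKey("sql-runner/leader")
	if err != nil {
		log.Fatal(err)
	}

	// Lock blocks until acquired; the returned channel closes if leadership
	// is ever lost (session invalidation, agent outage, ...).
	lostCh, err := lock.Lock(nil)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("became leader; do leader-only work here")

	// Either react to losing the lock, or finish and release voluntarily.
	select {
	case <-lostCh:
		log.Println("leadership lost")
	case <-time.After(10 * time.Second):
		if err := lock.Unlock(); err != nil {
			log.Println("unlock:", err)
		}
	}
}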
-func (l *Lock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) { - // Hold the lock as we try to acquire - l.l.Lock() - defer l.l.Unlock() - - // Check if we already hold the lock - if l.isHeld { - return nil, ErrLockHeld - } - - // Check if we need to create a session first - l.lockSession = l.opts.Session - if l.lockSession == "" { - s, err := l.createSession() - if err != nil { - return nil, fmt.Errorf("failed to create session: %v", err) - } - - l.sessionRenew = make(chan struct{}) - l.lockSession = s - session := l.c.Session() - go session.RenewPeriodic(l.opts.SessionTTL, s, nil, l.sessionRenew) - - // If we fail to acquire the lock, cleanup the session - defer func() { - if !l.isHeld { - close(l.sessionRenew) - l.sessionRenew = nil - } - }() - } - - // Setup the query options - kv := l.c.KV() - qOpts := &QueryOptions{ - WaitTime: l.opts.LockWaitTime, - } - - start := time.Now() - attempts := 0 -WAIT: - // Check if we should quit - select { - case <-stopCh: - return nil, nil - default: - } - - // Handle the one-shot mode. - if l.opts.LockTryOnce && attempts > 0 { - elapsed := time.Since(start) - if elapsed > l.opts.LockWaitTime { - return nil, nil - } - - // Query wait time should not exceed the lock wait time - qOpts.WaitTime = l.opts.LockWaitTime - elapsed - } - attempts++ - - // Look for an existing lock, blocking until not taken - pair, meta, err := kv.Get(l.opts.Key, qOpts) - if err != nil { - return nil, fmt.Errorf("failed to read lock: %v", err) - } - if pair != nil && pair.Flags != LockFlagValue { - return nil, ErrLockConflict - } - locked := false - if pair != nil && pair.Session == l.lockSession { - goto HELD - } - if pair != nil && pair.Session != "" { - qOpts.WaitIndex = meta.LastIndex - goto WAIT - } - - // Try to acquire the lock - pair = l.lockEntry(l.lockSession) - locked, _, err = kv.Acquire(pair, nil) - if err != nil { - return nil, fmt.Errorf("failed to acquire lock: %v", err) - } - - // Handle the case of not getting the lock - if !locked { - // Determine why the lock failed - qOpts.WaitIndex = 0 - pair, meta, err = kv.Get(l.opts.Key, qOpts) - if pair != nil && pair.Session != "" { - // If the session is not null, this means that a wait can safely happen - // using a long poll - qOpts.WaitIndex = meta.LastIndex - goto WAIT - } else { - // If the session is empty and the lock failed to acquire, then it means - // a lock-delay is in effect and a timed wait must be used - select { - case <-time.After(DefaultLockRetryTime): - goto WAIT - case <-stopCh: - return nil, nil - } - } - } - -HELD: - // Watch to ensure we maintain leadership - leaderCh := make(chan struct{}) - go l.monitorLock(l.lockSession, leaderCh) - - // Set that we own the lock - l.isHeld = true - - // Locked! All done - return leaderCh, nil -} - -// Unlock releases the lock. It is an error to call this -// if the lock is not currently held. 
-func (l *Lock) Unlock() error { - // Hold the lock as we try to release - l.l.Lock() - defer l.l.Unlock() - - // Ensure the lock is actually held - if !l.isHeld { - return ErrLockNotHeld - } - - // Set that we no longer own the lock - l.isHeld = false - - // Stop the session renew - if l.sessionRenew != nil { - defer func() { - close(l.sessionRenew) - l.sessionRenew = nil - }() - } - - // Get the lock entry, and clear the lock session - lockEnt := l.lockEntry(l.lockSession) - l.lockSession = "" - - // Release the lock explicitly - kv := l.c.KV() - _, _, err := kv.Release(lockEnt, nil) - if err != nil { - return fmt.Errorf("failed to release lock: %v", err) - } - return nil -} - -// Destroy is used to cleanup the lock entry. It is not necessary -// to invoke. It will fail if the lock is in use. -func (l *Lock) Destroy() error { - // Hold the lock as we try to release - l.l.Lock() - defer l.l.Unlock() - - // Check if we already hold the lock - if l.isHeld { - return ErrLockHeld - } - - // Look for an existing lock - kv := l.c.KV() - pair, _, err := kv.Get(l.opts.Key, nil) - if err != nil { - return fmt.Errorf("failed to read lock: %v", err) - } - - // Nothing to do if the lock does not exist - if pair == nil { - return nil - } - - // Check for possible flag conflict - if pair.Flags != LockFlagValue { - return ErrLockConflict - } - - // Check if it is in use - if pair.Session != "" { - return ErrLockInUse - } - - // Attempt the delete - didRemove, _, err := kv.DeleteCAS(pair, nil) - if err != nil { - return fmt.Errorf("failed to remove lock: %v", err) - } - if !didRemove { - return ErrLockInUse - } - return nil -} - -// createSession is used to create a new managed session -func (l *Lock) createSession() (string, error) { - session := l.c.Session() - se := l.opts.SessionOpts - if se == nil { - se = &SessionEntry{ - Name: l.opts.SessionName, - TTL: l.opts.SessionTTL, - } - } - id, _, err := session.Create(se, nil) - if err != nil { - return "", err - } - return id, nil -} - -// lockEntry returns a formatted KVPair for the lock -func (l *Lock) lockEntry(session string) *KVPair { - return &KVPair{ - Key: l.opts.Key, - Value: l.opts.Value, - Session: session, - Flags: LockFlagValue, - } -} - -// monitorLock is a long running routine to monitor a lock ownership -// It closes the stopCh if we lose our leadership. -func (l *Lock) monitorLock(session string, stopCh chan struct{}) { - defer close(stopCh) - kv := l.c.KV() - opts := &QueryOptions{RequireConsistent: true} -WAIT: - retries := l.opts.MonitorRetries -RETRY: - pair, meta, err := kv.Get(l.opts.Key, opts) - if err != nil { - // If configured we can try to ride out a brief Consul unavailability - // by doing retries. Note that we have to attempt the retry in a non- - // blocking fashion so that we have a clean place to reset the retry - // counter if service is restored. - if retries > 0 && IsRetryableError(err) { - time.Sleep(l.opts.MonitorRetryTime) - retries-- - opts.WaitIndex = 0 - goto RETRY - } - return - } - if pair != nil && pair.Session == session { - opts.WaitIndex = meta.LastIndex - goto WAIT - } -} diff --git a/vendor/github.com/hashicorp/consul/api/operator.go b/vendor/github.com/hashicorp/consul/api/operator.go deleted file mode 100644 index 079e224..0000000 --- a/vendor/github.com/hashicorp/consul/api/operator.go +++ /dev/null @@ -1,11 +0,0 @@ -package api - -// Operator can be used to perform low-level operator tasks for Consul. -type Operator struct { - c *Client -} - -// Operator returns a handle to the operator endpoints. 
-func (c *Client) Operator() *Operator { - return &Operator{c} -} diff --git a/vendor/github.com/hashicorp/consul/api/operator_area.go b/vendor/github.com/hashicorp/consul/api/operator_area.go deleted file mode 100644 index a630b69..0000000 --- a/vendor/github.com/hashicorp/consul/api/operator_area.go +++ /dev/null @@ -1,193 +0,0 @@ -// The /v1/operator/area endpoints are available only in Consul Enterprise and -// interact with its network area subsystem. Network areas are used to link -// together Consul servers in different Consul datacenters. With network areas, -// Consul datacenters can be linked together in ways other than a fully-connected -// mesh, as is required for Consul's WAN. -package api - -import ( - "net" - "time" -) - -// Area defines a network area. -type Area struct { - // ID is the identifier for an area (a UUID). This must be left empty - // when creating a new area. - ID string - - // PeerDatacenter is the peer Consul datacenter that will make up the - // other side of this network area. Network areas always involve a pair - // of datacenters: the datacenter where the area was created, and the - // peer datacenter. This is required. - PeerDatacenter string - - // RetryJoin specifies the address of Consul servers to join to, such as - // IPs or hostnames with an optional port number. This is optional. - RetryJoin []string - - // UseTLS specifies whether gossip over this area should be encrypted with TLS - // if possible. - UseTLS bool -} - -// AreaJoinResponse is returned when a join occurs and gives the result for each -// address. -type AreaJoinResponse struct { - // The address that was joined. - Address string - - // Whether or not the join was a success. - Joined bool - - // If we couldn't join, this is the message with information. - Error string -} - -// SerfMember is a generic structure for reporting information about members in -// a Serf cluster. This is only used by the area endpoints right now, but this -// could be expanded to other endpoints in the future. -type SerfMember struct { - // ID is the node identifier (a UUID). - ID string - - // Name is the node name. - Name string - - // Addr has the IP address. - Addr net.IP - - // Port is the RPC port. - Port uint16 - - // Datacenter is the DC name. - Datacenter string - - // Role is "client", "server", or "unknown". - Role string - - // Build has the version of the Consul agent. - Build string - - // Protocol is the protocol of the Consul agent. - Protocol int - - // Status is the Serf health status "none", "alive", "leaving", "left", - // or "failed". - Status string - - // RTT is the estimated round trip time from the server handling the - // request to this member. This will be negative if no RTT estimate - // is available. - RTT time.Duration -} - -// AreaCreate will create a new network area. The ID in the given structure must -// be empty and a generated ID will be returned on success. -func (op *Operator) AreaCreate(area *Area, q *WriteOptions) (string, *WriteMeta, error) { - r := op.c.newRequest("POST", "/v1/operator/area") - r.setWriteOptions(q) - r.obj = area - rtt, resp, err := requireOK(op.c.doRequest(r)) - if err != nil { - return "", nil, err - } - defer resp.Body.Close() - - wm := &WriteMeta{} - wm.RequestTime = rtt - - var out struct{ ID string } - if err := decodeBody(resp, &out); err != nil { - return "", nil, err - } - return out.ID, wm, nil -} - -// AreaUpdate will update the configuration of the network area with the given ID. 
-func (op *Operator) AreaUpdate(areaID string, area *Area, q *WriteOptions) (string, *WriteMeta, error) { - r := op.c.newRequest("PUT", "/v1/operator/area/"+areaID) - r.setWriteOptions(q) - r.obj = area - rtt, resp, err := requireOK(op.c.doRequest(r)) - if err != nil { - return "", nil, err - } - defer resp.Body.Close() - - wm := &WriteMeta{} - wm.RequestTime = rtt - - var out struct{ ID string } - if err := decodeBody(resp, &out); err != nil { - return "", nil, err - } - return out.ID, wm, nil -} - -// AreaGet returns a single network area. -func (op *Operator) AreaGet(areaID string, q *QueryOptions) ([]*Area, *QueryMeta, error) { - var out []*Area - qm, err := op.c.query("/v1/operator/area/"+areaID, &out, q) - if err != nil { - return nil, nil, err - } - return out, qm, nil -} - -// AreaList returns all the available network areas. -func (op *Operator) AreaList(q *QueryOptions) ([]*Area, *QueryMeta, error) { - var out []*Area - qm, err := op.c.query("/v1/operator/area", &out, q) - if err != nil { - return nil, nil, err - } - return out, qm, nil -} - -// AreaDelete deletes the given network area. -func (op *Operator) AreaDelete(areaID string, q *WriteOptions) (*WriteMeta, error) { - r := op.c.newRequest("DELETE", "/v1/operator/area/"+areaID) - r.setWriteOptions(q) - rtt, resp, err := requireOK(op.c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - wm := &WriteMeta{} - wm.RequestTime = rtt - return wm, nil -} - -// AreaJoin attempts to join the given set of join addresses to the given -// network area. See the Area structure for details about join addresses. -func (op *Operator) AreaJoin(areaID string, addresses []string, q *WriteOptions) ([]*AreaJoinResponse, *WriteMeta, error) { - r := op.c.newRequest("PUT", "/v1/operator/area/"+areaID+"/join") - r.setWriteOptions(q) - r.obj = addresses - rtt, resp, err := requireOK(op.c.doRequest(r)) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - wm := &WriteMeta{} - wm.RequestTime = rtt - - var out []*AreaJoinResponse - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return out, wm, nil -} - -// AreaMembers lists the Serf information about the members in the given area. -func (op *Operator) AreaMembers(areaID string, q *QueryOptions) ([]*SerfMember, *QueryMeta, error) { - var out []*SerfMember - qm, err := op.c.query("/v1/operator/area/"+areaID+"/members", &out, q) - if err != nil { - return nil, nil, err - } - return out, qm, nil -} diff --git a/vendor/github.com/hashicorp/consul/api/operator_autopilot.go b/vendor/github.com/hashicorp/consul/api/operator_autopilot.go deleted file mode 100644 index b179406..0000000 --- a/vendor/github.com/hashicorp/consul/api/operator_autopilot.go +++ /dev/null @@ -1,219 +0,0 @@ -package api - -import ( - "bytes" - "fmt" - "io" - "strconv" - "strings" - "time" -) - -// AutopilotConfiguration is used for querying/setting the Autopilot configuration. -// Autopilot helps manage operator tasks related to Consul servers like removing -// failed servers from the Raft quorum. -type AutopilotConfiguration struct { - // CleanupDeadServers controls whether to remove dead servers from the Raft - // peer list when a new server joins - CleanupDeadServers bool - - // LastContactThreshold is the limit on the amount of time a server can go - // without leader contact before being considered unhealthy. 
- LastContactThreshold *ReadableDuration - - // MaxTrailingLogs is the number of entries in the Raft Log that a server can - // be behind before being considered unhealthy. - MaxTrailingLogs uint64 - - // ServerStabilizationTime is the minimum amount of time a server must be - // in a stable, healthy state before it can be added to the cluster. Only - // applicable with Raft protocol version 3 or higher. - ServerStabilizationTime *ReadableDuration - - // (Enterprise-only) RedundancyZoneTag is the node tag to use for separating - // servers into zones for redundancy. If left blank, this feature will be disabled. - RedundancyZoneTag string - - // (Enterprise-only) DisableUpgradeMigration will disable Autopilot's upgrade migration - // strategy of waiting until enough newer-versioned servers have been added to the - // cluster before promoting them to voters. - DisableUpgradeMigration bool - - // (Enterprise-only) UpgradeVersionTag is the node tag to use for version info when - // performing upgrade migrations. If left blank, the Consul version will be used. - UpgradeVersionTag string - - // CreateIndex holds the index corresponding to the creation of this configuration. - // This is a read-only field. - CreateIndex uint64 - - // ModifyIndex will be set to the index of the last update when retrieving the - // Autopilot configuration. Resubmitting a configuration with - // AutopilotCASConfiguration will perform a check-and-set operation which ensures - // there hasn't been a subsequent update since the configuration was retrieved. - ModifyIndex uint64 -} - -// ServerHealth is the health (from the leader's point of view) of a server. -type ServerHealth struct { - // ID is the raft ID of the server. - ID string - - // Name is the node name of the server. - Name string - - // Address is the address of the server. - Address string - - // The status of the SerfHealth check for the server. - SerfStatus string - - // Version is the Consul version of the server. - Version string - - // Leader is whether this server is currently the leader. - Leader bool - - // LastContact is the time since this node's last contact with the leader. - LastContact *ReadableDuration - - // LastTerm is the highest leader term this server has a record of in its Raft log. - LastTerm uint64 - - // LastIndex is the last log index this server has a record of in its Raft log. - LastIndex uint64 - - // Healthy is whether or not the server is healthy according to the current - // Autopilot config. - Healthy bool - - // Voter is whether this is a voting server. - Voter bool - - // StableSince is the last time this server's Healthy value changed. - StableSince time.Time -} - -// OperatorHealthReply is a representation of the overall health of the cluster -type OperatorHealthReply struct { - // Healthy is true if all the servers in the cluster are healthy. - Healthy bool - - // FailureTolerance is the number of healthy servers that could be lost without - // an outage occurring. - FailureTolerance int - - // Servers holds the health of each server. - Servers []ServerHealth -} - -// ReadableDuration is a duration type that is serialized to JSON in human readable format. 
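ReadableDuration, defined next, serializes as a string like "200ms" rather than raw nanoseconds. A small illustrative round trip (not part of this diff):

package main

import (
	"encoding/json"
	"fmt"
	"time"

	"github.com/hashicorp/consul/api"
)

func main() {
	// Marshal uses the custom MarshalJSON defined below, producing "200ms".
	d := api.NewReadableDuration(200 * time.Millisecond)
	out, err := json.Marshal(d)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // "200ms"

	// Unmarshal accepts any quoted time.ParseDuration string.
	var back api.ReadableDuration
	if err := json.Unmarshal([]byte(`"1m30s"`), &back); err != nil {
		panic(err)
	}
	fmt.Println(back.Duration()) // 1m30s
}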
-type ReadableDuration time.Duration - -func NewReadableDuration(dur time.Duration) *ReadableDuration { - d := ReadableDuration(dur) - return &d -} - -func (d *ReadableDuration) String() string { - return d.Duration().String() -} - -func (d *ReadableDuration) Duration() time.Duration { - if d == nil { - return time.Duration(0) - } - return time.Duration(*d) -} - -func (d *ReadableDuration) MarshalJSON() ([]byte, error) { - return []byte(fmt.Sprintf(`"%s"`, d.Duration().String())), nil -} - -func (d *ReadableDuration) UnmarshalJSON(raw []byte) error { - if d == nil { - return fmt.Errorf("cannot unmarshal to nil pointer") - } - - str := string(raw) - if len(str) < 2 || str[0] != '"' || str[len(str)-1] != '"' { - return fmt.Errorf("must be enclosed with quotes: %s", str) - } - dur, err := time.ParseDuration(str[1 : len(str)-1]) - if err != nil { - return err - } - *d = ReadableDuration(dur) - return nil -} - -// AutopilotGetConfiguration is used to query the current Autopilot configuration. -func (op *Operator) AutopilotGetConfiguration(q *QueryOptions) (*AutopilotConfiguration, error) { - r := op.c.newRequest("GET", "/v1/operator/autopilot/configuration") - r.setQueryOptions(q) - _, resp, err := requireOK(op.c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var out AutopilotConfiguration - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - - return &out, nil -} - -// AutopilotSetConfiguration is used to set the current Autopilot configuration. -func (op *Operator) AutopilotSetConfiguration(conf *AutopilotConfiguration, q *WriteOptions) error { - r := op.c.newRequest("PUT", "/v1/operator/autopilot/configuration") - r.setWriteOptions(q) - r.obj = conf - _, resp, err := requireOK(op.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// AutopilotCASConfiguration is used to perform a Check-And-Set update on the -// Autopilot configuration. The ModifyIndex value will be respected. Returns -// true on success or false on failures. 
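The check-and-set flow described above amounts to read, modify, CAS. A sketch (illustrative only, not part of this diff; assumes a default local agent):

package main

import (
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	op := client.Operator()

	// Read-modify-write with check-and-set: the ModifyIndex captured on
	// the read guards against a concurrent update, as documented above.
	conf, err := op.AutopilotGetConfiguration(nil)
	if err != nil {
		log.Fatal(err)
	}
	conf.CleanupDeadServers = true

	ok, err := op.AutopilotCASConfiguration(conf, nil)
	if err != nil {
		log.Fatal(err)
	}
	if !ok {
		log.Println("lost the race; re-read and retry")
	}
}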
-func (op *Operator) AutopilotCASConfiguration(conf *AutopilotConfiguration, q *WriteOptions) (bool, error) { - r := op.c.newRequest("PUT", "/v1/operator/autopilot/configuration") - r.setWriteOptions(q) - r.params.Set("cas", strconv.FormatUint(conf.ModifyIndex, 10)) - r.obj = conf - _, resp, err := requireOK(op.c.doRequest(r)) - if err != nil { - return false, err - } - defer resp.Body.Close() - - var buf bytes.Buffer - if _, err := io.Copy(&buf, resp.Body); err != nil { - return false, fmt.Errorf("Failed to read response: %v", err) - } - res := strings.Contains(buf.String(), "true") - - return res, nil -} - -// AutopilotServerHealth is used to query the health of the servers in the cluster. -func (op *Operator) AutopilotServerHealth(q *QueryOptions) (*OperatorHealthReply, error) { - r := op.c.newRequest("GET", "/v1/operator/autopilot/health") - r.setQueryOptions(q) - _, resp, err := requireOK(op.c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var out OperatorHealthReply - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - return &out, nil -} diff --git a/vendor/github.com/hashicorp/consul/api/operator_keyring.go b/vendor/github.com/hashicorp/consul/api/operator_keyring.go deleted file mode 100644 index 6b61429..0000000 --- a/vendor/github.com/hashicorp/consul/api/operator_keyring.go +++ /dev/null @@ -1,86 +0,0 @@ -package api - -// keyringRequest is used for performing Keyring operations -type keyringRequest struct { - Key string -} - -// KeyringResponse is returned when listing the gossip encryption keys -type KeyringResponse struct { - // Whether this response is for a WAN ring - WAN bool - - // The datacenter name this request corresponds to - Datacenter string - - // Segment has the network segment this request corresponds to. - Segment string - - // A map of the encryption keys to the number of nodes they're installed on - Keys map[string]int - - // The total number of nodes in this ring - NumNodes int -} - -// KeyringInstall is used to install a new gossip encryption key into the cluster -func (op *Operator) KeyringInstall(key string, q *WriteOptions) error { - r := op.c.newRequest("POST", "/v1/operator/keyring") - r.setWriteOptions(q) - r.obj = keyringRequest{ - Key: key, - } - _, resp, err := requireOK(op.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// KeyringList is used to list the gossip keys installed in the cluster -func (op *Operator) KeyringList(q *QueryOptions) ([]*KeyringResponse, error) { - r := op.c.newRequest("GET", "/v1/operator/keyring") - r.setQueryOptions(q) - _, resp, err := requireOK(op.c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var out []*KeyringResponse - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - return out, nil -} - -// KeyringRemove is used to remove a gossip encryption key from the cluster -func (op *Operator) KeyringRemove(key string, q *WriteOptions) error { - r := op.c.newRequest("DELETE", "/v1/operator/keyring") - r.setWriteOptions(q) - r.obj = keyringRequest{ - Key: key, - } - _, resp, err := requireOK(op.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// KeyringUse is used to change the active gossip encryption key -func (op *Operator) KeyringUse(key string, q *WriteOptions) error { - r := op.c.newRequest("PUT", "/v1/operator/keyring") - r.setWriteOptions(q) - r.obj = keyringRequest{ - Key: key, - } - _, resp, err := requireOK(op.c.doRequest(r)) - if err != nil { - return err - } - 
resp.Body.Close() - return nil -} diff --git a/vendor/github.com/hashicorp/consul/api/operator_raft.go b/vendor/github.com/hashicorp/consul/api/operator_raft.go deleted file mode 100644 index a9844df..0000000 --- a/vendor/github.com/hashicorp/consul/api/operator_raft.go +++ /dev/null @@ -1,89 +0,0 @@ -package api - -// RaftServer has information about a server in the Raft configuration. -type RaftServer struct { - // ID is the unique ID for the server. These are currently the same - // as the address, but they will be changed to a real GUID in a future - // release of Consul. - ID string - - // Node is the node name of the server, as known by Consul, or this - // will be set to "(unknown)" otherwise. - Node string - - // Address is the IP:port of the server, used for Raft communications. - Address string - - // Leader is true if this server is the current cluster leader. - Leader bool - - // Protocol version is the raft protocol version used by the server - ProtocolVersion string - - // Voter is true if this server has a vote in the cluster. This might - // be false if the server is staging and still coming online, or if - // it's a non-voting server, which will be added in a future release of - // Consul. - Voter bool -} - -// RaftConfiguration is returned when querying for the current Raft configuration. -type RaftConfiguration struct { - // Servers has the list of servers in the Raft configuration. - Servers []*RaftServer - - // Index has the Raft index of this configuration. - Index uint64 -} - -// RaftGetConfiguration is used to query the current Raft peer set. -func (op *Operator) RaftGetConfiguration(q *QueryOptions) (*RaftConfiguration, error) { - r := op.c.newRequest("GET", "/v1/operator/raft/configuration") - r.setQueryOptions(q) - _, resp, err := requireOK(op.c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var out RaftConfiguration - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - return &out, nil -} - -// RaftRemovePeerByAddress is used to kick a stale peer (one that is in the Raft -// quorum but no longer known to Serf or the catalog) by address in the form of -// "IP:port". -func (op *Operator) RaftRemovePeerByAddress(address string, q *WriteOptions) error { - r := op.c.newRequest("DELETE", "/v1/operator/raft/peer") - r.setWriteOptions(q) - - r.params.Set("address", string(address)) - - _, resp, err := requireOK(op.c.doRequest(r)) - if err != nil { - return err - } - - resp.Body.Close() - return nil -} - -// RaftRemovePeerByID is used to kick a stale peer (one that is in the Raft -// quorum but no longer known to Serf or the catalog) by ID. -func (op *Operator) RaftRemovePeerByID(id string, q *WriteOptions) error { - r := op.c.newRequest("DELETE", "/v1/operator/raft/peer") - r.setWriteOptions(q) - - r.params.Set("id", string(id)) - - _, resp, err := requireOK(op.c.doRequest(r)) - if err != nil { - return err - } - - resp.Body.Close() - return nil -} diff --git a/vendor/github.com/hashicorp/consul/api/operator_segment.go b/vendor/github.com/hashicorp/consul/api/operator_segment.go deleted file mode 100644 index 92b05d3..0000000 --- a/vendor/github.com/hashicorp/consul/api/operator_segment.go +++ /dev/null @@ -1,11 +0,0 @@ -package api - -// SegmentList returns all the available LAN segments. 
-func (op *Operator) SegmentList(q *QueryOptions) ([]string, *QueryMeta, error) { - var out []string - qm, err := op.c.query("/v1/operator/segment", &out, q) - if err != nil { - return nil, nil, err - } - return out, qm, nil -} diff --git a/vendor/github.com/hashicorp/consul/api/prepared_query.go b/vendor/github.com/hashicorp/consul/api/prepared_query.go deleted file mode 100644 index 8bb1004..0000000 --- a/vendor/github.com/hashicorp/consul/api/prepared_query.go +++ /dev/null @@ -1,212 +0,0 @@ -package api - -// QueryDatacenterOptions sets options about how we fail over if there are no -// healthy nodes in the local datacenter. -type QueryDatacenterOptions struct { - // NearestN is set to the number of remote datacenters to try, based on - // network coordinates. - NearestN int - - // Datacenters is a fixed list of datacenters to try after NearestN. We - // never try a datacenter multiple times, so those are subtracted from - // this list before proceeding. - Datacenters []string -} - -// QueryDNSOptions controls settings when query results are served over DNS. -type QueryDNSOptions struct { - // TTL is the time to live for the served DNS results. - TTL string -} - -// ServiceQuery is used to query for a set of healthy nodes offering a specific -// service. -type ServiceQuery struct { - // Service is the service to query. - Service string - - // Near allows baking in the name of a node to automatically distance- - // sort from. The magic "_agent" value is supported, which sorts near - // the agent which initiated the request by default. - Near string - - // Failover controls what we do if there are no healthy nodes in the - // local datacenter. - Failover QueryDatacenterOptions - - // IgnoreCheckIDs is an optional list of health check IDs to ignore when - // considering which nodes are healthy. It is useful as an emergency measure - // to temporarily override some health check that is producing false negatives - // for example. - IgnoreCheckIDs []string - - // If OnlyPassing is true then we will only include nodes with passing - // health checks (critical AND warning checks will cause a node to be - // discarded) - OnlyPassing bool - - // Tags are a set of required and/or disallowed tags. If a tag is in - // this list it must be present. If the tag is preceded with "!" then - // it is disallowed. - Tags []string - - // NodeMeta is a map of required node metadata fields. If a key/value - // pair is in this map it must be present on the node in order for the - // service entry to be returned. - NodeMeta map[string]string - - // Connect if true will filter the prepared query results to only - // include Connect-capable services. These include both native services - // and proxies for matching services. Note that if a proxy matches, - // the constraints in the query above (Near, OnlyPassing, etc.) apply - // to the _proxy_ and not the service being proxied. In practice, proxies - // should be directly next to their services so this isn't an issue. - Connect bool -} - -// QueryTemplate carries the arguments for creating a templated query. -type QueryTemplate struct { - // Type specifies the type of the query template. Currently only - // "name_prefix_match" is supported. This field is required. - Type string - - // Regexp allows specifying a regex pattern to match against the name - // of the query being executed. - Regexp string -} - -// PreparedQueryDefinition defines a complete prepared query. 
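A brief sketch of creating and executing a prepared query with this client (illustrative only, not part of this diff; the "web" service name is made up):

package main

import (
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	pq := client.PreparedQuery()

	// Create a query for passing instances of a hypothetical "web" service.
	def := &api.PreparedQueryDefinition{
		Name: "web-passing",
		Service: api.ServiceQuery{
			Service:     "web",
			OnlyPassing: true,
		},
	}
	id, _, err := pq.Create(def, nil)
	if err != nil {
		log.Fatal(err)
	}

	// Execute by ID; a name would also work, per Execute's contract.
	resp, _, err := pq.Execute(id, nil)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("%d nodes from datacenter %s", len(resp.Nodes), resp.Datacenter)
}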
-type PreparedQueryDefinition struct { - // ID is the UUID-based ID for the query, always generated by Consul. - ID string - - // Name is an optional friendly name for the query supplied by the - // user. NOTE - if this feature is used then it will reduce the security - // of any read ACL associated with this query/service since this name - // can be used to locate nodes without supplying any ACL. - Name string - - // Session is an optional session to tie this query's lifetime to. If - // this is omitted then the query will not expire. - Session string - - // Token is the ACL token used when the query was created, and it is - // used when a query is subsequently executed. This token, or a token - // with management privileges, must be used to change the query later. - Token string - - // Service defines a service query (leaving things open for other types - // later). - Service ServiceQuery - - // DNS has options that control how the results of this query are - // served over DNS. - DNS QueryDNSOptions - - // Template is used to pass through the arguments for creating a - // prepared query with an attached template. If a template is given, - // interpolations are possible in other struct fields. - Template QueryTemplate -} - -// PreparedQueryExecuteResponse has the results of executing a query. -type PreparedQueryExecuteResponse struct { - // Service is the service that was queried. - Service string - - // Nodes has the nodes that were output by the query. - Nodes []ServiceEntry - - // DNS has the options for serving these results over DNS. - DNS QueryDNSOptions - - // Datacenter is the datacenter that these results came from. - Datacenter string - - // Failovers is a count of how many times we had to query a remote - // datacenter. - Failovers int -} - -// PreparedQuery can be used to query the prepared query endpoints. -type PreparedQuery struct { - c *Client -} - -// PreparedQuery returns a handle to the prepared query endpoints. -func (c *Client) PreparedQuery() *PreparedQuery { - return &PreparedQuery{c} -} - -// Create makes a new prepared query. The ID of the new query is returned. -func (c *PreparedQuery) Create(query *PreparedQueryDefinition, q *WriteOptions) (string, *WriteMeta, error) { - r := c.c.newRequest("POST", "/v1/query") - r.setWriteOptions(q) - r.obj = query - rtt, resp, err := requireOK(c.c.doRequest(r)) - if err != nil { - return "", nil, err - } - defer resp.Body.Close() - - wm := &WriteMeta{} - wm.RequestTime = rtt - - var out struct{ ID string } - if err := decodeBody(resp, &out); err != nil { - return "", nil, err - } - return out.ID, wm, nil -} - -// Update makes updates to an existing prepared query. -func (c *PreparedQuery) Update(query *PreparedQueryDefinition, q *WriteOptions) (*WriteMeta, error) { - return c.c.write("/v1/query/"+query.ID, query, nil, q) -} - -// List is used to fetch all the prepared queries (always requires a management -// token). -func (c *PreparedQuery) List(q *QueryOptions) ([]*PreparedQueryDefinition, *QueryMeta, error) { - var out []*PreparedQueryDefinition - qm, err := c.c.query("/v1/query", &out, q) - if err != nil { - return nil, nil, err - } - return out, qm, nil -} - -// Get is used to fetch a specific prepared query. 
-func (c *PreparedQuery) Get(queryID string, q *QueryOptions) ([]*PreparedQueryDefinition, *QueryMeta, error) { - var out []*PreparedQueryDefinition - qm, err := c.c.query("/v1/query/"+queryID, &out, q) - if err != nil { - return nil, nil, err - } - return out, qm, nil -} - -// Delete is used to delete a specific prepared query. -func (c *PreparedQuery) Delete(queryID string, q *WriteOptions) (*WriteMeta, error) { - r := c.c.newRequest("DELETE", "/v1/query/"+queryID) - r.setWriteOptions(q) - rtt, resp, err := requireOK(c.c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - wm := &WriteMeta{} - wm.RequestTime = rtt - return wm, nil -} - -// Execute is used to execute a specific prepared query. You can execute using -// a query ID or name. -func (c *PreparedQuery) Execute(queryIDOrName string, q *QueryOptions) (*PreparedQueryExecuteResponse, *QueryMeta, error) { - var out *PreparedQueryExecuteResponse - qm, err := c.c.query("/v1/query/"+queryIDOrName+"/execute", &out, q) - if err != nil { - return nil, nil, err - } - return out, qm, nil -} diff --git a/vendor/github.com/hashicorp/consul/api/raw.go b/vendor/github.com/hashicorp/consul/api/raw.go deleted file mode 100644 index 745a208..0000000 --- a/vendor/github.com/hashicorp/consul/api/raw.go +++ /dev/null @@ -1,24 +0,0 @@ -package api - -// Raw can be used to do raw queries against custom endpoints -type Raw struct { - c *Client -} - -// Raw returns a handle to query endpoints -func (c *Client) Raw() *Raw { - return &Raw{c} -} - -// Query is used to do a GET request against an endpoint -// and deserialize the response into an interface using -// standard Consul conventions. -func (raw *Raw) Query(endpoint string, out interface{}, q *QueryOptions) (*QueryMeta, error) { - return raw.c.query(endpoint, out, q) -} - -// Write is used to do a PUT request against an endpoint -// and serialize/deserialize using the standard Consul conventions. -func (raw *Raw) Write(endpoint string, in, out interface{}, q *WriteOptions) (*WriteMeta, error) { - return raw.c.write(endpoint, in, out, q) -} diff --git a/vendor/github.com/hashicorp/consul/api/semaphore.go b/vendor/github.com/hashicorp/consul/api/semaphore.go deleted file mode 100644 index bc4f885..0000000 --- a/vendor/github.com/hashicorp/consul/api/semaphore.go +++ /dev/null @@ -1,514 +0,0 @@ -package api - -import ( - "encoding/json" - "fmt" - "path" - "sync" - "time" -) - -const ( - // DefaultSemaphoreSessionName is the Session Name we assign if none is provided - DefaultSemaphoreSessionName = "Consul API Semaphore" - - // DefaultSemaphoreSessionTTL is the default session TTL if no Session is provided - // when creating a new Semaphore. This is used because we do not have any - // other check to depend upon. - DefaultSemaphoreSessionTTL = "15s" - - // DefaultSemaphoreWaitTime is how long we block for at a time to check if semaphore - // acquisition is possible. This affects the minimum time it takes to cancel - // a Semaphore acquisition. - DefaultSemaphoreWaitTime = 15 * time.Second - - // DefaultSemaphoreKey is the key used within the prefix to - // use for coordination between all the contenders. - DefaultSemaphoreKey = ".lock" - - // SemaphoreFlagValue is a magic flag we set to indicate a key - // is being used for a semaphore. It is used to detect a potential - // conflict with a lock. 
- SemaphoreFlagValue = 0xe0f69a2baa414de0 -) - -var ( - // ErrSemaphoreHeld is returned if we attempt to double lock - ErrSemaphoreHeld = fmt.Errorf("Semaphore already held") - - // ErrSemaphoreNotHeld is returned if we attempt to unlock a semaphore - // that we do not hold. - ErrSemaphoreNotHeld = fmt.Errorf("Semaphore not held") - - // ErrSemaphoreInUse is returned if we attempt to destroy a semaphore - // that is in use. - ErrSemaphoreInUse = fmt.Errorf("Semaphore in use") - - // ErrSemaphoreConflict is returned if the flags on a key - // used for a semaphore do not match expectation - ErrSemaphoreConflict = fmt.Errorf("Existing key does not match semaphore use") -) - -// Semaphore is used to implement a distributed semaphore -// using the Consul KV primitives. -type Semaphore struct { - c *Client - opts *SemaphoreOptions - - isHeld bool - sessionRenew chan struct{} - lockSession string - l sync.Mutex -} - -// SemaphoreOptions is used to parameterize the Semaphore -type SemaphoreOptions struct { - Prefix string // Must be set and have write permissions - Limit int // Must be set, and be positive - Value []byte // Optional, value to associate with the contender entry - Session string // Optional, created if not specified - SessionName string // Optional, defaults to DefaultSemaphoreSessionName - SessionTTL string // Optional, defaults to DefaultSemaphoreSessionTTL - MonitorRetries int // Optional, defaults to 0 which means no retries - MonitorRetryTime time.Duration // Optional, defaults to DefaultMonitorRetryTime - SemaphoreWaitTime time.Duration // Optional, defaults to DefaultSemaphoreWaitTime - SemaphoreTryOnce bool // Optional, defaults to false which means try forever -} - -// semaphoreLock is written under the DefaultSemaphoreKey and -// is used to coordinate between all the contenders. -type semaphoreLock struct { - // Limit is the integer limit of holders. This is used to - // verify that all the holders agree on the value. - Limit int - - // Holders is a list of all the semaphore holders. - // It maps the session ID to true. It is used as a set effectively. - Holders map[string]bool -} - -// SemaphorePrefix is used to create a Semaphore which will operate -// at the given KV prefix and uses the given limit for the semaphore. -// The prefix must have write privileges, and the limit must be agreed -// upon by all contenders. -func (c *Client) SemaphorePrefix(prefix string, limit int) (*Semaphore, error) { - opts := &SemaphoreOptions{ - Prefix: prefix, - Limit: limit, - } - return c.SemaphoreOpts(opts) -} - -// SemaphoreOpts is used to create a Semaphore with the given options. -// The prefix must have write privileges, and the limit must be agreed -// upon by all contenders. If a Session is not provided, one will be created. 
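A hedged usage sketch for the semaphore (not part of this diff; the prefix and limit are illustrative, and the client setup is assumed):

package main

import (
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// At most 3 holders under the (made-up) prefix; every contender must
	// agree on the same limit.
	sema, err := client.SemaphorePrefix("sql-runner/workers", 3)
	if err != nil {
		log.Fatal(err)
	}

	// Acquire blocks until a slot frees up; the returned channel closes if
	// the slot is later lost, e.g. on session invalidation.
	lostCh, err := sema.Acquire(nil)
	if err != nil {
		log.Fatal(err)
	}
	defer sema.Release()

	select {
	case <-lostCh:
		log.Println("slot lost; stop work that depended on it")
	default:
		log.Println("holding one of 3 slots")
	}
}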
-func (c *Client) SemaphoreOpts(opts *SemaphoreOptions) (*Semaphore, error) { - if opts.Prefix == "" { - return nil, fmt.Errorf("missing prefix") - } - if opts.Limit <= 0 { - return nil, fmt.Errorf("semaphore limit must be positive") - } - if opts.SessionName == "" { - opts.SessionName = DefaultSemaphoreSessionName - } - if opts.SessionTTL == "" { - opts.SessionTTL = DefaultSemaphoreSessionTTL - } else { - if _, err := time.ParseDuration(opts.SessionTTL); err != nil { - return nil, fmt.Errorf("invalid SessionTTL: %v", err) - } - } - if opts.MonitorRetryTime == 0 { - opts.MonitorRetryTime = DefaultMonitorRetryTime - } - if opts.SemaphoreWaitTime == 0 { - opts.SemaphoreWaitTime = DefaultSemaphoreWaitTime - } - s := &Semaphore{ - c: c, - opts: opts, - } - return s, nil -} - -// Acquire attempts to reserve a slot in the semaphore, blocking until -// success, interrupted via the stopCh or an error is encountered. -// Providing a non-nil stopCh can be used to abort the attempt. -// On success, a channel is returned that represents our slot. -// This channel could be closed at any time due to session invalidation, -// communication errors, operator intervention, etc. It is NOT safe to -// assume that the slot is held until Release() unless the Session is specifically -// created without any associated health checks. By default Consul sessions -// prefer liveness over safety and an application must be able to handle -// the session being lost. -func (s *Semaphore) Acquire(stopCh <-chan struct{}) (<-chan struct{}, error) { - // Hold the lock as we try to acquire - s.l.Lock() - defer s.l.Unlock() - - // Check if we already hold the semaphore - if s.isHeld { - return nil, ErrSemaphoreHeld - } - - // Check if we need to create a session first - s.lockSession = s.opts.Session - if s.lockSession == "" { - sess, err := s.createSession() - if err != nil { - return nil, fmt.Errorf("failed to create session: %v", err) - } - - s.sessionRenew = make(chan struct{}) - s.lockSession = sess - session := s.c.Session() - go session.RenewPeriodic(s.opts.SessionTTL, sess, nil, s.sessionRenew) - - // If we fail to acquire the lock, cleanup the session - defer func() { - if !s.isHeld { - close(s.sessionRenew) - s.sessionRenew = nil - } - }() - } - - // Create the contender entry - kv := s.c.KV() - made, _, err := kv.Acquire(s.contenderEntry(s.lockSession), nil) - if err != nil || !made { - return nil, fmt.Errorf("failed to make contender entry: %v", err) - } - - // Setup the query options - qOpts := &QueryOptions{ - WaitTime: s.opts.SemaphoreWaitTime, - } - - start := time.Now() - attempts := 0 -WAIT: - // Check if we should quit - select { - case <-stopCh: - return nil, nil - default: - } - - // Handle the one-shot mode. 
- if s.opts.SemaphoreTryOnce && attempts > 0 { - elapsed := time.Since(start) - if elapsed > s.opts.SemaphoreWaitTime { - return nil, nil - } - - // Query wait time should not exceed the semaphore wait time - qOpts.WaitTime = s.opts.SemaphoreWaitTime - elapsed - } - attempts++ - - // Read the prefix - pairs, meta, err := kv.List(s.opts.Prefix, qOpts) - if err != nil { - return nil, fmt.Errorf("failed to read prefix: %v", err) - } - - // Decode the lock - lockPair := s.findLock(pairs) - if lockPair.Flags != SemaphoreFlagValue { - return nil, ErrSemaphoreConflict - } - lock, err := s.decodeLock(lockPair) - if err != nil { - return nil, err - } - - // Verify we agree with the limit - if lock.Limit != s.opts.Limit { - return nil, fmt.Errorf("semaphore limit conflict (lock: %d, local: %d)", - lock.Limit, s.opts.Limit) - } - - // Prune the dead holders - s.pruneDeadHolders(lock, pairs) - - // Check if the lock is held - if len(lock.Holders) >= lock.Limit { - qOpts.WaitIndex = meta.LastIndex - goto WAIT - } - - // Create a new lock with us as a holder - lock.Holders[s.lockSession] = true - newLock, err := s.encodeLock(lock, lockPair.ModifyIndex) - if err != nil { - return nil, err - } - - // Attempt the acquisition - didSet, _, err := kv.CAS(newLock, nil) - if err != nil { - return nil, fmt.Errorf("failed to update lock: %v", err) - } - if !didSet { - // Update failed, could have been a race with another contender, - // retry the operation - goto WAIT - } - - // Watch to ensure we maintain ownership of the slot - lockCh := make(chan struct{}) - go s.monitorLock(s.lockSession, lockCh) - - // Set that we own the lock - s.isHeld = true - - // Acquired! All done - return lockCh, nil -} - -// Release is used to voluntarily give up our semaphore slot. It is -// an error to call this if the semaphore has not been acquired. -func (s *Semaphore) Release() error { - // Hold the lock as we try to release - s.l.Lock() - defer s.l.Unlock() - - // Ensure the lock is actually held - if !s.isHeld { - return ErrSemaphoreNotHeld - } - - // Set that we no longer own the lock - s.isHeld = false - - // Stop the session renew - if s.sessionRenew != nil { - defer func() { - close(s.sessionRenew) - s.sessionRenew = nil - }() - } - - // Get and clear the lock session - lockSession := s.lockSession - s.lockSession = "" - - // Remove ourselves as a lock holder - kv := s.c.KV() - key := path.Join(s.opts.Prefix, DefaultSemaphoreKey) -READ: - pair, _, err := kv.Get(key, nil) - if err != nil { - return err - } - if pair == nil { - pair = &KVPair{} - } - lock, err := s.decodeLock(pair) - if err != nil { - return err - } - - // Create a new lock without us as a holder - if _, ok := lock.Holders[lockSession]; ok { - delete(lock.Holders, lockSession) - newLock, err := s.encodeLock(lock, pair.ModifyIndex) - if err != nil { - return err - } - - // Swap the locks - didSet, _, err := kv.CAS(newLock, nil) - if err != nil { - return fmt.Errorf("failed to update lock: %v", err) - } - if !didSet { - goto READ - } - } - - // Destroy the contender entry - contenderKey := path.Join(s.opts.Prefix, lockSession) - if _, err := kv.Delete(contenderKey, nil); err != nil { - return err - } - return nil -} - -// Destroy is used to cleanup the semaphore entry. It is not necessary -// to invoke. It will fail if the semaphore is in use. 
-func (s *Semaphore) Destroy() error { - // Hold the lock as we try to acquire - s.l.Lock() - defer s.l.Unlock() - - // Check if we already hold the semaphore - if s.isHeld { - return ErrSemaphoreHeld - } - - // List for the semaphore - kv := s.c.KV() - pairs, _, err := kv.List(s.opts.Prefix, nil) - if err != nil { - return fmt.Errorf("failed to read prefix: %v", err) - } - - // Find the lock pair, bail if it doesn't exist - lockPair := s.findLock(pairs) - if lockPair.ModifyIndex == 0 { - return nil - } - if lockPair.Flags != SemaphoreFlagValue { - return ErrSemaphoreConflict - } - - // Decode the lock - lock, err := s.decodeLock(lockPair) - if err != nil { - return err - } - - // Prune the dead holders - s.pruneDeadHolders(lock, pairs) - - // Check if there are any holders - if len(lock.Holders) > 0 { - return ErrSemaphoreInUse - } - - // Attempt the delete - didRemove, _, err := kv.DeleteCAS(lockPair, nil) - if err != nil { - return fmt.Errorf("failed to remove semaphore: %v", err) - } - if !didRemove { - return ErrSemaphoreInUse - } - return nil -} - -// createSession is used to create a new managed session -func (s *Semaphore) createSession() (string, error) { - session := s.c.Session() - se := &SessionEntry{ - Name: s.opts.SessionName, - TTL: s.opts.SessionTTL, - Behavior: SessionBehaviorDelete, - } - id, _, err := session.Create(se, nil) - if err != nil { - return "", err - } - return id, nil -} - -// contenderEntry returns a formatted KVPair for the contender -func (s *Semaphore) contenderEntry(session string) *KVPair { - return &KVPair{ - Key: path.Join(s.opts.Prefix, session), - Value: s.opts.Value, - Session: session, - Flags: SemaphoreFlagValue, - } -} - -// findLock is used to find the KV Pair which is used for coordination -func (s *Semaphore) findLock(pairs KVPairs) *KVPair { - key := path.Join(s.opts.Prefix, DefaultSemaphoreKey) - for _, pair := range pairs { - if pair.Key == key { - return pair - } - } - return &KVPair{Flags: SemaphoreFlagValue} -} - -// decodeLock is used to decode a semaphoreLock from an -// entry in Consul -func (s *Semaphore) decodeLock(pair *KVPair) (*semaphoreLock, error) { - // Handle if there is no lock - if pair == nil || pair.Value == nil { - return &semaphoreLock{ - Limit: s.opts.Limit, - Holders: make(map[string]bool), - }, nil - } - - l := &semaphoreLock{} - if err := json.Unmarshal(pair.Value, l); err != nil { - return nil, fmt.Errorf("lock decoding failed: %v", err) - } - return l, nil -} - -// encodeLock is used to encode a semaphoreLock into a KVPair -// that can be PUT -func (s *Semaphore) encodeLock(l *semaphoreLock, oldIndex uint64) (*KVPair, error) { - enc, err := json.Marshal(l) - if err != nil { - return nil, fmt.Errorf("lock encoding failed: %v", err) - } - pair := &KVPair{ - Key: path.Join(s.opts.Prefix, DefaultSemaphoreKey), - Value: enc, - Flags: SemaphoreFlagValue, - ModifyIndex: oldIndex, - } - return pair, nil -} - -// pruneDeadHolders is used to remove all the dead lock holders -func (s *Semaphore) pruneDeadHolders(lock *semaphoreLock, pairs KVPairs) { - // Gather all the live holders - alive := make(map[string]struct{}, len(pairs)) - for _, pair := range pairs { - if pair.Session != "" { - alive[pair.Session] = struct{}{} - } - } - - // Remove any holders that are dead - for holder := range lock.Holders { - if _, ok := alive[holder]; !ok { - delete(lock.Holders, holder) - } - } -} - -// monitorLock is a long running routine to monitor a semaphore ownership -// It closes the stopCh if we lose our slot. 
-func (s *Semaphore) monitorLock(session string, stopCh chan struct{}) { - defer close(stopCh) - kv := s.c.KV() - opts := &QueryOptions{RequireConsistent: true} -WAIT: - retries := s.opts.MonitorRetries -RETRY: - pairs, meta, err := kv.List(s.opts.Prefix, opts) - if err != nil { - // If configured we can try to ride out a brief Consul unavailability - // by doing retries. Note that we have to attempt the retry in a non- - // blocking fashion so that we have a clean place to reset the retry - // counter if service is restored. - if retries > 0 && IsRetryableError(err) { - time.Sleep(s.opts.MonitorRetryTime) - retries-- - opts.WaitIndex = 0 - goto RETRY - } - return - } - lockPair := s.findLock(pairs) - lock, err := s.decodeLock(lockPair) - if err != nil { - return - } - s.pruneDeadHolders(lock, pairs) - if _, ok := lock.Holders[session]; ok { - opts.WaitIndex = meta.LastIndex - goto WAIT - } -}
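For orientation, here is a minimal, hedged usage sketch of the semaphore API deleted above. It assumes a locally reachable Consul agent; the KV prefix `service/example/sema` and the limit of 3 are illustrative, not part of the original code:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	// Connect to the local agent (address and config are assumptions).
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Contend for one of three slots under an illustrative KV prefix.
	sema, err := client.SemaphorePrefix("service/example/sema", 3)
	if err != nil {
		log.Fatal(err)
	}

	// Acquire blocks until a slot is free; a nil stopCh means wait
	// indefinitely. The returned channel is closed if the slot is ever
	// lost (e.g. session invalidation), so long-running work should
	// watch it rather than assume the slot is held until Release.
	lostCh, err := sema.Acquire(nil)
	if err != nil {
		log.Fatal(err)
	}
	defer sema.Release()

	select {
	case <-lostCh:
		fmt.Println("semaphore slot lost")
	default:
		fmt.Println("semaphore slot held; safe to do guarded work")
	}
}
```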
diff --git a/vendor/github.com/hashicorp/consul/api/session.go b/vendor/github.com/hashicorp/consul/api/session.go deleted file mode 100644 index 1613f11..0000000 --- a/vendor/github.com/hashicorp/consul/api/session.go +++ /dev/null @@ -1,224 +0,0 @@ -package api - -import ( - "errors" - "fmt" - "time" -) - -const ( - // SessionBehaviorRelease is the default behavior and causes - // all associated locks to be released on session invalidation. - SessionBehaviorRelease = "release" - - // SessionBehaviorDelete is new in Consul 0.5 and changes the - // behavior to delete all associated locks on session invalidation. - // It can be used in a way similar to Ephemeral Nodes in ZooKeeper. - SessionBehaviorDelete = "delete" -) - -var ErrSessionExpired = errors.New("session expired") - -// SessionEntry represents a session in Consul -type SessionEntry struct { - CreateIndex uint64 - ID string - Name string - Node string - Checks []string - LockDelay time.Duration - Behavior string - TTL string -} - -// Session can be used to query the Session endpoints -type Session struct { - c *Client -} - -// Session returns a handle to the session endpoints -func (c *Client) Session() *Session { - return &Session{c} -} - -// CreateNoChecks is like Create but is used specifically to create -// a session with no associated health checks. -func (s *Session) CreateNoChecks(se *SessionEntry, q *WriteOptions) (string, *WriteMeta, error) { - body := make(map[string]interface{}) - body["Checks"] = []string{} - if se != nil { - if se.Name != "" { - body["Name"] = se.Name - } - if se.Node != "" { - body["Node"] = se.Node - } - if se.LockDelay != 0 { - body["LockDelay"] = durToMsec(se.LockDelay) - } - if se.Behavior != "" { - body["Behavior"] = se.Behavior - } - if se.TTL != "" { - body["TTL"] = se.TTL - } - } - return s.create(body, q) -} - -// Create makes a new session. Providing a session entry can -customize the session. It can also be nil to use defaults. -func (s *Session) Create(se *SessionEntry, q *WriteOptions) (string, *WriteMeta, error) { - var obj interface{} - if se != nil { - body := make(map[string]interface{}) - obj = body - if se.Name != "" { - body["Name"] = se.Name - } - if se.Node != "" { - body["Node"] = se.Node - } - if se.LockDelay != 0 { - body["LockDelay"] = durToMsec(se.LockDelay) - } - if len(se.Checks) > 0 { - body["Checks"] = se.Checks - } - if se.Behavior != "" { - body["Behavior"] = se.Behavior - } - if se.TTL != "" { - body["TTL"] = se.TTL - } - } - return s.create(obj, q) -} - -func (s *Session) create(obj interface{}, q *WriteOptions) (string, *WriteMeta, error) { - var out struct{ ID string } - wm, err := s.c.write("/v1/session/create", obj, &out, q) - if err != nil { - return "", nil, err - } - return out.ID, wm, nil -} - -// Destroy invalidates a given session -func (s *Session) Destroy(id string, q *WriteOptions) (*WriteMeta, error) { - wm, err := s.c.write("/v1/session/destroy/"+id, nil, nil, q) - if err != nil { - return nil, err - } - return wm, nil -} - -// Renew renews the TTL on a given session -func (s *Session) Renew(id string, q *WriteOptions) (*SessionEntry, *WriteMeta, error) { - r := s.c.newRequest("PUT", "/v1/session/renew/"+id) - r.setWriteOptions(q) - rtt, resp, err := s.c.doRequest(r) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - wm := &WriteMeta{RequestTime: rtt} - - if resp.StatusCode == 404 { - return nil, wm, nil - } else if resp.StatusCode != 200 { - return nil, nil, fmt.Errorf("Unexpected response code: %d", resp.StatusCode) - } - - var entries []*SessionEntry - if err := decodeBody(resp, &entries); err != nil { - return nil, nil, fmt.Errorf("Failed to read response: %v", err) - } - if len(entries) > 0 { - return entries[0], wm, nil - } - return nil, wm, nil -} - -// RenewPeriodic is used to periodically invoke Session.Renew on a -// session until a doneCh is closed. This is meant to be used in a long running -// goroutine to ensure a session stays valid. -func (s *Session) RenewPeriodic(initialTTL string, id string, q *WriteOptions, doneCh <-chan struct{}) error { - ctx := q.Context() - - ttl, err := time.ParseDuration(initialTTL) - if err != nil { - return err - } - - waitDur := ttl / 2 - lastRenewTime := time.Now() - var lastErr error - for { - if time.Since(lastRenewTime) > ttl { - return lastErr - } - select { - case <-time.After(waitDur): - entry, _, err := s.Renew(id, q) - if err != nil { - waitDur = time.Second - lastErr = err - continue - } - if entry == nil { - return ErrSessionExpired - } - - // Handle the server updating the TTL - ttl, _ = time.ParseDuration(entry.TTL) - waitDur = ttl / 2 - lastRenewTime = time.Now() - - case <-doneCh: - // Attempt a session destroy - s.Destroy(id, q) - return nil - - case <-ctx.Done(): - // Bail immediately since attempting the destroy would - // use the canceled context in q, which would just bail.
- return ctx.Err() - } - } -} - -// Info looks up a single session -func (s *Session) Info(id string, q *QueryOptions) (*SessionEntry, *QueryMeta, error) { - var entries []*SessionEntry - qm, err := s.c.query("/v1/session/info/"+id, &entries, q) - if err != nil { - return nil, nil, err - } - if len(entries) > 0 { - return entries[0], qm, nil - } - return nil, qm, nil -} - -// Node gets sessions for a node -func (s *Session) Node(node string, q *QueryOptions) ([]*SessionEntry, *QueryMeta, error) { - var entries []*SessionEntry - qm, err := s.c.query("/v1/session/node/"+node, &entries, q) - if err != nil { - return nil, nil, err - } - return entries, qm, nil -} - -// List gets all active sessions -func (s *Session) List(q *QueryOptions) ([]*SessionEntry, *QueryMeta, error) { - var entries []*SessionEntry - qm, err := s.c.query("/v1/session/list", &entries, q) - if err != nil { - return nil, nil, err - } - return entries, qm, nil -}
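The session endpoints above underpin the lock and semaphore helpers. A short sketch of the lifecycle they imply, assuming a local agent (the session name and TTL are illustrative):

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	sess := client.Session()

	// Create a TTL-based session; Behavior controls what happens to
	// associated locks and keys when the session is invalidated.
	id, _, err := sess.Create(&api.SessionEntry{
		Name:     "example-session",
		TTL:      "15s",
		Behavior: api.SessionBehaviorDelete,
	}, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created session:", id)

	// Keep the session alive in the background; RenewPeriodic also
	// destroys the session once doneCh is closed.
	doneCh := make(chan struct{})
	renewErr := make(chan error, 1)
	go func() {
		renewErr <- sess.RenewPeriodic("15s", id, nil, doneCh)
	}()

	// ... do work that relies on the session here ...

	close(doneCh) // stop renewing and tear the session down
	if err := <-renewErr; err != nil {
		log.Println("renewal loop ended with error:", err)
	}
}
```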
diff --git a/vendor/github.com/hashicorp/consul/api/snapshot.go b/vendor/github.com/hashicorp/consul/api/snapshot.go deleted file mode 100644 index e902377..0000000 --- a/vendor/github.com/hashicorp/consul/api/snapshot.go +++ /dev/null @@ -1,47 +0,0 @@ -package api - -import ( - "io" -) - -// Snapshot can be used to query the /v1/snapshot endpoint to take snapshots of -// Consul's internal state and restore snapshots for disaster recovery. -type Snapshot struct { - c *Client -} - -// Snapshot returns a handle that exposes the snapshot endpoints. -func (c *Client) Snapshot() *Snapshot { - return &Snapshot{c} -} - -// Save requests a new snapshot and provides an io.ReadCloser with the snapshot -// data to save. If this doesn't return an error, then it's the responsibility -// of the caller to close it. Only a subset of the QueryOptions are supported: -// Datacenter, AllowStale, and Token. -func (s *Snapshot) Save(q *QueryOptions) (io.ReadCloser, *QueryMeta, error) { - r := s.c.newRequest("GET", "/v1/snapshot") - r.setQueryOptions(q) - - rtt, resp, err := requireOK(s.c.doRequest(r)) - if err != nil { - return nil, nil, err - } - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - return resp.Body, qm, nil -} - -// Restore streams in an existing snapshot and attempts to restore it. -func (s *Snapshot) Restore(q *WriteOptions, in io.Reader) error { - r := s.c.newRequest("PUT", "/v1/snapshot") - r.body = in - r.setWriteOptions(q) - _, _, err := requireOK(s.c.doRequest(r)) - if err != nil { - return err - } - return nil -} diff --git a/vendor/github.com/hashicorp/consul/api/status.go b/vendor/github.com/hashicorp/consul/api/status.go deleted file mode 100644 index 74ef61a..0000000 --- a/vendor/github.com/hashicorp/consul/api/status.go +++ /dev/null @@ -1,43 +0,0 @@ -package api - -// Status can be used to query the Status endpoints -type Status struct { - c *Client -} - -// Status returns a handle to the status endpoints -func (c *Client) Status() *Status { - return &Status{c} -} - -// Leader is used to query for a known leader -func (s *Status) Leader() (string, error) { - r := s.c.newRequest("GET", "/v1/status/leader") - _, resp, err := requireOK(s.c.doRequest(r)) - if err != nil { - return "", err - } - defer resp.Body.Close() - - var leader string - if err := decodeBody(resp, &leader); err != nil { - return "", err - } - return leader, nil -} - -// Peers is used to query for the known raft peers -func (s *Status) Peers() ([]string, error) { - r := s.c.newRequest("GET", "/v1/status/peers") - _, resp, err := requireOK(s.c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var peers []string - if err := decodeBody(resp, &peers); err != nil { - return nil, err - } - return peers, nil -} diff --git a/vendor/github.com/hashicorp/consul/ui-v2/app/styles/components/notice.scss b/vendor/github.com/hashicorp/consul/ui-v2/app/styles/components/notice.scss deleted file mode 100644 index ec4d63d..0000000 --- a/vendor/github.com/hashicorp/consul/ui-v2/app/styles/components/notice.scss +++ /dev/null @@ -1,24 +0,0 @@ -.notice { - @extend %notice; -} -%notice { - @extend %with-warning; -} -%notice::before { - left: 20px; - top: 18px; - margin-top: 0; -} -%notice { - border: 1px solid; - border-radius: $radius-small; -} -%notice.warning { - background-color: $yellow-050; - border-color: $yellow-500; -} -%notice { - position: relative; - padding: 1em; - padding-left: 45px; -} diff --git a/vendor/github.com/hashicorp/consul/website/LICENSE.md b/vendor/github.com/hashicorp/consul/website/LICENSE.md deleted file mode 100644 index 3189f43..0000000 --- a/vendor/github.com/hashicorp/consul/website/LICENSE.md +++ /dev/null @@ -1,10 +0,0 @@ -# Proprietary License - -This license is temporary while a more official one is drafted. However, -this should make it clear: - -The text contents of this website are MPL 2.0 licensed. - -The design contents of this website are proprietary and may not be reproduced -or reused in any way other than to run the website locally. The license for -the design is owned solely by HashiCorp, Inc. diff --git a/vendor/github.com/hashicorp/consul/website/source/api/operator/license.html.md b/vendor/github.com/hashicorp/consul/website/source/api/operator/license.html.md deleted file mode 100644 index c0e046e..0000000 --- a/vendor/github.com/hashicorp/consul/website/source/api/operator/license.html.md +++ /dev/null @@ -1,143 +0,0 @@ ---- -layout: api -page_title: License - Operator - HTTP API -sidebar_current: api-operator-license -description: |- - The /operator/license endpoints allow for setting and retrieving the Consul - Enterprise License. ---- - -# License - Operator HTTP API - -~> **Enterprise Only!** This API endpoint and functionality only exists in -Consul Enterprise. This is not present in the open source version of Consul.
- -The licensing functionality described here is available only in -[Consul Enterprise](https://www.hashicorp.com/products/consul/) version 1.1.0 and later. - -## Getting the Consul License - -This endpoint gets information about the current license. - -| Method | Path | Produces | -| ------ | ---------------------------- | -------------------------- | -| `GET` | `/operator/license` | `application/json` | - -The table below shows this endpoint's support for -[blocking queries](/api/index.html#blocking-queries), -[consistency modes](/api/index.html#consistency-modes), and -[required ACLs](/api/index.html#acls). - -| Blocking Queries | Consistency Modes | ACL Required | -| ---------------- | ----------------- | ---------------- | -| `NO` | `all` | `none` | - -### Parameters - -- `dc` `(string: "")` - Specifies the datacenter whose license should be retrieved. - This will default to the datacenter of the agent serving the HTTP request. - This is specified as a URL query parameter. - -### Sample Request - -```text -$ curl \ - https://consul.rocks/v1/operator/license -``` - -### Sample Response - -```json -{ - "Valid": true, - "License": { - "license_id": "2afbf681-0d1a-0649-cb6c-333ec9f0989c", - "customer_id": "0259271d-8ffc-e85e-0830-c0822c1f5f2b", - "installation_id": "*", - "issue_time": "2018-05-21T20:03:35.911567355Z", - "start_time": "2018-05-21T04:00:00Z", - "expiration_time": "2019-05-22T03:59:59.999Z", - "product": "consul", - "flags": { - "package": "premium" - }, - "features": [ - "Automated Backups", - "Automated Upgrades", - "Enhanced Read Scalability", - "Network Segments", - "Redundancy Zone", - "Advanced Network Federation" - ], - "temporary": false - }, - "Warnings": [] -} -``` - -## Updating the Consul License - -This endpoint updates the Consul license and returns some of the -license contents as well as any warning messages regarding its validity. - -| Method | Path | Produces | -| ------ | ---------------------------- | -------------------------- | -| `PUT` | `/operator/license` | `application/json` | - -The table below shows this endpoint's support for -[blocking queries](/api/index.html#blocking-queries), -[consistency modes](/api/index.html#consistency-modes), and -[required ACLs](/api/index.html#acls). - -| Blocking Queries | Consistency Modes | ACL Required | -| ---------------- | ----------------- | ---------------- | -| `NO` | `none` | `operator:write` | - -### Parameters - -- `dc` `(string: "")` - Specifies the datacenter whose license should be updated. - This will default to the datacenter of the agent serving the HTTP request. - This is specified as a URL query parameter. - -### Sample Payload - -The payload is the raw license blob. 
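For illustration, a minimal Go sketch of the same update call (the agent address, plain-HTTP scheme, and the `consul.license` file name are assumptions, and the endpoint itself exists only in Consul Enterprise):

```go
package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
)

func main() {
	// Read the raw license blob from disk (file name is illustrative).
	blob, err := ioutil.ReadFile("consul.license")
	if err != nil {
		log.Fatal(err)
	}

	// PUT the blob, unmodified, to the operator endpoint of a local agent.
	req, err := http.NewRequest(http.MethodPut,
		"http://127.0.0.1:8500/v1/operator/license", bytes.NewReader(blob))
	if err != nil {
		log.Fatal(err)
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	// The JSON response echoes the parsed license plus validity warnings.
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp.Status)
	fmt.Println(string(body))
}
```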
- -### Sample Request - -```text -$ curl \ - --request PUT \ - --data @consul.license \ - https://consul.rocks/v1/operator/license -``` - -### Sample Response - -```json -{ - "Valid": true, - "License": { - "license_id": "2afbf681-0d1a-0649-cb6c-333ec9f0989c", - "customer_id": "0259271d-8ffc-e85e-0830-c0822c1f5f2b", - "installation_id": "*", - "issue_time": "2018-05-21T20:03:35.911567355Z", - "start_time": "2018-05-21T04:00:00Z", - "expiration_time": "2019-05-22T03:59:59.999Z", - "product": "consul", - "flags": { - "package": "premium" - }, - "features": [ - "Automated Backups", - "Automated Upgrades", - "Enhanced Read Scalability", - "Network Segments", - "Redundancy Zone", - "Advanced Network Federation" - ], - "temporary": false - }, - "Warnings": [] -} -``` diff --git a/vendor/github.com/hashicorp/consul/website/source/docs/commands/license.html.markdown.erb b/vendor/github.com/hashicorp/consul/website/source/docs/commands/license.html.markdown.erb deleted file mode 100644 index b65d171..0000000 --- a/vendor/github.com/hashicorp/consul/website/source/docs/commands/license.html.markdown.erb +++ /dev/null @@ -1,109 +0,0 @@ ---- -layout: "docs" -page_title: "Commands: License" -sidebar_current: "docs-commands-license" -description: > - The license command provides datacenter-level management of the Consul Enterprise license. ---- - -# Consul License - -Command: `consul license` - -<%= enterprise_alert :consul %> - -The `license` command provides datacenter-level management of the Consul Enterprise license. This was added in Consul 1.1.0. - -If ACLs are enabled then a token with operator privileges may be required in -order to use this command. Requests are forwarded internally to the leader -if required, so this can be run from any Consul node in a cluster. See the -[ACL Guide](/docs/guides/acl.html#operator) for more information. - - -```text -Usage: consul license [options] [args] - - This command has subcommands for managing the Consul Enterprise license - Here are some simple examples, and more detailed examples are - available in the subcommands or the documentation. - - Install a new license from a file: - - $ consul license put @consul.license - - Install a new license from stdin: - - $ consul license put - - - Install a new license from a string: - - $ consul license put "" - - Retrieve the current license: - - $ consul license get - - For more examples, ask for subcommand help or view the documentation. - -Subcommands: - get Get the current license - put Puts a new license in the datacenter -``` - -## put - -This command sets the Consul Enterprise license. - -Usage: `consul license put [options] LICENSE` - -#### API Options - -<%= partial "docs/commands/http_api_options_client" %> -<%= partial "docs/commands/http_api_options_server" %> - -The output looks like this: - -``` -License is valid -License ID: 2afbf681-0d1a-0649-cb6c-333ec9f0989c -Customer ID: 0259271d-8ffc-e85e-0830-c0822c1f5f2b -Expires At: 2019-05-22 03:59:59.999 +0000 UTC -Datacenter: * -Package: premium -Licensed Features: - Automated Backups - Automated Upgrades - Enhanced Read Scalability - Network Segments - Redundancy Zone - Advanced Network Federation -``` - -## get - -This command gets the Consul Enterprise license. 
- -Usage: `consul license get [options]` - -#### API Options - -<%= partial "docs/commands/http_api_options_client" %> -<%= partial "docs/commands/http_api_options_server" %> - -The output looks like this: - -``` -License is valid -License ID: 2afbf681-0d1a-0649-cb6c-333ec9f0989c -Customer ID: 0259271d-8ffc-e85e-0830-c0822c1f5f2b -Expires At: 2019-05-22 03:59:59.999 +0000 UTC -Datacenter: * -Package: premium -Licensed Features: - Automated Backups - Automated Upgrades - Enhanced Read Scalability - Network Segments - Redundancy Zone - Advanced Network Federation -``` \ No newline at end of file diff --git a/vendor/github.com/hashicorp/go-cleanhttp/LICENSE b/vendor/github.com/hashicorp/go-cleanhttp/LICENSE deleted file mode 100644 index e87a115..0000000 --- a/vendor/github.com/hashicorp/go-cleanhttp/LICENSE +++ /dev/null @@ -1,363 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. "Contributor" - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. "Contributor Version" - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor's Contribution. - -1.3. "Contribution" - - means Covered Software of a particular Contributor. - -1.4. "Covered Software" - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. "Incompatible With Secondary Licenses" - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of - version 1.1 or earlier of the License, but not also under the terms of - a Secondary License. - -1.6. "Executable Form" - - means any form of the work other than Source Code Form. - -1.7. "Larger Work" - - means a work that combines Covered Software with other material, in a - separate file or files, that is not Covered Software. - -1.8. "License" - - means this document. - -1.9. "Licensable" - - means having the right to grant, to the maximum extent possible, whether - at the time of the initial grant or subsequently, any and all of the - rights conveyed by this License. - -1.10. "Modifications" - - means any of the following: - - a. any file in Source Code Form that results from an addition to, - deletion from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. "Patent Claims" of a Contributor - - means any patent claim(s), including without limitation, method, - process, and apparatus claims, in any patent Licensable by such - Contributor that would be infringed, but for the grant of the License, - by the making, using, selling, offering for sale, having made, import, - or transfer of either its Contributions or its Contributor Version. - -1.12. "Secondary License" - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. "Source Code Form" - - means the form of the work preferred for making modifications. - -1.14. "You" (or "Your") - - means an individual or a legal entity exercising rights under this - License. 
For legal entities, "You" includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, "control" means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or - as part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its - Contributions or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution - become effective for each Contribution on the date the Contributor first - distributes such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under - this License. No additional rights or licenses will be implied from the - distribution or licensing of Covered Software under this License. - Notwithstanding Section 2.1(b) above, no patent license is granted by a - Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party's - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of - its Contributions. - - This License does not grant any rights in the trademarks, service marks, - or logos of any Contributor (except as may be necessary to comply with - the notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this - License (see Section 10.2) or under the terms of a Secondary License (if - permitted under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its - Contributions are its original creation(s) or it has sufficient rights to - grant the rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under - applicable copyright doctrines of fair use, fair dealing, or other - equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under - the terms of this License. You must inform recipients that the Source - Code Form of the Covered Software is governed by the terms of this - License, and how they can obtain a copy of this License. You may not - attempt to alter or restrict the recipients' rights in the Source Code - Form. - -3.2. 
Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this - License, or sublicense it under different terms, provided that the - license for the Executable Form does not attempt to limit or alter the - recipients' rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for - the Covered Software. If the Larger Work is a combination of Covered - Software with a work governed by one or more Secondary Licenses, and the - Covered Software is not Incompatible With Secondary Licenses, this - License permits You to additionally distribute such Covered Software - under the terms of such Secondary License(s), so that the recipient of - the Larger Work may, at their option, further distribute the Covered - Software under the terms of either this License or such Secondary - License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices - (including copyright notices, patent notices, disclaimers of warranty, or - limitations of liability) contained within the Source Code Form of the - Covered Software, except that You may alter any license notices to the - extent required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on - behalf of any Contributor. You must make it absolutely clear that any - such warranty, support, indemnity, or liability obligation is offered by - You alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, - judicial order, or regulation then You must: (a) comply with the terms of - this License to the maximum extent possible; and (b) describe the - limitations and the code they affect. Such description must be placed in a - text file included with all distributions of the Covered Software under - this License. Except to the extent prohibited by statute or regulation, - such description must be sufficiently detailed for a recipient of ordinary - skill to be able to understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. 
However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing - basis, if such Contributor fails to notify You of the non-compliance by - some reasonable means prior to 60 days after You have come back into - compliance. Moreover, Your grants from a particular Contributor are - reinstated on an ongoing basis if such Contributor notifies You of the - non-compliance by some reasonable means, this is the first time You have - received notice of non-compliance with this License from such - Contributor, and You become compliant prior to 30 days after Your receipt - of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, - counter-claims, and cross-claims) alleging that a Contributor Version - directly or indirectly infringes any patent, then the rights granted to - You by any and all Contributors for the Covered Software under Section - 2.1 of this License shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an "as is" basis, - without warranty of any kind, either expressed, implied, or statutory, - including, without limitation, warranties that the Covered Software is free - of defects, merchantable, fit for a particular purpose or non-infringing. - The entire risk as to the quality and performance of the Covered Software - is with You. Should any Covered Software prove defective in any respect, - You (not any Contributor) assume the cost of any necessary servicing, - repair, or correction. This disclaimer of warranty constitutes an essential - part of this License. No use of any Covered Software is authorized under - this License except under this disclaimer. - -7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from - such party's negligence to the extent applicable law prohibits such - limitation. Some jurisdictions do not allow the exclusion or limitation of - incidental or consequential damages, so this exclusion and limitation may - not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts - of a jurisdiction where the defendant maintains its principal place of - business and such litigation shall be governed by laws of that - jurisdiction, without reference to its conflict-of-law provisions. Nothing - in this Section shall prevent a party's ability to bring cross-claims or - counter-claims. - -9. 
Miscellaneous - - This License represents the complete agreement concerning the subject - matter hereof. If any provision of this License is held to be - unenforceable, such provision shall be reformed only to the extent - necessary to make it enforceable. Any law or regulation which provides that - the language of a contract shall be construed against the drafter shall not - be used to construe this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version - of the License under which You originally received the Covered Software, - or under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a - modified version of this License if you rename the license and remove - any references to the name of the license steward (except to note that - such modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary - Licenses If You choose to distribute Source Code Form that is - Incompatible With Secondary Licenses under the terms of this version of - the License, the notice described in Exhibit B of this License must be - attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, -then You may include the notice in a location (such as a LICENSE file in a -relevant directory) where a recipient would be likely to look for such a -notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - "Incompatible With Secondary Licenses" Notice - - This Source Code Form is "Incompatible - With Secondary Licenses", as defined by - the Mozilla Public License, v. 2.0. - diff --git a/vendor/github.com/hashicorp/go-cleanhttp/README.md b/vendor/github.com/hashicorp/go-cleanhttp/README.md deleted file mode 100644 index 036e531..0000000 --- a/vendor/github.com/hashicorp/go-cleanhttp/README.md +++ /dev/null @@ -1,30 +0,0 @@ -# cleanhttp - -Functions for accessing "clean" Go http.Client values - -------------- - -The Go standard library contains a default `http.Client` called -`http.DefaultClient`. It is a common idiom in Go code to start with -`http.DefaultClient` and tweak it as necessary, and in fact, this is -encouraged; from the `http` package documentation: - -> The Client's Transport typically has internal state (cached TCP connections), -so Clients should be reused instead of created as needed. Clients are safe for -concurrent use by multiple goroutines. - -Unfortunately, this is a shared value, and it is not uncommon for libraries to -assume that they are free to modify it at will. 
With enough dependencies, it -can be very easy to encounter strange problems and race conditions due to -manipulation of this shared value across libraries and goroutines (clients are -safe for concurrent use, but writing values to the client struct itself is not -protected). - -Making things worse is the fact that a bare `http.Client` will use a default -`http.Transport` called `http.DefaultTransport`, which is another global value -that behaves the same way. So it is not simply enough to replace -`http.DefaultClient` with `&http.Client{}`. - -This repository provides some simple functions to get a "clean" `http.Client` --- one that uses the same default values as the Go standard library, but -returns a client that does not share any state with other clients. diff --git a/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go b/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go deleted file mode 100644 index 8d306bf..0000000 --- a/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go +++ /dev/null @@ -1,57 +0,0 @@ -package cleanhttp - -import ( - "net" - "net/http" - "runtime" - "time" -) - -// DefaultTransport returns a new http.Transport with similar default values to -// http.DefaultTransport, but with idle connections and keepalives disabled. -func DefaultTransport() *http.Transport { - transport := DefaultPooledTransport() - transport.DisableKeepAlives = true - transport.MaxIdleConnsPerHost = -1 - return transport -} - -// DefaultPooledTransport returns a new http.Transport with similar default -// values to http.DefaultTransport. Do not use this for transient transports as -// it can leak file descriptors over time. Only use this for transports that -// will be re-used for the same host(s). -func DefaultPooledTransport() *http.Transport { - transport := &http.Transport{ - Proxy: http.ProxyFromEnvironment, - DialContext: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - DualStack: true, - }).DialContext, - MaxIdleConns: 100, - IdleConnTimeout: 90 * time.Second, - TLSHandshakeTimeout: 10 * time.Second, - ExpectContinueTimeout: 1 * time.Second, - MaxIdleConnsPerHost: runtime.GOMAXPROCS(0) + 1, - } - return transport -} - -// DefaultClient returns a new http.Client with similar default values to -// http.Client, but with a non-shared Transport, idle connections disabled, and -// keepalives disabled. -func DefaultClient() *http.Client { - return &http.Client{ - Transport: DefaultTransport(), - } -} - -// DefaultPooledClient returns a new http.Client with similar default values to -// http.Client, but with a shared Transport. Do not use this function for -// transient clients as it can leak file descriptors over time. Only use this -// for clients that will be re-used for the same host(s). -func DefaultPooledClient() *http.Client { - return &http.Client{ - Transport: DefaultPooledTransport(), - } -} diff --git a/vendor/github.com/hashicorp/go-cleanhttp/doc.go b/vendor/github.com/hashicorp/go-cleanhttp/doc.go deleted file mode 100644 index 0584109..0000000 --- a/vendor/github.com/hashicorp/go-cleanhttp/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -// Package cleanhttp offers convenience utilities for acquiring "clean" -// http.Transport and http.Client structs. -// -// Values set on http.DefaultClient and http.DefaultTransport affect all -// callers. This can have detrimental effects, especially in TLS contexts, -// where client or root certificates set to talk to multiple endpoints can end -// up displacing each other, leading to hard-to-debug issues.
This package -// provides non-shared http.Client and http.Transport structs to ensure that -// the configuration will not be overwritten by other parts of the application -// or dependencies. -// -// The DefaultClient and DefaultTransport functions disable idle connections -// and keepalives. Without ensuring that idle connections are closed before -// garbage collection, short-term clients/transports can leak file descriptors, -// eventually leading to "too many open files" errors. If you will be -// connecting to the same hosts repeatedly from the same client, you can use -// DefaultPooledClient to receive a client that has connection pooling -// semantics similar to http.DefaultClient. -// -package cleanhttp diff --git a/vendor/github.com/hashicorp/go-cleanhttp/go.mod b/vendor/github.com/hashicorp/go-cleanhttp/go.mod deleted file mode 100644 index 310f075..0000000 --- a/vendor/github.com/hashicorp/go-cleanhttp/go.mod +++ /dev/null @@ -1 +0,0 @@ -module github.com/hashicorp/go-cleanhttp diff --git a/vendor/github.com/hashicorp/go-cleanhttp/handlers.go b/vendor/github.com/hashicorp/go-cleanhttp/handlers.go deleted file mode 100644 index 7eda377..0000000 --- a/vendor/github.com/hashicorp/go-cleanhttp/handlers.go +++ /dev/null @@ -1,43 +0,0 @@ -package cleanhttp - -import ( - "net/http" - "strings" - "unicode" -) - -// HandlerInput provides input options to cleanhttp's handlers -type HandlerInput struct { - ErrStatus int -} - -// PrintablePathCheckHandler is a middleware that ensures the request path -// contains only printable runes. -func PrintablePathCheckHandler(next http.Handler, input *HandlerInput) http.Handler { - // Nil-check on input to make it optional - if input == nil { - input = &HandlerInput{ - ErrStatus: http.StatusBadRequest, - } - } - - // Default to http.StatusBadRequest on error - if input.ErrStatus == 0 { - input.ErrStatus = http.StatusBadRequest - } - - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // Check URL path for non-printable characters - idx := strings.IndexFunc(r.URL.Path, func(c rune) bool { - return !unicode.IsPrint(c) - }) - - if idx != -1 { - w.WriteHeader(input.ErrStatus) - return - } - - next.ServeHTTP(w, r) - return - }) -} diff --git a/vendor/github.com/hashicorp/go-rootcerts/.travis.yml b/vendor/github.com/hashicorp/go-rootcerts/.travis.yml deleted file mode 100644 index 80e1de4..0000000 --- a/vendor/github.com/hashicorp/go-rootcerts/.travis.yml +++ /dev/null @@ -1,12 +0,0 @@ -sudo: false - -language: go - -go: - - 1.6 - -branches: - only: - - master - -script: make test diff --git a/vendor/github.com/hashicorp/go-rootcerts/LICENSE b/vendor/github.com/hashicorp/go-rootcerts/LICENSE deleted file mode 100644 index e87a115..0000000 --- a/vendor/github.com/hashicorp/go-rootcerts/LICENSE +++ /dev/null @@ -1,363 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. "Contributor" - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. "Contributor Version" - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor's Contribution. - -1.3. "Contribution" - - means Covered Software of a particular Contributor. - -1.4. "Covered Software" - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. 
"Incompatible With Secondary Licenses" - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of - version 1.1 or earlier of the License, but not also under the terms of - a Secondary License. - -1.6. "Executable Form" - - means any form of the work other than Source Code Form. - -1.7. "Larger Work" - - means a work that combines Covered Software with other material, in a - separate file or files, that is not Covered Software. - -1.8. "License" - - means this document. - -1.9. "Licensable" - - means having the right to grant, to the maximum extent possible, whether - at the time of the initial grant or subsequently, any and all of the - rights conveyed by this License. - -1.10. "Modifications" - - means any of the following: - - a. any file in Source Code Form that results from an addition to, - deletion from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. "Patent Claims" of a Contributor - - means any patent claim(s), including without limitation, method, - process, and apparatus claims, in any patent Licensable by such - Contributor that would be infringed, but for the grant of the License, - by the making, using, selling, offering for sale, having made, import, - or transfer of either its Contributions or its Contributor Version. - -1.12. "Secondary License" - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. "Source Code Form" - - means the form of the work preferred for making modifications. - -1.14. "You" (or "Your") - - means an individual or a legal entity exercising rights under this - License. For legal entities, "You" includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, "control" means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or - as part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its - Contributions or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution - become effective for each Contribution on the date the Contributor first - distributes such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under - this License. No additional rights or licenses will be implied from the - distribution or licensing of Covered Software under this License. - Notwithstanding Section 2.1(b) above, no patent license is granted by a - Contributor: - - a. 
for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party's - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of - its Contributions. - - This License does not grant any rights in the trademarks, service marks, - or logos of any Contributor (except as may be necessary to comply with - the notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this - License (see Section 10.2) or under the terms of a Secondary License (if - permitted under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its - Contributions are its original creation(s) or it has sufficient rights to - grant the rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under - applicable copyright doctrines of fair use, fair dealing, or other - equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under - the terms of this License. You must inform recipients that the Source - Code Form of the Covered Software is governed by the terms of this - License, and how they can obtain a copy of this License. You may not - attempt to alter or restrict the recipients' rights in the Source Code - Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this - License, or sublicense it under different terms, provided that the - license for the Executable Form does not attempt to limit or alter the - recipients' rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for - the Covered Software. If the Larger Work is a combination of Covered - Software with a work governed by one or more Secondary Licenses, and the - Covered Software is not Incompatible With Secondary Licenses, this - License permits You to additionally distribute such Covered Software - under the terms of such Secondary License(s), so that the recipient of - the Larger Work may, at their option, further distribute the Covered - Software under the terms of either this License or such Secondary - License(s). - -3.4. 
Notices - - You may not remove or alter the substance of any license notices - (including copyright notices, patent notices, disclaimers of warranty, or - limitations of liability) contained within the Source Code Form of the - Covered Software, except that You may alter any license notices to the - extent required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on - behalf of any Contributor. You must make it absolutely clear that any - such warranty, support, indemnity, or liability obligation is offered by - You alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, - judicial order, or regulation then You must: (a) comply with the terms of - this License to the maximum extent possible; and (b) describe the - limitations and the code they affect. Such description must be placed in a - text file included with all distributions of the Covered Software under - this License. Except to the extent prohibited by statute or regulation, - such description must be sufficiently detailed for a recipient of ordinary - skill to be able to understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing - basis, if such Contributor fails to notify You of the non-compliance by - some reasonable means prior to 60 days after You have come back into - compliance. Moreover, Your grants from a particular Contributor are - reinstated on an ongoing basis if such Contributor notifies You of the - non-compliance by some reasonable means, this is the first time You have - received notice of non-compliance with this License from such - Contributor, and You become compliant prior to 30 days after Your receipt - of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, - counter-claims, and cross-claims) alleging that a Contributor Version - directly or indirectly infringes any patent, then the rights granted to - You by any and all Contributors for the Covered Software under Section - 2.1 of this License shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. 
Disclaimer of Warranty - - Covered Software is provided under this License on an "as is" basis, - without warranty of any kind, either expressed, implied, or statutory, - including, without limitation, warranties that the Covered Software is free - of defects, merchantable, fit for a particular purpose or non-infringing. - The entire risk as to the quality and performance of the Covered Software - is with You. Should any Covered Software prove defective in any respect, - You (not any Contributor) assume the cost of any necessary servicing, - repair, or correction. This disclaimer of warranty constitutes an essential - part of this License. No use of any Covered Software is authorized under - this License except under this disclaimer. - -7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from - such party's negligence to the extent applicable law prohibits such - limitation. Some jurisdictions do not allow the exclusion or limitation of - incidental or consequential damages, so this exclusion and limitation may - not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts - of a jurisdiction where the defendant maintains its principal place of - business and such litigation shall be governed by laws of that - jurisdiction, without reference to its conflict-of-law provisions. Nothing - in this Section shall prevent a party's ability to bring cross-claims or - counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject - matter hereof. If any provision of this License is held to be - unenforceable, such provision shall be reformed only to the extent - necessary to make it enforceable. Any law or regulation which provides that - the language of a contract shall be construed against the drafter shall not - be used to construe this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version - of the License under which You originally received the Covered Software, - or under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a - modified version of this License if you rename the license and remove - any references to the name of the license steward (except to note that - such modified license differs from this License). - -10.4. 
Distributing Source Code Form that is Incompatible With Secondary
-      Licenses If You choose to distribute Source Code Form that is
-      Incompatible With Secondary Licenses under the terms of this version of
-      the License, the notice described in Exhibit B of this License must be
-      attached.
-
-Exhibit A - Source Code Form License Notice
-
-      This Source Code Form is subject to the
-      terms of the Mozilla Public License, v.
-      2.0. If a copy of the MPL was not
-      distributed with this file, You can
-      obtain one at
-      http://mozilla.org/MPL/2.0/.
-
-If it is not possible or desirable to put the notice in a particular file,
-then You may include the notice in a location (such as a LICENSE file in a
-relevant directory) where a recipient would be likely to look for such a
-notice.
-
-You may add additional accurate notices of copyright ownership.
-
-Exhibit B - "Incompatible With Secondary Licenses" Notice
-
-      This Source Code Form is "Incompatible
-      With Secondary Licenses", as defined by
-      the Mozilla Public License, v. 2.0.
-
diff --git a/vendor/github.com/hashicorp/go-rootcerts/Makefile b/vendor/github.com/hashicorp/go-rootcerts/Makefile
deleted file mode 100644
index c3989e7..0000000
--- a/vendor/github.com/hashicorp/go-rootcerts/Makefile
+++ /dev/null
@@ -1,8 +0,0 @@
-TEST?=./...
-
-test:
-	go test $(TEST) $(TESTARGS) -timeout=3s -parallel=4
-	go vet $(TEST)
-	go test $(TEST) -race
-
-.PHONY: test
diff --git a/vendor/github.com/hashicorp/go-rootcerts/README.md b/vendor/github.com/hashicorp/go-rootcerts/README.md
deleted file mode 100644
index f5abffc..0000000
--- a/vendor/github.com/hashicorp/go-rootcerts/README.md
+++ /dev/null
@@ -1,43 +0,0 @@
-# rootcerts
-
-Functions for loading root certificates for TLS connections.
-
------
-
-Go's standard library `crypto/tls` provides a common mechanism for configuring
-TLS connections in `tls.Config`. The `RootCAs` field on this struct is a pool
-of certificates for the client to use as a trust store when verifying server
-certificates.
-
-This library contains utility functions for loading certificates destined for
-that field, as well as one other important thing:
-
-When the `RootCAs` field is `nil`, the standard library attempts to load the
-host's root CA set. This behavior is OS-specific, and the Darwin
-implementation contains [a bug that prevents trusted certificates from the
-System and Login keychains from being loaded][1]. This library contains
-Darwin-specific behavior that works around that bug.
-
-[1]: https://github.com/golang/go/issues/14514
-
-## Example Usage
-
-Here's a snippet demonstrating how this library is meant to be used:
-
-```go
-func httpClient() (*http.Client, error) {
-	tlsConfig := &tls.Config{}
-	err := rootcerts.ConfigureTLS(tlsConfig, &rootcerts.Config{
-		CAFile: os.Getenv("MYAPP_CAFILE"),
-		CAPath: os.Getenv("MYAPP_CAPATH"),
-	})
-	if err != nil {
-		return nil, err
-	}
-	c := cleanhttp.DefaultClient()
-	t := cleanhttp.DefaultTransport()
-	t.TLSClientConfig = tlsConfig
-	c.Transport = t
-	return c, nil
-}
-```
diff --git a/vendor/github.com/hashicorp/go-rootcerts/doc.go b/vendor/github.com/hashicorp/go-rootcerts/doc.go
deleted file mode 100644
index b55cc62..0000000
--- a/vendor/github.com/hashicorp/go-rootcerts/doc.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Package rootcerts contains functions to aid in loading CA certificates for
-// TLS connections.
-//
-// In addition, its default behavior on Darwin works around an open issue [1]
-// in Go's crypto/x509 that prevents certificates from being loaded from the
-// System or Login keychains.
-//
-// [1] https://github.com/golang/go/issues/14514
-package rootcerts
diff --git a/vendor/github.com/hashicorp/go-rootcerts/rootcerts.go b/vendor/github.com/hashicorp/go-rootcerts/rootcerts.go
deleted file mode 100644
index aeb30ec..0000000
--- a/vendor/github.com/hashicorp/go-rootcerts/rootcerts.go
+++ /dev/null
@@ -1,103 +0,0 @@
-package rootcerts
-
-import (
-	"crypto/tls"
-	"crypto/x509"
-	"fmt"
-	"io/ioutil"
-	"os"
-	"path/filepath"
-)
-
-// Config determines where LoadCACerts will load certificates from. When both
-// CAFile and CAPath are blank, this library's functions will either load
-// system roots explicitly and return them, or set the CertPool to nil to allow
-// Go's standard library to load system certs.
-type Config struct {
-	// CAFile is a path to a PEM-encoded certificate file or bundle. Takes
-	// precedence over CAPath.
-	CAFile string
-
-	// CAPath is a path to a directory populated with PEM-encoded certificates.
-	CAPath string
-}
-
-// ConfigureTLS sets up the RootCAs on the provided tls.Config based on the
-// Config specified.
-func ConfigureTLS(t *tls.Config, c *Config) error {
-	if t == nil {
-		return nil
-	}
-	pool, err := LoadCACerts(c)
-	if err != nil {
-		return err
-	}
-	t.RootCAs = pool
-	return nil
-}
-
-// LoadCACerts loads a CertPool based on the Config specified.
-func LoadCACerts(c *Config) (*x509.CertPool, error) {
-	if c == nil {
-		c = &Config{}
-	}
-	if c.CAFile != "" {
-		return LoadCAFile(c.CAFile)
-	}
-	if c.CAPath != "" {
-		return LoadCAPath(c.CAPath)
-	}
-
-	return LoadSystemCAs()
-}
-
-// LoadCAFile loads a single PEM-encoded file from the path specified.
-func LoadCAFile(caFile string) (*x509.CertPool, error) {
-	pool := x509.NewCertPool()
-
-	pem, err := ioutil.ReadFile(caFile)
-	if err != nil {
-		return nil, fmt.Errorf("Error loading CA File: %s", err)
-	}
-
-	ok := pool.AppendCertsFromPEM(pem)
-	if !ok {
-		return nil, fmt.Errorf("Error loading CA File: Couldn't parse PEM in: %s", caFile)
-	}
-
-	return pool, nil
-}
-
-// LoadCAPath walks the provided path and loads all certificates encountered into
-// a pool.
-func LoadCAPath(caPath string) (*x509.CertPool, error) {
-	pool := x509.NewCertPool()
-	walkFn := func(path string, info os.FileInfo, err error) error {
-		if err != nil {
-			return err
-		}
-
-		if info.IsDir() {
-			return nil
-		}
-
-		pem, err := ioutil.ReadFile(path)
-		if err != nil {
-			return fmt.Errorf("Error loading file from CAPath: %s", err)
-		}
-
-		ok := pool.AppendCertsFromPEM(pem)
-		if !ok {
-			return fmt.Errorf("Error loading CA Path: Couldn't parse PEM in: %s", path)
-		}
-
-		return nil
-	}
-
-	err := filepath.Walk(caPath, walkFn)
-	if err != nil {
-		return nil, err
-	}
-
-	return pool, nil
-}
diff --git a/vendor/github.com/hashicorp/go-rootcerts/rootcerts_base.go b/vendor/github.com/hashicorp/go-rootcerts/rootcerts_base.go
deleted file mode 100644
index 66b1472..0000000
--- a/vendor/github.com/hashicorp/go-rootcerts/rootcerts_base.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// +build !darwin
-
-package rootcerts
-
-import "crypto/x509"
-
-// LoadSystemCAs does nothing on non-Darwin systems. We return nil so that
-// default behavior of standard TLS config libraries is triggered, which is to
-// load system certs.
-func LoadSystemCAs() (*x509.CertPool, error) { - return nil, nil -} diff --git a/vendor/github.com/hashicorp/go-rootcerts/rootcerts_darwin.go b/vendor/github.com/hashicorp/go-rootcerts/rootcerts_darwin.go deleted file mode 100644 index a9a0406..0000000 --- a/vendor/github.com/hashicorp/go-rootcerts/rootcerts_darwin.go +++ /dev/null @@ -1,48 +0,0 @@ -package rootcerts - -import ( - "crypto/x509" - "os/exec" - "path" - - "github.com/mitchellh/go-homedir" -) - -// LoadSystemCAs has special behavior on Darwin systems to work around -func LoadSystemCAs() (*x509.CertPool, error) { - pool := x509.NewCertPool() - - for _, keychain := range certKeychains() { - err := addCertsFromKeychain(pool, keychain) - if err != nil { - return nil, err - } - } - - return pool, nil -} - -func addCertsFromKeychain(pool *x509.CertPool, keychain string) error { - cmd := exec.Command("/usr/bin/security", "find-certificate", "-a", "-p", keychain) - data, err := cmd.Output() - if err != nil { - return err - } - - pool.AppendCertsFromPEM(data) - - return nil -} - -func certKeychains() []string { - keychains := []string{ - "/System/Library/Keychains/SystemRootCertificates.keychain", - "/Library/Keychains/System.keychain", - } - home, err := homedir.Dir() - if err == nil { - loginKeychain := path.Join(home, "Library", "Keychains", "login.keychain") - keychains = append(keychains, loginKeychain) - } - return keychains -} diff --git a/vendor/github.com/hashicorp/go-rootcerts/test-fixtures/capath-with-symlinks/securetrust.pem b/vendor/github.com/hashicorp/go-rootcerts/test-fixtures/capath-with-symlinks/securetrust.pem deleted file mode 120000 index dda0574..0000000 --- a/vendor/github.com/hashicorp/go-rootcerts/test-fixtures/capath-with-symlinks/securetrust.pem +++ /dev/null @@ -1 +0,0 @@ -../capath/securetrust.pem \ No newline at end of file diff --git a/vendor/github.com/hashicorp/go-rootcerts/test-fixtures/capath-with-symlinks/thawte.pem b/vendor/github.com/hashicorp/go-rootcerts/test-fixtures/capath-with-symlinks/thawte.pem deleted file mode 120000 index 37ed4f0..0000000 --- a/vendor/github.com/hashicorp/go-rootcerts/test-fixtures/capath-with-symlinks/thawte.pem +++ /dev/null @@ -1 +0,0 @@ -../capath/thawte.pem \ No newline at end of file diff --git a/vendor/github.com/hashicorp/serf/LICENSE b/vendor/github.com/hashicorp/serf/LICENSE deleted file mode 100644 index c33dcc7..0000000 --- a/vendor/github.com/hashicorp/serf/LICENSE +++ /dev/null @@ -1,354 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. “Contributor” - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. “Contributor Version” - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor’s Contribution. - -1.3. “Contribution” - - means Covered Software of a particular Contributor. - -1.4. “Covered Software” - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. “Incompatible With Secondary Licenses” - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of version - 1.1 or earlier of the License, but not also under the terms of a - Secondary License. - -1.6. 
“Executable Form” - - means any form of the work other than Source Code Form. - -1.7. “Larger Work” - - means a work that combines Covered Software with other material, in a separate - file or files, that is not Covered Software. - -1.8. “License” - - means this document. - -1.9. “Licensable” - - means having the right to grant, to the maximum extent possible, whether at the - time of the initial grant or subsequently, any and all of the rights conveyed by - this License. - -1.10. “Modifications” - - means any of the following: - - a. any file in Source Code Form that results from an addition to, deletion - from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. “Patent Claims” of a Contributor - - means any patent claim(s), including without limitation, method, process, - and apparatus claims, in any patent Licensable by such Contributor that - would be infringed, but for the grant of the License, by the making, - using, selling, offering for sale, having made, import, or transfer of - either its Contributions or its Contributor Version. - -1.12. “Secondary License” - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. “Source Code Form” - - means the form of the work preferred for making modifications. - -1.14. “You” (or “Your”) - - means an individual or a legal entity exercising rights under this - License. For legal entities, “You” includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, “control” means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or as - part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its Contributions - or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution become - effective for each Contribution on the date the Contributor first distributes - such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under this - License. No additional rights or licenses will be implied from the distribution - or licensing of Covered Software under this License. Notwithstanding Section - 2.1(b) above, no patent license is granted by a Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party’s - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. 
under Patent Claims infringed by Covered Software in the absence of its - Contributions. - - This License does not grant any rights in the trademarks, service marks, or - logos of any Contributor (except as may be necessary to comply with the - notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this License - (see Section 10.2) or under the terms of a Secondary License (if permitted - under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its Contributions - are its original creation(s) or it has sufficient rights to grant the - rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under applicable - copyright doctrines of fair use, fair dealing, or other equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under the - terms of this License. You must inform recipients that the Source Code Form - of the Covered Software is governed by the terms of this License, and how - they can obtain a copy of this License. You may not attempt to alter or - restrict the recipients’ rights in the Source Code Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this License, - or sublicense it under different terms, provided that the license for - the Executable Form does not attempt to limit or alter the recipients’ - rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for the - Covered Software. If the Larger Work is a combination of Covered Software - with a work governed by one or more Secondary Licenses, and the Covered - Software is not Incompatible With Secondary Licenses, this License permits - You to additionally distribute such Covered Software under the terms of - such Secondary License(s), so that the recipient of the Larger Work may, at - their option, further distribute the Covered Software under the terms of - either this License or such Secondary License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices (including - copyright notices, patent notices, disclaimers of warranty, or limitations - of liability) contained within the Source Code Form of the Covered - Software, except that You may alter any license notices to the extent - required to remedy known factual inaccuracies. - -3.5. 
Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on behalf - of any Contributor. You must make it absolutely clear that any such - warranty, support, indemnity, or liability obligation is offered by You - alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, judicial - order, or regulation then You must: (a) comply with the terms of this License - to the maximum extent possible; and (b) describe the limitations and the code - they affect. Such description must be placed in a text file included with all - distributions of the Covered Software under this License. Except to the - extent prohibited by statute or regulation, such description must be - sufficiently detailed for a recipient of ordinary skill to be able to - understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing basis, - if such Contributor fails to notify You of the non-compliance by some - reasonable means prior to 60 days after You have come back into compliance. - Moreover, Your grants from a particular Contributor are reinstated on an - ongoing basis if such Contributor notifies You of the non-compliance by - some reasonable means, this is the first time You have received notice of - non-compliance with this License from such Contributor, and You become - compliant prior to 30 days after Your receipt of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, counter-claims, - and cross-claims) alleging that a Contributor Version directly or - indirectly infringes any patent, then the rights granted to You by any and - all Contributors for the Covered Software under Section 2.1 of this License - shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an “as is” basis, without - warranty of any kind, either expressed, implied, or statutory, including, - without limitation, warranties that the Covered Software is free of defects, - merchantable, fit for a particular purpose or non-infringing. The entire - risk as to the quality and performance of the Covered Software is with You. - Should any Covered Software prove defective in any respect, You (not any - Contributor) assume the cost of any necessary servicing, repair, or - correction. 
This disclaimer of warranty constitutes an essential part of this - License. No use of any Covered Software is authorized under this License - except under this disclaimer. - -7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from such - party’s negligence to the extent applicable law prohibits such limitation. - Some jurisdictions do not allow the exclusion or limitation of incidental or - consequential damages, so this exclusion and limitation may not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts of - a jurisdiction where the defendant maintains its principal place of business - and such litigation shall be governed by laws of that jurisdiction, without - reference to its conflict-of-law provisions. Nothing in this Section shall - prevent a party’s ability to bring cross-claims or counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject matter - hereof. If any provision of this License is held to be unenforceable, such - provision shall be reformed only to the extent necessary to make it - enforceable. Any law or regulation which provides that the language of a - contract shall be construed against the drafter shall not be used to construe - this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version of - the License under which You originally received the Covered Software, or - under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a modified - version of this License if you rename the license and remove any - references to the name of the license steward (except to note that such - modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses - If You choose to distribute Source Code Form that is Incompatible With - Secondary Licenses under the terms of this version of the License, the - notice described in Exhibit B of this License must be attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. 
- -If it is not possible or desirable to put the notice in a particular file, then -You may include the notice in a location (such as a LICENSE file in a relevant -directory) where a recipient would be likely to look for such a notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - “Incompatible With Secondary Licenses” Notice - - This Source Code Form is “Incompatible - With Secondary Licenses”, as defined by - the Mozilla Public License, v. 2.0. - diff --git a/vendor/github.com/hashicorp/serf/coordinate/client.go b/vendor/github.com/hashicorp/serf/coordinate/client.go deleted file mode 100644 index 613bfff..0000000 --- a/vendor/github.com/hashicorp/serf/coordinate/client.go +++ /dev/null @@ -1,180 +0,0 @@ -package coordinate - -import ( - "fmt" - "math" - "sort" - "sync" - "time" -) - -// Client manages the estimated network coordinate for a given node, and adjusts -// it as the node observes round trip times and estimated coordinates from other -// nodes. The core algorithm is based on Vivaldi, see the documentation for Config -// for more details. -type Client struct { - // coord is the current estimate of the client's network coordinate. - coord *Coordinate - - // origin is a coordinate sitting at the origin. - origin *Coordinate - - // config contains the tuning parameters that govern the performance of - // the algorithm. - config *Config - - // adjustmentIndex is the current index into the adjustmentSamples slice. - adjustmentIndex uint - - // adjustment is used to store samples for the adjustment calculation. - adjustmentSamples []float64 - - // latencyFilterSamples is used to store the last several RTT samples, - // keyed by node name. We will use the config's LatencyFilterSamples - // value to determine how many samples we keep, per node. - latencyFilterSamples map[string][]float64 - - // mutex enables safe concurrent access to the client. - mutex sync.RWMutex -} - -// NewClient creates a new Client and verifies the configuration is valid. -func NewClient(config *Config) (*Client, error) { - if !(config.Dimensionality > 0) { - return nil, fmt.Errorf("dimensionality must be >0") - } - - return &Client{ - coord: NewCoordinate(config), - origin: NewCoordinate(config), - config: config, - adjustmentIndex: 0, - adjustmentSamples: make([]float64, config.AdjustmentWindowSize), - latencyFilterSamples: make(map[string][]float64), - }, nil -} - -// GetCoordinate returns a copy of the coordinate for this client. -func (c *Client) GetCoordinate() *Coordinate { - c.mutex.RLock() - defer c.mutex.RUnlock() - - return c.coord.Clone() -} - -// SetCoordinate forces the client's coordinate to a known state. -func (c *Client) SetCoordinate(coord *Coordinate) { - c.mutex.Lock() - defer c.mutex.Unlock() - - c.coord = coord.Clone() -} - -// ForgetNode removes any client state for the given node. -func (c *Client) ForgetNode(node string) { - c.mutex.Lock() - defer c.mutex.Unlock() - - delete(c.latencyFilterSamples, node) -} - -// latencyFilter applies a simple moving median filter with a new sample for -// a node. This assumes that the mutex has been locked already. -func (c *Client) latencyFilter(node string, rttSeconds float64) float64 { - samples, ok := c.latencyFilterSamples[node] - if !ok { - samples = make([]float64, 0, c.config.LatencyFilterSize) - } - - // Add the new sample and trim the list, if needed. 
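-	// Only the most recent LatencyFilterSize samples are retained per node,
-	// so the median below is computed over a bounded sliding window.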
-	samples = append(samples, rttSeconds)
-	if len(samples) > int(c.config.LatencyFilterSize) {
-		samples = samples[1:]
-	}
-	c.latencyFilterSamples[node] = samples
-
-	// Sort a copy of the samples and return the median.
-	sorted := make([]float64, len(samples))
-	copy(sorted, samples)
-	sort.Float64s(sorted)
-	return sorted[len(sorted)/2]
-}
-
-// updateVivaldi updates the Vivaldi portion of the client's coordinate. This
-// assumes that the mutex has been locked already.
-func (c *Client) updateVivaldi(other *Coordinate, rttSeconds float64) {
-	const zeroThreshold = 1.0e-6
-
-	dist := c.coord.DistanceTo(other).Seconds()
-	if rttSeconds < zeroThreshold {
-		rttSeconds = zeroThreshold
-	}
-	wrongness := math.Abs(dist-rttSeconds) / rttSeconds
-
-	totalError := c.coord.Error + other.Error
-	if totalError < zeroThreshold {
-		totalError = zeroThreshold
-	}
-	weight := c.coord.Error / totalError
-
-	c.coord.Error = c.config.VivaldiCE*weight*wrongness + c.coord.Error*(1.0-c.config.VivaldiCE*weight)
-	if c.coord.Error > c.config.VivaldiErrorMax {
-		c.coord.Error = c.config.VivaldiErrorMax
-	}
-
-	delta := c.config.VivaldiCC * weight
-	force := delta * (rttSeconds - dist)
-	c.coord = c.coord.ApplyForce(c.config, force, other)
-}
-
-// updateAdjustment updates the adjustment portion of the client's coordinate, if
-// the feature is enabled. This assumes that the mutex has been locked already.
-func (c *Client) updateAdjustment(other *Coordinate, rttSeconds float64) {
-	if c.config.AdjustmentWindowSize == 0 {
-		return
-	}
-
-	// Note that the existing adjustment factors don't figure into this
-	// calculation, so we use the raw distance here.
-	dist := c.coord.rawDistanceTo(other)
-	c.adjustmentSamples[c.adjustmentIndex] = rttSeconds - dist
-	c.adjustmentIndex = (c.adjustmentIndex + 1) % c.config.AdjustmentWindowSize
-
-	sum := 0.0
-	for _, sample := range c.adjustmentSamples {
-		sum += sample
-	}
-	c.coord.Adjustment = sum / (2.0 * float64(c.config.AdjustmentWindowSize))
-}
-
-// updateGravity applies a small amount of gravity to pull coordinates towards
-// the center of the coordinate system to combat drift. This assumes that the
-// mutex is locked already.
-func (c *Client) updateGravity() {
-	dist := c.origin.DistanceTo(c.coord).Seconds()
-	force := -1.0 * math.Pow(dist/c.config.GravityRho, 2.0)
-	c.coord = c.coord.ApplyForce(c.config, force, c.origin)
-}
-
-// Update takes other, a coordinate for another node, and rtt, a round trip
-// time observation for a ping to that node, and updates the estimated position of
-// the client's coordinate. Returns the updated coordinate.
-func (c *Client) Update(node string, other *Coordinate, rtt time.Duration) *Coordinate {
-	c.mutex.Lock()
-	defer c.mutex.Unlock()
-
-	rttSeconds := c.latencyFilter(node, rtt.Seconds())
-	c.updateVivaldi(other, rttSeconds)
-	c.updateAdjustment(other, rttSeconds)
-	c.updateGravity()
-	return c.coord.Clone()
-}
-
-// DistanceTo returns the estimated RTT from the client's coordinate to other, the
-// coordinate for another node.
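-// The estimate includes the height and adjustment offsets that the algorithm
-// maintains alongside the Euclidean part of the coordinate.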
-func (c *Client) DistanceTo(other *Coordinate) time.Duration { - c.mutex.RLock() - defer c.mutex.RUnlock() - - return c.coord.DistanceTo(other) -} diff --git a/vendor/github.com/hashicorp/serf/coordinate/config.go b/vendor/github.com/hashicorp/serf/coordinate/config.go deleted file mode 100644 index b85a8ab..0000000 --- a/vendor/github.com/hashicorp/serf/coordinate/config.go +++ /dev/null @@ -1,70 +0,0 @@ -package coordinate - -// Config is used to set the parameters of the Vivaldi-based coordinate mapping -// algorithm. -// -// The following references are called out at various points in the documentation -// here: -// -// [1] Dabek, Frank, et al. "Vivaldi: A decentralized network coordinate system." -// ACM SIGCOMM Computer Communication Review. Vol. 34. No. 4. ACM, 2004. -// [2] Ledlie, Jonathan, Paul Gardner, and Margo I. Seltzer. "Network Coordinates -// in the Wild." NSDI. Vol. 7. 2007. -// [3] Lee, Sanghwan, et al. "On suitability of Euclidean embedding for -// host-based network coordinate systems." Networking, IEEE/ACM Transactions -// on 18.1 (2010): 27-40. -type Config struct { - // The dimensionality of the coordinate system. As discussed in [2], more - // dimensions improves the accuracy of the estimates up to a point. Per [2] - // we chose 8 dimensions plus a non-Euclidean height. - Dimensionality uint - - // VivaldiErrorMax is the default error value when a node hasn't yet made - // any observations. It also serves as an upper limit on the error value in - // case observations cause the error value to increase without bound. - VivaldiErrorMax float64 - - // VivaldiCE is a tuning factor that controls the maximum impact an - // observation can have on a node's confidence. See [1] for more details. - VivaldiCE float64 - - // VivaldiCC is a tuning factor that controls the maximum impact an - // observation can have on a node's coordinate. See [1] for more details. - VivaldiCC float64 - - // AdjustmentWindowSize is a tuning factor that determines how many samples - // we retain to calculate the adjustment factor as discussed in [3]. Setting - // this to zero disables this feature. - AdjustmentWindowSize uint - - // HeightMin is the minimum value of the height parameter. Since this - // always must be positive, it will introduce a small amount error, so - // the chosen value should be relatively small compared to "normal" - // coordinates. - HeightMin float64 - - // LatencyFilterSamples is the maximum number of samples that are retained - // per node, in order to compute a median. The intent is to ride out blips - // but still keep the delay low, since our time to probe any given node is - // pretty infrequent. See [2] for more details. - LatencyFilterSize uint - - // GravityRho is a tuning factor that sets how much gravity has an effect - // to try to re-center coordinates. See [2] for more details. - GravityRho float64 -} - -// DefaultConfig returns a Config that has some default values suitable for -// basic testing of the algorithm, but not tuned to any particular type of cluster. 
-func DefaultConfig() *Config {
-	return &Config{
-		Dimensionality:       8,
-		VivaldiErrorMax:      1.5,
-		VivaldiCE:            0.25,
-		VivaldiCC:            0.25,
-		AdjustmentWindowSize: 20,
-		HeightMin:            10.0e-6,
-		LatencyFilterSize:    3,
-		GravityRho:           150.0,
-	}
-}
diff --git a/vendor/github.com/hashicorp/serf/coordinate/coordinate.go b/vendor/github.com/hashicorp/serf/coordinate/coordinate.go
deleted file mode 100644
index c9194e0..0000000
--- a/vendor/github.com/hashicorp/serf/coordinate/coordinate.go
+++ /dev/null
@@ -1,183 +0,0 @@
-package coordinate
-
-import (
-	"math"
-	"math/rand"
-	"time"
-)
-
-// Coordinate is a specialized structure for holding network coordinates for the
-// Vivaldi-based coordinate mapping algorithm. All of the fields should be public
-// to enable this to be serialized. All values in here are in units of seconds.
-type Coordinate struct {
-	// Vec is the Euclidean portion of the coordinate. This is used along
-	// with the other fields to provide an overall distance estimate. The
-	// units here are seconds.
-	Vec []float64
-
-	// Error reflects the confidence in the given coordinate and is updated
-	// dynamically by the Vivaldi Client. This is dimensionless.
-	Error float64
-
-	// Adjustment is a distance offset computed based on a calculation over
-	// observations from all other nodes over a fixed window and is updated
-	// dynamically by the Vivaldi Client. The units here are seconds.
-	Adjustment float64
-
-	// Height is a distance offset that accounts for non-Euclidean effects
-	// which model the access links from nodes to the core Internet. The access
-	// links are usually set by bandwidth and congestion, and the core links
-	// usually follow distance based on geography.
-	Height float64
-}
-
-const (
-	// secondsToNanoseconds is used to convert float seconds to nanoseconds.
-	secondsToNanoseconds = 1.0e9
-
-	// zeroThreshold is used to decide if two coordinates are on top of each
-	// other.
-	zeroThreshold = 1.0e-6
-)
-
-// DimensionalityConflictError is panicked if you try to perform operations
-// with incompatible dimensions.
-type DimensionalityConflictError struct{}
-
-// Error implements the error interface.
-func (e DimensionalityConflictError) Error() string {
-	return "coordinate dimensionality does not match"
-}
-
-// NewCoordinate creates a new coordinate at the origin, using the given config
-// to supply key initial values.
-func NewCoordinate(config *Config) *Coordinate {
-	return &Coordinate{
-		Vec:        make([]float64, config.Dimensionality),
-		Error:      config.VivaldiErrorMax,
-		Adjustment: 0.0,
-		Height:     config.HeightMin,
-	}
-}
-
-// Clone creates an independent copy of this coordinate.
-func (c *Coordinate) Clone() *Coordinate {
-	vec := make([]float64, len(c.Vec))
-	copy(vec, c.Vec)
-	return &Coordinate{
-		Vec:        vec,
-		Error:      c.Error,
-		Adjustment: c.Adjustment,
-		Height:     c.Height,
-	}
-}
-
-// IsCompatibleWith checks to see if the two coordinates are compatible
-// dimensionally. If this returns true then you are guaranteed to not get
-// any runtime errors operating on them.
-func (c *Coordinate) IsCompatibleWith(other *Coordinate) bool {
-	return len(c.Vec) == len(other.Vec)
-}
-
-// ApplyForce returns the result of applying the force from the direction of the
-// other coordinate.
-func (c *Coordinate) ApplyForce(config *Config, force float64, other *Coordinate) *Coordinate { - if !c.IsCompatibleWith(other) { - panic(DimensionalityConflictError{}) - } - - ret := c.Clone() - unit, mag := unitVectorAt(c.Vec, other.Vec) - ret.Vec = add(ret.Vec, mul(unit, force)) - if mag > zeroThreshold { - ret.Height = (ret.Height+other.Height)*force/mag + ret.Height - ret.Height = math.Max(ret.Height, config.HeightMin) - } - return ret -} - -// DistanceTo returns the distance between this coordinate and the other -// coordinate, including adjustments. -func (c *Coordinate) DistanceTo(other *Coordinate) time.Duration { - if !c.IsCompatibleWith(other) { - panic(DimensionalityConflictError{}) - } - - dist := c.rawDistanceTo(other) - adjustedDist := dist + c.Adjustment + other.Adjustment - if adjustedDist > 0.0 { - dist = adjustedDist - } - return time.Duration(dist * secondsToNanoseconds) -} - -// rawDistanceTo returns the Vivaldi distance between this coordinate and the -// other coordinate in seconds, not including adjustments. This assumes the -// dimensions have already been checked to be compatible. -func (c *Coordinate) rawDistanceTo(other *Coordinate) float64 { - return magnitude(diff(c.Vec, other.Vec)) + c.Height + other.Height -} - -// add returns the sum of vec1 and vec2. This assumes the dimensions have -// already been checked to be compatible. -func add(vec1 []float64, vec2 []float64) []float64 { - ret := make([]float64, len(vec1)) - for i, _ := range ret { - ret[i] = vec1[i] + vec2[i] - } - return ret -} - -// diff returns the difference between the vec1 and vec2. This assumes the -// dimensions have already been checked to be compatible. -func diff(vec1 []float64, vec2 []float64) []float64 { - ret := make([]float64, len(vec1)) - for i, _ := range ret { - ret[i] = vec1[i] - vec2[i] - } - return ret -} - -// mul returns vec multiplied by a scalar factor. -func mul(vec []float64, factor float64) []float64 { - ret := make([]float64, len(vec)) - for i, _ := range vec { - ret[i] = vec[i] * factor - } - return ret -} - -// magnitude computes the magnitude of the vec. -func magnitude(vec []float64) float64 { - sum := 0.0 - for i, _ := range vec { - sum += vec[i] * vec[i] - } - return math.Sqrt(sum) -} - -// unitVectorAt returns a unit vector pointing at vec1 from vec2. If the two -// positions are the same then a random unit vector is returned. We also return -// the distance between the points for use in the later height calculation. -func unitVectorAt(vec1 []float64, vec2 []float64) ([]float64, float64) { - ret := diff(vec1, vec2) - - // If the coordinates aren't on top of each other we can normalize. - if mag := magnitude(ret); mag > zeroThreshold { - return mul(ret, 1.0/mag), mag - } - - // Otherwise, just return a random unit vector. - for i, _ := range ret { - ret[i] = rand.Float64() - 0.5 - } - if mag := magnitude(ret); mag > zeroThreshold { - return mul(ret, 1.0/mag), 0.0 - } - - // And finally just give up and make a unit vector along the first - // dimension. This should be exceedingly rare. 
-	ret = make([]float64, len(ret))
-	ret[0] = 1.0
-	return ret, 0.0
-}
diff --git a/vendor/github.com/hashicorp/serf/coordinate/phantom.go b/vendor/github.com/hashicorp/serf/coordinate/phantom.go
deleted file mode 100644
index 6fb033c..0000000
--- a/vendor/github.com/hashicorp/serf/coordinate/phantom.go
+++ /dev/null
@@ -1,187 +0,0 @@
-package coordinate
-
-import (
-	"fmt"
-	"math"
-	"math/rand"
-	"time"
-)
-
-// GenerateClients returns a slice with nodes number of clients, all with the
-// given config.
-func GenerateClients(nodes int, config *Config) ([]*Client, error) {
-	clients := make([]*Client, nodes)
-	for i, _ := range clients {
-		client, err := NewClient(config)
-		if err != nil {
-			return nil, err
-		}
-
-		clients[i] = client
-	}
-	return clients, nil
-}
-
-// GenerateLine returns a truth matrix as if all the nodes are in a straight line
-// with the given spacing between them.
-func GenerateLine(nodes int, spacing time.Duration) [][]time.Duration {
-	truth := make([][]time.Duration, nodes)
-	for i := range truth {
-		truth[i] = make([]time.Duration, nodes)
-	}
-
-	for i := 0; i < nodes; i++ {
-		for j := i + 1; j < nodes; j++ {
-			rtt := time.Duration(j-i) * spacing
-			truth[i][j], truth[j][i] = rtt, rtt
-		}
-	}
-	return truth
-}
-
-// GenerateGrid returns a truth matrix as if all the nodes are in a two dimensional
-// grid with the given spacing between them.
-func GenerateGrid(nodes int, spacing time.Duration) [][]time.Duration {
-	truth := make([][]time.Duration, nodes)
-	for i := range truth {
-		truth[i] = make([]time.Duration, nodes)
-	}
-
-	n := int(math.Sqrt(float64(nodes)))
-	for i := 0; i < nodes; i++ {
-		for j := i + 1; j < nodes; j++ {
-			x1, y1 := float64(i%n), float64(i/n)
-			x2, y2 := float64(j%n), float64(j/n)
-			dx, dy := x2-x1, y2-y1
-			dist := math.Sqrt(dx*dx + dy*dy)
-			rtt := time.Duration(dist * float64(spacing))
-			truth[i][j], truth[j][i] = rtt, rtt
-		}
-	}
-	return truth
-}
-
-// GenerateSplit returns a truth matrix as if half the nodes are close together in
-// one location and half the nodes are close together in another. The lan factor
-// is used to separate the nodes locally and the wan factor represents the split
-// between the two sides.
-func GenerateSplit(nodes int, lan time.Duration, wan time.Duration) [][]time.Duration {
-	truth := make([][]time.Duration, nodes)
-	for i := range truth {
-		truth[i] = make([]time.Duration, nodes)
-	}
-
-	split := nodes / 2
-	for i := 0; i < nodes; i++ {
-		for j := i + 1; j < nodes; j++ {
-			rtt := lan
-			if (i <= split && j > split) || (i > split && j <= split) {
-				rtt += wan
-			}
-			truth[i][j], truth[j][i] = rtt, rtt
-		}
-	}
-	return truth
-}
-
-// GenerateCircle returns a truth matrix for a set of nodes, evenly distributed
-// around a circle with the given radius. The first node is at the "center" of the
-// circle because it's equidistant from all the other nodes, but we place it at
-// double the radius, so it should show up above all the other nodes in height.
-func GenerateCircle(nodes int, radius time.Duration) [][]time.Duration { - truth := make([][]time.Duration, nodes) - for i := range truth { - truth[i] = make([]time.Duration, nodes) - } - - for i := 0; i < nodes; i++ { - for j := i + 1; j < nodes; j++ { - var rtt time.Duration - if i == 0 { - rtt = 2 * radius - } else { - t1 := 2.0 * math.Pi * float64(i) / float64(nodes) - x1, y1 := math.Cos(t1), math.Sin(t1) - t2 := 2.0 * math.Pi * float64(j) / float64(nodes) - x2, y2 := math.Cos(t2), math.Sin(t2) - dx, dy := x2-x1, y2-y1 - dist := math.Sqrt(dx*dx + dy*dy) - rtt = time.Duration(dist * float64(radius)) - } - truth[i][j], truth[j][i] = rtt, rtt - } - } - return truth -} - -// GenerateRandom returns a truth matrix for a set of nodes with normally -// distributed delays, with the given mean and deviation. The RNG is re-seeded -// so you always get the same matrix for a given size. -func GenerateRandom(nodes int, mean time.Duration, deviation time.Duration) [][]time.Duration { - rand.Seed(1) - - truth := make([][]time.Duration, nodes) - for i := range truth { - truth[i] = make([]time.Duration, nodes) - } - - for i := 0; i < nodes; i++ { - for j := i + 1; j < nodes; j++ { - rttSeconds := rand.NormFloat64()*deviation.Seconds() + mean.Seconds() - rtt := time.Duration(rttSeconds * secondsToNanoseconds) - truth[i][j], truth[j][i] = rtt, rtt - } - } - return truth -} - -// Simulate runs the given number of cycles using the given list of clients and -// truth matrix. On each cycle, each client will pick a random node and observe -// the truth RTT, updating its coordinate estimate. The RNG is re-seeded for -// each simulation run to get deterministic results (for this algorithm and the -// underlying algorithm which will use random numbers for position vectors when -// starting out with everything at the origin). -func Simulate(clients []*Client, truth [][]time.Duration, cycles int) { - rand.Seed(1) - - nodes := len(clients) - for cycle := 0; cycle < cycles; cycle++ { - for i, _ := range clients { - if j := rand.Intn(nodes); j != i { - c := clients[j].GetCoordinate() - rtt := truth[i][j] - node := fmt.Sprintf("node_%d", j) - clients[i].Update(node, c, rtt) - } - } - } -} - -// Stats is returned from the Evaluate function with a summary of the algorithm -// performance. -type Stats struct { - ErrorMax float64 - ErrorAvg float64 -} - -// Evaluate uses the coordinates of the given clients to calculate estimated -// distances and compares them with the given truth matrix, returning summary -// stats. 
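-// The error reported for each node pair is the relative error,
-// |estimated - actual| / actual.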
-func Evaluate(clients []*Client, truth [][]time.Duration) (stats Stats) { - nodes := len(clients) - count := 0 - for i := 0; i < nodes; i++ { - for j := i + 1; j < nodes; j++ { - est := clients[i].DistanceTo(clients[j].GetCoordinate()).Seconds() - actual := truth[i][j].Seconds() - error := math.Abs(est-actual) / actual - stats.ErrorMax = math.Max(stats.ErrorMax, error) - stats.ErrorAvg += error - count += 1 - } - } - - stats.ErrorAvg /= float64(count) - fmt.Printf("Error avg=%9.6f max=%9.6f\n", stats.ErrorAvg, stats.ErrorMax) - return -} diff --git a/vendor/github.com/hashicorp/serf/ops-misc/debian/copyright b/vendor/github.com/hashicorp/serf/ops-misc/debian/copyright deleted file mode 100644 index 21a1a1b..0000000 --- a/vendor/github.com/hashicorp/serf/ops-misc/debian/copyright +++ /dev/null @@ -1,2 +0,0 @@ -Name: serf -Copyright: Hashicorp 2013 diff --git a/vendor/github.com/hashicorp/serf/website/source/LICENSE b/vendor/github.com/hashicorp/serf/website/source/LICENSE deleted file mode 100644 index 36c29d7..0000000 --- a/vendor/github.com/hashicorp/serf/website/source/LICENSE +++ /dev/null @@ -1,10 +0,0 @@ -# Proprietary License - -This license is temporary while a more official one is drafted. However, -this should make it clear: - -* The text contents of this website are MPL 2.0 licensed. - -* The design contents of this website are proprietary and may not be reproduced - or reused in any way other than to run the Serf website locally. The license - for the design is owned solely by HashiCorp, Inc. diff --git a/vendor/github.com/jinzhu/inflection/LICENSE b/vendor/github.com/jinzhu/inflection/LICENSE deleted file mode 100644 index a1ca9a0..0000000 --- a/vendor/github.com/jinzhu/inflection/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 - Jinzhu - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
diff --git a/vendor/github.com/jinzhu/inflection/README.md b/vendor/github.com/jinzhu/inflection/README.md deleted file mode 100644 index a3de336..0000000 --- a/vendor/github.com/jinzhu/inflection/README.md +++ /dev/null @@ -1,55 +0,0 @@ -# Inflection - -Inflection pluralizes and singularizes English nouns - -[![wercker status](https://app.wercker.com/status/f8c7432b097d1f4ce636879670be0930/s/master "wercker status")](https://app.wercker.com/project/byKey/f8c7432b097d1f4ce636879670be0930) - -## Basic Usage - -```go -inflection.Plural("person") => "people" -inflection.Plural("Person") => "People" -inflection.Plural("PERSON") => "PEOPLE" -inflection.Plural("bus") => "buses" -inflection.Plural("BUS") => "BUSES" -inflection.Plural("Bus") => "Buses" - -inflection.Singular("people") => "person" -inflection.Singular("People") => "Person" -inflection.Singular("PEOPLE") => "PERSON" -inflection.Singular("buses") => "bus" -inflection.Singular("BUSES") => "BUS" -inflection.Singular("Buses") => "Bus" - -inflection.Plural("FancyPerson") => "FancyPeople" -inflection.Singular("FancyPeople") => "FancyPerson" -``` - -## Register Rules - -Standard rules are from Rails's ActiveSupport (https://github.com/rails/rails/blob/master/activesupport/lib/active_support/inflections.rb) - -If you want to register more rules, follow: - -``` -inflection.AddUncountable("fish") -inflection.AddIrregular("person", "people") -inflection.AddPlural("(bu)s$", "${1}ses") # "bus" => "buses" / "BUS" => "BUSES" / "Bus" => "Buses" -inflection.AddSingular("(bus)(es)?$", "${1}") # "buses" => "bus" / "Buses" => "Bus" / "BUSES" => "BUS" -``` - -## Contributing - -You can help to make the project better, check out [http://gorm.io/contribute.html](http://gorm.io/contribute.html) for things you can do. - -## Author - -**jinzhu** - -* -* -* - -## License - -Released under the [MIT License](http://www.opensource.org/licenses/MIT). diff --git a/vendor/github.com/jinzhu/inflection/inflections.go b/vendor/github.com/jinzhu/inflection/inflections.go deleted file mode 100644 index 606263b..0000000 --- a/vendor/github.com/jinzhu/inflection/inflections.go +++ /dev/null @@ -1,273 +0,0 @@ -/* -Package inflection pluralizes and singularizes English nouns. 
-
-	inflection.Plural("person") => "people"
-	inflection.Plural("Person") => "People"
-	inflection.Plural("PERSON") => "PEOPLE"
-
-	inflection.Singular("people") => "person"
-	inflection.Singular("People") => "Person"
-	inflection.Singular("PEOPLE") => "PERSON"
-
-	inflection.Plural("FancyPerson") => "FancyPeople"
-	inflection.Singular("FancyPeople") => "FancyPerson"
-
-Standard rules are from Rails's ActiveSupport (https://github.com/rails/rails/blob/master/activesupport/lib/active_support/inflections.rb)
-
-If you want to register more rules, follow:
-
-	inflection.AddUncountable("fish")
-	inflection.AddIrregular("person", "people")
-	inflection.AddPlural("(bu)s$", "${1}ses") # "bus" => "buses" / "BUS" => "BUSES" / "Bus" => "Buses"
-	inflection.AddSingular("(bus)(es)?$", "${1}") # "buses" => "bus" / "Buses" => "Bus" / "BUSES" => "BUS"
-*/
-package inflection
-
-import (
-	"regexp"
-	"strings"
-)
-
-type inflection struct {
-	regexp  *regexp.Regexp
-	replace string
-}
-
-// Regular is a regexp find replace inflection
-type Regular struct {
-	find    string
-	replace string
-}
-
-// Irregular is a hard replace inflection,
-// containing both singular and plural forms
-type Irregular struct {
-	singular string
-	plural   string
-}
-
-// RegularSlice is a slice of Regular inflections
-type RegularSlice []Regular
-
-// IrregularSlice is a slice of Irregular inflections
-type IrregularSlice []Irregular
-
-var pluralInflections = RegularSlice{
-	{"([a-z])$", "${1}s"},
-	{"s$", "s"},
-	{"^(ax|test)is$", "${1}es"},
-	{"(octop|vir)us$", "${1}i"},
-	{"(octop|vir)i$", "${1}i"},
-	{"(alias|status)$", "${1}es"},
-	{"(bu)s$", "${1}ses"},
-	{"(buffal|tomat)o$", "${1}oes"},
-	{"([ti])um$", "${1}a"},
-	{"([ti])a$", "${1}a"},
-	{"sis$", "ses"},
-	{"(?:([^f])fe|([lr])f)$", "${1}${2}ves"},
-	{"(hive)$", "${1}s"},
-	{"([^aeiouy]|qu)y$", "${1}ies"},
-	{"(x|ch|ss|sh)$", "${1}es"},
-	{"(matr|vert|ind)(?:ix|ex)$", "${1}ices"},
-	{"^(m|l)ouse$", "${1}ice"},
-	{"^(m|l)ice$", "${1}ice"},
-	{"^(ox)$", "${1}en"},
-	{"^(oxen)$", "${1}"},
-	{"(quiz)$", "${1}zes"},
-}
-
-var singularInflections = RegularSlice{
-	{"s$", ""},
-	{"(ss)$", "${1}"},
-	{"(n)ews$", "${1}ews"},
-	{"([ti])a$", "${1}um"},
-	{"((a)naly|(b)a|(d)iagno|(p)arenthe|(p)rogno|(s)ynop|(t)he)(sis|ses)$", "${1}sis"},
-	{"(^analy)(sis|ses)$", "${1}sis"},
-	{"([^f])ves$", "${1}fe"},
-	{"(hive)s$", "${1}"},
-	{"(tive)s$", "${1}"},
-	{"([lr])ves$", "${1}f"},
-	{"([^aeiouy]|qu)ies$", "${1}y"},
-	{"(s)eries$", "${1}eries"},
-	{"(m)ovies$", "${1}ovie"},
-	{"(c)ookies$", "${1}ookie"},
-	{"(x|ch|ss|sh)es$", "${1}"},
-	{"^(m|l)ice$", "${1}ouse"},
-	{"(bus)(es)?$", "${1}"},
-	{"(o)es$", "${1}"},
-	{"(shoe)s$", "${1}"},
-	{"(cris|test)(is|es)$", "${1}is"},
-	{"^(a)x[ie]s$", "${1}xis"},
-	{"(octop|vir)(us|i)$", "${1}us"},
-	{"(alias|status)(es)?$", "${1}"},
-	{"^(ox)en", "${1}"},
-	{"(vert|ind)ices$", "${1}ex"},
-	{"(matr)ices$", "${1}ix"},
-	{"(quiz)zes$", "${1}"},
-	{"(database)s$", "${1}"},
-}
-
-var irregularInflections = IrregularSlice{
-	{"person", "people"},
-	{"man", "men"},
-	{"child", "children"},
-	{"sex", "sexes"},
-	{"move", "moves"},
-	{"zombie", "zombies"},
-}
-
-var uncountableInflections = []string{"equipment", "information", "rice", "money", "species", "series", "fish", "sheep", "jeans", "police"}
-
-var compiledPluralMaps []inflection
-var compiledSingularMaps []inflection
-
-func compile() {
-	compiledPluralMaps = []inflection{}
-	compiledSingularMaps = []inflection{}
-	for _, uncountable := range uncountableInflections {
-		inf := inflection{
-			regexp: 
regexp.MustCompile("^(?i)(" + uncountable + ")$"), - replace: "${1}", - } - compiledPluralMaps = append(compiledPluralMaps, inf) - compiledSingularMaps = append(compiledSingularMaps, inf) - } - - for _, value := range irregularInflections { - infs := []inflection{ - inflection{regexp: regexp.MustCompile(strings.ToUpper(value.singular) + "$"), replace: strings.ToUpper(value.plural)}, - inflection{regexp: regexp.MustCompile(strings.Title(value.singular) + "$"), replace: strings.Title(value.plural)}, - inflection{regexp: regexp.MustCompile(value.singular + "$"), replace: value.plural}, - } - compiledPluralMaps = append(compiledPluralMaps, infs...) - } - - for _, value := range irregularInflections { - infs := []inflection{ - inflection{regexp: regexp.MustCompile(strings.ToUpper(value.plural) + "$"), replace: strings.ToUpper(value.singular)}, - inflection{regexp: regexp.MustCompile(strings.Title(value.plural) + "$"), replace: strings.Title(value.singular)}, - inflection{regexp: regexp.MustCompile(value.plural + "$"), replace: value.singular}, - } - compiledSingularMaps = append(compiledSingularMaps, infs...) - } - - for i := len(pluralInflections) - 1; i >= 0; i-- { - value := pluralInflections[i] - infs := []inflection{ - inflection{regexp: regexp.MustCompile(strings.ToUpper(value.find)), replace: strings.ToUpper(value.replace)}, - inflection{regexp: regexp.MustCompile(value.find), replace: value.replace}, - inflection{regexp: regexp.MustCompile("(?i)" + value.find), replace: value.replace}, - } - compiledPluralMaps = append(compiledPluralMaps, infs...) - } - - for i := len(singularInflections) - 1; i >= 0; i-- { - value := singularInflections[i] - infs := []inflection{ - inflection{regexp: regexp.MustCompile(strings.ToUpper(value.find)), replace: strings.ToUpper(value.replace)}, - inflection{regexp: regexp.MustCompile(value.find), replace: value.replace}, - inflection{regexp: regexp.MustCompile("(?i)" + value.find), replace: value.replace}, - } - compiledSingularMaps = append(compiledSingularMaps, infs...) - } -} - -func init() { - compile() -} - -// AddPlural adds a plural inflection -func AddPlural(find, replace string) { - pluralInflections = append(pluralInflections, Regular{find, replace}) - compile() -} - -// AddSingular adds a singular inflection -func AddSingular(find, replace string) { - singularInflections = append(singularInflections, Regular{find, replace}) - compile() -} - -// AddIrregular adds an irregular inflection -func AddIrregular(singular, plural string) { - irregularInflections = append(irregularInflections, Irregular{singular, plural}) - compile() -} - -// AddUncountable adds an uncountable inflection -func AddUncountable(values ...string) { - uncountableInflections = append(uncountableInflections, values...) 
- compile() -} - -// GetPlural retrieves the plural inflection values -func GetPlural() RegularSlice { - plurals := make(RegularSlice, len(pluralInflections)) - copy(plurals, pluralInflections) - return plurals -} - -// GetSingular retrieves the singular inflection values -func GetSingular() RegularSlice { - singulars := make(RegularSlice, len(singularInflections)) - copy(singulars, singularInflections) - return singulars -} - -// GetIrregular retrieves the irregular inflection values -func GetIrregular() IrregularSlice { - irregular := make(IrregularSlice, len(irregularInflections)) - copy(irregular, irregularInflections) - return irregular -} - -// GetUncountable retrieves the uncountable inflection values -func GetUncountable() []string { - uncountables := make([]string, len(uncountableInflections)) - copy(uncountables, uncountableInflections) - return uncountables -} - -// SetPlural sets the plural inflections slice -func SetPlural(inflections RegularSlice) { - pluralInflections = inflections - compile() -} - -// SetSingular sets the singular inflections slice -func SetSingular(inflections RegularSlice) { - singularInflections = inflections - compile() -} - -// SetIrregular sets the irregular inflections slice -func SetIrregular(inflections IrregularSlice) { - irregularInflections = inflections - compile() -} - -// SetUncountable sets the uncountable inflections slice -func SetUncountable(inflections []string) { - uncountableInflections = inflections - compile() -} - -// Plural converts a word to its plural form -func Plural(str string) string { - for _, inflection := range compiledPluralMaps { - if inflection.regexp.MatchString(str) { - return inflection.regexp.ReplaceAllString(str, inflection.replace) - } - } - return str -} - -// Singular converts a word to its singular form -func Singular(str string) string { - for _, inflection := range compiledSingularMaps { - if inflection.regexp.MatchString(str) { - return inflection.regexp.ReplaceAllString(str, inflection.replace) - } - } - return str -} diff --git a/vendor/github.com/jinzhu/inflection/wercker.yml b/vendor/github.com/jinzhu/inflection/wercker.yml deleted file mode 100644 index 5e6ce98..0000000 --- a/vendor/github.com/jinzhu/inflection/wercker.yml +++ /dev/null @@ -1,23 +0,0 @@ -box: golang - -build: - steps: - - setup-go-workspace - - # Gets the dependencies - - script: - name: go get - code: | - go get - - # Build the project - - script: - name: go build - code: | - go build ./... - - # Test the project - - script: - name: go test - code: | - go test ./... diff --git a/vendor/github.com/jmespath/go-jmespath/.gitignore b/vendor/github.com/jmespath/go-jmespath/.gitignore deleted file mode 100644 index 531fcc1..0000000 --- a/vendor/github.com/jmespath/go-jmespath/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -jpgo -jmespath-fuzz.zip -cpu.out -go-jmespath.test diff --git a/vendor/github.com/jmespath/go-jmespath/.travis.yml b/vendor/github.com/jmespath/go-jmespath/.travis.yml deleted file mode 100644 index 1f98077..0000000 --- a/vendor/github.com/jmespath/go-jmespath/.travis.yml +++ /dev/null @@ -1,9 +0,0 @@ -language: go - -sudo: false - -go: - - 1.4 - -install: go get -v -t ./... 
-script: make test
diff --git a/vendor/github.com/jmespath/go-jmespath/LICENSE b/vendor/github.com/jmespath/go-jmespath/LICENSE
deleted file mode 100644
index b03310a..0000000
--- a/vendor/github.com/jmespath/go-jmespath/LICENSE
+++ /dev/null
@@ -1,13 +0,0 @@
-Copyright 2015 James Saryerwinnie
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
diff --git a/vendor/github.com/jmespath/go-jmespath/Makefile b/vendor/github.com/jmespath/go-jmespath/Makefile
deleted file mode 100644
index a828d28..0000000
--- a/vendor/github.com/jmespath/go-jmespath/Makefile
+++ /dev/null
@@ -1,44 +0,0 @@
-
-CMD = jpgo
-
-help:
-	@echo "Please use \`make <target>' where <target> is one of"
-	@echo "  test        to run all the tests"
-	@echo "  build       to build the library and jp executable"
-	@echo "  generate    to run codegen"
-
-
-generate:
-	go generate ./...
-
-build:
-	rm -f $(CMD)
-	go build ./...
-	rm -f cmd/$(CMD)/$(CMD) && cd cmd/$(CMD)/ && go build ./...
-	mv cmd/$(CMD)/$(CMD) .
-
-test:
-	go test -v ./...
-
-check:
-	go vet ./...
-	@echo "golint ./..."
-	@lint=`golint ./...`; \
-	lint=`echo "$$lint" | grep -v "astnodetype_string.go" | grep -v "toktype_string.go"`; \
-	echo "$$lint"; \
-	if [ "$$lint" != "" ]; then exit 1; fi
-
-htmlc:
-	go test -coverprofile="/tmp/jpcov" && go tool cover -html="/tmp/jpcov" && unlink /tmp/jpcov
-
-buildfuzz:
-	go-fuzz-build github.com/jmespath/go-jmespath/fuzz
-
-fuzz: buildfuzz
-	go-fuzz -bin=./jmespath-fuzz.zip -workdir=fuzz/testdata
-
-bench:
-	go test -bench . -cpuprofile cpu.out
-
-pprof-cpu:
-	go tool pprof ./go-jmespath.test ./cpu.out
diff --git a/vendor/github.com/jmespath/go-jmespath/README.md b/vendor/github.com/jmespath/go-jmespath/README.md
deleted file mode 100644
index 187ef67..0000000
--- a/vendor/github.com/jmespath/go-jmespath/README.md
+++ /dev/null
@@ -1,7 +0,0 @@
-# go-jmespath - A JMESPath implementation in Go
-
-[![Build Status](https://img.shields.io/travis/jmespath/go-jmespath.svg)](https://travis-ci.org/jmespath/go-jmespath)
-
-
-
-See http://jmespath.org for more info.
diff --git a/vendor/github.com/jmespath/go-jmespath/api.go b/vendor/github.com/jmespath/go-jmespath/api.go
deleted file mode 100644
index 9cfa988..0000000
--- a/vendor/github.com/jmespath/go-jmespath/api.go
+++ /dev/null
@@ -1,49 +0,0 @@
-package jmespath
-
-import "strconv"
-
-// JMESPath is the representation of a compiled JMES path query. A JMESPath is
-// safe for concurrent use by multiple goroutines.
-type JMESPath struct {
-	ast  ASTNode
-	intr *treeInterpreter
-}
-
-// Compile parses a JMESPath expression and returns, if successful, a JMESPath
-// object that can be used to match against data.
-func Compile(expression string) (*JMESPath, error) {
-	parser := NewParser()
-	ast, err := parser.Parse(expression)
-	if err != nil {
-		return nil, err
-	}
-	jmespath := &JMESPath{ast: ast, intr: newInterpreter()}
-	return jmespath, nil
-}
-
-// MustCompile is like Compile but panics if the expression cannot be parsed.
-// It simplifies safe initialization of global variables holding compiled
-// JMESPaths.
-func MustCompile(expression string) *JMESPath { - jmespath, err := Compile(expression) - if err != nil { - panic(`jmespath: Compile(` + strconv.Quote(expression) + `): ` + err.Error()) - } - return jmespath -} - -// Search evaluates a JMESPath expression against input data and returns the result. -func (jp *JMESPath) Search(data interface{}) (interface{}, error) { - return jp.intr.Execute(jp.ast, data) -} - -// Search evaluates a JMESPath expression against input data and returns the result. -func Search(expression string, data interface{}) (interface{}, error) { - intr := newInterpreter() - parser := NewParser() - ast, err := parser.Parse(expression) - if err != nil { - return nil, err - } - return intr.Execute(ast, data) -} diff --git a/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go b/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go deleted file mode 100644 index 1cd2d23..0000000 --- a/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go +++ /dev/null @@ -1,16 +0,0 @@ -// generated by stringer -type astNodeType; DO NOT EDIT - -package jmespath - -import "fmt" - -const _astNodeType_name = "ASTEmptyASTComparatorASTCurrentNodeASTExpRefASTFunctionExpressionASTFieldASTFilterProjectionASTFlattenASTIdentityASTIndexASTIndexExpressionASTKeyValPairASTLiteralASTMultiSelectHashASTMultiSelectListASTOrExpressionASTAndExpressionASTNotExpressionASTPipeASTProjectionASTSubexpressionASTSliceASTValueProjection" - -var _astNodeType_index = [...]uint16{0, 8, 21, 35, 44, 65, 73, 92, 102, 113, 121, 139, 152, 162, 180, 198, 213, 229, 245, 252, 265, 281, 289, 307} - -func (i astNodeType) String() string { - if i < 0 || i >= astNodeType(len(_astNodeType_index)-1) { - return fmt.Sprintf("astNodeType(%d)", i) - } - return _astNodeType_name[_astNodeType_index[i]:_astNodeType_index[i+1]] -} diff --git a/vendor/github.com/jmespath/go-jmespath/functions.go b/vendor/github.com/jmespath/go-jmespath/functions.go deleted file mode 100644 index 9b7cd89..0000000 --- a/vendor/github.com/jmespath/go-jmespath/functions.go +++ /dev/null @@ -1,842 +0,0 @@ -package jmespath - -import ( - "encoding/json" - "errors" - "fmt" - "math" - "reflect" - "sort" - "strconv" - "strings" - "unicode/utf8" -) - -type jpFunction func(arguments []interface{}) (interface{}, error) - -type jpType string - -const ( - jpUnknown jpType = "unknown" - jpNumber jpType = "number" - jpString jpType = "string" - jpArray jpType = "array" - jpObject jpType = "object" - jpArrayNumber jpType = "array[number]" - jpArrayString jpType = "array[string]" - jpExpref jpType = "expref" - jpAny jpType = "any" -) - -type functionEntry struct { - name string - arguments []argSpec - handler jpFunction - hasExpRef bool -} - -type argSpec struct { - types []jpType - variadic bool -} - -type byExprString struct { - intr *treeInterpreter - node ASTNode - items []interface{} - hasError bool -} - -func (a *byExprString) Len() int { - return len(a.items) -} -func (a *byExprString) Swap(i, j int) { - a.items[i], a.items[j] = a.items[j], a.items[i] -} -func (a *byExprString) Less(i, j int) bool { - first, err := a.intr.Execute(a.node, a.items[i]) - if err != nil { - a.hasError = true - // Return a dummy value. - return true - } - ith, ok := first.(string) - if !ok { - a.hasError = true - return true - } - second, err := a.intr.Execute(a.node, a.items[j]) - if err != nil { - a.hasError = true - // Return a dummy value. 
- return true - } - jth, ok := second.(string) - if !ok { - a.hasError = true - return true - } - return ith < jth -} - -type byExprFloat struct { - intr *treeInterpreter - node ASTNode - items []interface{} - hasError bool -} - -func (a *byExprFloat) Len() int { - return len(a.items) -} -func (a *byExprFloat) Swap(i, j int) { - a.items[i], a.items[j] = a.items[j], a.items[i] -} -func (a *byExprFloat) Less(i, j int) bool { - first, err := a.intr.Execute(a.node, a.items[i]) - if err != nil { - a.hasError = true - // Return a dummy value. - return true - } - ith, ok := first.(float64) - if !ok { - a.hasError = true - return true - } - second, err := a.intr.Execute(a.node, a.items[j]) - if err != nil { - a.hasError = true - // Return a dummy value. - return true - } - jth, ok := second.(float64) - if !ok { - a.hasError = true - return true - } - return ith < jth -} - -type functionCaller struct { - functionTable map[string]functionEntry -} - -func newFunctionCaller() *functionCaller { - caller := &functionCaller{} - caller.functionTable = map[string]functionEntry{ - "length": { - name: "length", - arguments: []argSpec{ - {types: []jpType{jpString, jpArray, jpObject}}, - }, - handler: jpfLength, - }, - "starts_with": { - name: "starts_with", - arguments: []argSpec{ - {types: []jpType{jpString}}, - {types: []jpType{jpString}}, - }, - handler: jpfStartsWith, - }, - "abs": { - name: "abs", - arguments: []argSpec{ - {types: []jpType{jpNumber}}, - }, - handler: jpfAbs, - }, - "avg": { - name: "avg", - arguments: []argSpec{ - {types: []jpType{jpArrayNumber}}, - }, - handler: jpfAvg, - }, - "ceil": { - name: "ceil", - arguments: []argSpec{ - {types: []jpType{jpNumber}}, - }, - handler: jpfCeil, - }, - "contains": { - name: "contains", - arguments: []argSpec{ - {types: []jpType{jpArray, jpString}}, - {types: []jpType{jpAny}}, - }, - handler: jpfContains, - }, - "ends_with": { - name: "ends_with", - arguments: []argSpec{ - {types: []jpType{jpString}}, - {types: []jpType{jpString}}, - }, - handler: jpfEndsWith, - }, - "floor": { - name: "floor", - arguments: []argSpec{ - {types: []jpType{jpNumber}}, - }, - handler: jpfFloor, - }, - "map": { - name: "amp", - arguments: []argSpec{ - {types: []jpType{jpExpref}}, - {types: []jpType{jpArray}}, - }, - handler: jpfMap, - hasExpRef: true, - }, - "max": { - name: "max", - arguments: []argSpec{ - {types: []jpType{jpArrayNumber, jpArrayString}}, - }, - handler: jpfMax, - }, - "merge": { - name: "merge", - arguments: []argSpec{ - {types: []jpType{jpObject}, variadic: true}, - }, - handler: jpfMerge, - }, - "max_by": { - name: "max_by", - arguments: []argSpec{ - {types: []jpType{jpArray}}, - {types: []jpType{jpExpref}}, - }, - handler: jpfMaxBy, - hasExpRef: true, - }, - "sum": { - name: "sum", - arguments: []argSpec{ - {types: []jpType{jpArrayNumber}}, - }, - handler: jpfSum, - }, - "min": { - name: "min", - arguments: []argSpec{ - {types: []jpType{jpArrayNumber, jpArrayString}}, - }, - handler: jpfMin, - }, - "min_by": { - name: "min_by", - arguments: []argSpec{ - {types: []jpType{jpArray}}, - {types: []jpType{jpExpref}}, - }, - handler: jpfMinBy, - hasExpRef: true, - }, - "type": { - name: "type", - arguments: []argSpec{ - {types: []jpType{jpAny}}, - }, - handler: jpfType, - }, - "keys": { - name: "keys", - arguments: []argSpec{ - {types: []jpType{jpObject}}, - }, - handler: jpfKeys, - }, - "values": { - name: "values", - arguments: []argSpec{ - {types: []jpType{jpObject}}, - }, - handler: jpfValues, - }, - "sort": { - name: "sort", - arguments: []argSpec{ - 
{types: []jpType{jpArrayString, jpArrayNumber}}, - }, - handler: jpfSort, - }, - "sort_by": { - name: "sort_by", - arguments: []argSpec{ - {types: []jpType{jpArray}}, - {types: []jpType{jpExpref}}, - }, - handler: jpfSortBy, - hasExpRef: true, - }, - "join": { - name: "join", - arguments: []argSpec{ - {types: []jpType{jpString}}, - {types: []jpType{jpArrayString}}, - }, - handler: jpfJoin, - }, - "reverse": { - name: "reverse", - arguments: []argSpec{ - {types: []jpType{jpArray, jpString}}, - }, - handler: jpfReverse, - }, - "to_array": { - name: "to_array", - arguments: []argSpec{ - {types: []jpType{jpAny}}, - }, - handler: jpfToArray, - }, - "to_string": { - name: "to_string", - arguments: []argSpec{ - {types: []jpType{jpAny}}, - }, - handler: jpfToString, - }, - "to_number": { - name: "to_number", - arguments: []argSpec{ - {types: []jpType{jpAny}}, - }, - handler: jpfToNumber, - }, - "not_null": { - name: "not_null", - arguments: []argSpec{ - {types: []jpType{jpAny}, variadic: true}, - }, - handler: jpfNotNull, - }, - } - return caller -} - -func (e *functionEntry) resolveArgs(arguments []interface{}) ([]interface{}, error) { - if len(e.arguments) == 0 { - return arguments, nil - } - if !e.arguments[len(e.arguments)-1].variadic { - if len(e.arguments) != len(arguments) { - return nil, errors.New("incorrect number of args") - } - for i, spec := range e.arguments { - userArg := arguments[i] - err := spec.typeCheck(userArg) - if err != nil { - return nil, err - } - } - return arguments, nil - } - if len(arguments) < len(e.arguments) { - return nil, errors.New("Invalid arity.") - } - return arguments, nil -} - -func (a *argSpec) typeCheck(arg interface{}) error { - for _, t := range a.types { - switch t { - case jpNumber: - if _, ok := arg.(float64); ok { - return nil - } - case jpString: - if _, ok := arg.(string); ok { - return nil - } - case jpArray: - if isSliceType(arg) { - return nil - } - case jpObject: - if _, ok := arg.(map[string]interface{}); ok { - return nil - } - case jpArrayNumber: - if _, ok := toArrayNum(arg); ok { - return nil - } - case jpArrayString: - if _, ok := toArrayStr(arg); ok { - return nil - } - case jpAny: - return nil - case jpExpref: - if _, ok := arg.(expRef); ok { - return nil - } - } - } - return fmt.Errorf("Invalid type for: %v, expected: %#v", arg, a.types) -} - -func (f *functionCaller) CallFunction(name string, arguments []interface{}, intr *treeInterpreter) (interface{}, error) { - entry, ok := f.functionTable[name] - if !ok { - return nil, errors.New("unknown function: " + name) - } - resolvedArgs, err := entry.resolveArgs(arguments) - if err != nil { - return nil, err - } - if entry.hasExpRef { - var extra []interface{} - extra = append(extra, intr) - resolvedArgs = append(extra, resolvedArgs...) 
- } - return entry.handler(resolvedArgs) -} - -func jpfAbs(arguments []interface{}) (interface{}, error) { - num := arguments[0].(float64) - return math.Abs(num), nil -} - -func jpfLength(arguments []interface{}) (interface{}, error) { - arg := arguments[0] - if c, ok := arg.(string); ok { - return float64(utf8.RuneCountInString(c)), nil - } else if isSliceType(arg) { - v := reflect.ValueOf(arg) - return float64(v.Len()), nil - } else if c, ok := arg.(map[string]interface{}); ok { - return float64(len(c)), nil - } - return nil, errors.New("could not compute length()") -} - -func jpfStartsWith(arguments []interface{}) (interface{}, error) { - search := arguments[0].(string) - prefix := arguments[1].(string) - return strings.HasPrefix(search, prefix), nil -} - -func jpfAvg(arguments []interface{}) (interface{}, error) { - // We've already type checked the value so we can safely use - // type assertions. - args := arguments[0].([]interface{}) - length := float64(len(args)) - numerator := 0.0 - for _, n := range args { - numerator += n.(float64) - } - return numerator / length, nil -} -func jpfCeil(arguments []interface{}) (interface{}, error) { - val := arguments[0].(float64) - return math.Ceil(val), nil -} -func jpfContains(arguments []interface{}) (interface{}, error) { - search := arguments[0] - el := arguments[1] - if searchStr, ok := search.(string); ok { - if elStr, ok := el.(string); ok { - return strings.Index(searchStr, elStr) != -1, nil - } - return false, nil - } - // Otherwise this is a generic contains for []interface{} - general := search.([]interface{}) - for _, item := range general { - if item == el { - return true, nil - } - } - return false, nil -} -func jpfEndsWith(arguments []interface{}) (interface{}, error) { - search := arguments[0].(string) - suffix := arguments[1].(string) - return strings.HasSuffix(search, suffix), nil -} -func jpfFloor(arguments []interface{}) (interface{}, error) { - val := arguments[0].(float64) - return math.Floor(val), nil -} -func jpfMap(arguments []interface{}) (interface{}, error) { - intr := arguments[0].(*treeInterpreter) - exp := arguments[1].(expRef) - node := exp.ref - arr := arguments[2].([]interface{}) - mapped := make([]interface{}, 0, len(arr)) - for _, value := range arr { - current, err := intr.Execute(node, value) - if err != nil { - return nil, err - } - mapped = append(mapped, current) - } - return mapped, nil -} -func jpfMax(arguments []interface{}) (interface{}, error) { - if items, ok := toArrayNum(arguments[0]); ok { - if len(items) == 0 { - return nil, nil - } - if len(items) == 1 { - return items[0], nil - } - best := items[0] - for _, item := range items[1:] { - if item > best { - best = item - } - } - return best, nil - } - // Otherwise we're dealing with a max() of strings. 
- items, _ := toArrayStr(arguments[0]) - if len(items) == 0 { - return nil, nil - } - if len(items) == 1 { - return items[0], nil - } - best := items[0] - for _, item := range items[1:] { - if item > best { - best = item - } - } - return best, nil -} -func jpfMerge(arguments []interface{}) (interface{}, error) { - final := make(map[string]interface{}) - for _, m := range arguments { - mapped := m.(map[string]interface{}) - for key, value := range mapped { - final[key] = value - } - } - return final, nil -} -func jpfMaxBy(arguments []interface{}) (interface{}, error) { - intr := arguments[0].(*treeInterpreter) - arr := arguments[1].([]interface{}) - exp := arguments[2].(expRef) - node := exp.ref - if len(arr) == 0 { - return nil, nil - } else if len(arr) == 1 { - return arr[0], nil - } - start, err := intr.Execute(node, arr[0]) - if err != nil { - return nil, err - } - switch t := start.(type) { - case float64: - bestVal := t - bestItem := arr[0] - for _, item := range arr[1:] { - result, err := intr.Execute(node, item) - if err != nil { - return nil, err - } - current, ok := result.(float64) - if !ok { - return nil, errors.New("invalid type, must be number") - } - if current > bestVal { - bestVal = current - bestItem = item - } - } - return bestItem, nil - case string: - bestVal := t - bestItem := arr[0] - for _, item := range arr[1:] { - result, err := intr.Execute(node, item) - if err != nil { - return nil, err - } - current, ok := result.(string) - if !ok { - return nil, errors.New("invalid type, must be string") - } - if current > bestVal { - bestVal = current - bestItem = item - } - } - return bestItem, nil - default: - return nil, errors.New("invalid type, must be number of string") - } -} -func jpfSum(arguments []interface{}) (interface{}, error) { - items, _ := toArrayNum(arguments[0]) - sum := 0.0 - for _, item := range items { - sum += item - } - return sum, nil -} - -func jpfMin(arguments []interface{}) (interface{}, error) { - if items, ok := toArrayNum(arguments[0]); ok { - if len(items) == 0 { - return nil, nil - } - if len(items) == 1 { - return items[0], nil - } - best := items[0] - for _, item := range items[1:] { - if item < best { - best = item - } - } - return best, nil - } - items, _ := toArrayStr(arguments[0]) - if len(items) == 0 { - return nil, nil - } - if len(items) == 1 { - return items[0], nil - } - best := items[0] - for _, item := range items[1:] { - if item < best { - best = item - } - } - return best, nil -} - -func jpfMinBy(arguments []interface{}) (interface{}, error) { - intr := arguments[0].(*treeInterpreter) - arr := arguments[1].([]interface{}) - exp := arguments[2].(expRef) - node := exp.ref - if len(arr) == 0 { - return nil, nil - } else if len(arr) == 1 { - return arr[0], nil - } - start, err := intr.Execute(node, arr[0]) - if err != nil { - return nil, err - } - if t, ok := start.(float64); ok { - bestVal := t - bestItem := arr[0] - for _, item := range arr[1:] { - result, err := intr.Execute(node, item) - if err != nil { - return nil, err - } - current, ok := result.(float64) - if !ok { - return nil, errors.New("invalid type, must be number") - } - if current < bestVal { - bestVal = current - bestItem = item - } - } - return bestItem, nil - } else if t, ok := start.(string); ok { - bestVal := t - bestItem := arr[0] - for _, item := range arr[1:] { - result, err := intr.Execute(node, item) - if err != nil { - return nil, err - } - current, ok := result.(string) - if !ok { - return nil, errors.New("invalid type, must be string") - } - if current < 
bestVal { - bestVal = current - bestItem = item - } - } - return bestItem, nil - } else { - return nil, errors.New("invalid type, must be number of string") - } -} -func jpfType(arguments []interface{}) (interface{}, error) { - arg := arguments[0] - if _, ok := arg.(float64); ok { - return "number", nil - } - if _, ok := arg.(string); ok { - return "string", nil - } - if _, ok := arg.([]interface{}); ok { - return "array", nil - } - if _, ok := arg.(map[string]interface{}); ok { - return "object", nil - } - if arg == nil { - return "null", nil - } - if arg == true || arg == false { - return "boolean", nil - } - return nil, errors.New("unknown type") -} -func jpfKeys(arguments []interface{}) (interface{}, error) { - arg := arguments[0].(map[string]interface{}) - collected := make([]interface{}, 0, len(arg)) - for key := range arg { - collected = append(collected, key) - } - return collected, nil -} -func jpfValues(arguments []interface{}) (interface{}, error) { - arg := arguments[0].(map[string]interface{}) - collected := make([]interface{}, 0, len(arg)) - for _, value := range arg { - collected = append(collected, value) - } - return collected, nil -} -func jpfSort(arguments []interface{}) (interface{}, error) { - if items, ok := toArrayNum(arguments[0]); ok { - d := sort.Float64Slice(items) - sort.Stable(d) - final := make([]interface{}, len(d)) - for i, val := range d { - final[i] = val - } - return final, nil - } - // Otherwise we're dealing with sort()'ing strings. - items, _ := toArrayStr(arguments[0]) - d := sort.StringSlice(items) - sort.Stable(d) - final := make([]interface{}, len(d)) - for i, val := range d { - final[i] = val - } - return final, nil -} -func jpfSortBy(arguments []interface{}) (interface{}, error) { - intr := arguments[0].(*treeInterpreter) - arr := arguments[1].([]interface{}) - exp := arguments[2].(expRef) - node := exp.ref - if len(arr) == 0 { - return arr, nil - } else if len(arr) == 1 { - return arr, nil - } - start, err := intr.Execute(node, arr[0]) - if err != nil { - return nil, err - } - if _, ok := start.(float64); ok { - sortable := &byExprFloat{intr, node, arr, false} - sort.Stable(sortable) - if sortable.hasError { - return nil, errors.New("error in sort_by comparison") - } - return arr, nil - } else if _, ok := start.(string); ok { - sortable := &byExprString{intr, node, arr, false} - sort.Stable(sortable) - if sortable.hasError { - return nil, errors.New("error in sort_by comparison") - } - return arr, nil - } else { - return nil, errors.New("invalid type, must be number of string") - } -} -func jpfJoin(arguments []interface{}) (interface{}, error) { - sep := arguments[0].(string) - // We can't just do arguments[1].([]string), we have to - // manually convert each item to a string. 
- arrayStr := []string{} - for _, item := range arguments[1].([]interface{}) { - arrayStr = append(arrayStr, item.(string)) - } - return strings.Join(arrayStr, sep), nil -} -func jpfReverse(arguments []interface{}) (interface{}, error) { - if s, ok := arguments[0].(string); ok { - r := []rune(s) - for i, j := 0, len(r)-1; i < len(r)/2; i, j = i+1, j-1 { - r[i], r[j] = r[j], r[i] - } - return string(r), nil - } - items := arguments[0].([]interface{}) - length := len(items) - reversed := make([]interface{}, length) - for i, item := range items { - reversed[length-(i+1)] = item - } - return reversed, nil -} -func jpfToArray(arguments []interface{}) (interface{}, error) { - if _, ok := arguments[0].([]interface{}); ok { - return arguments[0], nil - } - return arguments[:1:1], nil -} -func jpfToString(arguments []interface{}) (interface{}, error) { - if v, ok := arguments[0].(string); ok { - return v, nil - } - result, err := json.Marshal(arguments[0]) - if err != nil { - return nil, err - } - return string(result), nil -} -func jpfToNumber(arguments []interface{}) (interface{}, error) { - arg := arguments[0] - if v, ok := arg.(float64); ok { - return v, nil - } - if v, ok := arg.(string); ok { - conv, err := strconv.ParseFloat(v, 64) - if err != nil { - return nil, nil - } - return conv, nil - } - if _, ok := arg.([]interface{}); ok { - return nil, nil - } - if _, ok := arg.(map[string]interface{}); ok { - return nil, nil - } - if arg == nil { - return nil, nil - } - if arg == true || arg == false { - return nil, nil - } - return nil, errors.New("unknown type") -} -func jpfNotNull(arguments []interface{}) (interface{}, error) { - for _, arg := range arguments { - if arg != nil { - return arg, nil - } - } - return nil, nil -} diff --git a/vendor/github.com/jmespath/go-jmespath/interpreter.go b/vendor/github.com/jmespath/go-jmespath/interpreter.go deleted file mode 100644 index 13c7460..0000000 --- a/vendor/github.com/jmespath/go-jmespath/interpreter.go +++ /dev/null @@ -1,418 +0,0 @@ -package jmespath - -import ( - "errors" - "reflect" - "unicode" - "unicode/utf8" -) - -/* This is a tree based interpreter. It walks the AST and directly - interprets the AST to search through a JSON document. -*/ - -type treeInterpreter struct { - fCall *functionCaller -} - -func newInterpreter() *treeInterpreter { - interpreter := treeInterpreter{} - interpreter.fCall = newFunctionCaller() - return &interpreter -} - -type expRef struct { - ref ASTNode -} - -// Execute takes an ASTNode and input data and interprets the AST directly. -// It will produce the result of applying the JMESPath expression associated -// with the ASTNode to the input data "value". 
-func (intr *treeInterpreter) Execute(node ASTNode, value interface{}) (interface{}, error) { - switch node.nodeType { - case ASTComparator: - left, err := intr.Execute(node.children[0], value) - if err != nil { - return nil, err - } - right, err := intr.Execute(node.children[1], value) - if err != nil { - return nil, err - } - switch node.value { - case tEQ: - return objsEqual(left, right), nil - case tNE: - return !objsEqual(left, right), nil - } - leftNum, ok := left.(float64) - if !ok { - return nil, nil - } - rightNum, ok := right.(float64) - if !ok { - return nil, nil - } - switch node.value { - case tGT: - return leftNum > rightNum, nil - case tGTE: - return leftNum >= rightNum, nil - case tLT: - return leftNum < rightNum, nil - case tLTE: - return leftNum <= rightNum, nil - } - case ASTExpRef: - return expRef{ref: node.children[0]}, nil - case ASTFunctionExpression: - resolvedArgs := []interface{}{} - for _, arg := range node.children { - current, err := intr.Execute(arg, value) - if err != nil { - return nil, err - } - resolvedArgs = append(resolvedArgs, current) - } - return intr.fCall.CallFunction(node.value.(string), resolvedArgs, intr) - case ASTField: - if m, ok := value.(map[string]interface{}); ok { - key := node.value.(string) - return m[key], nil - } - return intr.fieldFromStruct(node.value.(string), value) - case ASTFilterProjection: - left, err := intr.Execute(node.children[0], value) - if err != nil { - return nil, nil - } - sliceType, ok := left.([]interface{}) - if !ok { - if isSliceType(left) { - return intr.filterProjectionWithReflection(node, left) - } - return nil, nil - } - compareNode := node.children[2] - collected := []interface{}{} - for _, element := range sliceType { - result, err := intr.Execute(compareNode, element) - if err != nil { - return nil, err - } - if !isFalse(result) { - current, err := intr.Execute(node.children[1], element) - if err != nil { - return nil, err - } - if current != nil { - collected = append(collected, current) - } - } - } - return collected, nil - case ASTFlatten: - left, err := intr.Execute(node.children[0], value) - if err != nil { - return nil, nil - } - sliceType, ok := left.([]interface{}) - if !ok { - // If we can't type convert to []interface{}, there's - // a chance this could still work via reflection if we're - // dealing with user provided types. - if isSliceType(left) { - return intr.flattenWithReflection(left) - } - return nil, nil - } - flattened := []interface{}{} - for _, element := range sliceType { - if elementSlice, ok := element.([]interface{}); ok { - flattened = append(flattened, elementSlice...) - } else if isSliceType(element) { - reflectFlat := []interface{}{} - v := reflect.ValueOf(element) - for i := 0; i < v.Len(); i++ { - reflectFlat = append(reflectFlat, v.Index(i).Interface()) - } - flattened = append(flattened, reflectFlat...) - } else { - flattened = append(flattened, element) - } - } - return flattened, nil - case ASTIdentity, ASTCurrentNode: - return value, nil - case ASTIndex: - if sliceType, ok := value.([]interface{}); ok { - index := node.value.(int) - if index < 0 { - index += len(sliceType) - } - if index < len(sliceType) && index >= 0 { - return sliceType[index], nil - } - return nil, nil - } - // Otherwise try via reflection. 
- rv := reflect.ValueOf(value) - if rv.Kind() == reflect.Slice { - index := node.value.(int) - if index < 0 { - index += rv.Len() - } - if index < rv.Len() && index >= 0 { - v := rv.Index(index) - return v.Interface(), nil - } - } - return nil, nil - case ASTKeyValPair: - return intr.Execute(node.children[0], value) - case ASTLiteral: - return node.value, nil - case ASTMultiSelectHash: - if value == nil { - return nil, nil - } - collected := make(map[string]interface{}) - for _, child := range node.children { - current, err := intr.Execute(child, value) - if err != nil { - return nil, err - } - key := child.value.(string) - collected[key] = current - } - return collected, nil - case ASTMultiSelectList: - if value == nil { - return nil, nil - } - collected := []interface{}{} - for _, child := range node.children { - current, err := intr.Execute(child, value) - if err != nil { - return nil, err - } - collected = append(collected, current) - } - return collected, nil - case ASTOrExpression: - matched, err := intr.Execute(node.children[0], value) - if err != nil { - return nil, err - } - if isFalse(matched) { - matched, err = intr.Execute(node.children[1], value) - if err != nil { - return nil, err - } - } - return matched, nil - case ASTAndExpression: - matched, err := intr.Execute(node.children[0], value) - if err != nil { - return nil, err - } - if isFalse(matched) { - return matched, nil - } - return intr.Execute(node.children[1], value) - case ASTNotExpression: - matched, err := intr.Execute(node.children[0], value) - if err != nil { - return nil, err - } - if isFalse(matched) { - return true, nil - } - return false, nil - case ASTPipe: - result := value - var err error - for _, child := range node.children { - result, err = intr.Execute(child, result) - if err != nil { - return nil, err - } - } - return result, nil - case ASTProjection: - left, err := intr.Execute(node.children[0], value) - if err != nil { - return nil, err - } - sliceType, ok := left.([]interface{}) - if !ok { - if isSliceType(left) { - return intr.projectWithReflection(node, left) - } - return nil, nil - } - collected := []interface{}{} - var current interface{} - for _, element := range sliceType { - current, err = intr.Execute(node.children[1], element) - if err != nil { - return nil, err - } - if current != nil { - collected = append(collected, current) - } - } - return collected, nil - case ASTSubexpression, ASTIndexExpression: - left, err := intr.Execute(node.children[0], value) - if err != nil { - return nil, err - } - return intr.Execute(node.children[1], left) - case ASTSlice: - sliceType, ok := value.([]interface{}) - if !ok { - if isSliceType(value) { - return intr.sliceWithReflection(node, value) - } - return nil, nil - } - parts := node.value.([]*int) - sliceParams := make([]sliceParam, 3) - for i, part := range parts { - if part != nil { - sliceParams[i].Specified = true - sliceParams[i].N = *part - } - } - return slice(sliceType, sliceParams) - case ASTValueProjection: - left, err := intr.Execute(node.children[0], value) - if err != nil { - return nil, nil - } - mapType, ok := left.(map[string]interface{}) - if !ok { - return nil, nil - } - values := make([]interface{}, len(mapType)) - for _, value := range mapType { - values = append(values, value) - } - collected := []interface{}{} - for _, element := range values { - current, err := intr.Execute(node.children[1], element) - if err != nil { - return nil, err - } - if current != nil { - collected = append(collected, current) - } - } - return collected, 
nil - } - return nil, errors.New("Unknown AST node: " + node.nodeType.String()) -} - -func (intr *treeInterpreter) fieldFromStruct(key string, value interface{}) (interface{}, error) { - rv := reflect.ValueOf(value) - first, n := utf8.DecodeRuneInString(key) - fieldName := string(unicode.ToUpper(first)) + key[n:] - if rv.Kind() == reflect.Struct { - v := rv.FieldByName(fieldName) - if !v.IsValid() { - return nil, nil - } - return v.Interface(), nil - } else if rv.Kind() == reflect.Ptr { - // Handle multiple levels of indirection? - if rv.IsNil() { - return nil, nil - } - rv = rv.Elem() - v := rv.FieldByName(fieldName) - if !v.IsValid() { - return nil, nil - } - return v.Interface(), nil - } - return nil, nil -} - -func (intr *treeInterpreter) flattenWithReflection(value interface{}) (interface{}, error) { - v := reflect.ValueOf(value) - flattened := []interface{}{} - for i := 0; i < v.Len(); i++ { - element := v.Index(i).Interface() - if reflect.TypeOf(element).Kind() == reflect.Slice { - // Then insert the contents of the element - // slice into the flattened slice, - // i.e flattened = append(flattened, mySlice...) - elementV := reflect.ValueOf(element) - for j := 0; j < elementV.Len(); j++ { - flattened = append( - flattened, elementV.Index(j).Interface()) - } - } else { - flattened = append(flattened, element) - } - } - return flattened, nil -} - -func (intr *treeInterpreter) sliceWithReflection(node ASTNode, value interface{}) (interface{}, error) { - v := reflect.ValueOf(value) - parts := node.value.([]*int) - sliceParams := make([]sliceParam, 3) - for i, part := range parts { - if part != nil { - sliceParams[i].Specified = true - sliceParams[i].N = *part - } - } - final := []interface{}{} - for i := 0; i < v.Len(); i++ { - element := v.Index(i).Interface() - final = append(final, element) - } - return slice(final, sliceParams) -} - -func (intr *treeInterpreter) filterProjectionWithReflection(node ASTNode, value interface{}) (interface{}, error) { - compareNode := node.children[2] - collected := []interface{}{} - v := reflect.ValueOf(value) - for i := 0; i < v.Len(); i++ { - element := v.Index(i).Interface() - result, err := intr.Execute(compareNode, element) - if err != nil { - return nil, err - } - if !isFalse(result) { - current, err := intr.Execute(node.children[1], element) - if err != nil { - return nil, err - } - if current != nil { - collected = append(collected, current) - } - } - } - return collected, nil -} - -func (intr *treeInterpreter) projectWithReflection(node ASTNode, value interface{}) (interface{}, error) { - collected := []interface{}{} - v := reflect.ValueOf(value) - for i := 0; i < v.Len(); i++ { - element := v.Index(i).Interface() - result, err := intr.Execute(node.children[1], element) - if err != nil { - return nil, err - } - if result != nil { - collected = append(collected, result) - } - } - return collected, nil -} diff --git a/vendor/github.com/jmespath/go-jmespath/lexer.go b/vendor/github.com/jmespath/go-jmespath/lexer.go deleted file mode 100644 index 817900c..0000000 --- a/vendor/github.com/jmespath/go-jmespath/lexer.go +++ /dev/null @@ -1,420 +0,0 @@ -package jmespath - -import ( - "bytes" - "encoding/json" - "fmt" - "strconv" - "strings" - "unicode/utf8" -) - -type token struct { - tokenType tokType - value string - position int - length int -} - -type tokType int - -const eof = -1 - -// Lexer contains information about the expression being tokenized. -type Lexer struct { - expression string // The expression provided by the user. 
- currentPos int // The current position in the string. - lastWidth int // The width of the current rune. This - buf bytes.Buffer // Internal buffer used for building up values. -} - -// SyntaxError is the main error used whenever a lexing or parsing error occurs. -type SyntaxError struct { - msg string // Error message displayed to user - Expression string // Expression that generated a SyntaxError - Offset int // The location in the string where the error occurred -} - -func (e SyntaxError) Error() string { - // In the future, it would be good to underline the specific - // location where the error occurred. - return "SyntaxError: " + e.msg -} - -// HighlightLocation will show where the syntax error occurred. -// It will place a "^" character on a line below the expression -// at the point where the syntax error occurred. -func (e SyntaxError) HighlightLocation() string { - return e.Expression + "\n" + strings.Repeat(" ", e.Offset) + "^" -} - -//go:generate stringer -type=tokType -const ( - tUnknown tokType = iota - tStar - tDot - tFilter - tFlatten - tLparen - tRparen - tLbracket - tRbracket - tLbrace - tRbrace - tOr - tPipe - tNumber - tUnquotedIdentifier - tQuotedIdentifier - tComma - tColon - tLT - tLTE - tGT - tGTE - tEQ - tNE - tJSONLiteral - tStringLiteral - tCurrent - tExpref - tAnd - tNot - tEOF -) - -var basicTokens = map[rune]tokType{ - '.': tDot, - '*': tStar, - ',': tComma, - ':': tColon, - '{': tLbrace, - '}': tRbrace, - ']': tRbracket, // tLbracket not included because it could be "[]" - '(': tLparen, - ')': tRparen, - '@': tCurrent, -} - -// Bit mask for [a-zA-Z_] shifted down 64 bits to fit in a single uint64. -// When using this bitmask just be sure to shift the rune down 64 bits -// before checking against identifierStartBits. -const identifierStartBits uint64 = 576460745995190270 - -// Bit mask for [a-zA-Z0-9], 128 bits -> 2 uint64s. -var identifierTrailingBits = [2]uint64{287948901175001088, 576460745995190270} - -var whiteSpace = map[rune]bool{ - ' ': true, '\t': true, '\n': true, '\r': true, -} - -func (t token) String() string { - return fmt.Sprintf("Token{%+v, %s, %d, %d}", - t.tokenType, t.value, t.position, t.length) -} - -// NewLexer creates a new JMESPath lexer. -func NewLexer() *Lexer { - lexer := Lexer{} - return &lexer -} - -func (lexer *Lexer) next() rune { - if lexer.currentPos >= len(lexer.expression) { - lexer.lastWidth = 0 - return eof - } - r, w := utf8.DecodeRuneInString(lexer.expression[lexer.currentPos:]) - lexer.lastWidth = w - lexer.currentPos += w - return r -} - -func (lexer *Lexer) back() { - lexer.currentPos -= lexer.lastWidth -} - -func (lexer *Lexer) peek() rune { - t := lexer.next() - lexer.back() - return t -} - -// tokenize takes an expression and returns corresponding tokens. -func (lexer *Lexer) tokenize(expression string) ([]token, error) { - var tokens []token - lexer.expression = expression - lexer.currentPos = 0 - lexer.lastWidth = 0 -loop: - for { - r := lexer.next() - if identifierStartBits&(1<<(uint64(r)-64)) > 0 { - t := lexer.consumeUnquotedIdentifier() - tokens = append(tokens, t) - } else if val, ok := basicTokens[r]; ok { - // Basic single char token. 
- t := token{ - tokenType: val, - value: string(r), - position: lexer.currentPos - lexer.lastWidth, - length: 1, - } - tokens = append(tokens, t) - } else if r == '-' || (r >= '0' && r <= '9') { - t := lexer.consumeNumber() - tokens = append(tokens, t) - } else if r == '[' { - t := lexer.consumeLBracket() - tokens = append(tokens, t) - } else if r == '"' { - t, err := lexer.consumeQuotedIdentifier() - if err != nil { - return tokens, err - } - tokens = append(tokens, t) - } else if r == '\'' { - t, err := lexer.consumeRawStringLiteral() - if err != nil { - return tokens, err - } - tokens = append(tokens, t) - } else if r == '`' { - t, err := lexer.consumeLiteral() - if err != nil { - return tokens, err - } - tokens = append(tokens, t) - } else if r == '|' { - t := lexer.matchOrElse(r, '|', tOr, tPipe) - tokens = append(tokens, t) - } else if r == '<' { - t := lexer.matchOrElse(r, '=', tLTE, tLT) - tokens = append(tokens, t) - } else if r == '>' { - t := lexer.matchOrElse(r, '=', tGTE, tGT) - tokens = append(tokens, t) - } else if r == '!' { - t := lexer.matchOrElse(r, '=', tNE, tNot) - tokens = append(tokens, t) - } else if r == '=' { - t := lexer.matchOrElse(r, '=', tEQ, tUnknown) - tokens = append(tokens, t) - } else if r == '&' { - t := lexer.matchOrElse(r, '&', tAnd, tExpref) - tokens = append(tokens, t) - } else if r == eof { - break loop - } else if _, ok := whiteSpace[r]; ok { - // Ignore whitespace - } else { - return tokens, lexer.syntaxError(fmt.Sprintf("Unknown char: %s", strconv.QuoteRuneToASCII(r))) - } - } - tokens = append(tokens, token{tEOF, "", len(lexer.expression), 0}) - return tokens, nil -} - -// Consume characters until the ending rune "r" is reached. -// If the end of the expression is reached before seeing the -// terminating rune "r", then an error is returned. -// If no error occurs then the matching substring is returned. -// The returned string will not include the ending rune. -func (lexer *Lexer) consumeUntil(end rune) (string, error) { - start := lexer.currentPos - current := lexer.next() - for current != end && current != eof { - if current == '\\' && lexer.peek() != eof { - lexer.next() - } - current = lexer.next() - } - if lexer.lastWidth == 0 { - // Then we hit an EOF so we never reached the closing - // delimiter. - return "", SyntaxError{ - msg: "Unclosed delimiter: " + string(end), - Expression: lexer.expression, - Offset: len(lexer.expression), - } - } - return lexer.expression[start : lexer.currentPos-lexer.lastWidth], nil -} - -func (lexer *Lexer) consumeLiteral() (token, error) { - start := lexer.currentPos - value, err := lexer.consumeUntil('`') - if err != nil { - return token{}, err - } - value = strings.Replace(value, "\\`", "`", -1) - return token{ - tokenType: tJSONLiteral, - value: value, - position: start, - length: len(value), - }, nil -} - -func (lexer *Lexer) consumeRawStringLiteral() (token, error) { - start := lexer.currentPos - currentIndex := start - current := lexer.next() - for current != '\'' && lexer.peek() != eof { - if current == '\\' && lexer.peek() == '\'' { - chunk := lexer.expression[currentIndex : lexer.currentPos-1] - lexer.buf.WriteString(chunk) - lexer.buf.WriteString("'") - lexer.next() - currentIndex = lexer.currentPos - } - current = lexer.next() - } - if lexer.lastWidth == 0 { - // Then we hit an EOF so we never reached the closing - // delimiter. 
- return token{}, SyntaxError{ - msg: "Unclosed delimiter: '", - Expression: lexer.expression, - Offset: len(lexer.expression), - } - } - if currentIndex < lexer.currentPos { - lexer.buf.WriteString(lexer.expression[currentIndex : lexer.currentPos-1]) - } - value := lexer.buf.String() - // Reset the buffer so it can reused again. - lexer.buf.Reset() - return token{ - tokenType: tStringLiteral, - value: value, - position: start, - length: len(value), - }, nil -} - -func (lexer *Lexer) syntaxError(msg string) SyntaxError { - return SyntaxError{ - msg: msg, - Expression: lexer.expression, - Offset: lexer.currentPos - 1, - } -} - -// Checks for a two char token, otherwise matches a single character -// token. This is used whenever a two char token overlaps a single -// char token, e.g. "||" -> tPipe, "|" -> tOr. -func (lexer *Lexer) matchOrElse(first rune, second rune, matchedType tokType, singleCharType tokType) token { - start := lexer.currentPos - lexer.lastWidth - nextRune := lexer.next() - var t token - if nextRune == second { - t = token{ - tokenType: matchedType, - value: string(first) + string(second), - position: start, - length: 2, - } - } else { - lexer.back() - t = token{ - tokenType: singleCharType, - value: string(first), - position: start, - length: 1, - } - } - return t -} - -func (lexer *Lexer) consumeLBracket() token { - // There's three options here: - // 1. A filter expression "[?" - // 2. A flatten operator "[]" - // 3. A bare rbracket "[" - start := lexer.currentPos - lexer.lastWidth - nextRune := lexer.next() - var t token - if nextRune == '?' { - t = token{ - tokenType: tFilter, - value: "[?", - position: start, - length: 2, - } - } else if nextRune == ']' { - t = token{ - tokenType: tFlatten, - value: "[]", - position: start, - length: 2, - } - } else { - t = token{ - tokenType: tLbracket, - value: "[", - position: start, - length: 1, - } - lexer.back() - } - return t -} - -func (lexer *Lexer) consumeQuotedIdentifier() (token, error) { - start := lexer.currentPos - value, err := lexer.consumeUntil('"') - if err != nil { - return token{}, err - } - var decoded string - asJSON := []byte("\"" + value + "\"") - if err := json.Unmarshal([]byte(asJSON), &decoded); err != nil { - return token{}, err - } - return token{ - tokenType: tQuotedIdentifier, - value: decoded, - position: start - 1, - length: len(decoded), - }, nil -} - -func (lexer *Lexer) consumeUnquotedIdentifier() token { - // Consume runes until we reach the end of an unquoted - // identifier. - start := lexer.currentPos - lexer.lastWidth - for { - r := lexer.next() - if r < 0 || r > 128 || identifierTrailingBits[uint64(r)/64]&(1<<(uint64(r)%64)) == 0 { - lexer.back() - break - } - } - value := lexer.expression[start:lexer.currentPos] - return token{ - tokenType: tUnquotedIdentifier, - value: value, - position: start, - length: lexer.currentPos - start, - } -} - -func (lexer *Lexer) consumeNumber() token { - // Consume runes until we reach something that's not a number. 
- start := lexer.currentPos - lexer.lastWidth - for { - r := lexer.next() - if r < '0' || r > '9' { - lexer.back() - break - } - } - value := lexer.expression[start:lexer.currentPos] - return token{ - tokenType: tNumber, - value: value, - position: start, - length: lexer.currentPos - start, - } -} diff --git a/vendor/github.com/jmespath/go-jmespath/parser.go b/vendor/github.com/jmespath/go-jmespath/parser.go deleted file mode 100644 index 1240a17..0000000 --- a/vendor/github.com/jmespath/go-jmespath/parser.go +++ /dev/null @@ -1,603 +0,0 @@ -package jmespath - -import ( - "encoding/json" - "fmt" - "strconv" - "strings" -) - -type astNodeType int - -//go:generate stringer -type astNodeType -const ( - ASTEmpty astNodeType = iota - ASTComparator - ASTCurrentNode - ASTExpRef - ASTFunctionExpression - ASTField - ASTFilterProjection - ASTFlatten - ASTIdentity - ASTIndex - ASTIndexExpression - ASTKeyValPair - ASTLiteral - ASTMultiSelectHash - ASTMultiSelectList - ASTOrExpression - ASTAndExpression - ASTNotExpression - ASTPipe - ASTProjection - ASTSubexpression - ASTSlice - ASTValueProjection -) - -// ASTNode represents the abstract syntax tree of a JMESPath expression. -type ASTNode struct { - nodeType astNodeType - value interface{} - children []ASTNode -} - -func (node ASTNode) String() string { - return node.PrettyPrint(0) -} - -// PrettyPrint will pretty print the parsed AST. -// The AST is an implementation detail and this pretty print -// function is provided as a convenience method to help with -// debugging. You should not rely on its output as the internal -// structure of the AST may change at any time. -func (node ASTNode) PrettyPrint(indent int) string { - spaces := strings.Repeat(" ", indent) - output := fmt.Sprintf("%s%s {\n", spaces, node.nodeType) - nextIndent := indent + 2 - if node.value != nil { - if converted, ok := node.value.(fmt.Stringer); ok { - // Account for things like comparator nodes - // that are enums with a String() method. - output += fmt.Sprintf("%svalue: %s\n", strings.Repeat(" ", nextIndent), converted.String()) - } else { - output += fmt.Sprintf("%svalue: %#v\n", strings.Repeat(" ", nextIndent), node.value) - } - } - lastIndex := len(node.children) - if lastIndex > 0 { - output += fmt.Sprintf("%schildren: {\n", strings.Repeat(" ", nextIndent)) - childIndent := nextIndent + 2 - for _, elem := range node.children { - output += elem.PrettyPrint(childIndent) - } - } - output += fmt.Sprintf("%s}\n", spaces) - return output -} - -var bindingPowers = map[tokType]int{ - tEOF: 0, - tUnquotedIdentifier: 0, - tQuotedIdentifier: 0, - tRbracket: 0, - tRparen: 0, - tComma: 0, - tRbrace: 0, - tNumber: 0, - tCurrent: 0, - tExpref: 0, - tColon: 0, - tPipe: 1, - tOr: 2, - tAnd: 3, - tEQ: 5, - tLT: 5, - tLTE: 5, - tGT: 5, - tGTE: 5, - tNE: 5, - tFlatten: 9, - tStar: 20, - tFilter: 21, - tDot: 40, - tNot: 45, - tLbrace: 50, - tLbracket: 55, - tLparen: 60, -} - -// Parser holds state about the current expression being parsed. -type Parser struct { - expression string - tokens []token - index int -} - -// NewParser creates a new JMESPath parser. -func NewParser() *Parser { - p := Parser{} - return &p -} - -// Parse will compile a JMESPath expression. 
-func (p *Parser) Parse(expression string) (ASTNode, error) { - lexer := NewLexer() - p.expression = expression - p.index = 0 - tokens, err := lexer.tokenize(expression) - if err != nil { - return ASTNode{}, err - } - p.tokens = tokens - parsed, err := p.parseExpression(0) - if err != nil { - return ASTNode{}, err - } - if p.current() != tEOF { - return ASTNode{}, p.syntaxError(fmt.Sprintf( - "Unexpected token at the end of the expression: %s", p.current())) - } - return parsed, nil -} - -func (p *Parser) parseExpression(bindingPower int) (ASTNode, error) { - var err error - leftToken := p.lookaheadToken(0) - p.advance() - leftNode, err := p.nud(leftToken) - if err != nil { - return ASTNode{}, err - } - currentToken := p.current() - for bindingPower < bindingPowers[currentToken] { - p.advance() - leftNode, err = p.led(currentToken, leftNode) - if err != nil { - return ASTNode{}, err - } - currentToken = p.current() - } - return leftNode, nil -} - -func (p *Parser) parseIndexExpression() (ASTNode, error) { - if p.lookahead(0) == tColon || p.lookahead(1) == tColon { - return p.parseSliceExpression() - } - indexStr := p.lookaheadToken(0).value - parsedInt, err := strconv.Atoi(indexStr) - if err != nil { - return ASTNode{}, err - } - indexNode := ASTNode{nodeType: ASTIndex, value: parsedInt} - p.advance() - if err := p.match(tRbracket); err != nil { - return ASTNode{}, err - } - return indexNode, nil -} - -func (p *Parser) parseSliceExpression() (ASTNode, error) { - parts := []*int{nil, nil, nil} - index := 0 - current := p.current() - for current != tRbracket && index < 3 { - if current == tColon { - index++ - p.advance() - } else if current == tNumber { - parsedInt, err := strconv.Atoi(p.lookaheadToken(0).value) - if err != nil { - return ASTNode{}, err - } - parts[index] = &parsedInt - p.advance() - } else { - return ASTNode{}, p.syntaxError( - "Expected tColon or tNumber" + ", received: " + p.current().String()) - } - current = p.current() - } - if err := p.match(tRbracket); err != nil { - return ASTNode{}, err - } - return ASTNode{ - nodeType: ASTSlice, - value: parts, - }, nil -} - -func (p *Parser) match(tokenType tokType) error { - if p.current() == tokenType { - p.advance() - return nil - } - return p.syntaxError("Expected " + tokenType.String() + ", received: " + p.current().String()) -} - -func (p *Parser) led(tokenType tokType, node ASTNode) (ASTNode, error) { - switch tokenType { - case tDot: - if p.current() != tStar { - right, err := p.parseDotRHS(bindingPowers[tDot]) - return ASTNode{ - nodeType: ASTSubexpression, - children: []ASTNode{node, right}, - }, err - } - p.advance() - right, err := p.parseProjectionRHS(bindingPowers[tDot]) - return ASTNode{ - nodeType: ASTValueProjection, - children: []ASTNode{node, right}, - }, err - case tPipe: - right, err := p.parseExpression(bindingPowers[tPipe]) - return ASTNode{nodeType: ASTPipe, children: []ASTNode{node, right}}, err - case tOr: - right, err := p.parseExpression(bindingPowers[tOr]) - return ASTNode{nodeType: ASTOrExpression, children: []ASTNode{node, right}}, err - case tAnd: - right, err := p.parseExpression(bindingPowers[tAnd]) - return ASTNode{nodeType: ASTAndExpression, children: []ASTNode{node, right}}, err - case tLparen: - name := node.value - var args []ASTNode - for p.current() != tRparen { - expression, err := p.parseExpression(0) - if err != nil { - return ASTNode{}, err - } - if p.current() == tComma { - if err := p.match(tComma); err != nil { - return ASTNode{}, err - } - } - args = append(args, expression) - } - 
if err := p.match(tRparen); err != nil { - return ASTNode{}, err - } - return ASTNode{ - nodeType: ASTFunctionExpression, - value: name, - children: args, - }, nil - case tFilter: - return p.parseFilter(node) - case tFlatten: - left := ASTNode{nodeType: ASTFlatten, children: []ASTNode{node}} - right, err := p.parseProjectionRHS(bindingPowers[tFlatten]) - return ASTNode{ - nodeType: ASTProjection, - children: []ASTNode{left, right}, - }, err - case tEQ, tNE, tGT, tGTE, tLT, tLTE: - right, err := p.parseExpression(bindingPowers[tokenType]) - if err != nil { - return ASTNode{}, err - } - return ASTNode{ - nodeType: ASTComparator, - value: tokenType, - children: []ASTNode{node, right}, - }, nil - case tLbracket: - tokenType := p.current() - var right ASTNode - var err error - if tokenType == tNumber || tokenType == tColon { - right, err = p.parseIndexExpression() - if err != nil { - return ASTNode{}, err - } - return p.projectIfSlice(node, right) - } - // Otherwise this is a projection. - if err := p.match(tStar); err != nil { - return ASTNode{}, err - } - if err := p.match(tRbracket); err != nil { - return ASTNode{}, err - } - right, err = p.parseProjectionRHS(bindingPowers[tStar]) - if err != nil { - return ASTNode{}, err - } - return ASTNode{ - nodeType: ASTProjection, - children: []ASTNode{node, right}, - }, nil - } - return ASTNode{}, p.syntaxError("Unexpected token: " + tokenType.String()) -} - -func (p *Parser) nud(token token) (ASTNode, error) { - switch token.tokenType { - case tJSONLiteral: - var parsed interface{} - err := json.Unmarshal([]byte(token.value), &parsed) - if err != nil { - return ASTNode{}, err - } - return ASTNode{nodeType: ASTLiteral, value: parsed}, nil - case tStringLiteral: - return ASTNode{nodeType: ASTLiteral, value: token.value}, nil - case tUnquotedIdentifier: - return ASTNode{ - nodeType: ASTField, - value: token.value, - }, nil - case tQuotedIdentifier: - node := ASTNode{nodeType: ASTField, value: token.value} - if p.current() == tLparen { - return ASTNode{}, p.syntaxErrorToken("Can't have quoted identifier as function name.", token) - } - return node, nil - case tStar: - left := ASTNode{nodeType: ASTIdentity} - var right ASTNode - var err error - if p.current() == tRbracket { - right = ASTNode{nodeType: ASTIdentity} - } else { - right, err = p.parseProjectionRHS(bindingPowers[tStar]) - } - return ASTNode{nodeType: ASTValueProjection, children: []ASTNode{left, right}}, err - case tFilter: - return p.parseFilter(ASTNode{nodeType: ASTIdentity}) - case tLbrace: - return p.parseMultiSelectHash() - case tFlatten: - left := ASTNode{ - nodeType: ASTFlatten, - children: []ASTNode{{nodeType: ASTIdentity}}, - } - right, err := p.parseProjectionRHS(bindingPowers[tFlatten]) - if err != nil { - return ASTNode{}, err - } - return ASTNode{nodeType: ASTProjection, children: []ASTNode{left, right}}, nil - case tLbracket: - tokenType := p.current() - if tokenType == tNumber || tokenType == tColon { - right, err := p.parseIndexExpression() - if err != nil { - return ASTNode{}, err - } - return p.projectIfSlice(ASTNode{nodeType: ASTIdentity}, right) - } else if tokenType == tStar && p.lookahead(1) == tRbracket { - p.advance() - p.advance() - right, err := p.parseProjectionRHS(bindingPowers[tStar]) - if err != nil { - return ASTNode{}, err - } - return ASTNode{ - nodeType: ASTProjection, - children: []ASTNode{{nodeType: ASTIdentity}, right}, - }, nil - } else { - return p.parseMultiSelectList() - } - case tCurrent: - return ASTNode{nodeType: ASTCurrentNode}, nil - case tExpref: - expression, err := p.parseExpression(bindingPowers[tExpref]) - if err != nil { - return ASTNode{}, err - } - return ASTNode{nodeType: ASTExpRef, children: []ASTNode{expression}}, nil - case tNot: - expression, err := p.parseExpression(bindingPowers[tNot]) - if err != nil { - return ASTNode{}, err - } - return ASTNode{nodeType: ASTNotExpression, children: []ASTNode{expression}}, nil - case tLparen: - expression, err := p.parseExpression(0) - if err != nil { - return ASTNode{}, err - } - if err := p.match(tRparen); err != nil { - return ASTNode{}, err - } - return expression, nil - case tEOF: - return ASTNode{}, p.syntaxErrorToken("Incomplete expression", token) - } - - return ASTNode{}, p.syntaxErrorToken("Invalid token: "+token.tokenType.String(), token) -} - -func (p *Parser) parseMultiSelectList() (ASTNode, error) { - var expressions []ASTNode - for { - expression, err := p.parseExpression(0) - if err != nil { - return ASTNode{}, err - } - expressions = append(expressions, expression) - if p.current() == tRbracket { - break - } - err = p.match(tComma) - if err != nil { - return ASTNode{}, err - } - } - err := p.match(tRbracket) - if err != nil { - return ASTNode{}, err - } - return ASTNode{ - nodeType: ASTMultiSelectList, - children: expressions, - }, nil -} - -func (p *Parser) parseMultiSelectHash() (ASTNode, error) { - var children []ASTNode - for { - keyToken := p.lookaheadToken(0) - if err := p.match(tUnquotedIdentifier); err != nil { - if err := p.match(tQuotedIdentifier); err != nil { - return ASTNode{}, p.syntaxError("Expected tQuotedIdentifier or tUnquotedIdentifier") - } - } - keyName := keyToken.value - err := p.match(tColon) - if err != nil { - return ASTNode{}, err - } - value, err := p.parseExpression(0) - if err != nil { - return ASTNode{}, err - } - node := ASTNode{ - nodeType: ASTKeyValPair, - value: keyName, - children: []ASTNode{value}, - } - children = append(children, node) - if p.current() == tComma { - err := p.match(tComma) - if err != nil { - return ASTNode{}, err - } - } else if p.current() == tRbrace { - err := p.match(tRbrace) - if err != nil { - return ASTNode{}, err - } - break - } - } - return ASTNode{ - nodeType: ASTMultiSelectHash, - children: children, - }, nil -} - -func (p *Parser) projectIfSlice(left ASTNode, right ASTNode) (ASTNode, error) { - indexExpr := ASTNode{ - nodeType: ASTIndexExpression, - children: []ASTNode{left, right}, - } - if right.nodeType == ASTSlice { - right, err := p.parseProjectionRHS(bindingPowers[tStar]) - return ASTNode{ - nodeType: ASTProjection, - children: []ASTNode{indexExpr, right}, - }, err - } - return indexExpr, nil -} - -func (p *Parser) parseFilter(node ASTNode) (ASTNode, error) { - var right, condition ASTNode - var err error - condition, err = p.parseExpression(0) - if err != nil { - return ASTNode{}, err - } - if err := p.match(tRbracket); err != nil { - return ASTNode{}, err - } - if p.current() == tFlatten { - right = ASTNode{nodeType: ASTIdentity} - } else { - right, err = p.parseProjectionRHS(bindingPowers[tFilter]) - if err != nil { - return ASTNode{}, err - } - } - - return ASTNode{ - nodeType: ASTFilterProjection, - children: []ASTNode{node, right, condition}, - }, nil -} - -func (p *Parser) parseDotRHS(bindingPower int) (ASTNode, error) { - lookahead := p.current() - if tokensOneOf([]tokType{tQuotedIdentifier, tUnquotedIdentifier, tStar}, lookahead) { - return p.parseExpression(bindingPower) - } else if lookahead == tLbracket { - if err := p.match(tLbracket); err != nil 
{ - return ASTNode{}, err - } - return p.parseMultiSelectList() - } else if lookahead == tLbrace { - if err := p.match(tLbrace); err != nil { - return ASTNode{}, err - } - return p.parseMultiSelectHash() - } - return ASTNode{}, p.syntaxError("Expected identifier, lbracket, or lbrace") -} - -func (p *Parser) parseProjectionRHS(bindingPower int) (ASTNode, error) { - current := p.current() - if bindingPowers[current] < 10 { - return ASTNode{nodeType: ASTIdentity}, nil - } else if current == tLbracket { - return p.parseExpression(bindingPower) - } else if current == tFilter { - return p.parseExpression(bindingPower) - } else if current == tDot { - err := p.match(tDot) - if err != nil { - return ASTNode{}, err - } - return p.parseDotRHS(bindingPower) - } else { - return ASTNode{}, p.syntaxError("Error") - } -} - -func (p *Parser) lookahead(number int) tokType { - return p.lookaheadToken(number).tokenType -} - -func (p *Parser) current() tokType { - return p.lookahead(0) -} - -func (p *Parser) lookaheadToken(number int) token { - return p.tokens[p.index+number] -} - -func (p *Parser) advance() { - p.index++ -} - -func tokensOneOf(elements []tokType, token tokType) bool { - for _, elem := range elements { - if elem == token { - return true - } - } - return false -} - -func (p *Parser) syntaxError(msg string) SyntaxError { - return SyntaxError{ - msg: msg, - Expression: p.expression, - Offset: p.lookaheadToken(0).position, - } -} - -// Create a SyntaxError based on the provided token. -// This differs from syntaxError() which creates a SyntaxError -// based on the current lookahead token. -func (p *Parser) syntaxErrorToken(msg string, t token) SyntaxError { - return SyntaxError{ - msg: msg, - Expression: p.expression, - Offset: t.position, - } -} diff --git a/vendor/github.com/jmespath/go-jmespath/toktype_string.go b/vendor/github.com/jmespath/go-jmespath/toktype_string.go deleted file mode 100644 index dae79cb..0000000 --- a/vendor/github.com/jmespath/go-jmespath/toktype_string.go +++ /dev/null @@ -1,16 +0,0 @@ -// generated by stringer -type=tokType; DO NOT EDIT - -package jmespath - -import "fmt" - -const _tokType_name = "tUnknowntStartDottFiltertFlattentLparentRparentLbrackettRbrackettLbracetRbracetOrtPipetNumbertUnquotedIdentifiertQuotedIdentifiertCommatColontLTtLTEtGTtGTEtEQtNEtJSONLiteraltStringLiteraltCurrenttExpreftAndtNottEOF" - -var _tokType_index = [...]uint8{0, 8, 13, 17, 24, 32, 39, 46, 55, 64, 71, 78, 81, 86, 93, 112, 129, 135, 141, 144, 148, 151, 155, 158, 161, 173, 187, 195, 202, 206, 210, 214} - -func (i tokType) String() string { - if i < 0 || i >= tokType(len(_tokType_index)-1) { - return fmt.Sprintf("tokType(%d)", i) - } - return _tokType_name[_tokType_index[i]:_tokType_index[i+1]] -} diff --git a/vendor/github.com/jmespath/go-jmespath/util.go b/vendor/github.com/jmespath/go-jmespath/util.go deleted file mode 100644 index ddc1b7d..0000000 --- a/vendor/github.com/jmespath/go-jmespath/util.go +++ /dev/null @@ -1,185 +0,0 @@ -package jmespath - -import ( - "errors" - "reflect" -) - -// IsFalse determines if an object is false based on the JMESPath spec. -// JMESPath defines false values to be any of: -// - An empty string, array, or hash. -// - The boolean value false. 
-// - nil -func isFalse(value interface{}) bool { - switch v := value.(type) { - case bool: - return !v - case []interface{}: - return len(v) == 0 - case map[string]interface{}: - return len(v) == 0 - case string: - return len(v) == 0 - case nil: - return true - } - // Try the reflection cases before returning false. - rv := reflect.ValueOf(value) - switch rv.Kind() { - case reflect.Struct: - // A struct type will never be false, even if - // all of its values are the zero type. - return false - case reflect.Slice, reflect.Map: - return rv.Len() == 0 - case reflect.Ptr: - if rv.IsNil() { - return true - } - // If it's a pointer type, we'll try to deref the pointer - // and evaluate the pointer value for isFalse. - element := rv.Elem() - return isFalse(element.Interface()) - } - return false -} - -// ObjsEqual is a generic object equality check. -// It will take two arbitrary objects and recursively determine -// if they are equal. -func objsEqual(left interface{}, right interface{}) bool { - return reflect.DeepEqual(left, right) -} - -// SliceParam refers to a single part of a slice. -// A slice consists of a start, a stop, and a step, similar to -// python slices. -type sliceParam struct { - N int - Specified bool -} - -// Slice supports [start:stop:step] style slicing that's supported in JMESPath. -func slice(slice []interface{}, parts []sliceParam) ([]interface{}, error) { - computed, err := computeSliceParams(len(slice), parts) - if err != nil { - return nil, err - } - start, stop, step := computed[0], computed[1], computed[2] - result := []interface{}{} - if step > 0 { - for i := start; i < stop; i += step { - result = append(result, slice[i]) - } - } else { - for i := start; i > stop; i += step { - result = append(result, slice[i]) - } - } - return result, nil -} - -func computeSliceParams(length int, parts []sliceParam) ([]int, error) { - var start, stop, step int - if !parts[2].Specified { - step = 1 - } else if parts[2].N == 0 { - return nil, errors.New("Invalid slice, step cannot be 0") - } else { - step = parts[2].N - } - var stepValueNegative bool - if step < 0 { - stepValueNegative = true - } else { - stepValueNegative = false - } - - if !parts[0].Specified { - if stepValueNegative { - start = length - 1 - } else { - start = 0 - } - } else { - start = capSlice(length, parts[0].N, step) - } - - if !parts[1].Specified { - if stepValueNegative { - stop = -1 - } else { - stop = length - } - } else { - stop = capSlice(length, parts[1].N, step) - } - return []int{start, stop, step}, nil -} - -func capSlice(length int, actual int, step int) int { - if actual < 0 { - actual += length - if actual < 0 { - if step < 0 { - actual = -1 - } else { - actual = 0 - } - } - } else if actual >= length { - if step < 0 { - actual = length - 1 - } else { - actual = length - } - } - return actual -} - -// ToArrayNum converts an empty interface type to a slice of float64. -// If any element in the array cannot be converted, then nil is returned -// along with a second value of false. -func toArrayNum(data interface{}) ([]float64, bool) { - // Is there a better way to do this with reflect? - if d, ok := data.([]interface{}); ok { - result := make([]float64, len(d)) - for i, el := range d { - item, ok := el.(float64) - if !ok { - return nil, false - } - result[i] = item - } - return result, true - } - return nil, false -} - -// ToArrayStr converts an empty interface type to a slice of strings. 
-// If any element in the array cannot be converted, then nil is returned -// along with a second value of false. If the input data could be entirely -// converted, then the converted data, along with a second value of true, -// will be returned. -func toArrayStr(data interface{}) ([]string, bool) { - // Is there a better way to do this with reflect? - if d, ok := data.([]interface{}); ok { - result := make([]string, len(d)) - for i, el := range d { - item, ok := el.(string) - if !ok { - return nil, false - } - result[i] = item - } - return result, true - } - return nil, false -} - -func isSliceType(v interface{}) bool { - if v == nil { - return false - } - return reflect.TypeOf(v).Kind() == reflect.Slice -} diff --git a/vendor/github.com/kardianos/osext/LICENSE b/vendor/github.com/kardianos/osext/LICENSE deleted file mode 100644 index 7448756..0000000 --- a/vendor/github.com/kardianos/osext/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/kardianos/osext/README.md b/vendor/github.com/kardianos/osext/README.md deleted file mode 100644 index 15cbc3d..0000000 --- a/vendor/github.com/kardianos/osext/README.md +++ /dev/null @@ -1,21 +0,0 @@ -### Extensions to the "os" package. - -[![GoDoc](https://godoc.org/github.com/kardianos/osext?status.svg)](https://godoc.org/github.com/kardianos/osext) - -## Find the current Executable and ExecutableFolder. - -As of go1.8 the Executable function may be found in `os`. The Executable function -in the std lib `os` package is used if available. - -There is sometimes utility in finding the current executable file -that is running. This can be used for upgrading the current executable -or finding resources located relative to the executable file. Both -working directory and the os.Args[0] value are arbitrary and cannot -be relied on; os.Args[0] can be "faked". - -Multi-platform and supports: - * Linux - * OS X - * Windows - * Plan 9 - * BSDs. 
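The osext README above describes resolving paths relative to the running binary rather than the process working directory. A minimal sketch of that pattern, assuming a hypothetical `config.yml` lookup (the file name and `dir` handling are illustrative, not taken from this repository):

```go
package main

import (
	"fmt"
	"log"
	"path/filepath"

	"github.com/kardianos/osext"
)

func main() {
	// ExecutableFolder returns the directory containing the running
	// binary; unlike os.Getwd, it does not depend on where the
	// process was launched from, and os.Args[0] cannot be trusted.
	dir, err := osext.ExecutableFolder()
	if err != nil {
		log.Fatal(err)
	}
	// Hypothetical resource lookup relative to the binary.
	fmt.Println(filepath.Join(dir, "config.yml"))
}
```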
diff --git a/vendor/github.com/kardianos/osext/osext.go b/vendor/github.com/kardianos/osext/osext.go deleted file mode 100644 index 17f380f..0000000 --- a/vendor/github.com/kardianos/osext/osext.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Extensions to the standard "os" package. -package osext // import "github.com/kardianos/osext" - -import "path/filepath" - -var cx, ce = executableClean() - -func executableClean() (string, error) { - p, err := executable() - return filepath.Clean(p), err -} - -// Executable returns an absolute path that can be used to -// re-invoke the current program. -// It may not be valid after the current program exits. -func Executable() (string, error) { - return cx, ce -} - -// Returns same path as Executable, returns just the folder -// path. Excludes the executable name and any trailing slash. -func ExecutableFolder() (string, error) { - p, err := Executable() - if err != nil { - return "", err - } - - return filepath.Dir(p), nil -} diff --git a/vendor/github.com/kardianos/osext/osext_go18.go b/vendor/github.com/kardianos/osext/osext_go18.go deleted file mode 100644 index 009d8a9..0000000 --- a/vendor/github.com/kardianos/osext/osext_go18.go +++ /dev/null @@ -1,9 +0,0 @@ -//+build go1.8,!openbsd - -package osext - -import "os" - -func executable() (string, error) { - return os.Executable() -} diff --git a/vendor/github.com/kardianos/osext/osext_plan9.go b/vendor/github.com/kardianos/osext/osext_plan9.go deleted file mode 100644 index 95e2371..0000000 --- a/vendor/github.com/kardianos/osext/osext_plan9.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//+build !go1.8 - -package osext - -import ( - "os" - "strconv" - "syscall" -) - -func executable() (string, error) { - f, err := os.Open("/proc/" + strconv.Itoa(os.Getpid()) + "/text") - if err != nil { - return "", err - } - defer f.Close() - return syscall.Fd2path(int(f.Fd())) -} diff --git a/vendor/github.com/kardianos/osext/osext_procfs.go b/vendor/github.com/kardianos/osext/osext_procfs.go deleted file mode 100644 index e1f16f8..0000000 --- a/vendor/github.com/kardianos/osext/osext_procfs.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build !go1.8,android !go1.8,linux !go1.8,netbsd !go1.8,solaris !go1.8,dragonfly - -package osext - -import ( - "errors" - "fmt" - "os" - "runtime" - "strings" -) - -func executable() (string, error) { - switch runtime.GOOS { - case "linux", "android": - const deletedTag = " (deleted)" - execpath, err := os.Readlink("/proc/self/exe") - if err != nil { - return execpath, err - } - execpath = strings.TrimSuffix(execpath, deletedTag) - execpath = strings.TrimPrefix(execpath, deletedTag) - return execpath, nil - case "netbsd": - return os.Readlink("/proc/curproc/exe") - case "dragonfly": - return os.Readlink("/proc/curproc/file") - case "solaris": - return os.Readlink(fmt.Sprintf("/proc/%d/path/a.out", os.Getpid())) - } - return "", errors.New("ExecPath not implemented for " + runtime.GOOS) -} diff --git a/vendor/github.com/kardianos/osext/osext_sysctl.go b/vendor/github.com/kardianos/osext/osext_sysctl.go deleted file mode 100644 index 33cee25..0000000 --- a/vendor/github.com/kardianos/osext/osext_sysctl.go +++ /dev/null @@ -1,126 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.8,darwin !go1.8,freebsd openbsd - -package osext - -import ( - "os" - "os/exec" - "path/filepath" - "runtime" - "syscall" - "unsafe" -) - -var initCwd, initCwdErr = os.Getwd() - -func executable() (string, error) { - var mib [4]int32 - switch runtime.GOOS { - case "freebsd": - mib = [4]int32{1 /* CTL_KERN */, 14 /* KERN_PROC */, 12 /* KERN_PROC_PATHNAME */, -1} - case "darwin": - mib = [4]int32{1 /* CTL_KERN */, 38 /* KERN_PROCARGS */, int32(os.Getpid()), -1} - case "openbsd": - mib = [4]int32{1 /* CTL_KERN */, 55 /* KERN_PROC_ARGS */, int32(os.Getpid()), 1 /* KERN_PROC_ARGV */} - } - - n := uintptr(0) - // Get length. - _, _, errNum := syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), 4, 0, uintptr(unsafe.Pointer(&n)), 0, 0) - if errNum != 0 { - return "", errNum - } - if n == 0 { // This shouldn't happen. - return "", nil - } - buf := make([]byte, n) - _, _, errNum = syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), 4, uintptr(unsafe.Pointer(&buf[0])), uintptr(unsafe.Pointer(&n)), 0, 0) - if errNum != 0 { - return "", errNum - } - if n == 0 { // This shouldn't happen. - return "", nil - } - - var execPath string - switch runtime.GOOS { - case "openbsd": - // buf now contains **argv, with pointers to each of the C-style - // NULL terminated arguments. - var args []string - argv := uintptr(unsafe.Pointer(&buf[0])) - Loop: - for { - argp := *(**[1 << 20]byte)(unsafe.Pointer(argv)) - if argp == nil { - break - } - for i := 0; uintptr(i) < n; i++ { - // we don't want the full arguments list - if string(argp[i]) == " " { - break Loop - } - if argp[i] != 0 { - continue - } - args = append(args, string(argp[:i])) - n -= uintptr(i) - break - } - if n < unsafe.Sizeof(argv) { - break - } - argv += unsafe.Sizeof(argv) - n -= unsafe.Sizeof(argv) - } - execPath = args[0] - // There is no canonical way to get an executable path on - // OpenBSD, so check PATH in case we are called directly - if execPath[0] != '/' && execPath[0] != '.' { - execIsInPath, err := exec.LookPath(execPath) - if err == nil { - execPath = execIsInPath - } - } - default: - for i, v := range buf { - if v == 0 { - buf = buf[:i] - break - } - } - execPath = string(buf) - } - - var err error - // execPath will not be empty due to above checks. 
- // Try to get the absolute path if the execPath is not rooted. - if execPath[0] != '/' { - execPath, err = getAbs(execPath) - if err != nil { - return execPath, err - } - } - // For darwin KERN_PROCARGS may return the path to a symlink rather than the - // actual executable. - if runtime.GOOS == "darwin" { - if execPath, err = filepath.EvalSymlinks(execPath); err != nil { - return execPath, err - } - } - return execPath, nil -} - -func getAbs(execPath string) (string, error) { - if initCwdErr != nil { - return execPath, initCwdErr - } - // The execPath may begin with a "../" or a "./" so clean it first. - // Join the two paths, trailing and starting slashes undetermined, so use - // the generic Join function. - return filepath.Join(initCwd, filepath.Clean(execPath)), nil -} diff --git a/vendor/github.com/kardianos/osext/osext_windows.go b/vendor/github.com/kardianos/osext/osext_windows.go deleted file mode 100644 index 074b3b3..0000000 --- a/vendor/github.com/kardianos/osext/osext_windows.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//+build !go1.8 - -package osext - -import ( - "syscall" - "unicode/utf16" - "unsafe" -) - -var ( - kernel = syscall.MustLoadDLL("kernel32.dll") - getModuleFileNameProc = kernel.MustFindProc("GetModuleFileNameW") -) - -// GetModuleFileName() with hModule = NULL -func executable() (exePath string, err error) { - return getModuleFileName() -} - -func getModuleFileName() (string, error) { - var n uint32 - b := make([]uint16, syscall.MAX_PATH) - size := uint32(len(b)) - - r0, _, e1 := getModuleFileNameProc.Call(0, uintptr(unsafe.Pointer(&b[0])), uintptr(size)) - n = uint32(r0) - if n == 0 { - return "", e1 - } - return string(utf16.Decode(b[0:n])), nil -} diff --git a/vendor/github.com/mattn/go-runewidth/.travis.yml b/vendor/github.com/mattn/go-runewidth/.travis.yml deleted file mode 100644 index 5c9c2a3..0000000 --- a/vendor/github.com/mattn/go-runewidth/.travis.yml +++ /dev/null @@ -1,8 +0,0 @@ -language: go -go: - - tip -before_install: - - go get github.com/mattn/goveralls - - go get golang.org/x/tools/cmd/cover -script: - - $HOME/gopath/bin/goveralls -repotoken lAKAWPzcGsD3A8yBX3BGGtRUdJ6CaGERL diff --git a/vendor/github.com/mattn/go-runewidth/LICENSE b/vendor/github.com/mattn/go-runewidth/LICENSE deleted file mode 100644 index 91b5cef..0000000 --- a/vendor/github.com/mattn/go-runewidth/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2016 Yasuhiro Matsumoto - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/mattn/go-runewidth/README.mkd b/vendor/github.com/mattn/go-runewidth/README.mkd deleted file mode 100644 index 66663a9..0000000 --- a/vendor/github.com/mattn/go-runewidth/README.mkd +++ /dev/null @@ -1,27 +0,0 @@ -go-runewidth -============ - -[![Build Status](https://travis-ci.org/mattn/go-runewidth.png?branch=master)](https://travis-ci.org/mattn/go-runewidth) -[![Coverage Status](https://coveralls.io/repos/mattn/go-runewidth/badge.png?branch=HEAD)](https://coveralls.io/r/mattn/go-runewidth?branch=HEAD) -[![GoDoc](https://godoc.org/github.com/mattn/go-runewidth?status.svg)](http://godoc.org/github.com/mattn/go-runewidth) -[![Go Report Card](https://goreportcard.com/badge/github.com/mattn/go-runewidth)](https://goreportcard.com/report/github.com/mattn/go-runewidth) - -Provides functions to get fixed width of the character or string. - -Usage ------ - -```go -runewidth.StringWidth("つのだ☆HIRO") == 12 -``` - - -Author ------- - -Yasuhiro Matsumoto - -License -------- - -under the MIT License: http://mattn.mit-license.org/2013 diff --git a/vendor/github.com/mattn/go-runewidth/runewidth.go b/vendor/github.com/mattn/go-runewidth/runewidth.go deleted file mode 100644 index 82568a1..0000000 --- a/vendor/github.com/mattn/go-runewidth/runewidth.go +++ /dev/null @@ -1,1235 +0,0 @@ -package runewidth - -import "os" - -var ( - // EastAsianWidth will be set true if the current locale is CJK - EastAsianWidth bool - - // DefaultCondition is a condition in current locale - DefaultCondition = &Condition{EastAsianWidth} -) - -func init() { - env := os.Getenv("RUNEWIDTH_EASTASIAN") - if env == "" { - EastAsianWidth = IsEastAsian() - } else { - EastAsianWidth = env == "1" - } -} - -type interval struct { - first rune - last rune -} - -type table []interval - -func inTables(r rune, ts ...table) bool { - for _, t := range ts { - if inTable(r, t) { - return true - } - } - return false -} - -func inTable(r rune, t table) bool { - // func (t table) IncludesRune(r rune) bool { - if r < t[0].first { - return false - } - - bot := 0 - top := len(t) - 1 - for top >= bot { - mid := (bot + top) / 2 - - switch { - case t[mid].last < r: - bot = mid + 1 - case t[mid].first > r: - top = mid - 1 - default: - return true - } - } - - return false -} - -var private = table{ - {0x00E000, 0x00F8FF}, {0x0F0000, 0x0FFFFD}, {0x100000, 0x10FFFD}, -} - -var nonprint = table{ - {0x0000, 0x001F}, {0x007F, 0x009F}, {0x00AD, 0x00AD}, - {0x070F, 0x070F}, {0x180B, 0x180E}, {0x200B, 0x200F}, - {0x2028, 0x2029}, - {0x202A, 0x202E}, {0x206A, 0x206F}, {0xD800, 0xDFFF}, - {0xFEFF, 0xFEFF}, {0xFFF9, 0xFFFB}, {0xFFFE, 0xFFFF}, -} - -var combining = table{ - {0x0300, 0x036F}, {0x0483, 0x0489}, {0x0591, 0x05BD}, - {0x05BF, 0x05BF}, {0x05C1, 0x05C2}, {0x05C4, 0x05C5}, - {0x05C7, 0x05C7}, {0x0610, 0x061A}, {0x064B, 0x065F}, - {0x0670, 0x0670}, {0x06D6, 0x06DC}, {0x06DF, 0x06E4}, - {0x06E7, 0x06E8}, {0x06EA, 0x06ED}, {0x0711, 0x0711}, - {0x0730, 0x074A}, {0x07A6, 0x07B0}, {0x07EB, 0x07F3}, - {0x0816, 0x0819}, {0x081B, 0x0823}, {0x0825, 0x0827}, - {0x0829, 0x082D}, {0x0859, 0x085B}, {0x08D4, 0x08E1}, - {0x08E3, 0x0903}, {0x093A, 0x093C}, {0x093E, 0x094F}, - {0x0951, 0x0957}, {0x0962, 0x0963}, {0x0981, 0x0983}, - {0x09BC, 0x09BC}, {0x09BE, 0x09C4}, {0x09C7, 0x09C8}, - 
{0x09CB, 0x09CD}, {0x09D7, 0x09D7}, {0x09E2, 0x09E3}, - {0x0A01, 0x0A03}, {0x0A3C, 0x0A3C}, {0x0A3E, 0x0A42}, - {0x0A47, 0x0A48}, {0x0A4B, 0x0A4D}, {0x0A51, 0x0A51}, - {0x0A70, 0x0A71}, {0x0A75, 0x0A75}, {0x0A81, 0x0A83}, - {0x0ABC, 0x0ABC}, {0x0ABE, 0x0AC5}, {0x0AC7, 0x0AC9}, - {0x0ACB, 0x0ACD}, {0x0AE2, 0x0AE3}, {0x0B01, 0x0B03}, - {0x0B3C, 0x0B3C}, {0x0B3E, 0x0B44}, {0x0B47, 0x0B48}, - {0x0B4B, 0x0B4D}, {0x0B56, 0x0B57}, {0x0B62, 0x0B63}, - {0x0B82, 0x0B82}, {0x0BBE, 0x0BC2}, {0x0BC6, 0x0BC8}, - {0x0BCA, 0x0BCD}, {0x0BD7, 0x0BD7}, {0x0C00, 0x0C03}, - {0x0C3E, 0x0C44}, {0x0C46, 0x0C48}, {0x0C4A, 0x0C4D}, - {0x0C55, 0x0C56}, {0x0C62, 0x0C63}, {0x0C81, 0x0C83}, - {0x0CBC, 0x0CBC}, {0x0CBE, 0x0CC4}, {0x0CC6, 0x0CC8}, - {0x0CCA, 0x0CCD}, {0x0CD5, 0x0CD6}, {0x0CE2, 0x0CE3}, - {0x0D01, 0x0D03}, {0x0D3E, 0x0D44}, {0x0D46, 0x0D48}, - {0x0D4A, 0x0D4D}, {0x0D57, 0x0D57}, {0x0D62, 0x0D63}, - {0x0D82, 0x0D83}, {0x0DCA, 0x0DCA}, {0x0DCF, 0x0DD4}, - {0x0DD6, 0x0DD6}, {0x0DD8, 0x0DDF}, {0x0DF2, 0x0DF3}, - {0x0E31, 0x0E31}, {0x0E34, 0x0E3A}, {0x0E47, 0x0E4E}, - {0x0EB1, 0x0EB1}, {0x0EB4, 0x0EB9}, {0x0EBB, 0x0EBC}, - {0x0EC8, 0x0ECD}, {0x0F18, 0x0F19}, {0x0F35, 0x0F35}, - {0x0F37, 0x0F37}, {0x0F39, 0x0F39}, {0x0F3E, 0x0F3F}, - {0x0F71, 0x0F84}, {0x0F86, 0x0F87}, {0x0F8D, 0x0F97}, - {0x0F99, 0x0FBC}, {0x0FC6, 0x0FC6}, {0x102B, 0x103E}, - {0x1056, 0x1059}, {0x105E, 0x1060}, {0x1062, 0x1064}, - {0x1067, 0x106D}, {0x1071, 0x1074}, {0x1082, 0x108D}, - {0x108F, 0x108F}, {0x109A, 0x109D}, {0x135D, 0x135F}, - {0x1712, 0x1714}, {0x1732, 0x1734}, {0x1752, 0x1753}, - {0x1772, 0x1773}, {0x17B4, 0x17D3}, {0x17DD, 0x17DD}, - {0x180B, 0x180D}, {0x1885, 0x1886}, {0x18A9, 0x18A9}, - {0x1920, 0x192B}, {0x1930, 0x193B}, {0x1A17, 0x1A1B}, - {0x1A55, 0x1A5E}, {0x1A60, 0x1A7C}, {0x1A7F, 0x1A7F}, - {0x1AB0, 0x1ABE}, {0x1B00, 0x1B04}, {0x1B34, 0x1B44}, - {0x1B6B, 0x1B73}, {0x1B80, 0x1B82}, {0x1BA1, 0x1BAD}, - {0x1BE6, 0x1BF3}, {0x1C24, 0x1C37}, {0x1CD0, 0x1CD2}, - {0x1CD4, 0x1CE8}, {0x1CED, 0x1CED}, {0x1CF2, 0x1CF4}, - {0x1CF8, 0x1CF9}, {0x1DC0, 0x1DF5}, {0x1DFB, 0x1DFF}, - {0x20D0, 0x20F0}, {0x2CEF, 0x2CF1}, {0x2D7F, 0x2D7F}, - {0x2DE0, 0x2DFF}, {0x302A, 0x302F}, {0x3099, 0x309A}, - {0xA66F, 0xA672}, {0xA674, 0xA67D}, {0xA69E, 0xA69F}, - {0xA6F0, 0xA6F1}, {0xA802, 0xA802}, {0xA806, 0xA806}, - {0xA80B, 0xA80B}, {0xA823, 0xA827}, {0xA880, 0xA881}, - {0xA8B4, 0xA8C5}, {0xA8E0, 0xA8F1}, {0xA926, 0xA92D}, - {0xA947, 0xA953}, {0xA980, 0xA983}, {0xA9B3, 0xA9C0}, - {0xA9E5, 0xA9E5}, {0xAA29, 0xAA36}, {0xAA43, 0xAA43}, - {0xAA4C, 0xAA4D}, {0xAA7B, 0xAA7D}, {0xAAB0, 0xAAB0}, - {0xAAB2, 0xAAB4}, {0xAAB7, 0xAAB8}, {0xAABE, 0xAABF}, - {0xAAC1, 0xAAC1}, {0xAAEB, 0xAAEF}, {0xAAF5, 0xAAF6}, - {0xABE3, 0xABEA}, {0xABEC, 0xABED}, {0xFB1E, 0xFB1E}, - {0xFE00, 0xFE0F}, {0xFE20, 0xFE2F}, {0x101FD, 0x101FD}, - {0x102E0, 0x102E0}, {0x10376, 0x1037A}, {0x10A01, 0x10A03}, - {0x10A05, 0x10A06}, {0x10A0C, 0x10A0F}, {0x10A38, 0x10A3A}, - {0x10A3F, 0x10A3F}, {0x10AE5, 0x10AE6}, {0x11000, 0x11002}, - {0x11038, 0x11046}, {0x1107F, 0x11082}, {0x110B0, 0x110BA}, - {0x11100, 0x11102}, {0x11127, 0x11134}, {0x11173, 0x11173}, - {0x11180, 0x11182}, {0x111B3, 0x111C0}, {0x111CA, 0x111CC}, - {0x1122C, 0x11237}, {0x1123E, 0x1123E}, {0x112DF, 0x112EA}, - {0x11300, 0x11303}, {0x1133C, 0x1133C}, {0x1133E, 0x11344}, - {0x11347, 0x11348}, {0x1134B, 0x1134D}, {0x11357, 0x11357}, - {0x11362, 0x11363}, {0x11366, 0x1136C}, {0x11370, 0x11374}, - {0x11435, 0x11446}, {0x114B0, 0x114C3}, {0x115AF, 0x115B5}, - {0x115B8, 0x115C0}, {0x115DC, 0x115DD}, {0x11630, 0x11640}, - 
{0x116AB, 0x116B7}, {0x1171D, 0x1172B}, {0x11C2F, 0x11C36}, - {0x11C38, 0x11C3F}, {0x11C92, 0x11CA7}, {0x11CA9, 0x11CB6}, - {0x16AF0, 0x16AF4}, {0x16B30, 0x16B36}, {0x16F51, 0x16F7E}, - {0x16F8F, 0x16F92}, {0x1BC9D, 0x1BC9E}, {0x1D165, 0x1D169}, - {0x1D16D, 0x1D172}, {0x1D17B, 0x1D182}, {0x1D185, 0x1D18B}, - {0x1D1AA, 0x1D1AD}, {0x1D242, 0x1D244}, {0x1DA00, 0x1DA36}, - {0x1DA3B, 0x1DA6C}, {0x1DA75, 0x1DA75}, {0x1DA84, 0x1DA84}, - {0x1DA9B, 0x1DA9F}, {0x1DAA1, 0x1DAAF}, {0x1E000, 0x1E006}, - {0x1E008, 0x1E018}, {0x1E01B, 0x1E021}, {0x1E023, 0x1E024}, - {0x1E026, 0x1E02A}, {0x1E8D0, 0x1E8D6}, {0x1E944, 0x1E94A}, - {0xE0100, 0xE01EF}, -} - -var doublewidth = table{ - {0x1100, 0x115F}, {0x231A, 0x231B}, {0x2329, 0x232A}, - {0x23E9, 0x23EC}, {0x23F0, 0x23F0}, {0x23F3, 0x23F3}, - {0x25FD, 0x25FE}, {0x2614, 0x2615}, {0x2648, 0x2653}, - {0x267F, 0x267F}, {0x2693, 0x2693}, {0x26A1, 0x26A1}, - {0x26AA, 0x26AB}, {0x26BD, 0x26BE}, {0x26C4, 0x26C5}, - {0x26CE, 0x26CE}, {0x26D4, 0x26D4}, {0x26EA, 0x26EA}, - {0x26F2, 0x26F3}, {0x26F5, 0x26F5}, {0x26FA, 0x26FA}, - {0x26FD, 0x26FD}, {0x2705, 0x2705}, {0x270A, 0x270B}, - {0x2728, 0x2728}, {0x274C, 0x274C}, {0x274E, 0x274E}, - {0x2753, 0x2755}, {0x2757, 0x2757}, {0x2795, 0x2797}, - {0x27B0, 0x27B0}, {0x27BF, 0x27BF}, {0x2B1B, 0x2B1C}, - {0x2B50, 0x2B50}, {0x2B55, 0x2B55}, {0x2E80, 0x2E99}, - {0x2E9B, 0x2EF3}, {0x2F00, 0x2FD5}, {0x2FF0, 0x2FFB}, - {0x3000, 0x303E}, {0x3041, 0x3096}, {0x3099, 0x30FF}, - {0x3105, 0x312D}, {0x3131, 0x318E}, {0x3190, 0x31BA}, - {0x31C0, 0x31E3}, {0x31F0, 0x321E}, {0x3220, 0x3247}, - {0x3250, 0x32FE}, {0x3300, 0x4DBF}, {0x4E00, 0xA48C}, - {0xA490, 0xA4C6}, {0xA960, 0xA97C}, {0xAC00, 0xD7A3}, - {0xF900, 0xFAFF}, {0xFE10, 0xFE19}, {0xFE30, 0xFE52}, - {0xFE54, 0xFE66}, {0xFE68, 0xFE6B}, {0xFF01, 0xFF60}, - {0xFFE0, 0xFFE6}, {0x16FE0, 0x16FE0}, {0x17000, 0x187EC}, - {0x18800, 0x18AF2}, {0x1B000, 0x1B001}, {0x1F004, 0x1F004}, - {0x1F0CF, 0x1F0CF}, {0x1F18E, 0x1F18E}, {0x1F191, 0x1F19A}, - {0x1F200, 0x1F202}, {0x1F210, 0x1F23B}, {0x1F240, 0x1F248}, - {0x1F250, 0x1F251}, {0x1F300, 0x1F320}, {0x1F32D, 0x1F335}, - {0x1F337, 0x1F37C}, {0x1F37E, 0x1F393}, {0x1F3A0, 0x1F3CA}, - {0x1F3CF, 0x1F3D3}, {0x1F3E0, 0x1F3F0}, {0x1F3F4, 0x1F3F4}, - {0x1F3F8, 0x1F43E}, {0x1F440, 0x1F440}, {0x1F442, 0x1F4FC}, - {0x1F4FF, 0x1F53D}, {0x1F54B, 0x1F54E}, {0x1F550, 0x1F567}, - {0x1F57A, 0x1F57A}, {0x1F595, 0x1F596}, {0x1F5A4, 0x1F5A4}, - {0x1F5FB, 0x1F64F}, {0x1F680, 0x1F6C5}, {0x1F6CC, 0x1F6CC}, - {0x1F6D0, 0x1F6D2}, {0x1F6EB, 0x1F6EC}, {0x1F6F4, 0x1F6F6}, - {0x1F910, 0x1F91E}, {0x1F920, 0x1F927}, {0x1F930, 0x1F930}, - {0x1F933, 0x1F93E}, {0x1F940, 0x1F94B}, {0x1F950, 0x1F95E}, - {0x1F980, 0x1F991}, {0x1F9C0, 0x1F9C0}, {0x20000, 0x2FFFD}, - {0x30000, 0x3FFFD}, -} - -var ambiguous = table{ - {0x00A1, 0x00A1}, {0x00A4, 0x00A4}, {0x00A7, 0x00A8}, - {0x00AA, 0x00AA}, {0x00AD, 0x00AE}, {0x00B0, 0x00B4}, - {0x00B6, 0x00BA}, {0x00BC, 0x00BF}, {0x00C6, 0x00C6}, - {0x00D0, 0x00D0}, {0x00D7, 0x00D8}, {0x00DE, 0x00E1}, - {0x00E6, 0x00E6}, {0x00E8, 0x00EA}, {0x00EC, 0x00ED}, - {0x00F0, 0x00F0}, {0x00F2, 0x00F3}, {0x00F7, 0x00FA}, - {0x00FC, 0x00FC}, {0x00FE, 0x00FE}, {0x0101, 0x0101}, - {0x0111, 0x0111}, {0x0113, 0x0113}, {0x011B, 0x011B}, - {0x0126, 0x0127}, {0x012B, 0x012B}, {0x0131, 0x0133}, - {0x0138, 0x0138}, {0x013F, 0x0142}, {0x0144, 0x0144}, - {0x0148, 0x014B}, {0x014D, 0x014D}, {0x0152, 0x0153}, - {0x0166, 0x0167}, {0x016B, 0x016B}, {0x01CE, 0x01CE}, - {0x01D0, 0x01D0}, {0x01D2, 0x01D2}, {0x01D4, 0x01D4}, - {0x01D6, 0x01D6}, {0x01D8, 0x01D8}, {0x01DA, 0x01DA}, 
- {0x01DC, 0x01DC}, {0x0251, 0x0251}, {0x0261, 0x0261}, - {0x02C4, 0x02C4}, {0x02C7, 0x02C7}, {0x02C9, 0x02CB}, - {0x02CD, 0x02CD}, {0x02D0, 0x02D0}, {0x02D8, 0x02DB}, - {0x02DD, 0x02DD}, {0x02DF, 0x02DF}, {0x0300, 0x036F}, - {0x0391, 0x03A1}, {0x03A3, 0x03A9}, {0x03B1, 0x03C1}, - {0x03C3, 0x03C9}, {0x0401, 0x0401}, {0x0410, 0x044F}, - {0x0451, 0x0451}, {0x2010, 0x2010}, {0x2013, 0x2016}, - {0x2018, 0x2019}, {0x201C, 0x201D}, {0x2020, 0x2022}, - {0x2024, 0x2027}, {0x2030, 0x2030}, {0x2032, 0x2033}, - {0x2035, 0x2035}, {0x203B, 0x203B}, {0x203E, 0x203E}, - {0x2074, 0x2074}, {0x207F, 0x207F}, {0x2081, 0x2084}, - {0x20AC, 0x20AC}, {0x2103, 0x2103}, {0x2105, 0x2105}, - {0x2109, 0x2109}, {0x2113, 0x2113}, {0x2116, 0x2116}, - {0x2121, 0x2122}, {0x2126, 0x2126}, {0x212B, 0x212B}, - {0x2153, 0x2154}, {0x215B, 0x215E}, {0x2160, 0x216B}, - {0x2170, 0x2179}, {0x2189, 0x2189}, {0x2190, 0x2199}, - {0x21B8, 0x21B9}, {0x21D2, 0x21D2}, {0x21D4, 0x21D4}, - {0x21E7, 0x21E7}, {0x2200, 0x2200}, {0x2202, 0x2203}, - {0x2207, 0x2208}, {0x220B, 0x220B}, {0x220F, 0x220F}, - {0x2211, 0x2211}, {0x2215, 0x2215}, {0x221A, 0x221A}, - {0x221D, 0x2220}, {0x2223, 0x2223}, {0x2225, 0x2225}, - {0x2227, 0x222C}, {0x222E, 0x222E}, {0x2234, 0x2237}, - {0x223C, 0x223D}, {0x2248, 0x2248}, {0x224C, 0x224C}, - {0x2252, 0x2252}, {0x2260, 0x2261}, {0x2264, 0x2267}, - {0x226A, 0x226B}, {0x226E, 0x226F}, {0x2282, 0x2283}, - {0x2286, 0x2287}, {0x2295, 0x2295}, {0x2299, 0x2299}, - {0x22A5, 0x22A5}, {0x22BF, 0x22BF}, {0x2312, 0x2312}, - {0x2460, 0x24E9}, {0x24EB, 0x254B}, {0x2550, 0x2573}, - {0x2580, 0x258F}, {0x2592, 0x2595}, {0x25A0, 0x25A1}, - {0x25A3, 0x25A9}, {0x25B2, 0x25B3}, {0x25B6, 0x25B7}, - {0x25BC, 0x25BD}, {0x25C0, 0x25C1}, {0x25C6, 0x25C8}, - {0x25CB, 0x25CB}, {0x25CE, 0x25D1}, {0x25E2, 0x25E5}, - {0x25EF, 0x25EF}, {0x2605, 0x2606}, {0x2609, 0x2609}, - {0x260E, 0x260F}, {0x261C, 0x261C}, {0x261E, 0x261E}, - {0x2640, 0x2640}, {0x2642, 0x2642}, {0x2660, 0x2661}, - {0x2663, 0x2665}, {0x2667, 0x266A}, {0x266C, 0x266D}, - {0x266F, 0x266F}, {0x269E, 0x269F}, {0x26BF, 0x26BF}, - {0x26C6, 0x26CD}, {0x26CF, 0x26D3}, {0x26D5, 0x26E1}, - {0x26E3, 0x26E3}, {0x26E8, 0x26E9}, {0x26EB, 0x26F1}, - {0x26F4, 0x26F4}, {0x26F6, 0x26F9}, {0x26FB, 0x26FC}, - {0x26FE, 0x26FF}, {0x273D, 0x273D}, {0x2776, 0x277F}, - {0x2B56, 0x2B59}, {0x3248, 0x324F}, {0xE000, 0xF8FF}, - {0xFE00, 0xFE0F}, {0xFFFD, 0xFFFD}, {0x1F100, 0x1F10A}, - {0x1F110, 0x1F12D}, {0x1F130, 0x1F169}, {0x1F170, 0x1F18D}, - {0x1F18F, 0x1F190}, {0x1F19B, 0x1F1AC}, {0xE0100, 0xE01EF}, - {0xF0000, 0xFFFFD}, {0x100000, 0x10FFFD}, -} - -var emoji = table{ - {0x1F1E6, 0x1F1FF}, {0x1F321, 0x1F321}, {0x1F324, 0x1F32C}, - {0x1F336, 0x1F336}, {0x1F37D, 0x1F37D}, {0x1F396, 0x1F397}, - {0x1F399, 0x1F39B}, {0x1F39E, 0x1F39F}, {0x1F3CB, 0x1F3CE}, - {0x1F3D4, 0x1F3DF}, {0x1F3F3, 0x1F3F5}, {0x1F3F7, 0x1F3F7}, - {0x1F43F, 0x1F43F}, {0x1F441, 0x1F441}, {0x1F4FD, 0x1F4FD}, - {0x1F549, 0x1F54A}, {0x1F56F, 0x1F570}, {0x1F573, 0x1F579}, - {0x1F587, 0x1F587}, {0x1F58A, 0x1F58D}, {0x1F590, 0x1F590}, - {0x1F5A5, 0x1F5A5}, {0x1F5A8, 0x1F5A8}, {0x1F5B1, 0x1F5B2}, - {0x1F5BC, 0x1F5BC}, {0x1F5C2, 0x1F5C4}, {0x1F5D1, 0x1F5D3}, - {0x1F5DC, 0x1F5DE}, {0x1F5E1, 0x1F5E1}, {0x1F5E3, 0x1F5E3}, - {0x1F5E8, 0x1F5E8}, {0x1F5EF, 0x1F5EF}, {0x1F5F3, 0x1F5F3}, - {0x1F5FA, 0x1F5FA}, {0x1F6CB, 0x1F6CF}, {0x1F6E0, 0x1F6E5}, - {0x1F6E9, 0x1F6E9}, {0x1F6F0, 0x1F6F0}, {0x1F6F3, 0x1F6F3}, -} - -var notassigned = table{ - {0x0378, 0x0379}, {0x0380, 0x0383}, {0x038B, 0x038B}, - {0x038D, 0x038D}, {0x03A2, 0x03A2}, {0x0530, 0x0530}, - 
{0x0557, 0x0558}, {0x0560, 0x0560}, {0x0588, 0x0588}, - {0x058B, 0x058C}, {0x0590, 0x0590}, {0x05C8, 0x05CF}, - {0x05EB, 0x05EF}, {0x05F5, 0x05FF}, {0x061D, 0x061D}, - {0x070E, 0x070E}, {0x074B, 0x074C}, {0x07B2, 0x07BF}, - {0x07FB, 0x07FF}, {0x082E, 0x082F}, {0x083F, 0x083F}, - {0x085C, 0x085D}, {0x085F, 0x089F}, {0x08B5, 0x08B5}, - {0x08BE, 0x08D3}, {0x0984, 0x0984}, {0x098D, 0x098E}, - {0x0991, 0x0992}, {0x09A9, 0x09A9}, {0x09B1, 0x09B1}, - {0x09B3, 0x09B5}, {0x09BA, 0x09BB}, {0x09C5, 0x09C6}, - {0x09C9, 0x09CA}, {0x09CF, 0x09D6}, {0x09D8, 0x09DB}, - {0x09DE, 0x09DE}, {0x09E4, 0x09E5}, {0x09FC, 0x0A00}, - {0x0A04, 0x0A04}, {0x0A0B, 0x0A0E}, {0x0A11, 0x0A12}, - {0x0A29, 0x0A29}, {0x0A31, 0x0A31}, {0x0A34, 0x0A34}, - {0x0A37, 0x0A37}, {0x0A3A, 0x0A3B}, {0x0A3D, 0x0A3D}, - {0x0A43, 0x0A46}, {0x0A49, 0x0A4A}, {0x0A4E, 0x0A50}, - {0x0A52, 0x0A58}, {0x0A5D, 0x0A5D}, {0x0A5F, 0x0A65}, - {0x0A76, 0x0A80}, {0x0A84, 0x0A84}, {0x0A8E, 0x0A8E}, - {0x0A92, 0x0A92}, {0x0AA9, 0x0AA9}, {0x0AB1, 0x0AB1}, - {0x0AB4, 0x0AB4}, {0x0ABA, 0x0ABB}, {0x0AC6, 0x0AC6}, - {0x0ACA, 0x0ACA}, {0x0ACE, 0x0ACF}, {0x0AD1, 0x0ADF}, - {0x0AE4, 0x0AE5}, {0x0AF2, 0x0AF8}, {0x0AFA, 0x0B00}, - {0x0B04, 0x0B04}, {0x0B0D, 0x0B0E}, {0x0B11, 0x0B12}, - {0x0B29, 0x0B29}, {0x0B31, 0x0B31}, {0x0B34, 0x0B34}, - {0x0B3A, 0x0B3B}, {0x0B45, 0x0B46}, {0x0B49, 0x0B4A}, - {0x0B4E, 0x0B55}, {0x0B58, 0x0B5B}, {0x0B5E, 0x0B5E}, - {0x0B64, 0x0B65}, {0x0B78, 0x0B81}, {0x0B84, 0x0B84}, - {0x0B8B, 0x0B8D}, {0x0B91, 0x0B91}, {0x0B96, 0x0B98}, - {0x0B9B, 0x0B9B}, {0x0B9D, 0x0B9D}, {0x0BA0, 0x0BA2}, - {0x0BA5, 0x0BA7}, {0x0BAB, 0x0BAD}, {0x0BBA, 0x0BBD}, - {0x0BC3, 0x0BC5}, {0x0BC9, 0x0BC9}, {0x0BCE, 0x0BCF}, - {0x0BD1, 0x0BD6}, {0x0BD8, 0x0BE5}, {0x0BFB, 0x0BFF}, - {0x0C04, 0x0C04}, {0x0C0D, 0x0C0D}, {0x0C11, 0x0C11}, - {0x0C29, 0x0C29}, {0x0C3A, 0x0C3C}, {0x0C45, 0x0C45}, - {0x0C49, 0x0C49}, {0x0C4E, 0x0C54}, {0x0C57, 0x0C57}, - {0x0C5B, 0x0C5F}, {0x0C64, 0x0C65}, {0x0C70, 0x0C77}, - {0x0C84, 0x0C84}, {0x0C8D, 0x0C8D}, {0x0C91, 0x0C91}, - {0x0CA9, 0x0CA9}, {0x0CB4, 0x0CB4}, {0x0CBA, 0x0CBB}, - {0x0CC5, 0x0CC5}, {0x0CC9, 0x0CC9}, {0x0CCE, 0x0CD4}, - {0x0CD7, 0x0CDD}, {0x0CDF, 0x0CDF}, {0x0CE4, 0x0CE5}, - {0x0CF0, 0x0CF0}, {0x0CF3, 0x0D00}, {0x0D04, 0x0D04}, - {0x0D0D, 0x0D0D}, {0x0D11, 0x0D11}, {0x0D3B, 0x0D3C}, - {0x0D45, 0x0D45}, {0x0D49, 0x0D49}, {0x0D50, 0x0D53}, - {0x0D64, 0x0D65}, {0x0D80, 0x0D81}, {0x0D84, 0x0D84}, - {0x0D97, 0x0D99}, {0x0DB2, 0x0DB2}, {0x0DBC, 0x0DBC}, - {0x0DBE, 0x0DBF}, {0x0DC7, 0x0DC9}, {0x0DCB, 0x0DCE}, - {0x0DD5, 0x0DD5}, {0x0DD7, 0x0DD7}, {0x0DE0, 0x0DE5}, - {0x0DF0, 0x0DF1}, {0x0DF5, 0x0E00}, {0x0E3B, 0x0E3E}, - {0x0E5C, 0x0E80}, {0x0E83, 0x0E83}, {0x0E85, 0x0E86}, - {0x0E89, 0x0E89}, {0x0E8B, 0x0E8C}, {0x0E8E, 0x0E93}, - {0x0E98, 0x0E98}, {0x0EA0, 0x0EA0}, {0x0EA4, 0x0EA4}, - {0x0EA6, 0x0EA6}, {0x0EA8, 0x0EA9}, {0x0EAC, 0x0EAC}, - {0x0EBA, 0x0EBA}, {0x0EBE, 0x0EBF}, {0x0EC5, 0x0EC5}, - {0x0EC7, 0x0EC7}, {0x0ECE, 0x0ECF}, {0x0EDA, 0x0EDB}, - {0x0EE0, 0x0EFF}, {0x0F48, 0x0F48}, {0x0F6D, 0x0F70}, - {0x0F98, 0x0F98}, {0x0FBD, 0x0FBD}, {0x0FCD, 0x0FCD}, - {0x0FDB, 0x0FFF}, {0x10C6, 0x10C6}, {0x10C8, 0x10CC}, - {0x10CE, 0x10CF}, {0x1249, 0x1249}, {0x124E, 0x124F}, - {0x1257, 0x1257}, {0x1259, 0x1259}, {0x125E, 0x125F}, - {0x1289, 0x1289}, {0x128E, 0x128F}, {0x12B1, 0x12B1}, - {0x12B6, 0x12B7}, {0x12BF, 0x12BF}, {0x12C1, 0x12C1}, - {0x12C6, 0x12C7}, {0x12D7, 0x12D7}, {0x1311, 0x1311}, - {0x1316, 0x1317}, {0x135B, 0x135C}, {0x137D, 0x137F}, - {0x139A, 0x139F}, {0x13F6, 0x13F7}, {0x13FE, 0x13FF}, - {0x169D, 0x169F}, {0x16F9, 
0x16FF}, {0x170D, 0x170D}, - {0x1715, 0x171F}, {0x1737, 0x173F}, {0x1754, 0x175F}, - {0x176D, 0x176D}, {0x1771, 0x1771}, {0x1774, 0x177F}, - {0x17DE, 0x17DF}, {0x17EA, 0x17EF}, {0x17FA, 0x17FF}, - {0x180F, 0x180F}, {0x181A, 0x181F}, {0x1878, 0x187F}, - {0x18AB, 0x18AF}, {0x18F6, 0x18FF}, {0x191F, 0x191F}, - {0x192C, 0x192F}, {0x193C, 0x193F}, {0x1941, 0x1943}, - {0x196E, 0x196F}, {0x1975, 0x197F}, {0x19AC, 0x19AF}, - {0x19CA, 0x19CF}, {0x19DB, 0x19DD}, {0x1A1C, 0x1A1D}, - {0x1A5F, 0x1A5F}, {0x1A7D, 0x1A7E}, {0x1A8A, 0x1A8F}, - {0x1A9A, 0x1A9F}, {0x1AAE, 0x1AAF}, {0x1ABF, 0x1AFF}, - {0x1B4C, 0x1B4F}, {0x1B7D, 0x1B7F}, {0x1BF4, 0x1BFB}, - {0x1C38, 0x1C3A}, {0x1C4A, 0x1C4C}, {0x1C89, 0x1CBF}, - {0x1CC8, 0x1CCF}, {0x1CF7, 0x1CF7}, {0x1CFA, 0x1CFF}, - {0x1DF6, 0x1DFA}, {0x1F16, 0x1F17}, {0x1F1E, 0x1F1F}, - {0x1F46, 0x1F47}, {0x1F4E, 0x1F4F}, {0x1F58, 0x1F58}, - {0x1F5A, 0x1F5A}, {0x1F5C, 0x1F5C}, {0x1F5E, 0x1F5E}, - {0x1F7E, 0x1F7F}, {0x1FB5, 0x1FB5}, {0x1FC5, 0x1FC5}, - {0x1FD4, 0x1FD5}, {0x1FDC, 0x1FDC}, {0x1FF0, 0x1FF1}, - {0x1FF5, 0x1FF5}, {0x1FFF, 0x1FFF}, {0x2065, 0x2065}, - {0x2072, 0x2073}, {0x208F, 0x208F}, {0x209D, 0x209F}, - {0x20BF, 0x20CF}, {0x20F1, 0x20FF}, {0x218C, 0x218F}, - {0x23FF, 0x23FF}, {0x2427, 0x243F}, {0x244B, 0x245F}, - {0x2B74, 0x2B75}, {0x2B96, 0x2B97}, {0x2BBA, 0x2BBC}, - {0x2BC9, 0x2BC9}, {0x2BD2, 0x2BEB}, {0x2BF0, 0x2BFF}, - {0x2C2F, 0x2C2F}, {0x2C5F, 0x2C5F}, {0x2CF4, 0x2CF8}, - {0x2D26, 0x2D26}, {0x2D28, 0x2D2C}, {0x2D2E, 0x2D2F}, - {0x2D68, 0x2D6E}, {0x2D71, 0x2D7E}, {0x2D97, 0x2D9F}, - {0x2DA7, 0x2DA7}, {0x2DAF, 0x2DAF}, {0x2DB7, 0x2DB7}, - {0x2DBF, 0x2DBF}, {0x2DC7, 0x2DC7}, {0x2DCF, 0x2DCF}, - {0x2DD7, 0x2DD7}, {0x2DDF, 0x2DDF}, {0x2E45, 0x2E7F}, - {0x2E9A, 0x2E9A}, {0x2EF4, 0x2EFF}, {0x2FD6, 0x2FEF}, - {0x2FFC, 0x2FFF}, {0x3040, 0x3040}, {0x3097, 0x3098}, - {0x3100, 0x3104}, {0x312E, 0x3130}, {0x318F, 0x318F}, - {0x31BB, 0x31BF}, {0x31E4, 0x31EF}, {0x321F, 0x321F}, - {0x32FF, 0x32FF}, {0x4DB6, 0x4DBF}, {0x9FD6, 0x9FFF}, - {0xA48D, 0xA48F}, {0xA4C7, 0xA4CF}, {0xA62C, 0xA63F}, - {0xA6F8, 0xA6FF}, {0xA7AF, 0xA7AF}, {0xA7B8, 0xA7F6}, - {0xA82C, 0xA82F}, {0xA83A, 0xA83F}, {0xA878, 0xA87F}, - {0xA8C6, 0xA8CD}, {0xA8DA, 0xA8DF}, {0xA8FE, 0xA8FF}, - {0xA954, 0xA95E}, {0xA97D, 0xA97F}, {0xA9CE, 0xA9CE}, - {0xA9DA, 0xA9DD}, {0xA9FF, 0xA9FF}, {0xAA37, 0xAA3F}, - {0xAA4E, 0xAA4F}, {0xAA5A, 0xAA5B}, {0xAAC3, 0xAADA}, - {0xAAF7, 0xAB00}, {0xAB07, 0xAB08}, {0xAB0F, 0xAB10}, - {0xAB17, 0xAB1F}, {0xAB27, 0xAB27}, {0xAB2F, 0xAB2F}, - {0xAB66, 0xAB6F}, {0xABEE, 0xABEF}, {0xABFA, 0xABFF}, - {0xD7A4, 0xD7AF}, {0xD7C7, 0xD7CA}, {0xD7FC, 0xD7FF}, - {0xFA6E, 0xFA6F}, {0xFADA, 0xFAFF}, {0xFB07, 0xFB12}, - {0xFB18, 0xFB1C}, {0xFB37, 0xFB37}, {0xFB3D, 0xFB3D}, - {0xFB3F, 0xFB3F}, {0xFB42, 0xFB42}, {0xFB45, 0xFB45}, - {0xFBC2, 0xFBD2}, {0xFD40, 0xFD4F}, {0xFD90, 0xFD91}, - {0xFDC8, 0xFDEF}, {0xFDFE, 0xFDFF}, {0xFE1A, 0xFE1F}, - {0xFE53, 0xFE53}, {0xFE67, 0xFE67}, {0xFE6C, 0xFE6F}, - {0xFE75, 0xFE75}, {0xFEFD, 0xFEFE}, {0xFF00, 0xFF00}, - {0xFFBF, 0xFFC1}, {0xFFC8, 0xFFC9}, {0xFFD0, 0xFFD1}, - {0xFFD8, 0xFFD9}, {0xFFDD, 0xFFDF}, {0xFFE7, 0xFFE7}, - {0xFFEF, 0xFFF8}, {0xFFFE, 0xFFFF}, {0x1000C, 0x1000C}, - {0x10027, 0x10027}, {0x1003B, 0x1003B}, {0x1003E, 0x1003E}, - {0x1004E, 0x1004F}, {0x1005E, 0x1007F}, {0x100FB, 0x100FF}, - {0x10103, 0x10106}, {0x10134, 0x10136}, {0x1018F, 0x1018F}, - {0x1019C, 0x1019F}, {0x101A1, 0x101CF}, {0x101FE, 0x1027F}, - {0x1029D, 0x1029F}, {0x102D1, 0x102DF}, {0x102FC, 0x102FF}, - {0x10324, 0x1032F}, {0x1034B, 0x1034F}, {0x1037B, 0x1037F}, - {0x1039E, 
0x1039E}, {0x103C4, 0x103C7}, {0x103D6, 0x103FF}, - {0x1049E, 0x1049F}, {0x104AA, 0x104AF}, {0x104D4, 0x104D7}, - {0x104FC, 0x104FF}, {0x10528, 0x1052F}, {0x10564, 0x1056E}, - {0x10570, 0x105FF}, {0x10737, 0x1073F}, {0x10756, 0x1075F}, - {0x10768, 0x107FF}, {0x10806, 0x10807}, {0x10809, 0x10809}, - {0x10836, 0x10836}, {0x10839, 0x1083B}, {0x1083D, 0x1083E}, - {0x10856, 0x10856}, {0x1089F, 0x108A6}, {0x108B0, 0x108DF}, - {0x108F3, 0x108F3}, {0x108F6, 0x108FA}, {0x1091C, 0x1091E}, - {0x1093A, 0x1093E}, {0x10940, 0x1097F}, {0x109B8, 0x109BB}, - {0x109D0, 0x109D1}, {0x10A04, 0x10A04}, {0x10A07, 0x10A0B}, - {0x10A14, 0x10A14}, {0x10A18, 0x10A18}, {0x10A34, 0x10A37}, - {0x10A3B, 0x10A3E}, {0x10A48, 0x10A4F}, {0x10A59, 0x10A5F}, - {0x10AA0, 0x10ABF}, {0x10AE7, 0x10AEA}, {0x10AF7, 0x10AFF}, - {0x10B36, 0x10B38}, {0x10B56, 0x10B57}, {0x10B73, 0x10B77}, - {0x10B92, 0x10B98}, {0x10B9D, 0x10BA8}, {0x10BB0, 0x10BFF}, - {0x10C49, 0x10C7F}, {0x10CB3, 0x10CBF}, {0x10CF3, 0x10CF9}, - {0x10D00, 0x10E5F}, {0x10E7F, 0x10FFF}, {0x1104E, 0x11051}, - {0x11070, 0x1107E}, {0x110C2, 0x110CF}, {0x110E9, 0x110EF}, - {0x110FA, 0x110FF}, {0x11135, 0x11135}, {0x11144, 0x1114F}, - {0x11177, 0x1117F}, {0x111CE, 0x111CF}, {0x111E0, 0x111E0}, - {0x111F5, 0x111FF}, {0x11212, 0x11212}, {0x1123F, 0x1127F}, - {0x11287, 0x11287}, {0x11289, 0x11289}, {0x1128E, 0x1128E}, - {0x1129E, 0x1129E}, {0x112AA, 0x112AF}, {0x112EB, 0x112EF}, - {0x112FA, 0x112FF}, {0x11304, 0x11304}, {0x1130D, 0x1130E}, - {0x11311, 0x11312}, {0x11329, 0x11329}, {0x11331, 0x11331}, - {0x11334, 0x11334}, {0x1133A, 0x1133B}, {0x11345, 0x11346}, - {0x11349, 0x1134A}, {0x1134E, 0x1134F}, {0x11351, 0x11356}, - {0x11358, 0x1135C}, {0x11364, 0x11365}, {0x1136D, 0x1136F}, - {0x11375, 0x113FF}, {0x1145A, 0x1145A}, {0x1145C, 0x1145C}, - {0x1145E, 0x1147F}, {0x114C8, 0x114CF}, {0x114DA, 0x1157F}, - {0x115B6, 0x115B7}, {0x115DE, 0x115FF}, {0x11645, 0x1164F}, - {0x1165A, 0x1165F}, {0x1166D, 0x1167F}, {0x116B8, 0x116BF}, - {0x116CA, 0x116FF}, {0x1171A, 0x1171C}, {0x1172C, 0x1172F}, - {0x11740, 0x1189F}, {0x118F3, 0x118FE}, {0x11900, 0x11ABF}, - {0x11AF9, 0x11BFF}, {0x11C09, 0x11C09}, {0x11C37, 0x11C37}, - {0x11C46, 0x11C4F}, {0x11C6D, 0x11C6F}, {0x11C90, 0x11C91}, - {0x11CA8, 0x11CA8}, {0x11CB7, 0x11FFF}, {0x1239A, 0x123FF}, - {0x1246F, 0x1246F}, {0x12475, 0x1247F}, {0x12544, 0x12FFF}, - {0x1342F, 0x143FF}, {0x14647, 0x167FF}, {0x16A39, 0x16A3F}, - {0x16A5F, 0x16A5F}, {0x16A6A, 0x16A6D}, {0x16A70, 0x16ACF}, - {0x16AEE, 0x16AEF}, {0x16AF6, 0x16AFF}, {0x16B46, 0x16B4F}, - {0x16B5A, 0x16B5A}, {0x16B62, 0x16B62}, {0x16B78, 0x16B7C}, - {0x16B90, 0x16EFF}, {0x16F45, 0x16F4F}, {0x16F7F, 0x16F8E}, - {0x16FA0, 0x16FDF}, {0x16FE1, 0x16FFF}, {0x187ED, 0x187FF}, - {0x18AF3, 0x1AFFF}, {0x1B002, 0x1BBFF}, {0x1BC6B, 0x1BC6F}, - {0x1BC7D, 0x1BC7F}, {0x1BC89, 0x1BC8F}, {0x1BC9A, 0x1BC9B}, - {0x1BCA4, 0x1CFFF}, {0x1D0F6, 0x1D0FF}, {0x1D127, 0x1D128}, - {0x1D1E9, 0x1D1FF}, {0x1D246, 0x1D2FF}, {0x1D357, 0x1D35F}, - {0x1D372, 0x1D3FF}, {0x1D455, 0x1D455}, {0x1D49D, 0x1D49D}, - {0x1D4A0, 0x1D4A1}, {0x1D4A3, 0x1D4A4}, {0x1D4A7, 0x1D4A8}, - {0x1D4AD, 0x1D4AD}, {0x1D4BA, 0x1D4BA}, {0x1D4BC, 0x1D4BC}, - {0x1D4C4, 0x1D4C4}, {0x1D506, 0x1D506}, {0x1D50B, 0x1D50C}, - {0x1D515, 0x1D515}, {0x1D51D, 0x1D51D}, {0x1D53A, 0x1D53A}, - {0x1D53F, 0x1D53F}, {0x1D545, 0x1D545}, {0x1D547, 0x1D549}, - {0x1D551, 0x1D551}, {0x1D6A6, 0x1D6A7}, {0x1D7CC, 0x1D7CD}, - {0x1DA8C, 0x1DA9A}, {0x1DAA0, 0x1DAA0}, {0x1DAB0, 0x1DFFF}, - {0x1E007, 0x1E007}, {0x1E019, 0x1E01A}, {0x1E022, 0x1E022}, - {0x1E025, 0x1E025}, {0x1E02B, 
0x1E7FF}, {0x1E8C5, 0x1E8C6}, - {0x1E8D7, 0x1E8FF}, {0x1E94B, 0x1E94F}, {0x1E95A, 0x1E95D}, - {0x1E960, 0x1EDFF}, {0x1EE04, 0x1EE04}, {0x1EE20, 0x1EE20}, - {0x1EE23, 0x1EE23}, {0x1EE25, 0x1EE26}, {0x1EE28, 0x1EE28}, - {0x1EE33, 0x1EE33}, {0x1EE38, 0x1EE38}, {0x1EE3A, 0x1EE3A}, - {0x1EE3C, 0x1EE41}, {0x1EE43, 0x1EE46}, {0x1EE48, 0x1EE48}, - {0x1EE4A, 0x1EE4A}, {0x1EE4C, 0x1EE4C}, {0x1EE50, 0x1EE50}, - {0x1EE53, 0x1EE53}, {0x1EE55, 0x1EE56}, {0x1EE58, 0x1EE58}, - {0x1EE5A, 0x1EE5A}, {0x1EE5C, 0x1EE5C}, {0x1EE5E, 0x1EE5E}, - {0x1EE60, 0x1EE60}, {0x1EE63, 0x1EE63}, {0x1EE65, 0x1EE66}, - {0x1EE6B, 0x1EE6B}, {0x1EE73, 0x1EE73}, {0x1EE78, 0x1EE78}, - {0x1EE7D, 0x1EE7D}, {0x1EE7F, 0x1EE7F}, {0x1EE8A, 0x1EE8A}, - {0x1EE9C, 0x1EEA0}, {0x1EEA4, 0x1EEA4}, {0x1EEAA, 0x1EEAA}, - {0x1EEBC, 0x1EEEF}, {0x1EEF2, 0x1EFFF}, {0x1F02C, 0x1F02F}, - {0x1F094, 0x1F09F}, {0x1F0AF, 0x1F0B0}, {0x1F0C0, 0x1F0C0}, - {0x1F0D0, 0x1F0D0}, {0x1F0F6, 0x1F0FF}, {0x1F10D, 0x1F10F}, - {0x1F12F, 0x1F12F}, {0x1F16C, 0x1F16F}, {0x1F1AD, 0x1F1E5}, - {0x1F203, 0x1F20F}, {0x1F23C, 0x1F23F}, {0x1F249, 0x1F24F}, - {0x1F252, 0x1F2FF}, {0x1F6D3, 0x1F6DF}, {0x1F6ED, 0x1F6EF}, - {0x1F6F7, 0x1F6FF}, {0x1F774, 0x1F77F}, {0x1F7D5, 0x1F7FF}, - {0x1F80C, 0x1F80F}, {0x1F848, 0x1F84F}, {0x1F85A, 0x1F85F}, - {0x1F888, 0x1F88F}, {0x1F8AE, 0x1F90F}, {0x1F91F, 0x1F91F}, - {0x1F928, 0x1F92F}, {0x1F931, 0x1F932}, {0x1F93F, 0x1F93F}, - {0x1F94C, 0x1F94F}, {0x1F95F, 0x1F97F}, {0x1F992, 0x1F9BF}, - {0x1F9C1, 0x1FFFF}, {0x2A6D7, 0x2A6FF}, {0x2B735, 0x2B73F}, - {0x2B81E, 0x2B81F}, {0x2CEA2, 0x2F7FF}, {0x2FA1E, 0xE0000}, - {0xE0002, 0xE001F}, {0xE0080, 0xE00FF}, {0xE01F0, 0xEFFFF}, - {0xFFFFE, 0xFFFFF}, -} - -var neutral = table{ - {0x0000, 0x001F}, {0x007F, 0x007F}, {0x0080, 0x009F}, - {0x00A0, 0x00A0}, {0x00A9, 0x00A9}, {0x00AB, 0x00AB}, - {0x00B5, 0x00B5}, {0x00BB, 0x00BB}, {0x00C0, 0x00C5}, - {0x00C7, 0x00CF}, {0x00D1, 0x00D6}, {0x00D9, 0x00DD}, - {0x00E2, 0x00E5}, {0x00E7, 0x00E7}, {0x00EB, 0x00EB}, - {0x00EE, 0x00EF}, {0x00F1, 0x00F1}, {0x00F4, 0x00F6}, - {0x00FB, 0x00FB}, {0x00FD, 0x00FD}, {0x00FF, 0x00FF}, - {0x0100, 0x0100}, {0x0102, 0x0110}, {0x0112, 0x0112}, - {0x0114, 0x011A}, {0x011C, 0x0125}, {0x0128, 0x012A}, - {0x012C, 0x0130}, {0x0134, 0x0137}, {0x0139, 0x013E}, - {0x0143, 0x0143}, {0x0145, 0x0147}, {0x014C, 0x014C}, - {0x014E, 0x0151}, {0x0154, 0x0165}, {0x0168, 0x016A}, - {0x016C, 0x017F}, {0x0180, 0x01BA}, {0x01BB, 0x01BB}, - {0x01BC, 0x01BF}, {0x01C0, 0x01C3}, {0x01C4, 0x01CD}, - {0x01CF, 0x01CF}, {0x01D1, 0x01D1}, {0x01D3, 0x01D3}, - {0x01D5, 0x01D5}, {0x01D7, 0x01D7}, {0x01D9, 0x01D9}, - {0x01DB, 0x01DB}, {0x01DD, 0x024F}, {0x0250, 0x0250}, - {0x0252, 0x0260}, {0x0262, 0x0293}, {0x0294, 0x0294}, - {0x0295, 0x02AF}, {0x02B0, 0x02C1}, {0x02C2, 0x02C3}, - {0x02C5, 0x02C5}, {0x02C6, 0x02C6}, {0x02C8, 0x02C8}, - {0x02CC, 0x02CC}, {0x02CE, 0x02CF}, {0x02D1, 0x02D1}, - {0x02D2, 0x02D7}, {0x02DC, 0x02DC}, {0x02DE, 0x02DE}, - {0x02E0, 0x02E4}, {0x02E5, 0x02EB}, {0x02EC, 0x02EC}, - {0x02ED, 0x02ED}, {0x02EE, 0x02EE}, {0x02EF, 0x02FF}, - {0x0370, 0x0373}, {0x0374, 0x0374}, {0x0375, 0x0375}, - {0x0376, 0x0377}, {0x037A, 0x037A}, {0x037B, 0x037D}, - {0x037E, 0x037E}, {0x037F, 0x037F}, {0x0384, 0x0385}, - {0x0386, 0x0386}, {0x0387, 0x0387}, {0x0388, 0x038A}, - {0x038C, 0x038C}, {0x038E, 0x0390}, {0x03AA, 0x03B0}, - {0x03C2, 0x03C2}, {0x03CA, 0x03F5}, {0x03F6, 0x03F6}, - {0x03F7, 0x03FF}, {0x0400, 0x0400}, {0x0402, 0x040F}, - {0x0450, 0x0450}, {0x0452, 0x0481}, {0x0482, 0x0482}, - {0x0483, 0x0487}, {0x0488, 0x0489}, {0x048A, 0x04FF}, - {0x0500, 
0x052F}, {0x0531, 0x0556}, {0x0559, 0x0559}, - {0x055A, 0x055F}, {0x0561, 0x0587}, {0x0589, 0x0589}, - {0x058A, 0x058A}, {0x058D, 0x058E}, {0x058F, 0x058F}, - {0x0591, 0x05BD}, {0x05BE, 0x05BE}, {0x05BF, 0x05BF}, - {0x05C0, 0x05C0}, {0x05C1, 0x05C2}, {0x05C3, 0x05C3}, - {0x05C4, 0x05C5}, {0x05C6, 0x05C6}, {0x05C7, 0x05C7}, - {0x05D0, 0x05EA}, {0x05F0, 0x05F2}, {0x05F3, 0x05F4}, - {0x0600, 0x0605}, {0x0606, 0x0608}, {0x0609, 0x060A}, - {0x060B, 0x060B}, {0x060C, 0x060D}, {0x060E, 0x060F}, - {0x0610, 0x061A}, {0x061B, 0x061B}, {0x061C, 0x061C}, - {0x061E, 0x061F}, {0x0620, 0x063F}, {0x0640, 0x0640}, - {0x0641, 0x064A}, {0x064B, 0x065F}, {0x0660, 0x0669}, - {0x066A, 0x066D}, {0x066E, 0x066F}, {0x0670, 0x0670}, - {0x0671, 0x06D3}, {0x06D4, 0x06D4}, {0x06D5, 0x06D5}, - {0x06D6, 0x06DC}, {0x06DD, 0x06DD}, {0x06DE, 0x06DE}, - {0x06DF, 0x06E4}, {0x06E5, 0x06E6}, {0x06E7, 0x06E8}, - {0x06E9, 0x06E9}, {0x06EA, 0x06ED}, {0x06EE, 0x06EF}, - {0x06F0, 0x06F9}, {0x06FA, 0x06FC}, {0x06FD, 0x06FE}, - {0x06FF, 0x06FF}, {0x0700, 0x070D}, {0x070F, 0x070F}, - {0x0710, 0x0710}, {0x0711, 0x0711}, {0x0712, 0x072F}, - {0x0730, 0x074A}, {0x074D, 0x074F}, {0x0750, 0x077F}, - {0x0780, 0x07A5}, {0x07A6, 0x07B0}, {0x07B1, 0x07B1}, - {0x07C0, 0x07C9}, {0x07CA, 0x07EA}, {0x07EB, 0x07F3}, - {0x07F4, 0x07F5}, {0x07F6, 0x07F6}, {0x07F7, 0x07F9}, - {0x07FA, 0x07FA}, {0x0800, 0x0815}, {0x0816, 0x0819}, - {0x081A, 0x081A}, {0x081B, 0x0823}, {0x0824, 0x0824}, - {0x0825, 0x0827}, {0x0828, 0x0828}, {0x0829, 0x082D}, - {0x0830, 0x083E}, {0x0840, 0x0858}, {0x0859, 0x085B}, - {0x085E, 0x085E}, {0x08A0, 0x08B4}, {0x08B6, 0x08BD}, - {0x08D4, 0x08E1}, {0x08E2, 0x08E2}, {0x08E3, 0x08FF}, - {0x0900, 0x0902}, {0x0903, 0x0903}, {0x0904, 0x0939}, - {0x093A, 0x093A}, {0x093B, 0x093B}, {0x093C, 0x093C}, - {0x093D, 0x093D}, {0x093E, 0x0940}, {0x0941, 0x0948}, - {0x0949, 0x094C}, {0x094D, 0x094D}, {0x094E, 0x094F}, - {0x0950, 0x0950}, {0x0951, 0x0957}, {0x0958, 0x0961}, - {0x0962, 0x0963}, {0x0964, 0x0965}, {0x0966, 0x096F}, - {0x0970, 0x0970}, {0x0971, 0x0971}, {0x0972, 0x097F}, - {0x0980, 0x0980}, {0x0981, 0x0981}, {0x0982, 0x0983}, - {0x0985, 0x098C}, {0x098F, 0x0990}, {0x0993, 0x09A8}, - {0x09AA, 0x09B0}, {0x09B2, 0x09B2}, {0x09B6, 0x09B9}, - {0x09BC, 0x09BC}, {0x09BD, 0x09BD}, {0x09BE, 0x09C0}, - {0x09C1, 0x09C4}, {0x09C7, 0x09C8}, {0x09CB, 0x09CC}, - {0x09CD, 0x09CD}, {0x09CE, 0x09CE}, {0x09D7, 0x09D7}, - {0x09DC, 0x09DD}, {0x09DF, 0x09E1}, {0x09E2, 0x09E3}, - {0x09E6, 0x09EF}, {0x09F0, 0x09F1}, {0x09F2, 0x09F3}, - {0x09F4, 0x09F9}, {0x09FA, 0x09FA}, {0x09FB, 0x09FB}, - {0x0A01, 0x0A02}, {0x0A03, 0x0A03}, {0x0A05, 0x0A0A}, - {0x0A0F, 0x0A10}, {0x0A13, 0x0A28}, {0x0A2A, 0x0A30}, - {0x0A32, 0x0A33}, {0x0A35, 0x0A36}, {0x0A38, 0x0A39}, - {0x0A3C, 0x0A3C}, {0x0A3E, 0x0A40}, {0x0A41, 0x0A42}, - {0x0A47, 0x0A48}, {0x0A4B, 0x0A4D}, {0x0A51, 0x0A51}, - {0x0A59, 0x0A5C}, {0x0A5E, 0x0A5E}, {0x0A66, 0x0A6F}, - {0x0A70, 0x0A71}, {0x0A72, 0x0A74}, {0x0A75, 0x0A75}, - {0x0A81, 0x0A82}, {0x0A83, 0x0A83}, {0x0A85, 0x0A8D}, - {0x0A8F, 0x0A91}, {0x0A93, 0x0AA8}, {0x0AAA, 0x0AB0}, - {0x0AB2, 0x0AB3}, {0x0AB5, 0x0AB9}, {0x0ABC, 0x0ABC}, - {0x0ABD, 0x0ABD}, {0x0ABE, 0x0AC0}, {0x0AC1, 0x0AC5}, - {0x0AC7, 0x0AC8}, {0x0AC9, 0x0AC9}, {0x0ACB, 0x0ACC}, - {0x0ACD, 0x0ACD}, {0x0AD0, 0x0AD0}, {0x0AE0, 0x0AE1}, - {0x0AE2, 0x0AE3}, {0x0AE6, 0x0AEF}, {0x0AF0, 0x0AF0}, - {0x0AF1, 0x0AF1}, {0x0AF9, 0x0AF9}, {0x0B01, 0x0B01}, - {0x0B02, 0x0B03}, {0x0B05, 0x0B0C}, {0x0B0F, 0x0B10}, - {0x0B13, 0x0B28}, {0x0B2A, 0x0B30}, {0x0B32, 0x0B33}, - {0x0B35, 0x0B39}, {0x0B3C, 0x0B3C}, 
{0x0B3D, 0x0B3D}, - {0x0B3E, 0x0B3E}, {0x0B3F, 0x0B3F}, {0x0B40, 0x0B40}, - {0x0B41, 0x0B44}, {0x0B47, 0x0B48}, {0x0B4B, 0x0B4C}, - {0x0B4D, 0x0B4D}, {0x0B56, 0x0B56}, {0x0B57, 0x0B57}, - {0x0B5C, 0x0B5D}, {0x0B5F, 0x0B61}, {0x0B62, 0x0B63}, - {0x0B66, 0x0B6F}, {0x0B70, 0x0B70}, {0x0B71, 0x0B71}, - {0x0B72, 0x0B77}, {0x0B82, 0x0B82}, {0x0B83, 0x0B83}, - {0x0B85, 0x0B8A}, {0x0B8E, 0x0B90}, {0x0B92, 0x0B95}, - {0x0B99, 0x0B9A}, {0x0B9C, 0x0B9C}, {0x0B9E, 0x0B9F}, - {0x0BA3, 0x0BA4}, {0x0BA8, 0x0BAA}, {0x0BAE, 0x0BB9}, - {0x0BBE, 0x0BBF}, {0x0BC0, 0x0BC0}, {0x0BC1, 0x0BC2}, - {0x0BC6, 0x0BC8}, {0x0BCA, 0x0BCC}, {0x0BCD, 0x0BCD}, - {0x0BD0, 0x0BD0}, {0x0BD7, 0x0BD7}, {0x0BE6, 0x0BEF}, - {0x0BF0, 0x0BF2}, {0x0BF3, 0x0BF8}, {0x0BF9, 0x0BF9}, - {0x0BFA, 0x0BFA}, {0x0C00, 0x0C00}, {0x0C01, 0x0C03}, - {0x0C05, 0x0C0C}, {0x0C0E, 0x0C10}, {0x0C12, 0x0C28}, - {0x0C2A, 0x0C39}, {0x0C3D, 0x0C3D}, {0x0C3E, 0x0C40}, - {0x0C41, 0x0C44}, {0x0C46, 0x0C48}, {0x0C4A, 0x0C4D}, - {0x0C55, 0x0C56}, {0x0C58, 0x0C5A}, {0x0C60, 0x0C61}, - {0x0C62, 0x0C63}, {0x0C66, 0x0C6F}, {0x0C78, 0x0C7E}, - {0x0C7F, 0x0C7F}, {0x0C80, 0x0C80}, {0x0C81, 0x0C81}, - {0x0C82, 0x0C83}, {0x0C85, 0x0C8C}, {0x0C8E, 0x0C90}, - {0x0C92, 0x0CA8}, {0x0CAA, 0x0CB3}, {0x0CB5, 0x0CB9}, - {0x0CBC, 0x0CBC}, {0x0CBD, 0x0CBD}, {0x0CBE, 0x0CBE}, - {0x0CBF, 0x0CBF}, {0x0CC0, 0x0CC4}, {0x0CC6, 0x0CC6}, - {0x0CC7, 0x0CC8}, {0x0CCA, 0x0CCB}, {0x0CCC, 0x0CCD}, - {0x0CD5, 0x0CD6}, {0x0CDE, 0x0CDE}, {0x0CE0, 0x0CE1}, - {0x0CE2, 0x0CE3}, {0x0CE6, 0x0CEF}, {0x0CF1, 0x0CF2}, - {0x0D01, 0x0D01}, {0x0D02, 0x0D03}, {0x0D05, 0x0D0C}, - {0x0D0E, 0x0D10}, {0x0D12, 0x0D3A}, {0x0D3D, 0x0D3D}, - {0x0D3E, 0x0D40}, {0x0D41, 0x0D44}, {0x0D46, 0x0D48}, - {0x0D4A, 0x0D4C}, {0x0D4D, 0x0D4D}, {0x0D4E, 0x0D4E}, - {0x0D4F, 0x0D4F}, {0x0D54, 0x0D56}, {0x0D57, 0x0D57}, - {0x0D58, 0x0D5E}, {0x0D5F, 0x0D61}, {0x0D62, 0x0D63}, - {0x0D66, 0x0D6F}, {0x0D70, 0x0D78}, {0x0D79, 0x0D79}, - {0x0D7A, 0x0D7F}, {0x0D82, 0x0D83}, {0x0D85, 0x0D96}, - {0x0D9A, 0x0DB1}, {0x0DB3, 0x0DBB}, {0x0DBD, 0x0DBD}, - {0x0DC0, 0x0DC6}, {0x0DCA, 0x0DCA}, {0x0DCF, 0x0DD1}, - {0x0DD2, 0x0DD4}, {0x0DD6, 0x0DD6}, {0x0DD8, 0x0DDF}, - {0x0DE6, 0x0DEF}, {0x0DF2, 0x0DF3}, {0x0DF4, 0x0DF4}, - {0x0E01, 0x0E30}, {0x0E31, 0x0E31}, {0x0E32, 0x0E33}, - {0x0E34, 0x0E3A}, {0x0E3F, 0x0E3F}, {0x0E40, 0x0E45}, - {0x0E46, 0x0E46}, {0x0E47, 0x0E4E}, {0x0E4F, 0x0E4F}, - {0x0E50, 0x0E59}, {0x0E5A, 0x0E5B}, {0x0E81, 0x0E82}, - {0x0E84, 0x0E84}, {0x0E87, 0x0E88}, {0x0E8A, 0x0E8A}, - {0x0E8D, 0x0E8D}, {0x0E94, 0x0E97}, {0x0E99, 0x0E9F}, - {0x0EA1, 0x0EA3}, {0x0EA5, 0x0EA5}, {0x0EA7, 0x0EA7}, - {0x0EAA, 0x0EAB}, {0x0EAD, 0x0EB0}, {0x0EB1, 0x0EB1}, - {0x0EB2, 0x0EB3}, {0x0EB4, 0x0EB9}, {0x0EBB, 0x0EBC}, - {0x0EBD, 0x0EBD}, {0x0EC0, 0x0EC4}, {0x0EC6, 0x0EC6}, - {0x0EC8, 0x0ECD}, {0x0ED0, 0x0ED9}, {0x0EDC, 0x0EDF}, - {0x0F00, 0x0F00}, {0x0F01, 0x0F03}, {0x0F04, 0x0F12}, - {0x0F13, 0x0F13}, {0x0F14, 0x0F14}, {0x0F15, 0x0F17}, - {0x0F18, 0x0F19}, {0x0F1A, 0x0F1F}, {0x0F20, 0x0F29}, - {0x0F2A, 0x0F33}, {0x0F34, 0x0F34}, {0x0F35, 0x0F35}, - {0x0F36, 0x0F36}, {0x0F37, 0x0F37}, {0x0F38, 0x0F38}, - {0x0F39, 0x0F39}, {0x0F3A, 0x0F3A}, {0x0F3B, 0x0F3B}, - {0x0F3C, 0x0F3C}, {0x0F3D, 0x0F3D}, {0x0F3E, 0x0F3F}, - {0x0F40, 0x0F47}, {0x0F49, 0x0F6C}, {0x0F71, 0x0F7E}, - {0x0F7F, 0x0F7F}, {0x0F80, 0x0F84}, {0x0F85, 0x0F85}, - {0x0F86, 0x0F87}, {0x0F88, 0x0F8C}, {0x0F8D, 0x0F97}, - {0x0F99, 0x0FBC}, {0x0FBE, 0x0FC5}, {0x0FC6, 0x0FC6}, - {0x0FC7, 0x0FCC}, {0x0FCE, 0x0FCF}, {0x0FD0, 0x0FD4}, - {0x0FD5, 0x0FD8}, {0x0FD9, 0x0FDA}, {0x1000, 0x102A}, - 
{0x102B, 0x102C}, {0x102D, 0x1030}, {0x1031, 0x1031}, - {0x1032, 0x1037}, {0x1038, 0x1038}, {0x1039, 0x103A}, - {0x103B, 0x103C}, {0x103D, 0x103E}, {0x103F, 0x103F}, - {0x1040, 0x1049}, {0x104A, 0x104F}, {0x1050, 0x1055}, - {0x1056, 0x1057}, {0x1058, 0x1059}, {0x105A, 0x105D}, - {0x105E, 0x1060}, {0x1061, 0x1061}, {0x1062, 0x1064}, - {0x1065, 0x1066}, {0x1067, 0x106D}, {0x106E, 0x1070}, - {0x1071, 0x1074}, {0x1075, 0x1081}, {0x1082, 0x1082}, - {0x1083, 0x1084}, {0x1085, 0x1086}, {0x1087, 0x108C}, - {0x108D, 0x108D}, {0x108E, 0x108E}, {0x108F, 0x108F}, - {0x1090, 0x1099}, {0x109A, 0x109C}, {0x109D, 0x109D}, - {0x109E, 0x109F}, {0x10A0, 0x10C5}, {0x10C7, 0x10C7}, - {0x10CD, 0x10CD}, {0x10D0, 0x10FA}, {0x10FB, 0x10FB}, - {0x10FC, 0x10FC}, {0x10FD, 0x10FF}, {0x1160, 0x11FF}, - {0x1200, 0x1248}, {0x124A, 0x124D}, {0x1250, 0x1256}, - {0x1258, 0x1258}, {0x125A, 0x125D}, {0x1260, 0x1288}, - {0x128A, 0x128D}, {0x1290, 0x12B0}, {0x12B2, 0x12B5}, - {0x12B8, 0x12BE}, {0x12C0, 0x12C0}, {0x12C2, 0x12C5}, - {0x12C8, 0x12D6}, {0x12D8, 0x1310}, {0x1312, 0x1315}, - {0x1318, 0x135A}, {0x135D, 0x135F}, {0x1360, 0x1368}, - {0x1369, 0x137C}, {0x1380, 0x138F}, {0x1390, 0x1399}, - {0x13A0, 0x13F5}, {0x13F8, 0x13FD}, {0x1400, 0x1400}, - {0x1401, 0x166C}, {0x166D, 0x166E}, {0x166F, 0x167F}, - {0x1680, 0x1680}, {0x1681, 0x169A}, {0x169B, 0x169B}, - {0x169C, 0x169C}, {0x16A0, 0x16EA}, {0x16EB, 0x16ED}, - {0x16EE, 0x16F0}, {0x16F1, 0x16F8}, {0x1700, 0x170C}, - {0x170E, 0x1711}, {0x1712, 0x1714}, {0x1720, 0x1731}, - {0x1732, 0x1734}, {0x1735, 0x1736}, {0x1740, 0x1751}, - {0x1752, 0x1753}, {0x1760, 0x176C}, {0x176E, 0x1770}, - {0x1772, 0x1773}, {0x1780, 0x17B3}, {0x17B4, 0x17B5}, - {0x17B6, 0x17B6}, {0x17B7, 0x17BD}, {0x17BE, 0x17C5}, - {0x17C6, 0x17C6}, {0x17C7, 0x17C8}, {0x17C9, 0x17D3}, - {0x17D4, 0x17D6}, {0x17D7, 0x17D7}, {0x17D8, 0x17DA}, - {0x17DB, 0x17DB}, {0x17DC, 0x17DC}, {0x17DD, 0x17DD}, - {0x17E0, 0x17E9}, {0x17F0, 0x17F9}, {0x1800, 0x1805}, - {0x1806, 0x1806}, {0x1807, 0x180A}, {0x180B, 0x180D}, - {0x180E, 0x180E}, {0x1810, 0x1819}, {0x1820, 0x1842}, - {0x1843, 0x1843}, {0x1844, 0x1877}, {0x1880, 0x1884}, - {0x1885, 0x1886}, {0x1887, 0x18A8}, {0x18A9, 0x18A9}, - {0x18AA, 0x18AA}, {0x18B0, 0x18F5}, {0x1900, 0x191E}, - {0x1920, 0x1922}, {0x1923, 0x1926}, {0x1927, 0x1928}, - {0x1929, 0x192B}, {0x1930, 0x1931}, {0x1932, 0x1932}, - {0x1933, 0x1938}, {0x1939, 0x193B}, {0x1940, 0x1940}, - {0x1944, 0x1945}, {0x1946, 0x194F}, {0x1950, 0x196D}, - {0x1970, 0x1974}, {0x1980, 0x19AB}, {0x19B0, 0x19C9}, - {0x19D0, 0x19D9}, {0x19DA, 0x19DA}, {0x19DE, 0x19DF}, - {0x19E0, 0x19FF}, {0x1A00, 0x1A16}, {0x1A17, 0x1A18}, - {0x1A19, 0x1A1A}, {0x1A1B, 0x1A1B}, {0x1A1E, 0x1A1F}, - {0x1A20, 0x1A54}, {0x1A55, 0x1A55}, {0x1A56, 0x1A56}, - {0x1A57, 0x1A57}, {0x1A58, 0x1A5E}, {0x1A60, 0x1A60}, - {0x1A61, 0x1A61}, {0x1A62, 0x1A62}, {0x1A63, 0x1A64}, - {0x1A65, 0x1A6C}, {0x1A6D, 0x1A72}, {0x1A73, 0x1A7C}, - {0x1A7F, 0x1A7F}, {0x1A80, 0x1A89}, {0x1A90, 0x1A99}, - {0x1AA0, 0x1AA6}, {0x1AA7, 0x1AA7}, {0x1AA8, 0x1AAD}, - {0x1AB0, 0x1ABD}, {0x1ABE, 0x1ABE}, {0x1B00, 0x1B03}, - {0x1B04, 0x1B04}, {0x1B05, 0x1B33}, {0x1B34, 0x1B34}, - {0x1B35, 0x1B35}, {0x1B36, 0x1B3A}, {0x1B3B, 0x1B3B}, - {0x1B3C, 0x1B3C}, {0x1B3D, 0x1B41}, {0x1B42, 0x1B42}, - {0x1B43, 0x1B44}, {0x1B45, 0x1B4B}, {0x1B50, 0x1B59}, - {0x1B5A, 0x1B60}, {0x1B61, 0x1B6A}, {0x1B6B, 0x1B73}, - {0x1B74, 0x1B7C}, {0x1B80, 0x1B81}, {0x1B82, 0x1B82}, - {0x1B83, 0x1BA0}, {0x1BA1, 0x1BA1}, {0x1BA2, 0x1BA5}, - {0x1BA6, 0x1BA7}, {0x1BA8, 0x1BA9}, {0x1BAA, 0x1BAA}, - {0x1BAB, 0x1BAD}, {0x1BAE, 
0x1BAF}, {0x1BB0, 0x1BB9}, - {0x1BBA, 0x1BBF}, {0x1BC0, 0x1BE5}, {0x1BE6, 0x1BE6}, - {0x1BE7, 0x1BE7}, {0x1BE8, 0x1BE9}, {0x1BEA, 0x1BEC}, - {0x1BED, 0x1BED}, {0x1BEE, 0x1BEE}, {0x1BEF, 0x1BF1}, - {0x1BF2, 0x1BF3}, {0x1BFC, 0x1BFF}, {0x1C00, 0x1C23}, - {0x1C24, 0x1C2B}, {0x1C2C, 0x1C33}, {0x1C34, 0x1C35}, - {0x1C36, 0x1C37}, {0x1C3B, 0x1C3F}, {0x1C40, 0x1C49}, - {0x1C4D, 0x1C4F}, {0x1C50, 0x1C59}, {0x1C5A, 0x1C77}, - {0x1C78, 0x1C7D}, {0x1C7E, 0x1C7F}, {0x1C80, 0x1C88}, - {0x1CC0, 0x1CC7}, {0x1CD0, 0x1CD2}, {0x1CD3, 0x1CD3}, - {0x1CD4, 0x1CE0}, {0x1CE1, 0x1CE1}, {0x1CE2, 0x1CE8}, - {0x1CE9, 0x1CEC}, {0x1CED, 0x1CED}, {0x1CEE, 0x1CF1}, - {0x1CF2, 0x1CF3}, {0x1CF4, 0x1CF4}, {0x1CF5, 0x1CF6}, - {0x1CF8, 0x1CF9}, {0x1D00, 0x1D2B}, {0x1D2C, 0x1D6A}, - {0x1D6B, 0x1D77}, {0x1D78, 0x1D78}, {0x1D79, 0x1D7F}, - {0x1D80, 0x1D9A}, {0x1D9B, 0x1DBF}, {0x1DC0, 0x1DF5}, - {0x1DFB, 0x1DFF}, {0x1E00, 0x1EFF}, {0x1F00, 0x1F15}, - {0x1F18, 0x1F1D}, {0x1F20, 0x1F45}, {0x1F48, 0x1F4D}, - {0x1F50, 0x1F57}, {0x1F59, 0x1F59}, {0x1F5B, 0x1F5B}, - {0x1F5D, 0x1F5D}, {0x1F5F, 0x1F7D}, {0x1F80, 0x1FB4}, - {0x1FB6, 0x1FBC}, {0x1FBD, 0x1FBD}, {0x1FBE, 0x1FBE}, - {0x1FBF, 0x1FC1}, {0x1FC2, 0x1FC4}, {0x1FC6, 0x1FCC}, - {0x1FCD, 0x1FCF}, {0x1FD0, 0x1FD3}, {0x1FD6, 0x1FDB}, - {0x1FDD, 0x1FDF}, {0x1FE0, 0x1FEC}, {0x1FED, 0x1FEF}, - {0x1FF2, 0x1FF4}, {0x1FF6, 0x1FFC}, {0x1FFD, 0x1FFE}, - {0x2000, 0x200A}, {0x200B, 0x200F}, {0x2011, 0x2012}, - {0x2017, 0x2017}, {0x201A, 0x201A}, {0x201B, 0x201B}, - {0x201E, 0x201E}, {0x201F, 0x201F}, {0x2023, 0x2023}, - {0x2028, 0x2028}, {0x2029, 0x2029}, {0x202A, 0x202E}, - {0x202F, 0x202F}, {0x2031, 0x2031}, {0x2034, 0x2034}, - {0x2036, 0x2038}, {0x2039, 0x2039}, {0x203A, 0x203A}, - {0x203C, 0x203D}, {0x203F, 0x2040}, {0x2041, 0x2043}, - {0x2044, 0x2044}, {0x2045, 0x2045}, {0x2046, 0x2046}, - {0x2047, 0x2051}, {0x2052, 0x2052}, {0x2053, 0x2053}, - {0x2054, 0x2054}, {0x2055, 0x205E}, {0x205F, 0x205F}, - {0x2060, 0x2064}, {0x2066, 0x206F}, {0x2070, 0x2070}, - {0x2071, 0x2071}, {0x2075, 0x2079}, {0x207A, 0x207C}, - {0x207D, 0x207D}, {0x207E, 0x207E}, {0x2080, 0x2080}, - {0x2085, 0x2089}, {0x208A, 0x208C}, {0x208D, 0x208D}, - {0x208E, 0x208E}, {0x2090, 0x209C}, {0x20A0, 0x20A8}, - {0x20AA, 0x20AB}, {0x20AD, 0x20BE}, {0x20D0, 0x20DC}, - {0x20DD, 0x20E0}, {0x20E1, 0x20E1}, {0x20E2, 0x20E4}, - {0x20E5, 0x20F0}, {0x2100, 0x2101}, {0x2102, 0x2102}, - {0x2104, 0x2104}, {0x2106, 0x2106}, {0x2107, 0x2107}, - {0x2108, 0x2108}, {0x210A, 0x2112}, {0x2114, 0x2114}, - {0x2115, 0x2115}, {0x2117, 0x2117}, {0x2118, 0x2118}, - {0x2119, 0x211D}, {0x211E, 0x2120}, {0x2123, 0x2123}, - {0x2124, 0x2124}, {0x2125, 0x2125}, {0x2127, 0x2127}, - {0x2128, 0x2128}, {0x2129, 0x2129}, {0x212A, 0x212A}, - {0x212C, 0x212D}, {0x212E, 0x212E}, {0x212F, 0x2134}, - {0x2135, 0x2138}, {0x2139, 0x2139}, {0x213A, 0x213B}, - {0x213C, 0x213F}, {0x2140, 0x2144}, {0x2145, 0x2149}, - {0x214A, 0x214A}, {0x214B, 0x214B}, {0x214C, 0x214D}, - {0x214E, 0x214E}, {0x214F, 0x214F}, {0x2150, 0x2152}, - {0x2155, 0x215A}, {0x215F, 0x215F}, {0x216C, 0x216F}, - {0x217A, 0x2182}, {0x2183, 0x2184}, {0x2185, 0x2188}, - {0x218A, 0x218B}, {0x219A, 0x219B}, {0x219C, 0x219F}, - {0x21A0, 0x21A0}, {0x21A1, 0x21A2}, {0x21A3, 0x21A3}, - {0x21A4, 0x21A5}, {0x21A6, 0x21A6}, {0x21A7, 0x21AD}, - {0x21AE, 0x21AE}, {0x21AF, 0x21B7}, {0x21BA, 0x21CD}, - {0x21CE, 0x21CF}, {0x21D0, 0x21D1}, {0x21D3, 0x21D3}, - {0x21D5, 0x21E6}, {0x21E8, 0x21F3}, {0x21F4, 0x21FF}, - {0x2201, 0x2201}, {0x2204, 0x2206}, {0x2209, 0x220A}, - {0x220C, 0x220E}, {0x2210, 0x2210}, {0x2212, 0x2214}, 
- {0x2216, 0x2219}, {0x221B, 0x221C}, {0x2221, 0x2222}, - {0x2224, 0x2224}, {0x2226, 0x2226}, {0x222D, 0x222D}, - {0x222F, 0x2233}, {0x2238, 0x223B}, {0x223E, 0x2247}, - {0x2249, 0x224B}, {0x224D, 0x2251}, {0x2253, 0x225F}, - {0x2262, 0x2263}, {0x2268, 0x2269}, {0x226C, 0x226D}, - {0x2270, 0x2281}, {0x2284, 0x2285}, {0x2288, 0x2294}, - {0x2296, 0x2298}, {0x229A, 0x22A4}, {0x22A6, 0x22BE}, - {0x22C0, 0x22FF}, {0x2300, 0x2307}, {0x2308, 0x2308}, - {0x2309, 0x2309}, {0x230A, 0x230A}, {0x230B, 0x230B}, - {0x230C, 0x2311}, {0x2313, 0x2319}, {0x231C, 0x231F}, - {0x2320, 0x2321}, {0x2322, 0x2328}, {0x232B, 0x237B}, - {0x237C, 0x237C}, {0x237D, 0x239A}, {0x239B, 0x23B3}, - {0x23B4, 0x23DB}, {0x23DC, 0x23E1}, {0x23E2, 0x23E8}, - {0x23ED, 0x23EF}, {0x23F1, 0x23F2}, {0x23F4, 0x23FE}, - {0x2400, 0x2426}, {0x2440, 0x244A}, {0x24EA, 0x24EA}, - {0x254C, 0x254F}, {0x2574, 0x257F}, {0x2590, 0x2591}, - {0x2596, 0x259F}, {0x25A2, 0x25A2}, {0x25AA, 0x25B1}, - {0x25B4, 0x25B5}, {0x25B8, 0x25BB}, {0x25BE, 0x25BF}, - {0x25C2, 0x25C5}, {0x25C9, 0x25CA}, {0x25CC, 0x25CD}, - {0x25D2, 0x25E1}, {0x25E6, 0x25EE}, {0x25F0, 0x25F7}, - {0x25F8, 0x25FC}, {0x25FF, 0x25FF}, {0x2600, 0x2604}, - {0x2607, 0x2608}, {0x260A, 0x260D}, {0x2610, 0x2613}, - {0x2616, 0x261B}, {0x261D, 0x261D}, {0x261F, 0x263F}, - {0x2641, 0x2641}, {0x2643, 0x2647}, {0x2654, 0x265F}, - {0x2662, 0x2662}, {0x2666, 0x2666}, {0x266B, 0x266B}, - {0x266E, 0x266E}, {0x2670, 0x267E}, {0x2680, 0x2692}, - {0x2694, 0x269D}, {0x26A0, 0x26A0}, {0x26A2, 0x26A9}, - {0x26AC, 0x26BC}, {0x26C0, 0x26C3}, {0x26E2, 0x26E2}, - {0x26E4, 0x26E7}, {0x2700, 0x2704}, {0x2706, 0x2709}, - {0x270C, 0x2727}, {0x2729, 0x273C}, {0x273E, 0x274B}, - {0x274D, 0x274D}, {0x274F, 0x2752}, {0x2756, 0x2756}, - {0x2758, 0x2767}, {0x2768, 0x2768}, {0x2769, 0x2769}, - {0x276A, 0x276A}, {0x276B, 0x276B}, {0x276C, 0x276C}, - {0x276D, 0x276D}, {0x276E, 0x276E}, {0x276F, 0x276F}, - {0x2770, 0x2770}, {0x2771, 0x2771}, {0x2772, 0x2772}, - {0x2773, 0x2773}, {0x2774, 0x2774}, {0x2775, 0x2775}, - {0x2780, 0x2793}, {0x2794, 0x2794}, {0x2798, 0x27AF}, - {0x27B1, 0x27BE}, {0x27C0, 0x27C4}, {0x27C5, 0x27C5}, - {0x27C6, 0x27C6}, {0x27C7, 0x27E5}, {0x27EE, 0x27EE}, - {0x27EF, 0x27EF}, {0x27F0, 0x27FF}, {0x2800, 0x28FF}, - {0x2900, 0x297F}, {0x2980, 0x2982}, {0x2983, 0x2983}, - {0x2984, 0x2984}, {0x2987, 0x2987}, {0x2988, 0x2988}, - {0x2989, 0x2989}, {0x298A, 0x298A}, {0x298B, 0x298B}, - {0x298C, 0x298C}, {0x298D, 0x298D}, {0x298E, 0x298E}, - {0x298F, 0x298F}, {0x2990, 0x2990}, {0x2991, 0x2991}, - {0x2992, 0x2992}, {0x2993, 0x2993}, {0x2994, 0x2994}, - {0x2995, 0x2995}, {0x2996, 0x2996}, {0x2997, 0x2997}, - {0x2998, 0x2998}, {0x2999, 0x29D7}, {0x29D8, 0x29D8}, - {0x29D9, 0x29D9}, {0x29DA, 0x29DA}, {0x29DB, 0x29DB}, - {0x29DC, 0x29FB}, {0x29FC, 0x29FC}, {0x29FD, 0x29FD}, - {0x29FE, 0x29FF}, {0x2A00, 0x2AFF}, {0x2B00, 0x2B1A}, - {0x2B1D, 0x2B2F}, {0x2B30, 0x2B44}, {0x2B45, 0x2B46}, - {0x2B47, 0x2B4C}, {0x2B4D, 0x2B4F}, {0x2B51, 0x2B54}, - {0x2B5A, 0x2B73}, {0x2B76, 0x2B95}, {0x2B98, 0x2BB9}, - {0x2BBD, 0x2BC8}, {0x2BCA, 0x2BD1}, {0x2BEC, 0x2BEF}, - {0x2C00, 0x2C2E}, {0x2C30, 0x2C5E}, {0x2C60, 0x2C7B}, - {0x2C7C, 0x2C7D}, {0x2C7E, 0x2C7F}, {0x2C80, 0x2CE4}, - {0x2CE5, 0x2CEA}, {0x2CEB, 0x2CEE}, {0x2CEF, 0x2CF1}, - {0x2CF2, 0x2CF3}, {0x2CF9, 0x2CFC}, {0x2CFD, 0x2CFD}, - {0x2CFE, 0x2CFF}, {0x2D00, 0x2D25}, {0x2D27, 0x2D27}, - {0x2D2D, 0x2D2D}, {0x2D30, 0x2D67}, {0x2D6F, 0x2D6F}, - {0x2D70, 0x2D70}, {0x2D7F, 0x2D7F}, {0x2D80, 0x2D96}, - {0x2DA0, 0x2DA6}, {0x2DA8, 0x2DAE}, {0x2DB0, 0x2DB6}, - {0x2DB8, 0x2DBE}, 
{0x2DC0, 0x2DC6}, {0x2DC8, 0x2DCE}, - {0x2DD0, 0x2DD6}, {0x2DD8, 0x2DDE}, {0x2DE0, 0x2DFF}, - {0x2E00, 0x2E01}, {0x2E02, 0x2E02}, {0x2E03, 0x2E03}, - {0x2E04, 0x2E04}, {0x2E05, 0x2E05}, {0x2E06, 0x2E08}, - {0x2E09, 0x2E09}, {0x2E0A, 0x2E0A}, {0x2E0B, 0x2E0B}, - {0x2E0C, 0x2E0C}, {0x2E0D, 0x2E0D}, {0x2E0E, 0x2E16}, - {0x2E17, 0x2E17}, {0x2E18, 0x2E19}, {0x2E1A, 0x2E1A}, - {0x2E1B, 0x2E1B}, {0x2E1C, 0x2E1C}, {0x2E1D, 0x2E1D}, - {0x2E1E, 0x2E1F}, {0x2E20, 0x2E20}, {0x2E21, 0x2E21}, - {0x2E22, 0x2E22}, {0x2E23, 0x2E23}, {0x2E24, 0x2E24}, - {0x2E25, 0x2E25}, {0x2E26, 0x2E26}, {0x2E27, 0x2E27}, - {0x2E28, 0x2E28}, {0x2E29, 0x2E29}, {0x2E2A, 0x2E2E}, - {0x2E2F, 0x2E2F}, {0x2E30, 0x2E39}, {0x2E3A, 0x2E3B}, - {0x2E3C, 0x2E3F}, {0x2E40, 0x2E40}, {0x2E41, 0x2E41}, - {0x2E42, 0x2E42}, {0x2E43, 0x2E44}, {0x303F, 0x303F}, - {0x4DC0, 0x4DFF}, {0xA4D0, 0xA4F7}, {0xA4F8, 0xA4FD}, - {0xA4FE, 0xA4FF}, {0xA500, 0xA60B}, {0xA60C, 0xA60C}, - {0xA60D, 0xA60F}, {0xA610, 0xA61F}, {0xA620, 0xA629}, - {0xA62A, 0xA62B}, {0xA640, 0xA66D}, {0xA66E, 0xA66E}, - {0xA66F, 0xA66F}, {0xA670, 0xA672}, {0xA673, 0xA673}, - {0xA674, 0xA67D}, {0xA67E, 0xA67E}, {0xA67F, 0xA67F}, - {0xA680, 0xA69B}, {0xA69C, 0xA69D}, {0xA69E, 0xA69F}, - {0xA6A0, 0xA6E5}, {0xA6E6, 0xA6EF}, {0xA6F0, 0xA6F1}, - {0xA6F2, 0xA6F7}, {0xA700, 0xA716}, {0xA717, 0xA71F}, - {0xA720, 0xA721}, {0xA722, 0xA76F}, {0xA770, 0xA770}, - {0xA771, 0xA787}, {0xA788, 0xA788}, {0xA789, 0xA78A}, - {0xA78B, 0xA78E}, {0xA78F, 0xA78F}, {0xA790, 0xA7AE}, - {0xA7B0, 0xA7B7}, {0xA7F7, 0xA7F7}, {0xA7F8, 0xA7F9}, - {0xA7FA, 0xA7FA}, {0xA7FB, 0xA7FF}, {0xA800, 0xA801}, - {0xA802, 0xA802}, {0xA803, 0xA805}, {0xA806, 0xA806}, - {0xA807, 0xA80A}, {0xA80B, 0xA80B}, {0xA80C, 0xA822}, - {0xA823, 0xA824}, {0xA825, 0xA826}, {0xA827, 0xA827}, - {0xA828, 0xA82B}, {0xA830, 0xA835}, {0xA836, 0xA837}, - {0xA838, 0xA838}, {0xA839, 0xA839}, {0xA840, 0xA873}, - {0xA874, 0xA877}, {0xA880, 0xA881}, {0xA882, 0xA8B3}, - {0xA8B4, 0xA8C3}, {0xA8C4, 0xA8C5}, {0xA8CE, 0xA8CF}, - {0xA8D0, 0xA8D9}, {0xA8E0, 0xA8F1}, {0xA8F2, 0xA8F7}, - {0xA8F8, 0xA8FA}, {0xA8FB, 0xA8FB}, {0xA8FC, 0xA8FC}, - {0xA8FD, 0xA8FD}, {0xA900, 0xA909}, {0xA90A, 0xA925}, - {0xA926, 0xA92D}, {0xA92E, 0xA92F}, {0xA930, 0xA946}, - {0xA947, 0xA951}, {0xA952, 0xA953}, {0xA95F, 0xA95F}, - {0xA980, 0xA982}, {0xA983, 0xA983}, {0xA984, 0xA9B2}, - {0xA9B3, 0xA9B3}, {0xA9B4, 0xA9B5}, {0xA9B6, 0xA9B9}, - {0xA9BA, 0xA9BB}, {0xA9BC, 0xA9BC}, {0xA9BD, 0xA9C0}, - {0xA9C1, 0xA9CD}, {0xA9CF, 0xA9CF}, {0xA9D0, 0xA9D9}, - {0xA9DE, 0xA9DF}, {0xA9E0, 0xA9E4}, {0xA9E5, 0xA9E5}, - {0xA9E6, 0xA9E6}, {0xA9E7, 0xA9EF}, {0xA9F0, 0xA9F9}, - {0xA9FA, 0xA9FE}, {0xAA00, 0xAA28}, {0xAA29, 0xAA2E}, - {0xAA2F, 0xAA30}, {0xAA31, 0xAA32}, {0xAA33, 0xAA34}, - {0xAA35, 0xAA36}, {0xAA40, 0xAA42}, {0xAA43, 0xAA43}, - {0xAA44, 0xAA4B}, {0xAA4C, 0xAA4C}, {0xAA4D, 0xAA4D}, - {0xAA50, 0xAA59}, {0xAA5C, 0xAA5F}, {0xAA60, 0xAA6F}, - {0xAA70, 0xAA70}, {0xAA71, 0xAA76}, {0xAA77, 0xAA79}, - {0xAA7A, 0xAA7A}, {0xAA7B, 0xAA7B}, {0xAA7C, 0xAA7C}, - {0xAA7D, 0xAA7D}, {0xAA7E, 0xAA7F}, {0xAA80, 0xAAAF}, - {0xAAB0, 0xAAB0}, {0xAAB1, 0xAAB1}, {0xAAB2, 0xAAB4}, - {0xAAB5, 0xAAB6}, {0xAAB7, 0xAAB8}, {0xAAB9, 0xAABD}, - {0xAABE, 0xAABF}, {0xAAC0, 0xAAC0}, {0xAAC1, 0xAAC1}, - {0xAAC2, 0xAAC2}, {0xAADB, 0xAADC}, {0xAADD, 0xAADD}, - {0xAADE, 0xAADF}, {0xAAE0, 0xAAEA}, {0xAAEB, 0xAAEB}, - {0xAAEC, 0xAAED}, {0xAAEE, 0xAAEF}, {0xAAF0, 0xAAF1}, - {0xAAF2, 0xAAF2}, {0xAAF3, 0xAAF4}, {0xAAF5, 0xAAF5}, - {0xAAF6, 0xAAF6}, {0xAB01, 0xAB06}, {0xAB09, 0xAB0E}, - {0xAB11, 0xAB16}, {0xAB20, 0xAB26}, {0xAB28, 
0xAB2E}, - {0xAB30, 0xAB5A}, {0xAB5B, 0xAB5B}, {0xAB5C, 0xAB5F}, - {0xAB60, 0xAB65}, {0xAB70, 0xABBF}, {0xABC0, 0xABE2}, - {0xABE3, 0xABE4}, {0xABE5, 0xABE5}, {0xABE6, 0xABE7}, - {0xABE8, 0xABE8}, {0xABE9, 0xABEA}, {0xABEB, 0xABEB}, - {0xABEC, 0xABEC}, {0xABED, 0xABED}, {0xABF0, 0xABF9}, - {0xD7B0, 0xD7C6}, {0xD7CB, 0xD7FB}, {0xD800, 0xDB7F}, - {0xDB80, 0xDBFF}, {0xDC00, 0xDFFF}, {0xFB00, 0xFB06}, - {0xFB13, 0xFB17}, {0xFB1D, 0xFB1D}, {0xFB1E, 0xFB1E}, - {0xFB1F, 0xFB28}, {0xFB29, 0xFB29}, {0xFB2A, 0xFB36}, - {0xFB38, 0xFB3C}, {0xFB3E, 0xFB3E}, {0xFB40, 0xFB41}, - {0xFB43, 0xFB44}, {0xFB46, 0xFB4F}, {0xFB50, 0xFBB1}, - {0xFBB2, 0xFBC1}, {0xFBD3, 0xFD3D}, {0xFD3E, 0xFD3E}, - {0xFD3F, 0xFD3F}, {0xFD50, 0xFD8F}, {0xFD92, 0xFDC7}, - {0xFDF0, 0xFDFB}, {0xFDFC, 0xFDFC}, {0xFDFD, 0xFDFD}, - {0xFE20, 0xFE2F}, {0xFE70, 0xFE74}, {0xFE76, 0xFEFC}, - {0xFEFF, 0xFEFF}, {0xFFF9, 0xFFFB}, {0xFFFC, 0xFFFC}, - {0x10000, 0x1000B}, {0x1000D, 0x10026}, {0x10028, 0x1003A}, - {0x1003C, 0x1003D}, {0x1003F, 0x1004D}, {0x10050, 0x1005D}, - {0x10080, 0x100FA}, {0x10100, 0x10102}, {0x10107, 0x10133}, - {0x10137, 0x1013F}, {0x10140, 0x10174}, {0x10175, 0x10178}, - {0x10179, 0x10189}, {0x1018A, 0x1018B}, {0x1018C, 0x1018E}, - {0x10190, 0x1019B}, {0x101A0, 0x101A0}, {0x101D0, 0x101FC}, - {0x101FD, 0x101FD}, {0x10280, 0x1029C}, {0x102A0, 0x102D0}, - {0x102E0, 0x102E0}, {0x102E1, 0x102FB}, {0x10300, 0x1031F}, - {0x10320, 0x10323}, {0x10330, 0x10340}, {0x10341, 0x10341}, - {0x10342, 0x10349}, {0x1034A, 0x1034A}, {0x10350, 0x10375}, - {0x10376, 0x1037A}, {0x10380, 0x1039D}, {0x1039F, 0x1039F}, - {0x103A0, 0x103C3}, {0x103C8, 0x103CF}, {0x103D0, 0x103D0}, - {0x103D1, 0x103D5}, {0x10400, 0x1044F}, {0x10450, 0x1047F}, - {0x10480, 0x1049D}, {0x104A0, 0x104A9}, {0x104B0, 0x104D3}, - {0x104D8, 0x104FB}, {0x10500, 0x10527}, {0x10530, 0x10563}, - {0x1056F, 0x1056F}, {0x10600, 0x10736}, {0x10740, 0x10755}, - {0x10760, 0x10767}, {0x10800, 0x10805}, {0x10808, 0x10808}, - {0x1080A, 0x10835}, {0x10837, 0x10838}, {0x1083C, 0x1083C}, - {0x1083F, 0x1083F}, {0x10840, 0x10855}, {0x10857, 0x10857}, - {0x10858, 0x1085F}, {0x10860, 0x10876}, {0x10877, 0x10878}, - {0x10879, 0x1087F}, {0x10880, 0x1089E}, {0x108A7, 0x108AF}, - {0x108E0, 0x108F2}, {0x108F4, 0x108F5}, {0x108FB, 0x108FF}, - {0x10900, 0x10915}, {0x10916, 0x1091B}, {0x1091F, 0x1091F}, - {0x10920, 0x10939}, {0x1093F, 0x1093F}, {0x10980, 0x1099F}, - {0x109A0, 0x109B7}, {0x109BC, 0x109BD}, {0x109BE, 0x109BF}, - {0x109C0, 0x109CF}, {0x109D2, 0x109FF}, {0x10A00, 0x10A00}, - {0x10A01, 0x10A03}, {0x10A05, 0x10A06}, {0x10A0C, 0x10A0F}, - {0x10A10, 0x10A13}, {0x10A15, 0x10A17}, {0x10A19, 0x10A33}, - {0x10A38, 0x10A3A}, {0x10A3F, 0x10A3F}, {0x10A40, 0x10A47}, - {0x10A50, 0x10A58}, {0x10A60, 0x10A7C}, {0x10A7D, 0x10A7E}, - {0x10A7F, 0x10A7F}, {0x10A80, 0x10A9C}, {0x10A9D, 0x10A9F}, - {0x10AC0, 0x10AC7}, {0x10AC8, 0x10AC8}, {0x10AC9, 0x10AE4}, - {0x10AE5, 0x10AE6}, {0x10AEB, 0x10AEF}, {0x10AF0, 0x10AF6}, - {0x10B00, 0x10B35}, {0x10B39, 0x10B3F}, {0x10B40, 0x10B55}, - {0x10B58, 0x10B5F}, {0x10B60, 0x10B72}, {0x10B78, 0x10B7F}, - {0x10B80, 0x10B91}, {0x10B99, 0x10B9C}, {0x10BA9, 0x10BAF}, - {0x10C00, 0x10C48}, {0x10C80, 0x10CB2}, {0x10CC0, 0x10CF2}, - {0x10CFA, 0x10CFF}, {0x10E60, 0x10E7E}, {0x11000, 0x11000}, - {0x11001, 0x11001}, {0x11002, 0x11002}, {0x11003, 0x11037}, - {0x11038, 0x11046}, {0x11047, 0x1104D}, {0x11052, 0x11065}, - {0x11066, 0x1106F}, {0x1107F, 0x1107F}, {0x11080, 0x11081}, - {0x11082, 0x11082}, {0x11083, 0x110AF}, {0x110B0, 0x110B2}, - {0x110B3, 0x110B6}, {0x110B7, 0x110B8}, 
{0x110B9, 0x110BA}, - {0x110BB, 0x110BC}, {0x110BD, 0x110BD}, {0x110BE, 0x110C1}, - {0x110D0, 0x110E8}, {0x110F0, 0x110F9}, {0x11100, 0x11102}, - {0x11103, 0x11126}, {0x11127, 0x1112B}, {0x1112C, 0x1112C}, - {0x1112D, 0x11134}, {0x11136, 0x1113F}, {0x11140, 0x11143}, - {0x11150, 0x11172}, {0x11173, 0x11173}, {0x11174, 0x11175}, - {0x11176, 0x11176}, {0x11180, 0x11181}, {0x11182, 0x11182}, - {0x11183, 0x111B2}, {0x111B3, 0x111B5}, {0x111B6, 0x111BE}, - {0x111BF, 0x111C0}, {0x111C1, 0x111C4}, {0x111C5, 0x111C9}, - {0x111CA, 0x111CC}, {0x111CD, 0x111CD}, {0x111D0, 0x111D9}, - {0x111DA, 0x111DA}, {0x111DB, 0x111DB}, {0x111DC, 0x111DC}, - {0x111DD, 0x111DF}, {0x111E1, 0x111F4}, {0x11200, 0x11211}, - {0x11213, 0x1122B}, {0x1122C, 0x1122E}, {0x1122F, 0x11231}, - {0x11232, 0x11233}, {0x11234, 0x11234}, {0x11235, 0x11235}, - {0x11236, 0x11237}, {0x11238, 0x1123D}, {0x1123E, 0x1123E}, - {0x11280, 0x11286}, {0x11288, 0x11288}, {0x1128A, 0x1128D}, - {0x1128F, 0x1129D}, {0x1129F, 0x112A8}, {0x112A9, 0x112A9}, - {0x112B0, 0x112DE}, {0x112DF, 0x112DF}, {0x112E0, 0x112E2}, - {0x112E3, 0x112EA}, {0x112F0, 0x112F9}, {0x11300, 0x11301}, - {0x11302, 0x11303}, {0x11305, 0x1130C}, {0x1130F, 0x11310}, - {0x11313, 0x11328}, {0x1132A, 0x11330}, {0x11332, 0x11333}, - {0x11335, 0x11339}, {0x1133C, 0x1133C}, {0x1133D, 0x1133D}, - {0x1133E, 0x1133F}, {0x11340, 0x11340}, {0x11341, 0x11344}, - {0x11347, 0x11348}, {0x1134B, 0x1134D}, {0x11350, 0x11350}, - {0x11357, 0x11357}, {0x1135D, 0x11361}, {0x11362, 0x11363}, - {0x11366, 0x1136C}, {0x11370, 0x11374}, {0x11400, 0x11434}, - {0x11435, 0x11437}, {0x11438, 0x1143F}, {0x11440, 0x11441}, - {0x11442, 0x11444}, {0x11445, 0x11445}, {0x11446, 0x11446}, - {0x11447, 0x1144A}, {0x1144B, 0x1144F}, {0x11450, 0x11459}, - {0x1145B, 0x1145B}, {0x1145D, 0x1145D}, {0x11480, 0x114AF}, - {0x114B0, 0x114B2}, {0x114B3, 0x114B8}, {0x114B9, 0x114B9}, - {0x114BA, 0x114BA}, {0x114BB, 0x114BE}, {0x114BF, 0x114C0}, - {0x114C1, 0x114C1}, {0x114C2, 0x114C3}, {0x114C4, 0x114C5}, - {0x114C6, 0x114C6}, {0x114C7, 0x114C7}, {0x114D0, 0x114D9}, - {0x11580, 0x115AE}, {0x115AF, 0x115B1}, {0x115B2, 0x115B5}, - {0x115B8, 0x115BB}, {0x115BC, 0x115BD}, {0x115BE, 0x115BE}, - {0x115BF, 0x115C0}, {0x115C1, 0x115D7}, {0x115D8, 0x115DB}, - {0x115DC, 0x115DD}, {0x11600, 0x1162F}, {0x11630, 0x11632}, - {0x11633, 0x1163A}, {0x1163B, 0x1163C}, {0x1163D, 0x1163D}, - {0x1163E, 0x1163E}, {0x1163F, 0x11640}, {0x11641, 0x11643}, - {0x11644, 0x11644}, {0x11650, 0x11659}, {0x11660, 0x1166C}, - {0x11680, 0x116AA}, {0x116AB, 0x116AB}, {0x116AC, 0x116AC}, - {0x116AD, 0x116AD}, {0x116AE, 0x116AF}, {0x116B0, 0x116B5}, - {0x116B6, 0x116B6}, {0x116B7, 0x116B7}, {0x116C0, 0x116C9}, - {0x11700, 0x11719}, {0x1171D, 0x1171F}, {0x11720, 0x11721}, - {0x11722, 0x11725}, {0x11726, 0x11726}, {0x11727, 0x1172B}, - {0x11730, 0x11739}, {0x1173A, 0x1173B}, {0x1173C, 0x1173E}, - {0x1173F, 0x1173F}, {0x118A0, 0x118DF}, {0x118E0, 0x118E9}, - {0x118EA, 0x118F2}, {0x118FF, 0x118FF}, {0x11AC0, 0x11AF8}, - {0x11C00, 0x11C08}, {0x11C0A, 0x11C2E}, {0x11C2F, 0x11C2F}, - {0x11C30, 0x11C36}, {0x11C38, 0x11C3D}, {0x11C3E, 0x11C3E}, - {0x11C3F, 0x11C3F}, {0x11C40, 0x11C40}, {0x11C41, 0x11C45}, - {0x11C50, 0x11C59}, {0x11C5A, 0x11C6C}, {0x11C70, 0x11C71}, - {0x11C72, 0x11C8F}, {0x11C92, 0x11CA7}, {0x11CA9, 0x11CA9}, - {0x11CAA, 0x11CB0}, {0x11CB1, 0x11CB1}, {0x11CB2, 0x11CB3}, - {0x11CB4, 0x11CB4}, {0x11CB5, 0x11CB6}, {0x12000, 0x12399}, - {0x12400, 0x1246E}, {0x12470, 0x12474}, {0x12480, 0x12543}, - {0x13000, 0x1342E}, {0x14400, 0x14646}, {0x16800, 0x16A38}, 
- {0x16A40, 0x16A5E}, {0x16A60, 0x16A69}, {0x16A6E, 0x16A6F}, - {0x16AD0, 0x16AED}, {0x16AF0, 0x16AF4}, {0x16AF5, 0x16AF5}, - {0x16B00, 0x16B2F}, {0x16B30, 0x16B36}, {0x16B37, 0x16B3B}, - {0x16B3C, 0x16B3F}, {0x16B40, 0x16B43}, {0x16B44, 0x16B44}, - {0x16B45, 0x16B45}, {0x16B50, 0x16B59}, {0x16B5B, 0x16B61}, - {0x16B63, 0x16B77}, {0x16B7D, 0x16B8F}, {0x16F00, 0x16F44}, - {0x16F50, 0x16F50}, {0x16F51, 0x16F7E}, {0x16F8F, 0x16F92}, - {0x16F93, 0x16F9F}, {0x1BC00, 0x1BC6A}, {0x1BC70, 0x1BC7C}, - {0x1BC80, 0x1BC88}, {0x1BC90, 0x1BC99}, {0x1BC9C, 0x1BC9C}, - {0x1BC9D, 0x1BC9E}, {0x1BC9F, 0x1BC9F}, {0x1BCA0, 0x1BCA3}, - {0x1D000, 0x1D0F5}, {0x1D100, 0x1D126}, {0x1D129, 0x1D164}, - {0x1D165, 0x1D166}, {0x1D167, 0x1D169}, {0x1D16A, 0x1D16C}, - {0x1D16D, 0x1D172}, {0x1D173, 0x1D17A}, {0x1D17B, 0x1D182}, - {0x1D183, 0x1D184}, {0x1D185, 0x1D18B}, {0x1D18C, 0x1D1A9}, - {0x1D1AA, 0x1D1AD}, {0x1D1AE, 0x1D1E8}, {0x1D200, 0x1D241}, - {0x1D242, 0x1D244}, {0x1D245, 0x1D245}, {0x1D300, 0x1D356}, - {0x1D360, 0x1D371}, {0x1D400, 0x1D454}, {0x1D456, 0x1D49C}, - {0x1D49E, 0x1D49F}, {0x1D4A2, 0x1D4A2}, {0x1D4A5, 0x1D4A6}, - {0x1D4A9, 0x1D4AC}, {0x1D4AE, 0x1D4B9}, {0x1D4BB, 0x1D4BB}, - {0x1D4BD, 0x1D4C3}, {0x1D4C5, 0x1D505}, {0x1D507, 0x1D50A}, - {0x1D50D, 0x1D514}, {0x1D516, 0x1D51C}, {0x1D51E, 0x1D539}, - {0x1D53B, 0x1D53E}, {0x1D540, 0x1D544}, {0x1D546, 0x1D546}, - {0x1D54A, 0x1D550}, {0x1D552, 0x1D6A5}, {0x1D6A8, 0x1D6C0}, - {0x1D6C1, 0x1D6C1}, {0x1D6C2, 0x1D6DA}, {0x1D6DB, 0x1D6DB}, - {0x1D6DC, 0x1D6FA}, {0x1D6FB, 0x1D6FB}, {0x1D6FC, 0x1D714}, - {0x1D715, 0x1D715}, {0x1D716, 0x1D734}, {0x1D735, 0x1D735}, - {0x1D736, 0x1D74E}, {0x1D74F, 0x1D74F}, {0x1D750, 0x1D76E}, - {0x1D76F, 0x1D76F}, {0x1D770, 0x1D788}, {0x1D789, 0x1D789}, - {0x1D78A, 0x1D7A8}, {0x1D7A9, 0x1D7A9}, {0x1D7AA, 0x1D7C2}, - {0x1D7C3, 0x1D7C3}, {0x1D7C4, 0x1D7CB}, {0x1D7CE, 0x1D7FF}, - {0x1D800, 0x1D9FF}, {0x1DA00, 0x1DA36}, {0x1DA37, 0x1DA3A}, - {0x1DA3B, 0x1DA6C}, {0x1DA6D, 0x1DA74}, {0x1DA75, 0x1DA75}, - {0x1DA76, 0x1DA83}, {0x1DA84, 0x1DA84}, {0x1DA85, 0x1DA86}, - {0x1DA87, 0x1DA8B}, {0x1DA9B, 0x1DA9F}, {0x1DAA1, 0x1DAAF}, - {0x1E000, 0x1E006}, {0x1E008, 0x1E018}, {0x1E01B, 0x1E021}, - {0x1E023, 0x1E024}, {0x1E026, 0x1E02A}, {0x1E800, 0x1E8C4}, - {0x1E8C7, 0x1E8CF}, {0x1E8D0, 0x1E8D6}, {0x1E900, 0x1E943}, - {0x1E944, 0x1E94A}, {0x1E950, 0x1E959}, {0x1E95E, 0x1E95F}, - {0x1EE00, 0x1EE03}, {0x1EE05, 0x1EE1F}, {0x1EE21, 0x1EE22}, - {0x1EE24, 0x1EE24}, {0x1EE27, 0x1EE27}, {0x1EE29, 0x1EE32}, - {0x1EE34, 0x1EE37}, {0x1EE39, 0x1EE39}, {0x1EE3B, 0x1EE3B}, - {0x1EE42, 0x1EE42}, {0x1EE47, 0x1EE47}, {0x1EE49, 0x1EE49}, - {0x1EE4B, 0x1EE4B}, {0x1EE4D, 0x1EE4F}, {0x1EE51, 0x1EE52}, - {0x1EE54, 0x1EE54}, {0x1EE57, 0x1EE57}, {0x1EE59, 0x1EE59}, - {0x1EE5B, 0x1EE5B}, {0x1EE5D, 0x1EE5D}, {0x1EE5F, 0x1EE5F}, - {0x1EE61, 0x1EE62}, {0x1EE64, 0x1EE64}, {0x1EE67, 0x1EE6A}, - {0x1EE6C, 0x1EE72}, {0x1EE74, 0x1EE77}, {0x1EE79, 0x1EE7C}, - {0x1EE7E, 0x1EE7E}, {0x1EE80, 0x1EE89}, {0x1EE8B, 0x1EE9B}, - {0x1EEA1, 0x1EEA3}, {0x1EEA5, 0x1EEA9}, {0x1EEAB, 0x1EEBB}, - {0x1EEF0, 0x1EEF1}, {0x1F000, 0x1F003}, {0x1F005, 0x1F02B}, - {0x1F030, 0x1F093}, {0x1F0A0, 0x1F0AE}, {0x1F0B1, 0x1F0BF}, - {0x1F0C1, 0x1F0CE}, {0x1F0D1, 0x1F0F5}, {0x1F10B, 0x1F10C}, - {0x1F12E, 0x1F12E}, {0x1F16A, 0x1F16B}, {0x1F1E6, 0x1F1FF}, - {0x1F321, 0x1F32C}, {0x1F336, 0x1F336}, {0x1F37D, 0x1F37D}, - {0x1F394, 0x1F39F}, {0x1F3CB, 0x1F3CE}, {0x1F3D4, 0x1F3DF}, - {0x1F3F1, 0x1F3F3}, {0x1F3F5, 0x1F3F7}, {0x1F43F, 0x1F43F}, - {0x1F441, 0x1F441}, {0x1F4FD, 0x1F4FE}, {0x1F53E, 0x1F54A}, - {0x1F54F, 
0x1F54F}, {0x1F568, 0x1F579}, {0x1F57B, 0x1F594},
- {0x1F597, 0x1F5A3}, {0x1F5A5, 0x1F5FA}, {0x1F650, 0x1F67F},
- {0x1F6C6, 0x1F6CB}, {0x1F6CD, 0x1F6CF}, {0x1F6E0, 0x1F6EA},
- {0x1F6F0, 0x1F6F3}, {0x1F700, 0x1F773}, {0x1F780, 0x1F7D4},
- {0x1F800, 0x1F80B}, {0x1F810, 0x1F847}, {0x1F850, 0x1F859},
- {0x1F860, 0x1F887}, {0x1F890, 0x1F8AD}, {0xE0001, 0xE0001},
- {0xE0020, 0xE007F},
-}
-
-// Condition has the flag EastAsianWidth, indicating whether the current locale is CJK or not.
-type Condition struct {
-  EastAsianWidth bool
-}
-
-// NewCondition returns a new instance of Condition reflecting the current locale.
-func NewCondition() *Condition {
-  return &Condition{EastAsianWidth}
-}
-
-// RuneWidth returns the number of cells in r.
-// See http://www.unicode.org/reports/tr11/
-func (c *Condition) RuneWidth(r rune) int {
-  switch {
-  case r < 0 || r > 0x10FFFF ||
-    inTables(r, nonprint, combining, notassigned):
-    return 0
-  case (c.EastAsianWidth && IsAmbiguousWidth(r)) ||
-    inTables(r, doublewidth, emoji):
-    return 2
-  default:
-    return 1
-  }
-}
-
-// StringWidth returns the display width of s in cells.
-func (c *Condition) StringWidth(s string) (width int) {
-  for _, r := range []rune(s) {
-    width += c.RuneWidth(r)
-  }
-  return width
-}
-
-// Truncate returns s truncated to at most w cells, with tail appended.
-func (c *Condition) Truncate(s string, w int, tail string) string {
-  if c.StringWidth(s) <= w {
-    return s
-  }
-  r := []rune(s)
-  tw := c.StringWidth(tail)
-  w -= tw
-  width := 0
-  i := 0
-  for ; i < len(r); i++ {
-    cw := c.RuneWidth(r[i])
-    if width+cw > w {
-      break
-    }
-    width += cw
-  }
-  return string(r[0:i]) + tail
-}
-
-// Wrap returns s wrapped at w cells.
-func (c *Condition) Wrap(s string, w int) string {
-  width := 0
-  out := ""
-  for _, r := range []rune(s) {
-    cw := c.RuneWidth(r) // use the receiver's width rules, not the package default
-    if r == '\n' {
-      out += string(r)
-      width = 0
-      continue
-    } else if width+cw > w {
-      out += "\n"
-      width = 0
-      out += string(r)
-      width += cw
-      continue
-    }
-    out += string(r)
-    width += cw
-  }
-  return out
-}
-
-// FillLeft returns s left-padded with spaces to w cells.
-func (c *Condition) FillLeft(s string, w int) string {
-  width := c.StringWidth(s)
-  count := w - width
-  if count > 0 {
-    b := make([]byte, count)
-    for i := range b {
-      b[i] = ' '
-    }
-    return string(b) + s
-  }
-  return s
-}
-
-// FillRight returns s right-padded with spaces to w cells.
-func (c *Condition) FillRight(s string, w int) string {
-  width := c.StringWidth(s)
-  count := w - width
-  if count > 0 {
-    b := make([]byte, count)
-    for i := range b {
-      b[i] = ' '
-    }
-    return s + string(b)
-  }
-  return s
-}
-
-// RuneWidth returns the number of cells in r.
-// See http://www.unicode.org/reports/tr11/
-func RuneWidth(r rune) int {
-  return DefaultCondition.RuneWidth(r)
-}
-
-// IsAmbiguousWidth returns whether r has ambiguous width.
-func IsAmbiguousWidth(r rune) bool {
-  return inTables(r, private, ambiguous)
-}
-
-// IsNeutralWidth returns whether r has neutral width.
-func IsNeutralWidth(r rune) bool {
-  return inTable(r, neutral)
-}
-
-// StringWidth returns the display width of s in cells.
-func StringWidth(s string) (width int) {
-  return DefaultCondition.StringWidth(s)
-}
-
-// Truncate returns s truncated to at most w cells, with tail appended.
-func Truncate(s string, w int, tail string) string {
-  return DefaultCondition.Truncate(s, w, tail)
-}
-
-// Wrap returns s wrapped at w cells.
-func Wrap(s string, w int) string {
-  return DefaultCondition.Wrap(s, w)
-}
-
-// FillLeft returns s left-padded with spaces to w cells.
-func FillLeft(s string, w int) string {
-  return DefaultCondition.FillLeft(s, w)
-}
-
-// FillRight returns s right-padded with spaces to w cells.
-func FillRight(s string, w int) string {
-  return DefaultCondition.FillRight(s, w)
-}
diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_js.go b/vendor/github.com/mattn/go-runewidth/runewidth_js.go
deleted file mode 100644
index 0ce32c5..0000000
--- a/vendor/github.com/mattn/go-runewidth/runewidth_js.go
+++ /dev/null
@@ -1,8 +0,0 @@
-// +build js
-
-package runewidth
-
-func IsEastAsian() bool {
-  // TODO: Implement this for the web. Detect east asian in a compatible way, and return true.
-  return false
-}
diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_posix.go b/vendor/github.com/mattn/go-runewidth/runewidth_posix.go
deleted file mode 100644
index c579e9a..0000000
--- a/vendor/github.com/mattn/go-runewidth/runewidth_posix.go
+++ /dev/null
@@ -1,77 +0,0 @@
-// +build !windows,!js
-
-package runewidth
-
-import (
-  "os"
-  "regexp"
-  "strings"
-)
-
-var reLoc = regexp.MustCompile(`^[a-z][a-z][a-z]?(?:_[A-Z][A-Z])?\.(.+)`)
-
-var mblenTable = map[string]int{
-  "utf-8": 6,
-  "utf8": 6,
-  "jis": 8,
-  "eucjp": 3,
-  "euckr": 2,
-  "euccn": 2,
-  "sjis": 2,
-  "cp932": 2,
-  "cp51932": 2,
-  "cp936": 2,
-  "cp949": 2,
-  "cp950": 2,
-  "big5": 2,
-  "gbk": 2,
-  "gb2312": 2,
-}
-
-func isEastAsian(locale string) bool {
-  charset := strings.ToLower(locale)
-  r := reLoc.FindStringSubmatch(locale)
-  if len(r) == 2 {
-    charset = strings.ToLower(r[1])
-  }
-
-  if strings.HasSuffix(charset, "@cjk_narrow") {
-    return false
-  }
-
-  for pos, b := range []byte(charset) {
-    if b == '@' {
-      charset = charset[:pos]
-      break
-    }
-  }
-  max := 1
-  if m, ok := mblenTable[charset]; ok {
-    max = m
-  }
-  if max > 1 && (charset[0] != 'u' ||
-    strings.HasPrefix(locale, "ja") ||
-    strings.HasPrefix(locale, "ko") ||
-    strings.HasPrefix(locale, "zh")) {
-    return true
-  }
-  return false
-}
-
-// IsEastAsian returns true if the current locale is CJK.
-func IsEastAsian() bool {
-  locale := os.Getenv("LC_CTYPE")
-  if locale == "" {
-    locale = os.Getenv("LANG")
-  }
-
-  // ignore C locale
-  if locale == "POSIX" || locale == "C" {
-    return false
-  }
-  if len(locale) > 1 && locale[0] == 'C' && (locale[1] == '.' || locale[1] == '-') {
-    return false
-  }
-
-  return isEastAsian(locale)
-}
diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_windows.go b/vendor/github.com/mattn/go-runewidth/runewidth_windows.go
deleted file mode 100644
index 0258876..0000000
--- a/vendor/github.com/mattn/go-runewidth/runewidth_windows.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package runewidth
-
-import (
-  "syscall"
-)
-
-var (
-  kernel32 = syscall.NewLazyDLL("kernel32")
-  procGetConsoleOutputCP = kernel32.NewProc("GetConsoleOutputCP")
-)
-
-// IsEastAsian returns true if the current locale is CJK.
-func IsEastAsian() bool {
-  r1, _, _ := procGetConsoleOutputCP.Call()
-  if r1 == 0 {
-    return false
-  }
-
-  switch int(r1) {
-  case 932, 51932, 936, 949, 950:
-    return true
-  }
-
-  return false
-}
diff --git a/vendor/github.com/mitchellh/go-homedir/LICENSE b/vendor/github.com/mitchellh/go-homedir/LICENSE
deleted file mode 100644
index f9c841a..0000000
--- a/vendor/github.com/mitchellh/go-homedir/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2013 Mitchell Hashimoto
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
diff --git a/vendor/github.com/mitchellh/go-homedir/README.md b/vendor/github.com/mitchellh/go-homedir/README.md
deleted file mode 100644
index d70706d..0000000
--- a/vendor/github.com/mitchellh/go-homedir/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
-# go-homedir
-
-This is a Go library for detecting the user's home directory without
-the use of cgo, so the library can be used in cross-compilation environments.
-
-Usage is incredibly simple: just call `homedir.Dir()` to get the home directory
-for a user, and `homedir.Expand()` to expand the `~` in a path to the home
-directory.
-
-**Why not just use `os/user`?** The built-in `os/user` package requires
-cgo on Darwin systems. This means that any Go code that uses that package
-cannot cross compile. But 99% of the time the use for `os/user` is just to
-retrieve the home directory, which we can do for the current user without
-cgo. This library does that, enabling cross-compilation.
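The go-runewidth hunks above drop the vendored copy of the terminal cell-width API (RuneWidth, StringWidth, Truncate, Wrap, FillLeft, FillRight); with the move to go mod, the dependency is fetched as a module instead of checked in. A minimal sketch of how that API is typically used, where the string literal and expected widths are illustrative only:

```go
package main

import (
	"fmt"

	"github.com/mattn/go-runewidth"
)

func main() {
	s := "コンニチハ, セカイ!"

	// Each CJK rune occupies two terminal cells, so the display
	// width exceeds the rune count (11 runes here).
	fmt.Println(runewidth.StringWidth(s)) // 19

	// Truncate to at most 10 cells, appending "..." as the tail.
	fmt.Println(runewidth.Truncate(s, 10, "...")) // "コンニ..."

	// Pad with spaces to 25 cells, e.g. for column alignment.
	fmt.Println(runewidth.FillRight(s, 25) + "|")
}
```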
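As the go-homedir README above describes, the whole API surface is `homedir.Dir()` and `homedir.Expand()`. A minimal sketch of both calls; the config path is an arbitrary example, not a path sql-runner actually reads:

```go
package main

import (
	"fmt"
	"log"

	"github.com/mitchellh/go-homedir"
)

func main() {
	// Dir resolves the current user's home directory without cgo.
	home, err := homedir.Dir()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(home)

	// Expand replaces a leading "~" with that directory.
	cfg, err := homedir.Expand("~/.sql-runner/config.yml")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(cfg)
}
```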
diff --git a/vendor/github.com/mitchellh/go-homedir/go.mod b/vendor/github.com/mitchellh/go-homedir/go.mod
deleted file mode 100644
index 7efa09a..0000000
--- a/vendor/github.com/mitchellh/go-homedir/go.mod
+++ /dev/null
@@ -1 +0,0 @@
-module github.com/mitchellh/go-homedir
diff --git a/vendor/github.com/mitchellh/go-homedir/homedir.go b/vendor/github.com/mitchellh/go-homedir/homedir.go
deleted file mode 100644
index fb87bef..0000000
--- a/vendor/github.com/mitchellh/go-homedir/homedir.go
+++ /dev/null
@@ -1,157 +0,0 @@
-package homedir
-
-import (
-  "bytes"
-  "errors"
-  "os"
-  "os/exec"
-  "path/filepath"
-  "runtime"
-  "strconv"
-  "strings"
-  "sync"
-)
-
-// DisableCache will disable caching of the home directory. Caching is enabled
-// by default.
-var DisableCache bool
-
-var homedirCache string
-var cacheLock sync.RWMutex
-
-// Dir returns the home directory for the executing user.
-//
-// This uses an OS-specific method for discovering the home directory.
-// An error is returned if a home directory cannot be detected.
-func Dir() (string, error) {
-  if !DisableCache {
-    cacheLock.RLock()
-    cached := homedirCache
-    cacheLock.RUnlock()
-    if cached != "" {
-      return cached, nil
-    }
-  }
-
-  cacheLock.Lock()
-  defer cacheLock.Unlock()
-
-  var result string
-  var err error
-  if runtime.GOOS == "windows" {
-    result, err = dirWindows()
-  } else {
-    // Unix-like system, so just assume Unix
-    result, err = dirUnix()
-  }
-
-  if err != nil {
-    return "", err
-  }
-  homedirCache = result
-  return result, nil
-}
-
-// Expand expands the path to include the home directory if the path
-// is prefixed with `~`. If it isn't prefixed with `~`, the path is
-// returned as-is.
-func Expand(path string) (string, error) {
-  if len(path) == 0 {
-    return path, nil
-  }
-
-  if path[0] != '~' {
-    return path, nil
-  }
-
-  if len(path) > 1 && path[1] != '/' && path[1] != '\\' {
-    return "", errors.New("cannot expand user-specific home dir")
-  }
-
-  dir, err := Dir()
-  if err != nil {
-    return "", err
-  }
-
-  return filepath.Join(dir, path[1:]), nil
-}
-
-func dirUnix() (string, error) {
-  homeEnv := "HOME"
-  if runtime.GOOS == "plan9" {
-    // On plan9, env vars are lowercase.
-    homeEnv = "home"
-  }
-
-  // First prefer the HOME environmental variable
-  if home := os.Getenv(homeEnv); home != "" {
-    return home, nil
-  }
-
-  var stdout bytes.Buffer
-
-  // If that fails, try OS specific commands
-  if runtime.GOOS == "darwin" {
-    cmd := exec.Command("sh", "-c", `dscl -q . -read /Users/"$(whoami)" NFSHomeDirectory | sed 's/^[^ ]*: //'`)
-    cmd.Stdout = &stdout
-    if err := cmd.Run(); err == nil {
-      result := strings.TrimSpace(stdout.String())
-      if result != "" {
-        return result, nil
-      }
-    }
-  } else {
-    cmd := exec.Command("getent", "passwd", strconv.Itoa(os.Getuid()))
-    cmd.Stdout = &stdout
-    if err := cmd.Run(); err != nil {
-      // If the error is ErrNotFound, we ignore it. Otherwise, return it.
-      if err != exec.ErrNotFound {
-        return "", err
-      }
-    } else {
-      if passwd := strings.TrimSpace(stdout.String()); passwd != "" {
-        // username:password:uid:gid:gecos:home:shell
-        passwdParts := strings.SplitN(passwd, ":", 7)
-        if len(passwdParts) > 5 {
-          return passwdParts[5], nil
-        }
-      }
-    }
-  }
-
-  // If all else fails, try the shell
-  stdout.Reset()
-  cmd := exec.Command("sh", "-c", "cd && pwd")
-  cmd.Stdout = &stdout
-  if err := cmd.Run(); err != nil {
-    return "", err
-  }
-
-  result := strings.TrimSpace(stdout.String())
-  if result == "" {
-    return "", errors.New("blank output when reading home directory")
-  }
-
-  return result, nil
-}
-
-func dirWindows() (string, error) {
-  // First prefer the HOME environmental variable
-  if home := os.Getenv("HOME"); home != "" {
-    return home, nil
-  }
-
-  // Prefer standard environment variable USERPROFILE
-  if home := os.Getenv("USERPROFILE"); home != "" {
-    return home, nil
-  }
-
-  drive := os.Getenv("HOMEDRIVE")
-  path := os.Getenv("HOMEPATH")
-  home := drive + path
-  if drive == "" || path == "" {
-    return "", errors.New("HOMEDRIVE, HOMEPATH, or USERPROFILE are blank")
-  }
-
-  return home, nil
-}
diff --git a/vendor/github.com/mitchellh/mapstructure/.travis.yml b/vendor/github.com/mitchellh/mapstructure/.travis.yml
deleted file mode 100644
index d9deadb..0000000
--- a/vendor/github.com/mitchellh/mapstructure/.travis.yml
+++ /dev/null
@@ -1,8 +0,0 @@
-language: go
-
-go:
-  - 1.9.x
-  - tip
-
-script:
-  - go test
diff --git a/vendor/github.com/mitchellh/mapstructure/LICENSE b/vendor/github.com/mitchellh/mapstructure/LICENSE
deleted file mode 100644
index f9c841a..0000000
--- a/vendor/github.com/mitchellh/mapstructure/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2013 Mitchell Hashimoto
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
diff --git a/vendor/github.com/mitchellh/mapstructure/README.md b/vendor/github.com/mitchellh/mapstructure/README.md
deleted file mode 100644
index 0018dc7..0000000
--- a/vendor/github.com/mitchellh/mapstructure/README.md
+++ /dev/null
@@ -1,46 +0,0 @@
-# mapstructure [![Godoc](https://godoc.org/github.com/mitchellh/mapstructure?status.svg)](https://godoc.org/github.com/mitchellh/mapstructure)
-
-mapstructure is a Go library for decoding generic map values to structures
-and vice versa, while providing helpful error handling.
-
-This library is most useful when decoding values from some data stream (JSON,
-Gob, etc.) where you don't _quite_ know the structure of the underlying data
-until you read a part of it. You can therefore read a `map[string]interface{}`
-and use this library to decode it into the proper underlying native Go
-structure.
-
-## Installation
-
-Standard `go get`:
-
-```
-$ go get github.com/mitchellh/mapstructure
-```
-
-## Usage & Example
-
-For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/mapstructure).
-
-The `Decode` function has examples associated with it there.
-
-## But Why?!
-
-Go offers fantastic standard libraries for decoding formats such as JSON.
-The standard method is to have a struct pre-created, and populate that struct
-from the bytes of the encoded format. This is great, but the problem is if
-you have configuration or an encoding that changes slightly depending on
-specific fields. For example, consider this JSON:
-
-```json
-{
-  "type": "person",
-  "name": "Mitchell"
-}
-```
-
-Perhaps we can't populate a specific structure without first reading
-the "type" field from the JSON. We could always do two passes over the
-decoding of the JSON (reading the "type" first, and the rest later).
-However, it is much simpler to just decode this into a `map[string]interface{}`
-structure, read the "type" key, then use something like this library
-to decode it into the proper structure.
diff --git a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go
deleted file mode 100644
index 2a72757..0000000
--- a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go
+++ /dev/null
@@ -1,171 +0,0 @@
-package mapstructure
-
-import (
-  "errors"
-  "reflect"
-  "strconv"
-  "strings"
-  "time"
-)
-
-// typedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns
-// it into the proper DecodeHookFunc type, such as DecodeHookFuncType.
-func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc {
-  // Create variables here so we can reference them with the reflect pkg
-  var f1 DecodeHookFuncType
-  var f2 DecodeHookFuncKind
-
-  // Fill in the variables into this interface and the rest is done
-  // automatically using the reflect package.
-  potential := []interface{}{f1, f2}
-
-  v := reflect.ValueOf(h)
-  vt := v.Type()
-  for _, raw := range potential {
-    pt := reflect.ValueOf(raw).Type()
-    if vt.ConvertibleTo(pt) {
-      return v.Convert(pt).Interface()
-    }
-  }
-
-  return nil
-}
-
-// DecodeHookExec executes the given decode hook. This should be used
-// since it'll naturally degrade to the older backwards compatible DecodeHookFunc
-// that took reflect.Kind instead of reflect.Type.
-func DecodeHookExec(
-  raw DecodeHookFunc,
-  from reflect.Type, to reflect.Type,
-  data interface{}) (interface{}, error) {
-  switch f := typedDecodeHook(raw).(type) {
-  case DecodeHookFuncType:
-    return f(from, to, data)
-  case DecodeHookFuncKind:
-    return f(from.Kind(), to.Kind(), data)
-  default:
-    return nil, errors.New("invalid decode hook signature")
-  }
-}
-
-// ComposeDecodeHookFunc creates a single DecodeHookFunc that
-// automatically composes multiple DecodeHookFuncs.
-//
-// The composed funcs are called in order, with the result of the
-// previous transformation.
-func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc {
-  return func(
-    f reflect.Type,
-    t reflect.Type,
-    data interface{}) (interface{}, error) {
-    var err error
-    for _, f1 := range fs {
-      data, err = DecodeHookExec(f1, f, t, data)
-      if err != nil {
-        return nil, err
-      }
-
-      // Modify the from kind to be correct with the new data
-      f = nil
-      if val := reflect.ValueOf(data); val.IsValid() {
-        f = val.Type()
-      }
-    }
-
-    return data, nil
-  }
-}
-
-// StringToSliceHookFunc returns a DecodeHookFunc that converts
-// string to []string by splitting on the given sep.
-func StringToSliceHookFunc(sep string) DecodeHookFunc {
-  return func(
-    f reflect.Kind,
-    t reflect.Kind,
-    data interface{}) (interface{}, error) {
-    if f != reflect.String || t != reflect.Slice {
-      return data, nil
-    }
-
-    raw := data.(string)
-    if raw == "" {
-      return []string{}, nil
-    }
-
-    return strings.Split(raw, sep), nil
-  }
-}
-
-// StringToTimeDurationHookFunc returns a DecodeHookFunc that converts
-// strings to time.Duration.
-func StringToTimeDurationHookFunc() DecodeHookFunc {
-  return func(
-    f reflect.Type,
-    t reflect.Type,
-    data interface{}) (interface{}, error) {
-    if f.Kind() != reflect.String {
-      return data, nil
-    }
-    if t != reflect.TypeOf(time.Duration(5)) {
-      return data, nil
-    }
-
-    // Convert it by parsing
-    return time.ParseDuration(data.(string))
-  }
-}
-
-// StringToTimeHookFunc returns a DecodeHookFunc that converts
-// strings to time.Time.
-func StringToTimeHookFunc(layout string) DecodeHookFunc {
-  return func(
-    f reflect.Type,
-    t reflect.Type,
-    data interface{}) (interface{}, error) {
-    if f.Kind() != reflect.String {
-      return data, nil
-    }
-    if t != reflect.TypeOf(time.Time{}) {
-      return data, nil
-    }
-
-    // Convert it by parsing
-    return time.Parse(layout, data.(string))
-  }
-}
-
-// WeaklyTypedHook is a DecodeHookFunc which adds support for weak typing to
-// the decoder.
-//
-// Note that this is significantly different from the WeaklyTypedInput option
-// of the DecoderConfig.
-func WeaklyTypedHook(
-  f reflect.Kind,
-  t reflect.Kind,
-  data interface{}) (interface{}, error) {
-  dataVal := reflect.ValueOf(data)
-  switch t {
-  case reflect.String:
-    switch f {
-    case reflect.Bool:
-      if dataVal.Bool() {
-        return "1", nil
-      }
-      return "0", nil
-    case reflect.Float32:
-      return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil
-    case reflect.Int:
-      return strconv.FormatInt(dataVal.Int(), 10), nil
-    case reflect.Slice:
-      dataType := dataVal.Type()
-      elemKind := dataType.Elem().Kind()
-      if elemKind == reflect.Uint8 {
-        return string(dataVal.Interface().([]uint8)), nil
-      }
-    case reflect.Uint:
-      return strconv.FormatUint(dataVal.Uint(), 10), nil
-    }
-  }
-
-  return data, nil
-}
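The mapstructure README above ends with the "type"/"name" JSON example; a runnable sketch of that flow, where the `Person` struct is invented for illustration (mapstructure matches map keys to field names case-insensitively by default):

```go
package main

import (
	"fmt"
	"log"

	"github.com/mitchellh/mapstructure"
)

type Person struct {
	Type string
	Name string
}

func main() {
	// In practice this map would come from a generic decode of JSON,
	// YAML, etc. into map[string]interface{}.
	input := map[string]interface{}{
		"type": "person",
		"name": "Mitchell",
	}

	var p Person
	if err := mapstructure.Decode(input, &p); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", p) // {Type:person Name:Mitchell}
}
```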
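The decode_hooks.go hunk just above removes the hook helpers. A sketch of how ComposeDecodeHookFunc and the string-conversion hooks plug into a DecoderConfig; the `Job` struct and its fields are hypothetical:

```go
package main

import (
	"fmt"
	"log"

	"github.com/mitchellh/mapstructure"
)

type Job struct {
	Timeout time.Duration
	Tags    []string
}

func main() {
	input := map[string]interface{}{
		"timeout": "90s",
		"tags":    "red,green,blue",
	}

	var job Job
	config := &mapstructure.DecoderConfig{
		Result: &job,
		// Hooks run in order on each value before it is assigned.
		DecodeHook: mapstructure.ComposeDecodeHookFunc(
			mapstructure.StringToTimeDurationHookFunc(),
			mapstructure.StringToSliceHookFunc(","),
		),
	}

	decoder, err := mapstructure.NewDecoder(config)
	if err != nil {
		log.Fatal(err)
	}
	if err := decoder.Decode(input); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", job) // {Timeout:1m30s Tags:[red green blue]}
}
```

Note the `"time"` import is also required for `time.Duration`; each hook passes values it does not recognise through unchanged, which is why composing the two is safe.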
-type Error struct { - Errors []string -} - -func (e *Error) Error() string { - points := make([]string, len(e.Errors)) - for i, err := range e.Errors { - points[i] = fmt.Sprintf("* %s", err) - } - - sort.Strings(points) - return fmt.Sprintf( - "%d error(s) decoding:\n\n%s", - len(e.Errors), strings.Join(points, "\n")) -} - -// WrappedErrors implements the errwrap.Wrapper interface to make this -// return value more useful with the errwrap and go-multierror libraries. -func (e *Error) WrappedErrors() []error { - if e == nil { - return nil - } - - result := make([]error, len(e.Errors)) - for i, e := range e.Errors { - result[i] = errors.New(e) - } - - return result -} - -func appendErrors(errors []string, err error) []string { - switch e := err.(type) { - case *Error: - return append(errors, e.Errors...) - default: - return append(errors, e.Error()) - } -} diff --git a/vendor/github.com/mitchellh/mapstructure/go.mod b/vendor/github.com/mitchellh/mapstructure/go.mod deleted file mode 100644 index d2a7125..0000000 --- a/vendor/github.com/mitchellh/mapstructure/go.mod +++ /dev/null @@ -1 +0,0 @@ -module github.com/mitchellh/mapstructure diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/mitchellh/mapstructure/mapstructure.go deleted file mode 100644 index d3222b8..0000000 --- a/vendor/github.com/mitchellh/mapstructure/mapstructure.go +++ /dev/null @@ -1,1064 +0,0 @@ -// Package mapstructure exposes functionality to convert an arbitrary -// map[string]interface{} into a native Go structure. -// -// The Go structure can be arbitrarily complex, containing slices, -// other structs, etc. and the decoder will properly decode nested -// maps and so on into the proper structures in the native Go struct. -// See the examples to see what the decoder is capable of. -package mapstructure - -import ( - "encoding/json" - "errors" - "fmt" - "reflect" - "sort" - "strconv" - "strings" -) - -// DecodeHookFunc is the callback function that can be used for -// data transformations. See "DecodeHook" in the DecoderConfig -// struct. -// -// The type should be DecodeHookFuncType or DecodeHookFuncKind. -// Either is accepted. Types are a superset of Kinds (Types can return -// Kinds) and are generally a richer thing to use, but Kinds are simpler -// if you only need those. -// -// The reason DecodeHookFunc is multi-typed is for backwards compatibility: -// we started with Kinds and then realized Types were the better solution, -// but have a promise to not break backwards compat so we now support -// both. -type DecodeHookFunc interface{} - -// DecodeHookFuncType is a DecodeHookFunc which has complete information about -// the source and target types. -type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface{}, error) - -// DecodeHookFuncKind is a DecodeHookFunc which knows only the Kinds of the -// source and target types. -type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error) - -// DecoderConfig is the configuration that is used to create a new decoder -// and allows customization of various aspects of decoding. -type DecoderConfig struct { - // DecodeHook, if set, will be called before any decoding and any - // type conversion (if WeaklyTypedInput is on). This lets you modify - // the values before they're set down onto the resulting struct. - // - // If an error is returned, the entire decode will fail with that - // error. 
- DecodeHook DecodeHookFunc - - // If ErrorUnused is true, then it is an error for there to exist - // keys in the original map that were unused in the decoding process - // (extra keys). - ErrorUnused bool - - // ZeroFields, if set to true, will zero fields before writing them. - // For example, a map will be emptied before decoded values are put in - // it. If this is false, a map will be merged. - ZeroFields bool - - // If WeaklyTypedInput is true, the decoder will make the following - // "weak" conversions: - // - // - bools to string (true = "1", false = "0") - // - numbers to string (base 10) - // - bools to int/uint (true = 1, false = 0) - // - strings to int/uint (base implied by prefix) - // - int to bool (true if value != 0) - // - string to bool (accepts: 1, t, T, TRUE, true, True, 0, f, F, - // FALSE, false, False. Anything else is an error) - // - empty array = empty map and vice versa - // - negative numbers to overflowed uint values (base 10) - // - slice of maps to a merged map - // - single values are converted to slices if required. Each - // element is weakly decoded. For example: "4" can become []int{4} - // if the target type is an int slice. - // - WeaklyTypedInput bool - - // Metadata is the struct that will contain extra metadata about - // the decoding. If this is nil, then no metadata will be tracked. - Metadata *Metadata - - // Result is a pointer to the struct that will contain the decoded - // value. - Result interface{} - - // The tag name that mapstructure reads for field names. This - // defaults to "mapstructure" - TagName string -} - -// A Decoder takes a raw interface value and turns it into structured -// data, keeping track of rich error information along the way in case -// anything goes wrong. Unlike the basic top-level Decode method, you can -// more finely control how the Decoder behaves using the DecoderConfig -// structure. The top-level Decode method is just a convenience that sets -// up the most basic Decoder. -type Decoder struct { - config *DecoderConfig -} - -// Metadata contains information about decoding a structure that -// is tedious or difficult to get otherwise. -type Metadata struct { - // Keys are the keys of the structure which were successfully decoded - Keys []string - - // Unused is a slice of keys that were found in the raw value but - // weren't decoded since there was no matching field in the result interface - Unused []string -} - -// Decode takes an input structure and uses reflection to translate it to -// the output structure. output must be a pointer to a map or struct. -func Decode(input interface{}, output interface{}) error { - config := &DecoderConfig{ - Metadata: nil, - Result: output, - } - - decoder, err := NewDecoder(config) - if err != nil { - return err - } - - return decoder.Decode(input) -} - -// WeakDecode is the same as Decode but is shorthand to enable -// WeaklyTypedInput. See DecoderConfig for more info. -func WeakDecode(input, output interface{}) error { - config := &DecoderConfig{ - Metadata: nil, - Result: output, - WeaklyTypedInput: true, - } - - decoder, err := NewDecoder(config) - if err != nil { - return err - } - - return decoder.Decode(input) -} - -// DecodeMetadata is the same as Decode, but is shorthand to -// enable metadata collection. See DecoderConfig for more info. 
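As an aside, a hypothetical sketch of the weak conversions listed above (the `Flags` struct and values are invented): `WeakDecode` is all it takes to absorb loosely typed input:

```go
package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

type Flags struct {
	Port    int  `mapstructure:"port"`
	Verbose bool `mapstructure:"verbose"`
}

func main() {
	input := map[string]interface{}{
		"port":    "8080", // string -> int, base implied by prefix
		"verbose": 1,      // int -> bool, true if value != 0
	}

	var f Flags
	if err := mapstructure.WeakDecode(input, &f); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", f) // {Port:8080 Verbose:true}
}
```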
-func DecodeMetadata(input interface{}, output interface{}, metadata *Metadata) error { - config := &DecoderConfig{ - Metadata: metadata, - Result: output, - } - - decoder, err := NewDecoder(config) - if err != nil { - return err - } - - return decoder.Decode(input) -} - -// WeakDecodeMetadata is the same as Decode, but is shorthand to -// enable both WeaklyTypedInput and metadata collection. See -// DecoderConfig for more info. -func WeakDecodeMetadata(input interface{}, output interface{}, metadata *Metadata) error { - config := &DecoderConfig{ - Metadata: metadata, - Result: output, - WeaklyTypedInput: true, - } - - decoder, err := NewDecoder(config) - if err != nil { - return err - } - - return decoder.Decode(input) -} - -// NewDecoder returns a new decoder for the given configuration. Once -// a decoder has been returned, the same configuration must not be used -// again. -func NewDecoder(config *DecoderConfig) (*Decoder, error) { - val := reflect.ValueOf(config.Result) - if val.Kind() != reflect.Ptr { - return nil, errors.New("result must be a pointer") - } - - val = val.Elem() - if !val.CanAddr() { - return nil, errors.New("result must be addressable (a pointer)") - } - - if config.Metadata != nil { - if config.Metadata.Keys == nil { - config.Metadata.Keys = make([]string, 0) - } - - if config.Metadata.Unused == nil { - config.Metadata.Unused = make([]string, 0) - } - } - - if config.TagName == "" { - config.TagName = "mapstructure" - } - - result := &Decoder{ - config: config, - } - - return result, nil -} - -// Decode decodes the given raw interface to the target pointer specified -// by the configuration. -func (d *Decoder) Decode(input interface{}) error { - return d.decode("", input, reflect.ValueOf(d.config.Result).Elem()) -} - -// Decodes an unknown data type into a specific reflection value. -func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) error { - if input == nil { - // If the data is nil, then we don't set anything, unless ZeroFields is set - // to true. - if d.config.ZeroFields { - outVal.Set(reflect.Zero(outVal.Type())) - - if d.config.Metadata != nil && name != "" { - d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) - } - } - return nil - } - - inputVal := reflect.ValueOf(input) - if !inputVal.IsValid() { - // If the input value is invalid, then we just set the value - // to be the zero value. - outVal.Set(reflect.Zero(outVal.Type())) - if d.config.Metadata != nil && name != "" { - d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) - } - return nil - } - - if d.config.DecodeHook != nil { - // We have a DecodeHook, so let's pre-process the input. 
- var err error - input, err = DecodeHookExec( - d.config.DecodeHook, - inputVal.Type(), outVal.Type(), input) - if err != nil { - return fmt.Errorf("error decoding '%s': %s", name, err) - } - } - - var err error - inputKind := getKind(outVal) - switch inputKind { - case reflect.Bool: - err = d.decodeBool(name, input, outVal) - case reflect.Interface: - err = d.decodeBasic(name, input, outVal) - case reflect.String: - err = d.decodeString(name, input, outVal) - case reflect.Int: - err = d.decodeInt(name, input, outVal) - case reflect.Uint: - err = d.decodeUint(name, input, outVal) - case reflect.Float32: - err = d.decodeFloat(name, input, outVal) - case reflect.Struct: - err = d.decodeStruct(name, input, outVal) - case reflect.Map: - err = d.decodeMap(name, input, outVal) - case reflect.Ptr: - err = d.decodePtr(name, input, outVal) - case reflect.Slice: - err = d.decodeSlice(name, input, outVal) - case reflect.Array: - err = d.decodeArray(name, input, outVal) - case reflect.Func: - err = d.decodeFunc(name, input, outVal) - default: - // If we reached this point then we weren't able to decode it - return fmt.Errorf("%s: unsupported type: %s", name, inputKind) - } - - // If we reached here, then we successfully decoded SOMETHING, so - // mark the key as used if we're tracking metainput. - if d.config.Metadata != nil && name != "" { - d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) - } - - return err -} - -// This decodes a basic type (bool, int, string, etc.) and sets the -// value to "data" of that type. -func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value) error { - if val.IsValid() && val.Elem().IsValid() { - return d.decode(name, data, val.Elem()) - } - dataVal := reflect.ValueOf(data) - if !dataVal.IsValid() { - dataVal = reflect.Zero(val.Type()) - } - - dataValType := dataVal.Type() - if !dataValType.AssignableTo(val.Type()) { - return fmt.Errorf( - "'%s' expected type '%s', got '%s'", - name, val.Type(), dataValType) - } - - val.Set(dataVal) - return nil -} - -func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.ValueOf(data) - dataKind := getKind(dataVal) - - converted := true - switch { - case dataKind == reflect.String: - val.SetString(dataVal.String()) - case dataKind == reflect.Bool && d.config.WeaklyTypedInput: - if dataVal.Bool() { - val.SetString("1") - } else { - val.SetString("0") - } - case dataKind == reflect.Int && d.config.WeaklyTypedInput: - val.SetString(strconv.FormatInt(dataVal.Int(), 10)) - case dataKind == reflect.Uint && d.config.WeaklyTypedInput: - val.SetString(strconv.FormatUint(dataVal.Uint(), 10)) - case dataKind == reflect.Float32 && d.config.WeaklyTypedInput: - val.SetString(strconv.FormatFloat(dataVal.Float(), 'f', -1, 64)) - case dataKind == reflect.Slice && d.config.WeaklyTypedInput, - dataKind == reflect.Array && d.config.WeaklyTypedInput: - dataType := dataVal.Type() - elemKind := dataType.Elem().Kind() - switch elemKind { - case reflect.Uint8: - var uints []uint8 - if dataKind == reflect.Array { - uints = make([]uint8, dataVal.Len(), dataVal.Len()) - for i := range uints { - uints[i] = dataVal.Index(i).Interface().(uint8) - } - } else { - uints = dataVal.Interface().([]uint8) - } - val.SetString(string(uints)) - default: - converted = false - } - default: - converted = false - } - - if !converted { - return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s'", - name, val.Type(), dataVal.Type()) - } - - return nil -} - -func (d *Decoder) 
decodeInt(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.ValueOf(data) - dataKind := getKind(dataVal) - dataType := dataVal.Type() - - switch { - case dataKind == reflect.Int: - val.SetInt(dataVal.Int()) - case dataKind == reflect.Uint: - val.SetInt(int64(dataVal.Uint())) - case dataKind == reflect.Float32: - val.SetInt(int64(dataVal.Float())) - case dataKind == reflect.Bool && d.config.WeaklyTypedInput: - if dataVal.Bool() { - val.SetInt(1) - } else { - val.SetInt(0) - } - case dataKind == reflect.String && d.config.WeaklyTypedInput: - i, err := strconv.ParseInt(dataVal.String(), 0, val.Type().Bits()) - if err == nil { - val.SetInt(i) - } else { - return fmt.Errorf("cannot parse '%s' as int: %s", name, err) - } - case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": - jn := data.(json.Number) - i, err := jn.Int64() - if err != nil { - return fmt.Errorf( - "error decoding json.Number into %s: %s", name, err) - } - val.SetInt(i) - default: - return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s'", - name, val.Type(), dataVal.Type()) - } - - return nil -} - -func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.ValueOf(data) - dataKind := getKind(dataVal) - - switch { - case dataKind == reflect.Int: - i := dataVal.Int() - if i < 0 && !d.config.WeaklyTypedInput { - return fmt.Errorf("cannot parse '%s', %d overflows uint", - name, i) - } - val.SetUint(uint64(i)) - case dataKind == reflect.Uint: - val.SetUint(dataVal.Uint()) - case dataKind == reflect.Float32: - f := dataVal.Float() - if f < 0 && !d.config.WeaklyTypedInput { - return fmt.Errorf("cannot parse '%s', %f overflows uint", - name, f) - } - val.SetUint(uint64(f)) - case dataKind == reflect.Bool && d.config.WeaklyTypedInput: - if dataVal.Bool() { - val.SetUint(1) - } else { - val.SetUint(0) - } - case dataKind == reflect.String && d.config.WeaklyTypedInput: - i, err := strconv.ParseUint(dataVal.String(), 0, val.Type().Bits()) - if err == nil { - val.SetUint(i) - } else { - return fmt.Errorf("cannot parse '%s' as uint: %s", name, err) - } - default: - return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s'", - name, val.Type(), dataVal.Type()) - } - - return nil -} - -func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.ValueOf(data) - dataKind := getKind(dataVal) - - switch { - case dataKind == reflect.Bool: - val.SetBool(dataVal.Bool()) - case dataKind == reflect.Int && d.config.WeaklyTypedInput: - val.SetBool(dataVal.Int() != 0) - case dataKind == reflect.Uint && d.config.WeaklyTypedInput: - val.SetBool(dataVal.Uint() != 0) - case dataKind == reflect.Float32 && d.config.WeaklyTypedInput: - val.SetBool(dataVal.Float() != 0) - case dataKind == reflect.String && d.config.WeaklyTypedInput: - b, err := strconv.ParseBool(dataVal.String()) - if err == nil { - val.SetBool(b) - } else if dataVal.String() == "" { - val.SetBool(false) - } else { - return fmt.Errorf("cannot parse '%s' as bool: %s", name, err) - } - default: - return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s'", - name, val.Type(), dataVal.Type()) - } - - return nil -} - -func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.ValueOf(data) - dataKind := getKind(dataVal) - dataType := dataVal.Type() - - switch { - case dataKind == reflect.Int: - val.SetFloat(float64(dataVal.Int())) - case dataKind == 
reflect.Uint: - val.SetFloat(float64(dataVal.Uint())) - case dataKind == reflect.Float32: - val.SetFloat(dataVal.Float()) - case dataKind == reflect.Bool && d.config.WeaklyTypedInput: - if dataVal.Bool() { - val.SetFloat(1) - } else { - val.SetFloat(0) - } - case dataKind == reflect.String && d.config.WeaklyTypedInput: - f, err := strconv.ParseFloat(dataVal.String(), val.Type().Bits()) - if err == nil { - val.SetFloat(f) - } else { - return fmt.Errorf("cannot parse '%s' as float: %s", name, err) - } - case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": - jn := data.(json.Number) - i, err := jn.Float64() - if err != nil { - return fmt.Errorf( - "error decoding json.Number into %s: %s", name, err) - } - val.SetFloat(i) - default: - return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s'", - name, val.Type(), dataVal.Type()) - } - - return nil -} - -func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) error { - valType := val.Type() - valKeyType := valType.Key() - valElemType := valType.Elem() - - // By default we overwrite keys in the current map - valMap := val - - // If the map is nil or we're purposely zeroing fields, make a new map - if valMap.IsNil() || d.config.ZeroFields { - // Make a new map to hold our result - mapType := reflect.MapOf(valKeyType, valElemType) - valMap = reflect.MakeMap(mapType) - } - - // Check input type and based on the input type jump to the proper func - dataVal := reflect.Indirect(reflect.ValueOf(data)) - switch dataVal.Kind() { - case reflect.Map: - return d.decodeMapFromMap(name, dataVal, val, valMap) - - case reflect.Struct: - return d.decodeMapFromStruct(name, dataVal, val, valMap) - - case reflect.Array, reflect.Slice: - if d.config.WeaklyTypedInput { - return d.decodeMapFromSlice(name, dataVal, val, valMap) - } - - fallthrough - - default: - return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind()) - } -} - -func (d *Decoder) decodeMapFromSlice(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { - // Special case for BC reasons (covered by tests) - if dataVal.Len() == 0 { - val.Set(valMap) - return nil - } - - for i := 0; i < dataVal.Len(); i++ { - err := d.decode( - fmt.Sprintf("%s[%d]", name, i), - dataVal.Index(i).Interface(), val) - if err != nil { - return err - } - } - - return nil -} - -func (d *Decoder) decodeMapFromMap(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { - valType := val.Type() - valKeyType := valType.Key() - valElemType := valType.Elem() - - // Accumulate errors - errors := make([]string, 0) - - for _, k := range dataVal.MapKeys() { - fieldName := fmt.Sprintf("%s[%s]", name, k) - - // First decode the key into the proper type - currentKey := reflect.Indirect(reflect.New(valKeyType)) - if err := d.decode(fieldName, k.Interface(), currentKey); err != nil { - errors = appendErrors(errors, err) - continue - } - - // Next decode the data into the proper type - v := dataVal.MapIndex(k).Interface() - currentVal := reflect.Indirect(reflect.New(valElemType)) - if err := d.decode(fieldName, v, currentVal); err != nil { - errors = appendErrors(errors, err) - continue - } - - valMap.SetMapIndex(currentKey, currentVal) - } - - // Set the built up map to the value - val.Set(valMap) - - // If we had errors, return those - if len(errors) > 0 { - return &Error{errors} - } - - return nil -} - -func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val reflect.Value, valMap 
reflect.Value) error { - typ := dataVal.Type() - for i := 0; i < typ.NumField(); i++ { - // Get the StructField first since this is a cheap operation. If the - // field is unexported, then ignore it. - f := typ.Field(i) - if f.PkgPath != "" { - continue - } - - // Next get the actual value of this field and verify it is assignable - // to the map value. - v := dataVal.Field(i) - if !v.Type().AssignableTo(valMap.Type().Elem()) { - return fmt.Errorf("cannot assign type '%s' to map value field of type '%s'", v.Type(), valMap.Type().Elem()) - } - - tagValue := f.Tag.Get(d.config.TagName) - tagParts := strings.Split(tagValue, ",") - - // Determine the name of the key in the map - keyName := f.Name - if tagParts[0] != "" { - if tagParts[0] == "-" { - continue - } - keyName = tagParts[0] - } - - // If "squash" is specified in the tag, we squash the field down. - squash := false - for _, tag := range tagParts[1:] { - if tag == "squash" { - squash = true - break - } - } - if squash && v.Kind() != reflect.Struct { - return fmt.Errorf("cannot squash non-struct type '%s'", v.Type()) - } - - switch v.Kind() { - // this is an embedded struct, so handle it differently - case reflect.Struct: - x := reflect.New(v.Type()) - x.Elem().Set(v) - - vType := valMap.Type() - vKeyType := vType.Key() - vElemType := vType.Elem() - mType := reflect.MapOf(vKeyType, vElemType) - vMap := reflect.MakeMap(mType) - - err := d.decode(keyName, x.Interface(), vMap) - if err != nil { - return err - } - - if squash { - for _, k := range vMap.MapKeys() { - valMap.SetMapIndex(k, vMap.MapIndex(k)) - } - } else { - valMap.SetMapIndex(reflect.ValueOf(keyName), vMap) - } - - default: - valMap.SetMapIndex(reflect.ValueOf(keyName), v) - } - } - - if val.CanAddr() { - val.Set(valMap) - } - - return nil -} - -func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) error { - // Create an element of the concrete (non pointer) type and decode - // into that. Then set the value of the pointer to this type. - valType := val.Type() - valElemType := valType.Elem() - - if val.CanSet() { - realVal := val - if realVal.IsNil() || d.config.ZeroFields { - realVal = reflect.New(valElemType) - } - - if err := d.decode(name, data, reflect.Indirect(realVal)); err != nil { - return err - } - - val.Set(realVal) - } else { - if err := d.decode(name, data, reflect.Indirect(val)); err != nil { - return err - } - } - return nil -} - -func (d *Decoder) decodeFunc(name string, data interface{}, val reflect.Value) error { - // Create an element of the concrete (non pointer) type and decode - // into that. Then set the value of the pointer to this type. 
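To make the `decodeMapFromStruct` path above concrete, here is a hypothetical sketch (the `Base` and `Event` types are invented): decoding between a struct and a map flattens `,squash`-tagged embedded structs in both directions:

```go
package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

type Base struct {
	ID string `mapstructure:"id"`
}

type Event struct {
	Base `mapstructure:",squash"` // merge Base's keys into the top level
	Name string `mapstructure:"name"`
}

func main() {
	// Struct -> map: the embedded Base is squashed into the parent keys.
	out := map[string]interface{}{}
	if err := mapstructure.Decode(Event{Base: Base{ID: "e-1"}, Name: "created"}, &out); err != nil {
		panic(err)
	}
	fmt.Println(out) // map[id:e-1 name:created]

	// Map -> struct: the flat input populates the embedded struct's fields.
	var ev Event
	if err := mapstructure.Decode(map[string]interface{}{"id": "e-1", "name": "created"}, &ev); err != nil {
		panic(err)
	}
	fmt.Println(ev.ID, ev.Name) // e-1 created
}
```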
- dataVal := reflect.Indirect(reflect.ValueOf(data)) - if val.Type() != dataVal.Type() { - return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s'", - name, val.Type(), dataVal.Type()) - } - val.Set(dataVal) - return nil -} - -func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.Indirect(reflect.ValueOf(data)) - dataValKind := dataVal.Kind() - valType := val.Type() - valElemType := valType.Elem() - sliceType := reflect.SliceOf(valElemType) - - valSlice := val - if valSlice.IsNil() || d.config.ZeroFields { - // Check input type - if dataValKind != reflect.Array && dataValKind != reflect.Slice { - if d.config.WeaklyTypedInput { - switch { - // Empty maps turn into empty slices - case dataValKind == reflect.Map: - if dataVal.Len() == 0 { - val.Set(reflect.MakeSlice(sliceType, 0, 0)) - return nil - } - // Create slice of maps of other sizes - return d.decodeSlice(name, []interface{}{data}, val) - - case dataValKind == reflect.String && valElemType.Kind() == reflect.Uint8: - return d.decodeSlice(name, []byte(dataVal.String()), val) - // All other types we try to convert to the slice type - // and "lift" it into it. i.e. a string becomes a string slice. - default: - // Just re-try this function with data as a slice. - return d.decodeSlice(name, []interface{}{data}, val) - } - } - return fmt.Errorf( - "'%s': source data must be an array or slice, got %s", name, dataValKind) - - } - - // Make a new slice to hold our result, same size as the original data. - valSlice = reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len()) - } - - // Accumulate any errors - errors := make([]string, 0) - - for i := 0; i < dataVal.Len(); i++ { - currentData := dataVal.Index(i).Interface() - for valSlice.Len() <= i { - valSlice = reflect.Append(valSlice, reflect.Zero(valElemType)) - } - currentField := valSlice.Index(i) - - fieldName := fmt.Sprintf("%s[%d]", name, i) - if err := d.decode(fieldName, currentData, currentField); err != nil { - errors = appendErrors(errors, err) - } - } - - // Finally, set the value to the slice we built up - val.Set(valSlice) - - // If there were errors, we return those - if len(errors) > 0 { - return &Error{errors} - } - - return nil -} - -func (d *Decoder) decodeArray(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.Indirect(reflect.ValueOf(data)) - dataValKind := dataVal.Kind() - valType := val.Type() - valElemType := valType.Elem() - arrayType := reflect.ArrayOf(valType.Len(), valElemType) - - valArray := val - - if valArray.Interface() == reflect.Zero(valArray.Type()).Interface() || d.config.ZeroFields { - // Check input type - if dataValKind != reflect.Array && dataValKind != reflect.Slice { - if d.config.WeaklyTypedInput { - switch { - // Empty maps turn into empty arrays - case dataValKind == reflect.Map: - if dataVal.Len() == 0 { - val.Set(reflect.Zero(arrayType)) - return nil - } - - // All other types we try to convert to the array type - // and "lift" it into it. i.e. a string becomes a string array. - default: - // Just re-try this function with data as a slice. 
- return d.decodeArray(name, []interface{}{data}, val) - } - } - - return fmt.Errorf( - "'%s': source data must be an array or slice, got %s", name, dataValKind) - - } - if dataVal.Len() > arrayType.Len() { - return fmt.Errorf( - "'%s': expected source data to have length less or equal to %d, got %d", name, arrayType.Len(), dataVal.Len()) - - } - - // Make a new array to hold our result, same size as the original data. - valArray = reflect.New(arrayType).Elem() - } - - // Accumulate any errors - errors := make([]string, 0) - - for i := 0; i < dataVal.Len(); i++ { - currentData := dataVal.Index(i).Interface() - currentField := valArray.Index(i) - - fieldName := fmt.Sprintf("%s[%d]", name, i) - if err := d.decode(fieldName, currentData, currentField); err != nil { - errors = appendErrors(errors, err) - } - } - - // Finally, set the value to the array we built up - val.Set(valArray) - - // If there were errors, we return those - if len(errors) > 0 { - return &Error{errors} - } - - return nil -} - -func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.Indirect(reflect.ValueOf(data)) - - // If the type of the value to write to and the data match directly, - // then we just set it directly instead of recursing into the structure. - if dataVal.Type() == val.Type() { - val.Set(dataVal) - return nil - } - - dataValKind := dataVal.Kind() - if dataValKind != reflect.Map { - return fmt.Errorf("'%s' expected a map, got '%s'", name, dataValKind) - } - - dataValType := dataVal.Type() - if kind := dataValType.Key().Kind(); kind != reflect.String && kind != reflect.Interface { - return fmt.Errorf( - "'%s' needs a map with string keys, has '%s' keys", - name, dataValType.Key().Kind()) - } - - dataValKeys := make(map[reflect.Value]struct{}) - dataValKeysUnused := make(map[interface{}]struct{}) - for _, dataValKey := range dataVal.MapKeys() { - dataValKeys[dataValKey] = struct{}{} - dataValKeysUnused[dataValKey.Interface()] = struct{}{} - } - - errors := make([]string, 0) - - // This slice will keep track of all the structs we'll be decoding. - // There can be more than one struct if there are embedded structs - // that are squashed. - structs := make([]reflect.Value, 1, 5) - structs[0] = val - - // Compile the list of all the fields that we're going to be decoding - // from all the structs. - type field struct { - field reflect.StructField - val reflect.Value - } - fields := []field{} - for len(structs) > 0 { - structVal := structs[0] - structs = structs[1:] - - structType := structVal.Type() - - for i := 0; i < structType.NumField(); i++ { - fieldType := structType.Field(i) - fieldKind := fieldType.Type.Kind() - - // If "squash" is specified in the tag, we squash the field down. 
- squash := false - tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",") - for _, tag := range tagParts[1:] { - if tag == "squash" { - squash = true - break - } - } - - if squash { - if fieldKind != reflect.Struct { - errors = appendErrors(errors, - fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldKind)) - } else { - structs = append(structs, structVal.FieldByName(fieldType.Name)) - } - continue - } - - // Normal struct field, store it away - fields = append(fields, field{fieldType, structVal.Field(i)}) - } - } - - // for fieldType, field := range fields { - for _, f := range fields { - field, fieldValue := f.field, f.val - fieldName := field.Name - - tagValue := field.Tag.Get(d.config.TagName) - tagValue = strings.SplitN(tagValue, ",", 2)[0] - if tagValue != "" { - fieldName = tagValue - } - - rawMapKey := reflect.ValueOf(fieldName) - rawMapVal := dataVal.MapIndex(rawMapKey) - if !rawMapVal.IsValid() { - // Do a slower search by iterating over each key and - // doing case-insensitive search. - for dataValKey := range dataValKeys { - mK, ok := dataValKey.Interface().(string) - if !ok { - // Not a string key - continue - } - - if strings.EqualFold(mK, fieldName) { - rawMapKey = dataValKey - rawMapVal = dataVal.MapIndex(dataValKey) - break - } - } - - if !rawMapVal.IsValid() { - // There was no matching key in the map for the value in - // the struct. Just ignore. - continue - } - } - - // Delete the key we're using from the unused map so we stop tracking - delete(dataValKeysUnused, rawMapKey.Interface()) - - if !fieldValue.IsValid() { - // This should never happen - panic("field is not valid") - } - - // If we can't set the field, then it is unexported or something, - // and we just continue onwards. - if !fieldValue.CanSet() { - continue - } - - // If the name is empty string, then we're at the root, and we - // don't dot-join the fields. 
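The unused-key bookkeeping maintained just below surfaces through `Metadata`; a short hypothetical sketch (the `Server` type and input keys are invented):

```go
package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

type Server struct {
	Host string `mapstructure:"host"`
}

func main() {
	input := map[string]interface{}{
		"host":      "localhost",
		"typo_port": 8080, // no matching field on Server
	}

	var s Server
	var md mapstructure.Metadata
	if err := mapstructure.DecodeMetadata(input, &s, &md); err != nil {
		panic(err)
	}
	fmt.Println(md.Keys)   // [host]
	fmt.Println(md.Unused) // [typo_port]
}
```

Setting `ErrorUnused: true` in `DecoderConfig` turns the same leftover keys into a decode error instead of metadata.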
- if name != "" { - fieldName = fmt.Sprintf("%s.%s", name, fieldName) - } - - if err := d.decode(fieldName, rawMapVal.Interface(), fieldValue); err != nil { - errors = appendErrors(errors, err) - } - } - - if d.config.ErrorUnused && len(dataValKeysUnused) > 0 { - keys := make([]string, 0, len(dataValKeysUnused)) - for rawKey := range dataValKeysUnused { - keys = append(keys, rawKey.(string)) - } - sort.Strings(keys) - - err := fmt.Errorf("'%s' has invalid keys: %s", name, strings.Join(keys, ", ")) - errors = appendErrors(errors, err) - } - - if len(errors) > 0 { - return &Error{errors} - } - - // Add the unused keys to the list of unused keys if we're tracking metadata - if d.config.Metadata != nil { - for rawKey := range dataValKeysUnused { - key := rawKey.(string) - if name != "" { - key = fmt.Sprintf("%s.%s", name, key) - } - - d.config.Metadata.Unused = append(d.config.Metadata.Unused, key) - } - } - - return nil -} - -func getKind(val reflect.Value) reflect.Kind { - kind := val.Kind() - - switch { - case kind >= reflect.Int && kind <= reflect.Int64: - return reflect.Int - case kind >= reflect.Uint && kind <= reflect.Uint64: - return reflect.Uint - case kind >= reflect.Float32 && kind <= reflect.Float64: - return reflect.Float32 - default: - return kind - } -} diff --git a/vendor/github.com/olekukonko/tablewriter/.gitignore b/vendor/github.com/olekukonko/tablewriter/.gitignore deleted file mode 100644 index b66cec6..0000000 --- a/vendor/github.com/olekukonko/tablewriter/.gitignore +++ /dev/null @@ -1,15 +0,0 @@ -# Created by .ignore support plugin (hsz.mobi) -### Go template -# Binaries for programs and plugins -*.exe -*.exe~ -*.dll -*.so -*.dylib - -# Test binary, build with `go test -c` -*.test - -# Output of the go coverage tool, specifically when used with LiteIDE -*.out - diff --git a/vendor/github.com/olekukonko/tablewriter/.travis.yml b/vendor/github.com/olekukonko/tablewriter/.travis.yml deleted file mode 100644 index f156b3b..0000000 --- a/vendor/github.com/olekukonko/tablewriter/.travis.yml +++ /dev/null @@ -1,12 +0,0 @@ -language: go - -go: - - 1.1 - - 1.2 - - 1.3 - - 1.4 - - 1.5 - - 1.6 - - 1.7 - - 1.8 - - tip diff --git a/vendor/github.com/olekukonko/tablewriter/LICENCE.md b/vendor/github.com/olekukonko/tablewriter/LICENCE.md deleted file mode 100644 index 1fd8484..0000000 --- a/vendor/github.com/olekukonko/tablewriter/LICENCE.md +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (C) 2014 by Oleku Konko - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
\ No newline at end of file diff --git a/vendor/github.com/olekukonko/tablewriter/README.md b/vendor/github.com/olekukonko/tablewriter/README.md deleted file mode 100644 index 59cb86c..0000000 --- a/vendor/github.com/olekukonko/tablewriter/README.md +++ /dev/null @@ -1,277 +0,0 @@ -ASCII Table Writer -========= - -[![Build Status](https://travis-ci.org/olekukonko/tablewriter.png?branch=master)](https://travis-ci.org/olekukonko/tablewriter) -[![Total views](https://img.shields.io/sourcegraph/rrc/github.com/olekukonko/tablewriter.svg)](https://sourcegraph.com/github.com/olekukonko/tablewriter) -[![Godoc](https://godoc.org/github.com/olekukonko/tablewriter?status.svg)](https://godoc.org/github.com/olekukonko/tablewriter) - -Generate ASCII table on the fly ... Installation is simple as - - go get github.com/olekukonko/tablewriter - - -#### Features -- Automatic Padding -- Support Multiple Lines -- Supports Alignment -- Support Custom Separators -- Automatic Alignment of numbers & percentage -- Write directly to http , file etc via `io.Writer` -- Read directly from CSV file -- Optional row line via `SetRowLine` -- Normalise table header -- Make CSV Headers optional -- Enable or disable table border -- Set custom footer support -- Optional identical cells merging -- Set custom caption -- Optional reflowing of paragrpahs in multi-line cells. - -#### Example 1 - Basic -```go -data := [][]string{ - []string{"A", "The Good", "500"}, - []string{"B", "The Very very Bad Man", "288"}, - []string{"C", "The Ugly", "120"}, - []string{"D", "The Gopher", "800"}, -} - -table := tablewriter.NewWriter(os.Stdout) -table.SetHeader([]string{"Name", "Sign", "Rating"}) - -for _, v := range data { - table.Append(v) -} -table.Render() // Send output -``` - -##### Output 1 -``` -+------+-----------------------+--------+ -| NAME | SIGN | RATING | -+------+-----------------------+--------+ -| A | The Good | 500 | -| B | The Very very Bad Man | 288 | -| C | The Ugly | 120 | -| D | The Gopher | 800 | -+------+-----------------------+--------+ -``` - -#### Example 2 - Without Border / Footer / Bulk Append -```go -data := [][]string{ - []string{"1/1/2014", "Domain name", "2233", "$10.98"}, - []string{"1/1/2014", "January Hosting", "2233", "$54.95"}, - []string{"1/4/2014", "February Hosting", "2233", "$51.00"}, - []string{"1/4/2014", "February Extra Bandwidth", "2233", "$30.00"}, -} - -table := tablewriter.NewWriter(os.Stdout) -table.SetHeader([]string{"Date", "Description", "CV2", "Amount"}) -table.SetFooter([]string{"", "", "Total", "$146.93"}) // Add Footer -table.SetBorder(false) // Set Border to false -table.AppendBulk(data) // Add Bulk Data -table.Render() -``` - -##### Output 2 -``` - - DATE | DESCRIPTION | CV2 | AMOUNT -+----------+--------------------------+-------+---------+ - 1/1/2014 | Domain name | 2233 | $10.98 - 1/1/2014 | January Hosting | 2233 | $54.95 - 1/4/2014 | February Hosting | 2233 | $51.00 - 1/4/2014 | February Extra Bandwidth | 2233 | $30.00 -+----------+--------------------------+-------+---------+ - TOTAL | $146 93 - +-------+---------+ - -``` - - -#### Example 3 - CSV -```go -table, _ := tablewriter.NewCSV(os.Stdout, "testdata/test_info.csv", true) -table.SetAlignment(tablewriter.ALIGN_LEFT) // Set Alignment -table.Render() -``` - -##### Output 3 -``` -+----------+--------------+------+-----+---------+----------------+ -| FIELD | TYPE | NULL | KEY | DEFAULT | EXTRA | -+----------+--------------+------+-----+---------+----------------+ -| user_id | smallint(5) | NO | PRI | NULL | auto_increment | 
-| username | varchar(10) | NO | | NULL | | -| password | varchar(100) | NO | | NULL | | -+----------+--------------+------+-----+---------+----------------+ -``` - -#### Example 4 - Custom Separator -```go -table, _ := tablewriter.NewCSV(os.Stdout, "testdata/test.csv", true) -table.SetRowLine(true) // Enable row line - -// Change table lines -table.SetCenterSeparator("*") -table.SetColumnSeparator("‡") -table.SetRowSeparator("-") - -table.SetAlignment(tablewriter.ALIGN_LEFT) -table.Render() -``` - -##### Output 4 -``` -*------------*-----------*---------* -╪ FIRST NAME ╪ LAST NAME ╪ SSN ╪ -*------------*-----------*---------* -╪ John ╪ Barry ╪ 123456 ╪ -*------------*-----------*---------* -╪ Kathy ╪ Smith ╪ 687987 ╪ -*------------*-----------*---------* -╪ Bob ╪ McCornick ╪ 3979870 ╪ -*------------*-----------*---------* -``` - -#### Example 5 - Markdown Format -```go -data := [][]string{ - []string{"1/1/2014", "Domain name", "2233", "$10.98"}, - []string{"1/1/2014", "January Hosting", "2233", "$54.95"}, - []string{"1/4/2014", "February Hosting", "2233", "$51.00"}, - []string{"1/4/2014", "February Extra Bandwidth", "2233", "$30.00"}, -} - -table := tablewriter.NewWriter(os.Stdout) -table.SetHeader([]string{"Date", "Description", "CV2", "Amount"}) -table.SetBorders(tablewriter.Border{Left: true, Top: false, Right: true, Bottom: false}) -table.SetCenterSeparator("|") -table.AppendBulk(data) // Add Bulk Data -table.Render() -``` - -##### Output 5 -``` -| DATE | DESCRIPTION | CV2 | AMOUNT | -|----------|--------------------------|------|--------| -| 1/1/2014 | Domain name | 2233 | $10.98 | -| 1/1/2014 | January Hosting | 2233 | $54.95 | -| 1/4/2014 | February Hosting | 2233 | $51.00 | -| 1/4/2014 | February Extra Bandwidth | 2233 | $30.00 | -``` - -#### Example 6 - Identical cells merging -```go -data := [][]string{ - []string{"1/1/2014", "Domain name", "1234", "$10.98"}, - []string{"1/1/2014", "January Hosting", "2345", "$54.95"}, - []string{"1/4/2014", "February Hosting", "3456", "$51.00"}, - []string{"1/4/2014", "February Extra Bandwidth", "4567", "$30.00"}, -} - -table := tablewriter.NewWriter(os.Stdout) -table.SetHeader([]string{"Date", "Description", "CV2", "Amount"}) -table.SetFooter([]string{"", "", "Total", "$146.93"}) -table.SetAutoMergeCells(true) -table.SetRowLine(true) -table.AppendBulk(data) -table.Render() -``` - -##### Output 6 -``` -+----------+--------------------------+-------+---------+ -| DATE | DESCRIPTION | CV2 | AMOUNT | -+----------+--------------------------+-------+---------+ -| 1/1/2014 | Domain name | 1234 | $10.98 | -+ +--------------------------+-------+---------+ -| | January Hosting | 2345 | $54.95 | -+----------+--------------------------+-------+---------+ -| 1/4/2014 | February Hosting | 3456 | $51.00 | -+ +--------------------------+-------+---------+ -| | February Extra Bandwidth | 4567 | $30.00 | -+----------+--------------------------+-------+---------+ -| TOTAL | $146 93 | -+----------+--------------------------+-------+---------+ -``` - - -#### Table with color -```go -data := [][]string{ - []string{"1/1/2014", "Domain name", "2233", "$10.98"}, - []string{"1/1/2014", "January Hosting", "2233", "$54.95"}, - []string{"1/4/2014", "February Hosting", "2233", "$51.00"}, - []string{"1/4/2014", "February Extra Bandwidth", "2233", "$30.00"}, -} - -table := tablewriter.NewWriter(os.Stdout) -table.SetHeader([]string{"Date", "Description", "CV2", "Amount"}) -table.SetFooter([]string{"", "", "Total", "$146.93"}) // Add Footer -table.SetBorder(false) // Set 
Border to false - -table.SetHeaderColor(tablewriter.Colors{tablewriter.Bold, tablewriter.BgGreenColor}, - tablewriter.Colors{tablewriter.FgHiRedColor, tablewriter.Bold, tablewriter.BgBlackColor}, - tablewriter.Colors{tablewriter.BgRedColor, tablewriter.FgWhiteColor}, - tablewriter.Colors{tablewriter.BgCyanColor, tablewriter.FgWhiteColor}) - -table.SetColumnColor(tablewriter.Colors{tablewriter.Bold, tablewriter.FgHiBlackColor}, - tablewriter.Colors{tablewriter.Bold, tablewriter.FgHiRedColor}, - tablewriter.Colors{tablewriter.Bold, tablewriter.FgHiBlackColor}, - tablewriter.Colors{tablewriter.Bold, tablewriter.FgBlackColor}) - -table.SetFooterColor(tablewriter.Colors{}, tablewriter.Colors{}, - tablewriter.Colors{tablewriter.Bold}, - tablewriter.Colors{tablewriter.FgHiRedColor}) - -table.AppendBulk(data) -table.Render() -``` - -#### Table with color Output -![Table with Color](https://cloud.githubusercontent.com/assets/6460392/21101956/bbc7b356-c0a1-11e6-9f36-dba694746efc.png) - -#### Example 6 - Set table caption -```go -data := [][]string{ - []string{"A", "The Good", "500"}, - []string{"B", "The Very very Bad Man", "288"}, - []string{"C", "The Ugly", "120"}, - []string{"D", "The Gopher", "800"}, -} - -table := tablewriter.NewWriter(os.Stdout) -table.SetHeader([]string{"Name", "Sign", "Rating"}) -table.SetCaption(true, "Movie ratings.") - -for _, v := range data { - table.Append(v) -} -table.Render() // Send output -``` - -Note: Caption text will wrap with total width of rendered table. - -##### Output 6 -``` -+------+-----------------------+--------+ -| NAME | SIGN | RATING | -+------+-----------------------+--------+ -| A | The Good | 500 | -| B | The Very very Bad Man | 288 | -| C | The Ugly | 120 | -| D | The Gopher | 800 | -+------+-----------------------+--------+ -Movie ratings. -``` - -#### TODO -- ~~Import Directly from CSV~~ - `done` -- ~~Support for `SetFooter`~~ - `done` -- ~~Support for `SetBorder`~~ - `done` -- ~~Support table with uneven rows~~ - `done` -- ~~Support custom alignment~~ -- General Improvement & Optimisation -- `NewHTML` Parse table from HTML diff --git a/vendor/github.com/olekukonko/tablewriter/csv.go b/vendor/github.com/olekukonko/tablewriter/csv.go deleted file mode 100644 index 9887830..0000000 --- a/vendor/github.com/olekukonko/tablewriter/csv.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2014 Oleku Konko All rights reserved. -// Use of this source code is governed by a MIT -// license that can be found in the LICENSE file. - -// This module is a Table Writer API for the Go Programming Language. 
-// The protocols were written in pure Go and works on windows and unix systems - -package tablewriter - -import ( - "encoding/csv" - "io" - "os" -) - -// Start A new table by importing from a CSV file -// Takes io.Writer and csv File name -func NewCSV(writer io.Writer, fileName string, hasHeader bool) (*Table, error) { - file, err := os.Open(fileName) - if err != nil { - return &Table{}, err - } - defer file.Close() - csvReader := csv.NewReader(file) - t, err := NewCSVReader(writer, csvReader, hasHeader) - return t, err -} - -// Start a New Table Writer with csv.Reader -// This enables customisation such as reader.Comma = ';' -// See http://golang.org/src/pkg/encoding/csv/reader.go?s=3213:3671#L94 -func NewCSVReader(writer io.Writer, csvReader *csv.Reader, hasHeader bool) (*Table, error) { - t := NewWriter(writer) - if hasHeader { - // Read the first row - headers, err := csvReader.Read() - if err != nil { - return &Table{}, err - } - t.SetHeader(headers) - } - for { - record, err := csvReader.Read() - if err == io.EOF { - break - } else if err != nil { - return &Table{}, err - } - t.Append(record) - } - return t, nil -} diff --git a/vendor/github.com/olekukonko/tablewriter/table.go b/vendor/github.com/olekukonko/tablewriter/table.go deleted file mode 100644 index 6bbef96..0000000 --- a/vendor/github.com/olekukonko/tablewriter/table.go +++ /dev/null @@ -1,839 +0,0 @@ -// Copyright 2014 Oleku Konko All rights reserved. -// Use of this source code is governed by a MIT -// license that can be found in the LICENSE file. - -// This module is a Table Writer API for the Go Programming Language. -// The protocols were written in pure Go and works on windows and unix systems - -// Create & Generate text based table -package tablewriter - -import ( - "bytes" - "fmt" - "io" - "regexp" - "strings" -) - -const ( - MAX_ROW_WIDTH = 30 -) - -const ( - CENTER = "+" - ROW = "-" - COLUMN = "|" - SPACE = " " - NEWLINE = "\n" -) - -const ( - ALIGN_DEFAULT = iota - ALIGN_CENTER - ALIGN_RIGHT - ALIGN_LEFT -) - -var ( - decimal = regexp.MustCompile(`^-*\d*\.?\d*$`) - percent = regexp.MustCompile(`^-*\d*\.?\d*$%$`) -) - -type Border struct { - Left bool - Right bool - Top bool - Bottom bool -} - -type Table struct { - out io.Writer - rows [][]string - lines [][][]string - cs map[int]int - rs map[int]int - headers [][]string - footers [][]string - caption bool - captionText string - autoFmt bool - autoWrap bool - reflowText bool - mW int - pCenter string - pRow string - pColumn string - tColumn int - tRow int - hAlign int - fAlign int - align int - newLine string - rowLine bool - autoMergeCells bool - hdrLine bool - borders Border - colSize int - headerParams []string - columnsParams []string - footerParams []string - columnsAlign []int -} - -// Start New Table -// Take io.Writer Directly -func NewWriter(writer io.Writer) *Table { - t := &Table{ - out: writer, - rows: [][]string{}, - lines: [][][]string{}, - cs: make(map[int]int), - rs: make(map[int]int), - headers: [][]string{}, - footers: [][]string{}, - caption: false, - captionText: "Table caption.", - autoFmt: true, - autoWrap: true, - reflowText: true, - mW: MAX_ROW_WIDTH, - pCenter: CENTER, - pRow: ROW, - pColumn: COLUMN, - tColumn: -1, - tRow: -1, - hAlign: ALIGN_DEFAULT, - fAlign: ALIGN_DEFAULT, - align: ALIGN_DEFAULT, - newLine: NEWLINE, - rowLine: false, - hdrLine: true, - borders: Border{Left: true, Right: true, Bottom: true, Top: true}, - colSize: -1, - headerParams: []string{}, - columnsParams: []string{}, - footerParams: []string{}, - columnsAlign: 
[]int{}} - return t -} - -// Render table output -func (t *Table) Render() { - if t.borders.Top { - t.printLine(true) - } - t.printHeading() - if t.autoMergeCells { - t.printRowsMergeCells() - } else { - t.printRows() - } - if !t.rowLine && t.borders.Bottom { - t.printLine(true) - } - t.printFooter() - - if t.caption { - t.printCaption() - } -} - -const ( - headerRowIdx = -1 - footerRowIdx = -2 -) - -// Set table header -func (t *Table) SetHeader(keys []string) { - t.colSize = len(keys) - for i, v := range keys { - lines := t.parseDimension(v, i, headerRowIdx) - t.headers = append(t.headers, lines) - } -} - -// Set table Footer -func (t *Table) SetFooter(keys []string) { - //t.colSize = len(keys) - for i, v := range keys { - lines := t.parseDimension(v, i, footerRowIdx) - t.footers = append(t.footers, lines) - } -} - -// Set table Caption -func (t *Table) SetCaption(caption bool, captionText ...string) { - t.caption = caption - if len(captionText) == 1 { - t.captionText = captionText[0] - } -} - -// Turn header autoformatting on/off. Default is on (true). -func (t *Table) SetAutoFormatHeaders(auto bool) { - t.autoFmt = auto -} - -// Turn automatic multiline text adjustment on/off. Default is on (true). -func (t *Table) SetAutoWrapText(auto bool) { - t.autoWrap = auto -} - -// Turn automatic reflowing of multiline text when rewrapping. Default is on (true). -func (t *Table) SetReflowDuringAutoWrap(auto bool) { - t.reflowText = auto -} - -// Set the Default column width -func (t *Table) SetColWidth(width int) { - t.mW = width -} - -// Set the minimal width for a column -func (t *Table) SetColMinWidth(column int, width int) { - t.cs[column] = width -} - -// Set the Column Separator -func (t *Table) SetColumnSeparator(sep string) { - t.pColumn = sep -} - -// Set the Row Separator -func (t *Table) SetRowSeparator(sep string) { - t.pRow = sep -} - -// Set the center Separator -func (t *Table) SetCenterSeparator(sep string) { - t.pCenter = sep -} - -// Set Header Alignment -func (t *Table) SetHeaderAlignment(hAlign int) { - t.hAlign = hAlign -} - -// Set Footer Alignment -func (t *Table) SetFooterAlignment(fAlign int) { - t.fAlign = fAlign -} - -// Set Table Alignment -func (t *Table) SetAlignment(align int) { - t.align = align -} - -func (t *Table) SetColumnAlignment(keys []int) { - for _, v := range keys { - switch v { - case ALIGN_CENTER: - break - case ALIGN_LEFT: - break - case ALIGN_RIGHT: - break - default: - v = ALIGN_DEFAULT - } - t.columnsAlign = append(t.columnsAlign, v) - } -} - -// Set New Line -func (t *Table) SetNewLine(nl string) { - t.newLine = nl -} - -// Set Header Line -// This would enable / disable a line after the header -func (t *Table) SetHeaderLine(line bool) { - t.hdrLine = line -} - -// Set Row Line -// This would enable / disable a line on each row of the table -func (t *Table) SetRowLine(line bool) { - t.rowLine = line -} - -// Set Auto Merge Cells -// This would enable / disable the merge of cells with identical values -func (t *Table) SetAutoMergeCells(auto bool) { - t.autoMergeCells = auto -} - -// Set Table Border -// This would enable / disable line around the table -func (t *Table) SetBorder(border bool) { - t.SetBorders(Border{border, border, border, border}) -} - -func (t *Table) SetBorders(border Border) { - t.borders = border -} - -// Append row to table -func (t *Table) Append(row []string) { - rowSize := len(t.headers) - if rowSize > t.colSize { - t.colSize = rowSize - } - - n := len(t.lines) - line := [][]string{} - for i, v := range row { - - // Detect 
string width - // Detect String height - // Break strings into words - out := t.parseDimension(v, i, n) - - // Append broken words - line = append(line, out) - } - t.lines = append(t.lines, line) -} - -// Allow Support for Bulk Append -// Eliminates repeated for loops -func (t *Table) AppendBulk(rows [][]string) { - for _, row := range rows { - t.Append(row) - } -} - -// NumLines to get the number of lines -func (t *Table) NumLines() int { - return len(t.lines) -} - -// Clear rows -func (t *Table) ClearRows() { - t.lines = [][][]string{} -} - -// Clear footer -func (t *Table) ClearFooter() { - t.footers = [][]string{} -} - -// Print line based on row width -func (t *Table) printLine(nl bool) { - fmt.Fprint(t.out, t.pCenter) - for i := 0; i < len(t.cs); i++ { - v := t.cs[i] - fmt.Fprintf(t.out, "%s%s%s%s", - t.pRow, - strings.Repeat(string(t.pRow), v), - t.pRow, - t.pCenter) - } - if nl { - fmt.Fprint(t.out, t.newLine) - } -} - -// Print line based on row width with our without cell separator -func (t *Table) printLineOptionalCellSeparators(nl bool, displayCellSeparator []bool) { - fmt.Fprint(t.out, t.pCenter) - for i := 0; i < len(t.cs); i++ { - v := t.cs[i] - if i > len(displayCellSeparator) || displayCellSeparator[i] { - // Display the cell separator - fmt.Fprintf(t.out, "%s%s%s%s", - t.pRow, - strings.Repeat(string(t.pRow), v), - t.pRow, - t.pCenter) - } else { - // Don't display the cell separator for this cell - fmt.Fprintf(t.out, "%s%s", - strings.Repeat(" ", v+2), - t.pCenter) - } - } - if nl { - fmt.Fprint(t.out, t.newLine) - } -} - -// Return the PadRight function if align is left, PadLeft if align is right, -// and Pad by default -func pad(align int) func(string, string, int) string { - padFunc := Pad - switch align { - case ALIGN_LEFT: - padFunc = PadRight - case ALIGN_RIGHT: - padFunc = PadLeft - } - return padFunc -} - -// Print heading information -func (t *Table) printHeading() { - // Check if headers is available - if len(t.headers) < 1 { - return - } - - // Identify last column - end := len(t.cs) - 1 - - // Get pad function - padFunc := pad(t.hAlign) - - // Checking for ANSI escape sequences for header - is_esc_seq := false - if len(t.headerParams) > 0 { - is_esc_seq = true - } - - // Maximum height. - max := t.rs[headerRowIdx] - - // Print Heading - for x := 0; x < max; x++ { - // Check if border is set - // Replace with space if not set - fmt.Fprint(t.out, ConditionString(t.borders.Left, t.pColumn, SPACE)) - - for y := 0; y <= end; y++ { - v := t.cs[y] - h := "" - if y < len(t.headers) && x < len(t.headers[y]) { - h = t.headers[y][x] - } - if t.autoFmt { - h = Title(h) - } - pad := ConditionString((y == end && !t.borders.Left), SPACE, t.pColumn) - - if is_esc_seq { - fmt.Fprintf(t.out, " %s %s", - format(padFunc(h, SPACE, v), - t.headerParams[y]), pad) - } else { - fmt.Fprintf(t.out, " %s %s", - padFunc(h, SPACE, v), - pad) - } - } - // Next line - fmt.Fprint(t.out, t.newLine) - } - if t.hdrLine { - t.printLine(true) - } -} - -// Print heading information -func (t *Table) printFooter() { - // Check if headers is available - if len(t.footers) < 1 { - return - } - - // Only print line if border is not set - if !t.borders.Bottom { - t.printLine(true) - } - - // Identify last column - end := len(t.cs) - 1 - - // Get pad function - padFunc := pad(t.fAlign) - - // Checking for ANSI escape sequences for header - is_esc_seq := false - if len(t.footerParams) > 0 { - is_esc_seq = true - } - - // Maximum height. 
- max := t.rs[footerRowIdx] - - // Print Footer - erasePad := make([]bool, len(t.footers)) - for x := 0; x < max; x++ { - // Check if border is set - // Replace with space if not set - fmt.Fprint(t.out, ConditionString(t.borders.Bottom, t.pColumn, SPACE)) - - for y := 0; y <= end; y++ { - v := t.cs[y] - f := "" - if y < len(t.footers) && x < len(t.footers[y]) { - f = t.footers[y][x] - } - if t.autoFmt { - f = Title(f) - } - pad := ConditionString((y == end && !t.borders.Top), SPACE, t.pColumn) - - if erasePad[y] || (x == 0 && len(f) == 0) { - pad = SPACE - erasePad[y] = true - } - - if is_esc_seq { - fmt.Fprintf(t.out, " %s %s", - format(padFunc(f, SPACE, v), - t.footerParams[y]), pad) - } else { - fmt.Fprintf(t.out, " %s %s", - padFunc(f, SPACE, v), - pad) - } - - //fmt.Fprintf(t.out, " %s %s", - // padFunc(f, SPACE, v), - // pad) - } - // Next line - fmt.Fprint(t.out, t.newLine) - //t.printLine(true) - } - - hasPrinted := false - - for i := 0; i <= end; i++ { - v := t.cs[i] - pad := t.pRow - center := t.pCenter - length := len(t.footers[i][0]) - - if length > 0 { - hasPrinted = true - } - - // Set center to be space if length is 0 - if length == 0 && !t.borders.Right { - center = SPACE - } - - // Print first junction - if i == 0 { - fmt.Fprint(t.out, center) - } - - // Pad With space of length is 0 - if length == 0 { - pad = SPACE - } - // Ignore left space of it has printed before - if hasPrinted || t.borders.Left { - pad = t.pRow - center = t.pCenter - } - - // Change Center start position - if center == SPACE { - if i < end && len(t.footers[i+1][0]) != 0 { - center = t.pCenter - } - } - - // Print the footer - fmt.Fprintf(t.out, "%s%s%s%s", - pad, - strings.Repeat(string(pad), v), - pad, - center) - - } - - fmt.Fprint(t.out, t.newLine) -} - -// Print caption text -func (t Table) printCaption() { - width := t.getTableWidth() - paragraph, _ := WrapString(t.captionText, width) - for linecount := 0; linecount < len(paragraph); linecount++ { - fmt.Fprintln(t.out, paragraph[linecount]) - } -} - -// Calculate the total number of characters in a row -func (t Table) getTableWidth() int { - var chars int - for _, v := range t.cs { - chars += v - } - - // Add chars, spaces, seperators to calculate the total width of the table. 
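Concretely, for a three-column table whose recorded column widths are 4, 8 and 6, the expression returned just below evaluates to (4 + 8 + 6) + 3*3 + 2 = 29 characters: each column contributes its width plus two padding spaces and one separator, and the final 2 covers the outer border.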
- // ncols := t.colSize - // spaces := ncols * 2 - // seps := ncols + 1 - - return (chars + (3 * t.colSize) + 2) -} - -func (t Table) printRows() { - for i, lines := range t.lines { - t.printRow(lines, i) - } -} - -func (t *Table) fillAlignment(num int) { - if len(t.columnsAlign) < num { - t.columnsAlign = make([]int, num) - for i := range t.columnsAlign { - t.columnsAlign[i] = t.align - } - } -} - -// Print Row Information -// Adjust column alignment based on type - -func (t *Table) printRow(columns [][]string, rowIdx int) { - // Get Maximum Height - max := t.rs[rowIdx] - total := len(columns) - - // TODO Fix uneven col size - // if total < t.colSize { - // for n := t.colSize - total; n < t.colSize ; n++ { - // columns = append(columns, []string{SPACE}) - // t.cs[n] = t.mW - // } - //} - - // Pad Each Height - pads := []int{} - - // Checking for ANSI escape sequences for columns - is_esc_seq := false - if len(t.columnsParams) > 0 { - is_esc_seq = true - } - t.fillAlignment(total) - - for i, line := range columns { - length := len(line) - pad := max - length - pads = append(pads, pad) - for n := 0; n < pad; n++ { - columns[i] = append(columns[i], " ") - } - } - //fmt.Println(max, "\n") - for x := 0; x < max; x++ { - for y := 0; y < total; y++ { - - // Check if border is set - fmt.Fprint(t.out, ConditionString((!t.borders.Left && y == 0), SPACE, t.pColumn)) - - fmt.Fprintf(t.out, SPACE) - str := columns[y][x] - - // Embedding escape sequence with column value - if is_esc_seq { - str = format(str, t.columnsParams[y]) - } - - // This would print alignment - // Default alignment would use multiple configuration - switch t.columnsAlign[y] { - case ALIGN_CENTER: // - fmt.Fprintf(t.out, "%s", Pad(str, SPACE, t.cs[y])) - case ALIGN_RIGHT: - fmt.Fprintf(t.out, "%s", PadLeft(str, SPACE, t.cs[y])) - case ALIGN_LEFT: - fmt.Fprintf(t.out, "%s", PadRight(str, SPACE, t.cs[y])) - default: - if decimal.MatchString(strings.TrimSpace(str)) || percent.MatchString(strings.TrimSpace(str)) { - fmt.Fprintf(t.out, "%s", PadLeft(str, SPACE, t.cs[y])) - } else { - fmt.Fprintf(t.out, "%s", PadRight(str, SPACE, t.cs[y])) - - // TODO Custom alignment per column - //if max == 1 || pads[y] > 0 { - // fmt.Fprintf(t.out, "%s", Pad(str, SPACE, t.cs[y])) - //} else { - // fmt.Fprintf(t.out, "%s", PadRight(str, SPACE, t.cs[y])) - //} - - } - } - fmt.Fprintf(t.out, SPACE) - } - // Check if border is set - // Replace with space if not set - fmt.Fprint(t.out, ConditionString(t.borders.Left, t.pColumn, SPACE)) - fmt.Fprint(t.out, t.newLine) - } - - if t.rowLine { - t.printLine(true) - } -} - -// Print the rows of the table and merge the cells that are identical -func (t *Table) printRowsMergeCells() { - var previousLine []string - var displayCellBorder []bool - var tmpWriter bytes.Buffer - for i, lines := range t.lines { - // We store the display of the current line in a tmp writer, as we need to know which border needs to be print above - previousLine, displayCellBorder = t.printRowMergeCells(&tmpWriter, lines, i, previousLine) - if i > 0 { //We don't need to print borders above first line - if t.rowLine { - t.printLineOptionalCellSeparators(true, displayCellBorder) - } - } - tmpWriter.WriteTo(t.out) - } - //Print the end of the table - if t.rowLine { - t.printLine(true) - } -} - -// Print Row Information to a writer and merge identical cells. 
-// Adjust column alignment based on type - -func (t *Table) printRowMergeCells(writer io.Writer, columns [][]string, rowIdx int, previousLine []string) ([]string, []bool) { - // Get Maximum Height - max := t.rs[rowIdx] - total := len(columns) - - // Pad Each Height - pads := []int{} - - for i, line := range columns { - length := len(line) - pad := max - length - pads = append(pads, pad) - for n := 0; n < pad; n++ { - columns[i] = append(columns[i], " ") - } - } - - var displayCellBorder []bool - t.fillAlignment(total) - for x := 0; x < max; x++ { - for y := 0; y < total; y++ { - - // Check if border is set - fmt.Fprint(writer, ConditionString((!t.borders.Left && y == 0), SPACE, t.pColumn)) - - fmt.Fprintf(writer, SPACE) - - str := columns[y][x] - - if t.autoMergeCells { - //Store the full line to merge mutli-lines cells - fullLine := strings.Join(columns[y], " ") - if len(previousLine) > y && fullLine == previousLine[y] && fullLine != "" { - // If this cell is identical to the one above but not empty, we don't display the border and keep the cell empty. - displayCellBorder = append(displayCellBorder, false) - str = "" - } else { - // First line or different content, keep the content and print the cell border - displayCellBorder = append(displayCellBorder, true) - } - } - - // This would print alignment - // Default alignment would use multiple configuration - switch t.columnsAlign[y] { - case ALIGN_CENTER: // - fmt.Fprintf(writer, "%s", Pad(str, SPACE, t.cs[y])) - case ALIGN_RIGHT: - fmt.Fprintf(writer, "%s", PadLeft(str, SPACE, t.cs[y])) - case ALIGN_LEFT: - fmt.Fprintf(writer, "%s", PadRight(str, SPACE, t.cs[y])) - default: - if decimal.MatchString(strings.TrimSpace(str)) || percent.MatchString(strings.TrimSpace(str)) { - fmt.Fprintf(writer, "%s", PadLeft(str, SPACE, t.cs[y])) - } else { - fmt.Fprintf(writer, "%s", PadRight(str, SPACE, t.cs[y])) - } - } - fmt.Fprintf(writer, SPACE) - } - // Check if border is set - // Replace with space if not set - fmt.Fprint(writer, ConditionString(t.borders.Left, t.pColumn, SPACE)) - fmt.Fprint(writer, t.newLine) - } - - //The new previous line is the current one - previousLine = make([]string, total) - for y := 0; y < total; y++ { - previousLine[y] = strings.Join(columns[y], " ") //Store the full line for multi-lines cells - } - //Returns the newly added line and wether or not a border should be displayed above. - return previousLine, displayCellBorder -} - -func (t *Table) parseDimension(str string, colKey, rowKey int) []string { - var ( - raw []string - maxWidth int - ) - - raw = getLines(str) - maxWidth = 0 - for _, line := range raw { - if w := DisplayWidth(line); w > maxWidth { - maxWidth = w - } - } - - // If wrapping, ensure that all paragraphs in the cell fit in the - // specified width. - if t.autoWrap { - // If there's a maximum allowed width for wrapping, use that. - if maxWidth > t.mW { - maxWidth = t.mW - } - - // In the process of doing so, we need to recompute maxWidth. This - // is because perhaps a word in the cell is longer than the - // allowed maximum width in t.mW. - newMaxWidth := maxWidth - newRaw := make([]string, 0, len(raw)) - - if t.reflowText { - // Make a single paragraph of everything. - raw = []string{strings.Join(raw, " ")} - } - for i, para := range raw { - paraLines, _ := WrapString(para, maxWidth) - for _, line := range paraLines { - if w := DisplayWidth(line); w > newMaxWidth { - newMaxWidth = w - } - } - if i > 0 { - newRaw = append(newRaw, " ") - } - newRaw = append(newRaw, paraLines...) 
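In user terms, the wrapping that `parseDimension` performs further down is driven by `SetColWidth` and `SetReflowDuringAutoWrap`; a hypothetical sketch (header and cell text invented):

```go
package main

import (
	"os"

	"github.com/olekukonko/tablewriter"
)

func main() {
	table := tablewriter.NewWriter(os.Stdout)
	table.SetColWidth(12)                // cap cell width before wrapping kicks in
	table.SetReflowDuringAutoWrap(false) // keep the caller's line breaks instead of re-joining paragraphs
	table.SetHeader([]string{"Key", "Note"})
	table.Append([]string{"retry", "wait for the backoff interval, then retry the request"})
	table.Render()
}
```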
- } - raw = newRaw - maxWidth = newMaxWidth - } - - // Store the new known maximum width. - v, ok := t.cs[colKey] - if !ok || v < maxWidth || v == 0 { - t.cs[colKey] = maxWidth - } - - // Remember the number of lines for the row printer. - h := len(raw) - v, ok = t.rs[rowKey] - - if !ok || v < h || v == 0 { - t.rs[rowKey] = h - } - //fmt.Printf("Raw %+v %d\n", raw, len(raw)) - return raw -} diff --git a/vendor/github.com/olekukonko/tablewriter/table_with_color.go b/vendor/github.com/olekukonko/tablewriter/table_with_color.go deleted file mode 100644 index 5a4a53e..0000000 --- a/vendor/github.com/olekukonko/tablewriter/table_with_color.go +++ /dev/null @@ -1,134 +0,0 @@ -package tablewriter - -import ( - "fmt" - "strconv" - "strings" -) - -const ESC = "\033" -const SEP = ";" - -const ( - BgBlackColor int = iota + 40 - BgRedColor - BgGreenColor - BgYellowColor - BgBlueColor - BgMagentaColor - BgCyanColor - BgWhiteColor -) - -const ( - FgBlackColor int = iota + 30 - FgRedColor - FgGreenColor - FgYellowColor - FgBlueColor - FgMagentaColor - FgCyanColor - FgWhiteColor -) - -const ( - BgHiBlackColor int = iota + 100 - BgHiRedColor - BgHiGreenColor - BgHiYellowColor - BgHiBlueColor - BgHiMagentaColor - BgHiCyanColor - BgHiWhiteColor -) - -const ( - FgHiBlackColor int = iota + 90 - FgHiRedColor - FgHiGreenColor - FgHiYellowColor - FgHiBlueColor - FgHiMagentaColor - FgHiCyanColor - FgHiWhiteColor -) - -const ( - Normal = 0 - Bold = 1 - UnderlineSingle = 4 - Italic -) - -type Colors []int - -func startFormat(seq string) string { - return fmt.Sprintf("%s[%sm", ESC, seq) -} - -func stopFormat() string { - return fmt.Sprintf("%s[%dm", ESC, Normal) -} - -// Making the SGR (Select Graphic Rendition) sequence. -func makeSequence(codes []int) string { - codesInString := []string{} - for _, code := range codes { - codesInString = append(codesInString, strconv.Itoa(code)) - } - return strings.Join(codesInString, SEP) -} - -// Adding ANSI escape sequences before and after string -func format(s string, codes interface{}) string { - var seq string - - switch v := codes.(type) { - - case string: - seq = v - case []int: - seq = makeSequence(v) - default: - return s - } - - if len(seq) == 0 { - return s - } - return startFormat(seq) + s + stopFormat() -} - -// Adding header colors (ANSI codes) -func (t *Table) SetHeaderColor(colors ...Colors) { - if t.colSize != len(colors) { - panic("Number of header colors must be equal to number of headers.") - } - for i := 0; i < len(colors); i++ { - t.headerParams = append(t.headerParams, makeSequence(colors[i])) - } -} - -// Adding column colors (ANSI codes) -func (t *Table) SetColumnColor(colors ...Colors) { - if t.colSize != len(colors) { - panic("Number of column colors must be equal to number of headers.") - } - for i := 0; i < len(colors); i++ { - t.columnsParams = append(t.columnsParams, makeSequence(colors[i])) - } -} - -// Adding column colors (ANSI codes) -func (t *Table) SetFooterColor(colors ...Colors) { - if len(t.footers) != len(colors) { - panic("Number of footer colors must be equal to number of footer.") - } - for i := 0; i < len(colors); i++ { - t.footerParams = append(t.footerParams, makeSequence(colors[i])) - } -} - -func Color(colors ...int) []int { - return colors -} diff --git a/vendor/github.com/olekukonko/tablewriter/util.go b/vendor/github.com/olekukonko/tablewriter/util.go deleted file mode 100644 index 9e8f0cb..0000000 --- a/vendor/github.com/olekukonko/tablewriter/util.go +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright 2014 Oleku Konko All rights 
reserved. -// Use of this source code is governed by a MIT -// license that can be found in the LICENSE file. - -// This module is a Table Writer API for the Go Programming Language. -// The protocols were written in pure Go and works on windows and unix systems - -package tablewriter - -import ( - "math" - "regexp" - "strings" - - "github.com/mattn/go-runewidth" -) - -var ansi = regexp.MustCompile("\033\\[(?:[0-9]{1,3}(?:;[0-9]{1,3})*)?[m|K]") - -func DisplayWidth(str string) int { - return runewidth.StringWidth(ansi.ReplaceAllLiteralString(str, "")) -} - -// Simple Condition for string -// Returns value based on condition -func ConditionString(cond bool, valid, inValid string) string { - if cond { - return valid - } - return inValid -} - -func isNumOrSpace(r rune) bool { - return ('0' <= r && r <= '9') || r == ' ' -} - -// Format Table Header -// Replace _ , . and spaces -func Title(name string) string { - origLen := len(name) - rs := []rune(name) - for i, r := range rs { - switch r { - case '_': - rs[i] = ' ' - case '.': - // ignore floating number 0.0 - if (i != 0 && !isNumOrSpace(rs[i-1])) || (i != len(rs)-1 && !isNumOrSpace(rs[i+1])) { - rs[i] = ' ' - } - } - } - name = string(rs) - name = strings.TrimSpace(name) - if len(name) == 0 && origLen > 0 { - // Keep at least one character. This is important to preserve - // empty lines in multi-line headers/footers. - name = " " - } - return strings.ToUpper(name) -} - -// Pad String -// Attempts to play string in the center -func Pad(s, pad string, width int) string { - gap := width - DisplayWidth(s) - if gap > 0 { - gapLeft := int(math.Ceil(float64(gap / 2))) - gapRight := gap - gapLeft - return strings.Repeat(string(pad), gapLeft) + s + strings.Repeat(string(pad), gapRight) - } - return s -} - -// Pad String Right position -// This would pace string at the left side fo the screen -func PadRight(s, pad string, width int) string { - gap := width - DisplayWidth(s) - if gap > 0 { - return s + strings.Repeat(string(pad), gap) - } - return s -} - -// Pad String Left position -// This would pace string at the right side fo the screen -func PadLeft(s, pad string, width int) string { - gap := width - DisplayWidth(s) - if gap > 0 { - return strings.Repeat(string(pad), gap) + s - } - return s -} diff --git a/vendor/github.com/olekukonko/tablewriter/wrap.go b/vendor/github.com/olekukonko/tablewriter/wrap.go deleted file mode 100644 index a092ee1..0000000 --- a/vendor/github.com/olekukonko/tablewriter/wrap.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright 2014 Oleku Konko All rights reserved. -// Use of this source code is governed by a MIT -// license that can be found in the LICENSE file. - -// This module is a Table Writer API for the Go Programming Language. -// The protocols were written in pure Go and works on windows and unix systems - -package tablewriter - -import ( - "math" - "strings" - - "github.com/mattn/go-runewidth" -) - -var ( - nl = "\n" - sp = " " -) - -const defaultPenalty = 1e5 - -// Wrap wraps s into a paragraph of lines of length lim, with minimal -// raggedness. 
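// Editorial sketch (not part of the vendored source being removed): a
// hypothetical in-package call showing WrapString's two return values. The
// input string and limit are illustrative; the returned limit is raised
// whenever a single word is wider than the requested one.
func exampleWrapString() ([]string, int) {
	// Wrap into rows of at most 5 display cells.
	return WrapString("a bb ccc dddd", 5)
	// => []string{"a bb", "ccc", "dddd"}, 5
}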
-func WrapString(s string, lim int) ([]string, int) { - words := strings.Split(strings.Replace(s, nl, sp, -1), sp) - var lines []string - max := 0 - for _, v := range words { - max = runewidth.StringWidth(v) - if max > lim { - lim = max - } - } - for _, line := range WrapWords(words, 1, lim, defaultPenalty) { - lines = append(lines, strings.Join(line, sp)) - } - return lines, lim -} - -// WrapWords is the low-level line-breaking algorithm, useful if you need more -// control over the details of the text wrapping process. For most uses, -// WrapString will be sufficient and more convenient. -// -// WrapWords splits a list of words into lines with minimal "raggedness", -// treating each rune as one unit, accounting for spc units between adjacent -// words on each line, and attempting to limit lines to lim units. Raggedness -// is the total error over all lines, where error is the square of the -// difference of the length of the line and lim. Too-long lines (which only -// happen when a single word is longer than lim units) have pen penalty units -// added to the error. -func WrapWords(words []string, spc, lim, pen int) [][]string { - n := len(words) - - length := make([][]int, n) - for i := 0; i < n; i++ { - length[i] = make([]int, n) - length[i][i] = runewidth.StringWidth(words[i]) - for j := i + 1; j < n; j++ { - length[i][j] = length[i][j-1] + spc + runewidth.StringWidth(words[j]) - } - } - nbrk := make([]int, n) - cost := make([]int, n) - for i := range cost { - cost[i] = math.MaxInt32 - } - for i := n - 1; i >= 0; i-- { - if length[i][n-1] <= lim { - cost[i] = 0 - nbrk[i] = n - } else { - for j := i + 1; j < n; j++ { - d := lim - length[i][j-1] - c := d*d + cost[j] - if length[i][j-1] > lim { - c += pen // too-long lines get a worse penalty - } - if c < cost[i] { - cost[i] = c - nbrk[i] = j - } - } - } - } - var lines [][]string - i := 0 - for i < n { - lines = append(lines, words[i:nbrk[i]]) - i = nbrk[i] - } - return lines -} - -// getLines decomposes a multiline string into a slice of strings. -func getLines(s string) []string { - return strings.Split(s, nl) -} diff --git a/vendor/github.com/pkg/browser/LICENSE b/vendor/github.com/pkg/browser/LICENSE deleted file mode 100644 index 65f78fb..0000000 --- a/vendor/github.com/pkg/browser/LICENSE +++ /dev/null @@ -1,23 +0,0 @@ -Copyright (c) 2014, Dave Cheney -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/pkg/browser/README.md b/vendor/github.com/pkg/browser/README.md deleted file mode 100644 index 72b1976..0000000 --- a/vendor/github.com/pkg/browser/README.md +++ /dev/null @@ -1,55 +0,0 @@ - -# browser - import "github.com/pkg/browser" - -Package browser provides helpers to open files, readers, and urls in a browser window. - -The choice of which browser is started is entirely client dependant. - - - - - -## Variables -``` go -var Stderr io.Writer = os.Stderr -``` -Stderr is the io.Writer to which executed commands write standard error. - -``` go -var Stdout io.Writer = os.Stdout -``` -Stdout is the io.Writer to which executed commands write standard output. - - -## func OpenFile -``` go -func OpenFile(path string) error -``` -OpenFile opens new browser window for the file path. - - -## func OpenReader -``` go -func OpenReader(r io.Reader) error -``` -OpenReader consumes the contents of r and presents the -results in a new browser window. - - -## func OpenURL -``` go -func OpenURL(url string) error -``` -OpenURL opens a new browser window pointing to url. - - - - - - - - - -- - - -Generated by [godoc2md](http://godoc.org/github.com/davecheney/godoc2md) diff --git a/vendor/github.com/pkg/browser/browser.go b/vendor/github.com/pkg/browser/browser.go deleted file mode 100644 index d92c4cd..0000000 --- a/vendor/github.com/pkg/browser/browser.go +++ /dev/null @@ -1,62 +0,0 @@ -// Package browser provides helpers to open files, readers, and urls in a browser window. -// -// The choice of which browser is started is entirely client dependant. -package browser - -import ( - "fmt" - "io" - "io/ioutil" - "os" - "os/exec" - "path/filepath" -) - -// Stdout is the io.Writer to which executed commands write standard output. -var Stdout io.Writer = os.Stdout - -// Stderr is the io.Writer to which executed commands write standard error. -var Stderr io.Writer = os.Stderr - -// OpenFile opens new browser window for the file path. -func OpenFile(path string) error { - path, err := filepath.Abs(path) - if err != nil { - return err - } - return OpenURL("file://" + path) -} - -// OpenReader consumes the contents of r and presents the -// results in a new browser window. -func OpenReader(r io.Reader) error { - f, err := ioutil.TempFile("", "browser") - if err != nil { - return fmt.Errorf("browser: could not create temporary file: %v", err) - } - if _, err := io.Copy(f, r); err != nil { - f.Close() - return fmt.Errorf("browser: caching temporary file failed: %v", err) - } - if err := f.Close(); err != nil { - return fmt.Errorf("browser: caching temporary file failed: %v", err) - } - oldname := f.Name() - newname := oldname + ".html" - if err := os.Rename(oldname, newname); err != nil { - return fmt.Errorf("browser: renaming temporary file failed: %v", err) - } - return OpenFile(newname) -} - -// OpenURL opens a new browser window pointing to url. 
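// Editorial sketch (not part of the vendored source being removed): a
// hypothetical caller of the helpers above; the URL and file name are
// illustrative only.
func exampleOpen() error {
	if err := OpenURL("https://example.com"); err != nil {
		return err
	}
	// OpenFile resolves the path and opens it as a file:// URL.
	return OpenFile("report.html")
}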
-func OpenURL(url string) error { - return openBrowser(url) -} - -func runCmd(prog string, args ...string) error { - cmd := exec.Command(prog, args...) - cmd.Stdout = Stdout - cmd.Stderr = Stderr - return cmd.Run() -} diff --git a/vendor/github.com/pkg/browser/browser_darwin.go b/vendor/github.com/pkg/browser/browser_darwin.go deleted file mode 100644 index 8507cf7..0000000 --- a/vendor/github.com/pkg/browser/browser_darwin.go +++ /dev/null @@ -1,5 +0,0 @@ -package browser - -func openBrowser(url string) error { - return runCmd("open", url) -} diff --git a/vendor/github.com/pkg/browser/browser_linux.go b/vendor/github.com/pkg/browser/browser_linux.go deleted file mode 100644 index bed47dd..0000000 --- a/vendor/github.com/pkg/browser/browser_linux.go +++ /dev/null @@ -1,5 +0,0 @@ -package browser - -func openBrowser(url string) error { - return runCmd("xdg-open", url) -} diff --git a/vendor/github.com/pkg/browser/browser_openbsd.go b/vendor/github.com/pkg/browser/browser_openbsd.go deleted file mode 100644 index 4fc7ff0..0000000 --- a/vendor/github.com/pkg/browser/browser_openbsd.go +++ /dev/null @@ -1,14 +0,0 @@ -package browser - -import ( - "errors" - "os/exec" -) - -func openBrowser(url string) error { - err := runCmd("xdg-open", url) - if e, ok := err.(*exec.Error); ok && e.Err == exec.ErrNotFound { - return errors.New("xdg-open: command not found - install xdg-utils from ports(8)") - } - return err -} diff --git a/vendor/github.com/pkg/browser/browser_unsupported.go b/vendor/github.com/pkg/browser/browser_unsupported.go deleted file mode 100644 index e29d220..0000000 --- a/vendor/github.com/pkg/browser/browser_unsupported.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build !linux,!windows,!darwin,!openbsd - -package browser - -import ( - "fmt" - "runtime" -) - -func openBrowser(url string) error { - return fmt.Errorf("openBrowser: unsupported operating system: %v", runtime.GOOS) -} diff --git a/vendor/github.com/pkg/browser/browser_windows.go b/vendor/github.com/pkg/browser/browser_windows.go deleted file mode 100644 index f65e0ee..0000000 --- a/vendor/github.com/pkg/browser/browser_windows.go +++ /dev/null @@ -1,10 +0,0 @@ -package browser - -import ( - "strings" -) - -func openBrowser(url string) error { - r := strings.NewReplacer("&", "^&") - return runCmd("cmd", "/c", "start", r.Replace(url)) -} diff --git a/vendor/github.com/pmezard/go-difflib/LICENSE b/vendor/github.com/pmezard/go-difflib/LICENSE deleted file mode 100644 index c67dad6..0000000 --- a/vendor/github.com/pmezard/go-difflib/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2013, Patrick Mezard -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - Redistributions in binary form must reproduce the above copyright -notice, this list of conditions and the following disclaimer in the -documentation and/or other materials provided with the distribution. - The names of its contributors may not be used to endorse or promote -products derived from this software without specific prior written -permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS -IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED -TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A -PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED -TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/pmezard/go-difflib/difflib/difflib.go b/vendor/github.com/pmezard/go-difflib/difflib/difflib.go deleted file mode 100644 index 003e99f..0000000 --- a/vendor/github.com/pmezard/go-difflib/difflib/difflib.go +++ /dev/null @@ -1,772 +0,0 @@ -// Package difflib is a partial port of Python difflib module. -// -// It provides tools to compare sequences of strings and generate textual diffs. -// -// The following class and functions have been ported: -// -// - SequenceMatcher -// -// - unified_diff -// -// - context_diff -// -// Getting unified diffs was the main goal of the port. Keep in mind this code -// is mostly suitable to output text differences in a human friendly way, there -// are no guarantees generated diffs are consumable by patch(1). -package difflib - -import ( - "bufio" - "bytes" - "fmt" - "io" - "strings" -) - -func min(a, b int) int { - if a < b { - return a - } - return b -} - -func max(a, b int) int { - if a > b { - return a - } - return b -} - -func calculateRatio(matches, length int) float64 { - if length > 0 { - return 2.0 * float64(matches) / float64(length) - } - return 1.0 -} - -type Match struct { - A int - B int - Size int -} - -type OpCode struct { - Tag byte - I1 int - I2 int - J1 int - J2 int -} - -// SequenceMatcher compares sequence of strings. The basic -// algorithm predates, and is a little fancier than, an algorithm -// published in the late 1980's by Ratcliff and Obershelp under the -// hyperbolic name "gestalt pattern matching". The basic idea is to find -// the longest contiguous matching subsequence that contains no "junk" -// elements (R-O doesn't address junk). The same idea is then applied -// recursively to the pieces of the sequences to the left and to the right -// of the matching subsequence. This does not yield minimal edit -// sequences, but does tend to yield matches that "look right" to people. -// -// SequenceMatcher tries to compute a "human-friendly diff" between two -// sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the -// longest *contiguous* & junk-free matching subsequence. That's what -// catches peoples' eyes. The Windows(tm) windiff has another interesting -// notion, pairing up elements that appear uniquely in each sequence. -// That, and the method here, appear to yield more intuitive difference -// reports than does diff. This method appears to be the least vulnerable -// to synching up on blocks of "junk lines", though (like blank lines in -// ordinary text files, or maybe "
<P>
" lines in HTML files). That may be -// because this is the only method of the 3 that has a *concept* of -// "junk" . -// -// Timing: Basic R-O is cubic time worst case and quadratic time expected -// case. SequenceMatcher is quadratic time for the worst case and has -// expected-case behavior dependent in a complicated way on how many -// elements the sequences have in common; best case time is linear. -type SequenceMatcher struct { - a []string - b []string - b2j map[string][]int - IsJunk func(string) bool - autoJunk bool - bJunk map[string]struct{} - matchingBlocks []Match - fullBCount map[string]int - bPopular map[string]struct{} - opCodes []OpCode -} - -func NewMatcher(a, b []string) *SequenceMatcher { - m := SequenceMatcher{autoJunk: true} - m.SetSeqs(a, b) - return &m -} - -func NewMatcherWithJunk(a, b []string, autoJunk bool, - isJunk func(string) bool) *SequenceMatcher { - - m := SequenceMatcher{IsJunk: isJunk, autoJunk: autoJunk} - m.SetSeqs(a, b) - return &m -} - -// Set two sequences to be compared. -func (m *SequenceMatcher) SetSeqs(a, b []string) { - m.SetSeq1(a) - m.SetSeq2(b) -} - -// Set the first sequence to be compared. The second sequence to be compared is -// not changed. -// -// SequenceMatcher computes and caches detailed information about the second -// sequence, so if you want to compare one sequence S against many sequences, -// use .SetSeq2(s) once and call .SetSeq1(x) repeatedly for each of the other -// sequences. -// -// See also SetSeqs() and SetSeq2(). -func (m *SequenceMatcher) SetSeq1(a []string) { - if &a == &m.a { - return - } - m.a = a - m.matchingBlocks = nil - m.opCodes = nil -} - -// Set the second sequence to be compared. The first sequence to be compared is -// not changed. -func (m *SequenceMatcher) SetSeq2(b []string) { - if &b == &m.b { - return - } - m.b = b - m.matchingBlocks = nil - m.opCodes = nil - m.fullBCount = nil - m.chainB() -} - -func (m *SequenceMatcher) chainB() { - // Populate line -> index mapping - b2j := map[string][]int{} - for i, s := range m.b { - indices := b2j[s] - indices = append(indices, i) - b2j[s] = indices - } - - // Purge junk elements - m.bJunk = map[string]struct{}{} - if m.IsJunk != nil { - junk := m.bJunk - for s, _ := range b2j { - if m.IsJunk(s) { - junk[s] = struct{}{} - } - } - for s, _ := range junk { - delete(b2j, s) - } - } - - // Purge remaining popular elements - popular := map[string]struct{}{} - n := len(m.b) - if m.autoJunk && n >= 200 { - ntest := n/100 + 1 - for s, indices := range b2j { - if len(indices) > ntest { - popular[s] = struct{}{} - } - } - for s, _ := range popular { - delete(b2j, s) - } - } - m.bPopular = popular - m.b2j = b2j -} - -func (m *SequenceMatcher) isBJunk(s string) bool { - _, ok := m.bJunk[s] - return ok -} - -// Find longest matching block in a[alo:ahi] and b[blo:bhi]. -// -// If IsJunk is not defined: -// -// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where -// alo <= i <= i+k <= ahi -// blo <= j <= j+k <= bhi -// and for all (i',j',k') meeting those conditions, -// k >= k' -// i <= i' -// and if i == i', j <= j' -// -// In other words, of all maximal matching blocks, return one that -// starts earliest in a, and of all those maximal matching blocks that -// start earliest in a, return the one that starts earliest in b. -// -// If IsJunk is defined, first the longest matching block is -// determined as above, but with the additional restriction that no -// junk element appears in the block. 
Then that block is extended as -// far as possible by matching (only) junk elements on both sides. So -// the resulting block never matches on junk except as identical junk -// happens to be adjacent to an "interesting" match. -// -// If no blocks match, return (alo, blo, 0). -func (m *SequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) Match { - // CAUTION: stripping common prefix or suffix would be incorrect. - // E.g., - // ab - // acab - // Longest matching block is "ab", but if common prefix is - // stripped, it's "a" (tied with "b"). UNIX(tm) diff does so - // strip, so ends up claiming that ab is changed to acab by - // inserting "ca" in the middle. That's minimal but unintuitive: - // "it's obvious" that someone inserted "ac" at the front. - // Windiff ends up at the same place as diff, but by pairing up - // the unique 'b's and then matching the first two 'a's. - besti, bestj, bestsize := alo, blo, 0 - - // find longest junk-free match - // during an iteration of the loop, j2len[j] = length of longest - // junk-free match ending with a[i-1] and b[j] - j2len := map[int]int{} - for i := alo; i != ahi; i++ { - // look at all instances of a[i] in b; note that because - // b2j has no junk keys, the loop is skipped if a[i] is junk - newj2len := map[int]int{} - for _, j := range m.b2j[m.a[i]] { - // a[i] matches b[j] - if j < blo { - continue - } - if j >= bhi { - break - } - k := j2len[j-1] + 1 - newj2len[j] = k - if k > bestsize { - besti, bestj, bestsize = i-k+1, j-k+1, k - } - } - j2len = newj2len - } - - // Extend the best by non-junk elements on each end. In particular, - // "popular" non-junk elements aren't in b2j, which greatly speeds - // the inner loop above, but also means "the best" match so far - // doesn't contain any junk *or* popular non-junk elements. - for besti > alo && bestj > blo && !m.isBJunk(m.b[bestj-1]) && - m.a[besti-1] == m.b[bestj-1] { - besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 - } - for besti+bestsize < ahi && bestj+bestsize < bhi && - !m.isBJunk(m.b[bestj+bestsize]) && - m.a[besti+bestsize] == m.b[bestj+bestsize] { - bestsize += 1 - } - - // Now that we have a wholly interesting match (albeit possibly - // empty!), we may as well suck up the matching junk on each - // side of it too. Can't think of a good reason not to, and it - // saves post-processing the (possibly considerable) expense of - // figuring out what to do with it. In the case of an empty - // interesting match, this is clearly the right thing to do, - // because no other kind of match is possible in the regions. - for besti > alo && bestj > blo && m.isBJunk(m.b[bestj-1]) && - m.a[besti-1] == m.b[bestj-1] { - besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 - } - for besti+bestsize < ahi && bestj+bestsize < bhi && - m.isBJunk(m.b[bestj+bestsize]) && - m.a[besti+bestsize] == m.b[bestj+bestsize] { - bestsize += 1 - } - - return Match{A: besti, B: bestj, Size: bestsize} -} - -// Return list of triples describing matching subsequences. -// -// Each triple is of the form (i, j, n), and means that -// a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in -// i and in j. It's also guaranteed that if (i, j, n) and (i', j', n') are -// adjacent triples in the list, and the second is not the last triple in the -// list, then i+n != i' or j+n != j'. IOW, adjacent triples never describe -// adjacent equal blocks. -// -// The last triple is a dummy, (len(a), len(b), 0), and is the only -// triple with n==0. 
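// Editorial sketch (not part of the vendored source being removed): a
// hypothetical in-package example working out the triples described above.
func exampleGetMatchingBlocks() []Match {
	m := NewMatcher([]string{"a", "b", "c"}, []string{"a", "x", "c"})
	return m.GetMatchingBlocks()
	// => []Match{{0, 0, 1}, {2, 2, 1}, {3, 3, 0}}: "a" matches at (0,0),
	// "c" matches at (2,2), then the (len(a), len(b), 0) sentinel.
}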
-func (m *SequenceMatcher) GetMatchingBlocks() []Match { - if m.matchingBlocks != nil { - return m.matchingBlocks - } - - var matchBlocks func(alo, ahi, blo, bhi int, matched []Match) []Match - matchBlocks = func(alo, ahi, blo, bhi int, matched []Match) []Match { - match := m.findLongestMatch(alo, ahi, blo, bhi) - i, j, k := match.A, match.B, match.Size - if match.Size > 0 { - if alo < i && blo < j { - matched = matchBlocks(alo, i, blo, j, matched) - } - matched = append(matched, match) - if i+k < ahi && j+k < bhi { - matched = matchBlocks(i+k, ahi, j+k, bhi, matched) - } - } - return matched - } - matched := matchBlocks(0, len(m.a), 0, len(m.b), nil) - - // It's possible that we have adjacent equal blocks in the - // matching_blocks list now. - nonAdjacent := []Match{} - i1, j1, k1 := 0, 0, 0 - for _, b := range matched { - // Is this block adjacent to i1, j1, k1? - i2, j2, k2 := b.A, b.B, b.Size - if i1+k1 == i2 && j1+k1 == j2 { - // Yes, so collapse them -- this just increases the length of - // the first block by the length of the second, and the first - // block so lengthened remains the block to compare against. - k1 += k2 - } else { - // Not adjacent. Remember the first block (k1==0 means it's - // the dummy we started with), and make the second block the - // new block to compare against. - if k1 > 0 { - nonAdjacent = append(nonAdjacent, Match{i1, j1, k1}) - } - i1, j1, k1 = i2, j2, k2 - } - } - if k1 > 0 { - nonAdjacent = append(nonAdjacent, Match{i1, j1, k1}) - } - - nonAdjacent = append(nonAdjacent, Match{len(m.a), len(m.b), 0}) - m.matchingBlocks = nonAdjacent - return m.matchingBlocks -} - -// Return list of 5-tuples describing how to turn a into b. -// -// Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple -// has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the -// tuple preceding it, and likewise for j1 == the previous j2. -// -// The tags are characters, with these meanings: -// -// 'r' (replace): a[i1:i2] should be replaced by b[j1:j2] -// -// 'd' (delete): a[i1:i2] should be deleted, j1==j2 in this case. -// -// 'i' (insert): b[j1:j2] should be inserted at a[i1:i1], i1==i2 in this case. -// -// 'e' (equal): a[i1:i2] == b[j1:j2] -func (m *SequenceMatcher) GetOpCodes() []OpCode { - if m.opCodes != nil { - return m.opCodes - } - i, j := 0, 0 - matching := m.GetMatchingBlocks() - opCodes := make([]OpCode, 0, len(matching)) - for _, m := range matching { - // invariant: we've pumped out correct diffs to change - // a[:i] into b[:j], and the next matching block is - // a[ai:ai+size] == b[bj:bj+size]. So we need to pump - // out a diff to change a[i:ai] into b[j:bj], pump out - // the matching block, and move (i,j) beyond the match - ai, bj, size := m.A, m.B, m.Size - tag := byte(0) - if i < ai && j < bj { - tag = 'r' - } else if i < ai { - tag = 'd' - } else if j < bj { - tag = 'i' - } - if tag > 0 { - opCodes = append(opCodes, OpCode{tag, i, ai, j, bj}) - } - i, j = ai+size, bj+size - // the list of matching blocks is terminated by a - // sentinel with size 0 - if size > 0 { - opCodes = append(opCodes, OpCode{'e', ai, i, bj, j}) - } - } - m.opCodes = opCodes - return m.opCodes -} - -// Isolate change clusters by eliminating ranges with no changes. -// -// Return a generator of groups with up to n lines of context. -// Each group is in the same format as returned by GetOpCodes(). 
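// Editorial sketch (not part of the vendored source being removed): a
// hypothetical in-package caller; each inner slice is one change cluster
// ("hunk") whose leading and trailing equal runs are trimmed to n lines.
func exampleGetGroupedOpCodes(a, b []string) [][]OpCode {
	m := NewMatcher(a, b)
	return m.GetGroupedOpCodes(1) // one line of context per hunk
}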
-func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode { - if n < 0 { - n = 3 - } - codes := m.GetOpCodes() - if len(codes) == 0 { - codes = []OpCode{OpCode{'e', 0, 1, 0, 1}} - } - // Fixup leading and trailing groups if they show no changes. - if codes[0].Tag == 'e' { - c := codes[0] - i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 - codes[0] = OpCode{c.Tag, max(i1, i2-n), i2, max(j1, j2-n), j2} - } - if codes[len(codes)-1].Tag == 'e' { - c := codes[len(codes)-1] - i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 - codes[len(codes)-1] = OpCode{c.Tag, i1, min(i2, i1+n), j1, min(j2, j1+n)} - } - nn := n + n - groups := [][]OpCode{} - group := []OpCode{} - for _, c := range codes { - i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 - // End the current group and start a new one whenever - // there is a large range with no changes. - if c.Tag == 'e' && i2-i1 > nn { - group = append(group, OpCode{c.Tag, i1, min(i2, i1+n), - j1, min(j2, j1+n)}) - groups = append(groups, group) - group = []OpCode{} - i1, j1 = max(i1, i2-n), max(j1, j2-n) - } - group = append(group, OpCode{c.Tag, i1, i2, j1, j2}) - } - if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') { - groups = append(groups, group) - } - return groups -} - -// Return a measure of the sequences' similarity (float in [0,1]). -// -// Where T is the total number of elements in both sequences, and -// M is the number of matches, this is 2.0*M / T. -// Note that this is 1 if the sequences are identical, and 0 if -// they have nothing in common. -// -// .Ratio() is expensive to compute if you haven't already computed -// .GetMatchingBlocks() or .GetOpCodes(), in which case you may -// want to try .QuickRatio() or .RealQuickRation() first to get an -// upper bound. -func (m *SequenceMatcher) Ratio() float64 { - matches := 0 - for _, m := range m.GetMatchingBlocks() { - matches += m.Size - } - return calculateRatio(matches, len(m.a)+len(m.b)) -} - -// Return an upper bound on ratio() relatively quickly. -// -// This isn't defined beyond that it is an upper bound on .Ratio(), and -// is faster to compute. -func (m *SequenceMatcher) QuickRatio() float64 { - // viewing a and b as multisets, set matches to the cardinality - // of their intersection; this counts the number of matches - // without regard to order, so is clearly an upper bound - if m.fullBCount == nil { - m.fullBCount = map[string]int{} - for _, s := range m.b { - m.fullBCount[s] = m.fullBCount[s] + 1 - } - } - - // avail[x] is the number of times x appears in 'b' less the - // number of times we've seen it in 'a' so far ... kinda - avail := map[string]int{} - matches := 0 - for _, s := range m.a { - n, ok := avail[s] - if !ok { - n = m.fullBCount[s] - } - avail[s] = n - 1 - if n > 0 { - matches += 1 - } - } - return calculateRatio(matches, len(m.a)+len(m.b)) -} - -// Return an upper bound on ratio() very quickly. -// -// This isn't defined beyond that it is an upper bound on .Ratio(), and -// is faster to compute than either .Ratio() or .QuickRatio(). 
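// Editorial sketch (not part of the vendored source being removed): because
// RealQuickRatio and QuickRatio only ever overestimate Ratio, the cheap
// bounds can gate the expensive exact computation. The threshold and the
// zero fallback are illustrative design choices.
func exampleSimilarityGate(a, b []string, threshold float64) float64 {
	m := NewMatcher(a, b)
	if m.RealQuickRatio() < threshold || m.QuickRatio() < threshold {
		return 0 // cannot possibly reach the threshold; skip the full Ratio()
	}
	return m.Ratio()
}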
-func (m *SequenceMatcher) RealQuickRatio() float64 { - la, lb := len(m.a), len(m.b) - return calculateRatio(min(la, lb), la+lb) -} - -// Convert range to the "ed" format -func formatRangeUnified(start, stop int) string { - // Per the diff spec at http://www.unix.org/single_unix_specification/ - beginning := start + 1 // lines start numbering with one - length := stop - start - if length == 1 { - return fmt.Sprintf("%d", beginning) - } - if length == 0 { - beginning -= 1 // empty ranges begin at line just before the range - } - return fmt.Sprintf("%d,%d", beginning, length) -} - -// Unified diff parameters -type UnifiedDiff struct { - A []string // First sequence lines - FromFile string // First file name - FromDate string // First file time - B []string // Second sequence lines - ToFile string // Second file name - ToDate string // Second file time - Eol string // Headers end of line, defaults to LF - Context int // Number of context lines -} - -// Compare two sequences of lines; generate the delta as a unified diff. -// -// Unified diffs are a compact way of showing line changes and a few -// lines of context. The number of context lines is set by 'n' which -// defaults to three. -// -// By default, the diff control lines (those with ---, +++, or @@) are -// created with a trailing newline. This is helpful so that inputs -// created from file.readlines() result in diffs that are suitable for -// file.writelines() since both the inputs and outputs have trailing -// newlines. -// -// For inputs that do not have trailing newlines, set the lineterm -// argument to "" so that the output will be uniformly newline free. -// -// The unidiff format normally has a header for filenames and modification -// times. Any or all of these may be specified using strings for -// 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'. -// The modification times are normally expressed in the ISO 8601 format. 
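// Editorial sketch (not part of the vendored source being removed): a
// hypothetical pair of three-line inputs fed through GetUnifiedDiffString,
// which wraps the writer below; the file names are illustrative.
func exampleUnifiedDiff() (string, error) {
	diff := UnifiedDiff{
		A:        SplitLines("one\ntwo\nthree\n"),
		B:        SplitLines("one\n2\nthree\n"),
		FromFile: "a.txt",
		ToFile:   "b.txt",
		Context:  3,
	}
	// Produces the familiar "---", "+++", "@@ -i,n +j,m @@" hunk format.
	return GetUnifiedDiffString(diff)
}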
-func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error { - buf := bufio.NewWriter(writer) - defer buf.Flush() - wf := func(format string, args ...interface{}) error { - _, err := buf.WriteString(fmt.Sprintf(format, args...)) - return err - } - ws := func(s string) error { - _, err := buf.WriteString(s) - return err - } - - if len(diff.Eol) == 0 { - diff.Eol = "\n" - } - - started := false - m := NewMatcher(diff.A, diff.B) - for _, g := range m.GetGroupedOpCodes(diff.Context) { - if !started { - started = true - fromDate := "" - if len(diff.FromDate) > 0 { - fromDate = "\t" + diff.FromDate - } - toDate := "" - if len(diff.ToDate) > 0 { - toDate = "\t" + diff.ToDate - } - if diff.FromFile != "" || diff.ToFile != "" { - err := wf("--- %s%s%s", diff.FromFile, fromDate, diff.Eol) - if err != nil { - return err - } - err = wf("+++ %s%s%s", diff.ToFile, toDate, diff.Eol) - if err != nil { - return err - } - } - } - first, last := g[0], g[len(g)-1] - range1 := formatRangeUnified(first.I1, last.I2) - range2 := formatRangeUnified(first.J1, last.J2) - if err := wf("@@ -%s +%s @@%s", range1, range2, diff.Eol); err != nil { - return err - } - for _, c := range g { - i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 - if c.Tag == 'e' { - for _, line := range diff.A[i1:i2] { - if err := ws(" " + line); err != nil { - return err - } - } - continue - } - if c.Tag == 'r' || c.Tag == 'd' { - for _, line := range diff.A[i1:i2] { - if err := ws("-" + line); err != nil { - return err - } - } - } - if c.Tag == 'r' || c.Tag == 'i' { - for _, line := range diff.B[j1:j2] { - if err := ws("+" + line); err != nil { - return err - } - } - } - } - } - return nil -} - -// Like WriteUnifiedDiff but returns the diff a string. -func GetUnifiedDiffString(diff UnifiedDiff) (string, error) { - w := &bytes.Buffer{} - err := WriteUnifiedDiff(w, diff) - return string(w.Bytes()), err -} - -// Convert range to the "ed" format. -func formatRangeContext(start, stop int) string { - // Per the diff spec at http://www.unix.org/single_unix_specification/ - beginning := start + 1 // lines start numbering with one - length := stop - start - if length == 0 { - beginning -= 1 // empty ranges begin at line just before the range - } - if length <= 1 { - return fmt.Sprintf("%d", beginning) - } - return fmt.Sprintf("%d,%d", beginning, beginning+length-1) -} - -type ContextDiff UnifiedDiff - -// Compare two sequences of lines; generate the delta as a context diff. -// -// Context diffs are a compact way of showing line changes and a few -// lines of context. The number of context lines is set by diff.Context -// which defaults to three. -// -// By default, the diff control lines (those with *** or ---) are -// created with a trailing newline. -// -// For inputs that do not have trailing newlines, set the diff.Eol -// argument to "" so that the output will be uniformly newline free. -// -// The context diff format normally has a header for filenames and -// modification times. Any or all of these may be specified using -// strings for diff.FromFile, diff.ToFile, diff.FromDate, diff.ToDate. -// The modification times are normally expressed in the ISO 8601 format. -// If not specified, the strings default to blanks. 
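// Editorial sketch (not part of the vendored source being removed): the
// context-diff twin of the unified example above, again with hypothetical
// inputs; replaced lines come out prefixed with "!".
func exampleContextDiff() (string, error) {
	diff := ContextDiff{
		A:        SplitLines("one\ntwo\n"),
		B:        SplitLines("one\n2\n"),
		FromFile: "a.txt",
		ToFile:   "b.txt",
		Context:  3,
	}
	return GetContextDiffString(diff)
}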
-func WriteContextDiff(writer io.Writer, diff ContextDiff) error { - buf := bufio.NewWriter(writer) - defer buf.Flush() - var diffErr error - wf := func(format string, args ...interface{}) { - _, err := buf.WriteString(fmt.Sprintf(format, args...)) - if diffErr == nil && err != nil { - diffErr = err - } - } - ws := func(s string) { - _, err := buf.WriteString(s) - if diffErr == nil && err != nil { - diffErr = err - } - } - - if len(diff.Eol) == 0 { - diff.Eol = "\n" - } - - prefix := map[byte]string{ - 'i': "+ ", - 'd': "- ", - 'r': "! ", - 'e': " ", - } - - started := false - m := NewMatcher(diff.A, diff.B) - for _, g := range m.GetGroupedOpCodes(diff.Context) { - if !started { - started = true - fromDate := "" - if len(diff.FromDate) > 0 { - fromDate = "\t" + diff.FromDate - } - toDate := "" - if len(diff.ToDate) > 0 { - toDate = "\t" + diff.ToDate - } - if diff.FromFile != "" || diff.ToFile != "" { - wf("*** %s%s%s", diff.FromFile, fromDate, diff.Eol) - wf("--- %s%s%s", diff.ToFile, toDate, diff.Eol) - } - } - - first, last := g[0], g[len(g)-1] - ws("***************" + diff.Eol) - - range1 := formatRangeContext(first.I1, last.I2) - wf("*** %s ****%s", range1, diff.Eol) - for _, c := range g { - if c.Tag == 'r' || c.Tag == 'd' { - for _, cc := range g { - if cc.Tag == 'i' { - continue - } - for _, line := range diff.A[cc.I1:cc.I2] { - ws(prefix[cc.Tag] + line) - } - } - break - } - } - - range2 := formatRangeContext(first.J1, last.J2) - wf("--- %s ----%s", range2, diff.Eol) - for _, c := range g { - if c.Tag == 'r' || c.Tag == 'i' { - for _, cc := range g { - if cc.Tag == 'd' { - continue - } - for _, line := range diff.B[cc.J1:cc.J2] { - ws(prefix[cc.Tag] + line) - } - } - break - } - } - } - return diffErr -} - -// Like WriteContextDiff but returns the diff a string. -func GetContextDiffString(diff ContextDiff) (string, error) { - w := &bytes.Buffer{} - err := WriteContextDiff(w, diff) - return string(w.Bytes()), err -} - -// Split a string on "\n" while preserving them. The output can be used -// as input for UnifiedDiff and ContextDiff structures. 
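// Editorial sketch (not part of the vendored source being removed): note that
// the final element always gains a terminator, even when the input lacks one.
func exampleSplitLines() []string {
	return SplitLines("one\ntwo")
	// => []string{"one\n", "two\n"}
}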
-func SplitLines(s string) []string { - lines := strings.SplitAfter(s, "\n") - lines[len(lines)-1] += "\n" - return lines -} diff --git a/vendor/github.com/snowflakedb/gosnowflake/.gitignore b/vendor/github.com/snowflakedb/gosnowflake/.gitignore deleted file mode 100644 index 469f82a..0000000 --- a/vendor/github.com/snowflakedb/gosnowflake/.gitignore +++ /dev/null @@ -1,7 +0,0 @@ -.idea/ -parameters.json -coverage.txt -fuzz-*/ -/select1 -/selectmany -/verifycert diff --git a/vendor/github.com/snowflakedb/gosnowflake/.travis.yml b/vendor/github.com/snowflakedb/gosnowflake/.travis.yml deleted file mode 100644 index 36fe704..0000000 --- a/vendor/github.com/snowflakedb/gosnowflake/.travis.yml +++ /dev/null @@ -1,20 +0,0 @@ -sudo: false -language: go -go: - - "1.8" - - "1.9" - - "1.10" - - tip -before_install: - - openssl aes-256-cbc -k "$super_secret_password" -in parameters.json.enc -out parameters.json -d - - curl -L -s https://github.com/golang/dep/releases/download/v0.3.2/dep-linux-amd64 -o $GOPATH/bin/dep - - chmod +x $GOPATH/bin/dep -install: - - dep ensure -script: - - make fmt lint cov -after_success: - - bash <(curl -s https://codecov.io/bash) -env: - global: - secure: Zp20tCeBJ1KdavtqQ20ICprMTerlNXU8t+2hUzGG4Fjz1mZz12mp0sXZJhhKZ4+bX3jsc5IWLVoB+1eb1uFFg+NKZeC7ZCScRiZmZ8r2etscCkTQkUQNgJAwAw1mTycJU/9qjq7YX3RvyHZ3SeFlPNcusHVm0c1ZKVpXJn+KQuh5Z5+c7FUBTzuY2Aap99pg3N7mMdajH+t5GUCrxbMltDtGiSEd70VdCxQ8/BUsylbKGjYQ0vm2Rd7IFPyXv9SgHR07nNzeeXD3/raf6JZyMdixb/esqS6fY50t74jT4DoXV5an5fD8jCBIxYxmBOKenK2LGP/VptyZGpM/Bos57VceyZoqeAnDUXU4NabeLNxUt7IBs/bAADceH9vrc5A5pPEXCx2wrIUufGFfvjNu8haaBLF8ydvQ7VJhauXH/ktEDxfcwC+lJJTWSbHad/ya7UNe+1OFP3z1GAvtZFC2a3/IoFVvsVS+r+71PHR1QQunWavGvhaJ+c34h3p1EZ2yEHHHE8msx8FjIvHSiMNh9FaKyPskv5r3SNpv1aGODCrdf3uAfskr6NBSeYplkKYQe23Yasdx3CXKjCahILOqd8zDXnX5LSNW46ZBEHjKlgV0RbLY6rYrWN+ugQJ2NHwd1QpMo2Weq9QXPkMaRbwuR/Kb4y5MNUzrnFnPXyRTCnk= diff --git a/vendor/github.com/snowflakedb/gosnowflake/CHANGELOG.md b/vendor/github.com/snowflakedb/gosnowflake/CHANGELOG.md deleted file mode 100644 index e76c2c7..0000000 --- a/vendor/github.com/snowflakedb/gosnowflake/CHANGELOG.md +++ /dev/null @@ -1,52 +0,0 @@ -## Version 1.1.8 - -- Removed username restrition for oAuth - -## Version 1.1.7 - -- Added `client_session_keep_alive` option to have a heartbeat in the background every hour to keep the connection alive. Fixed #160 -- Corrected doc about OCSP. -- Added OS session info to the session. - -## Version 1.1.6 - -- Fixed memory leak in the large result set. The chunk of memory is freed as soon as the cursor moved forward. -- Removed glide dependency in favor of dep #149 (@tjj5036) -- Fixed username and password URL escape issue #151 -- Added Go 1.10 test. - -## Version 1.1.5 - -- Added externalbrowser authenticator support PR #141, #142 (@tjj5036) - -## Version 1.1.4 - -- Raise HTTP 403 errors immediately after the authentication failure instead of retry until the timeout. Issue #138 (@dominicbarnes) -- Fixed vararg error message. - -## Version 1.1.3 - -- Removed hardcoded `public` schema name in case not specified. -- Fixed `requestId` value - -## Version 1.1.2 - -- `nil` should set to the target value instead of the pointer to the target - -## Version 1.1.1 - -- Fixed HTTP 403 errors when getting result sets from AWS S3. The change in the server release 2.23.0 will enforce a signature of key for result set. - -## Version 1.1.0 - -- Fixed #125. Dropped proxy parameters. HTTP_PROXY, HTTPS_PROXY and NO_PROXY should be used. -- Improved logging based on security code review. 
No sensitive information is logged. -- Added no connection pool example -- Fixed #110. Raise error if the specified db, schema or warehouse doesn't exist. role was already supported. -- Added go 1.9 config in TravisCI -- Added session parameter support in DSN. - -## Vesrion 1.0.0 - -- Added [dep](https://github.com/golang/dep) manifest (@CrimsonVoid) -- Bumped up the version to 1.0.0 diff --git a/vendor/github.com/snowflakedb/gosnowflake/CONTRIBUTING.md b/vendor/github.com/snowflakedb/gosnowflake/CONTRIBUTING.md deleted file mode 100644 index 75e2eca..0000000 --- a/vendor/github.com/snowflakedb/gosnowflake/CONTRIBUTING.md +++ /dev/null @@ -1,18 +0,0 @@ -# Contributing Guidelines - -## Reporting Issues - -Before creating a new Issue, please check first if a similar Issue [already exists](https://github.com/snowflakedb/gosnowflake/issues?state=open) or was [recently closed](https://github.com/snowflakedb/gosnowflake/issues?direction=desc&page=1&sort=updated&state=closed). - -## Contributing Code - -By contributing to this project, you share your code under the Apache License 2, as specified in the LICENSE file. - -### Code Review - -Everyone is invited to review and comment on pull requests. -If it looks fine to you, comment with "LGTM" (Looks good to me). - -If changes are required, notice the reviewers with "PTAL" (Please take another look) after committing the fixes. - -Before merging the Pull Request, at least one Snowflake team member must have commented with "LGTM". diff --git a/vendor/github.com/snowflakedb/gosnowflake/Gopkg.lock b/vendor/github.com/snowflakedb/gosnowflake/Gopkg.lock deleted file mode 100644 index a0c8f19..0000000 --- a/vendor/github.com/snowflakedb/gosnowflake/Gopkg.lock +++ /dev/null @@ -1,46 +0,0 @@ -# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
- - -[[projects]] - branch = "master" - digest = "1:1ba1d79f2810270045c328ae5d674321db34e3aae468eb4233883b473c5c0467" - name = "github.com/golang/glog" - packages = ["."] - pruneopts = "UT" - revision = "23def4e6c14b4da8ac2ed8007337bc5eb5007998" - -[[projects]] - digest = "1:8f8811f9be822914c3a25c6a071e93beb4c805d7b026cbf298bc577bc1cc945b" - name = "github.com/google/uuid" - packages = ["."] - pruneopts = "UT" - revision = "064e2069ce9c359c118179501254f67d7d37ba24" - version = "0.2" - -[[projects]] - branch = "master" - digest = "1:7b4cd865877639770d2711dc6278b98466be7ebbe5cdf2c933659b73d6e58213" - name = "github.com/pkg/browser" - packages = ["."] - pruneopts = "UT" - revision = "c90ca0c84f15f81c982e32665bffd8d7aac8f097" - -[[projects]] - branch = "master" - digest = "1:b6b9212412c3335645f285510729b18c0cc8d06e288f956dba4ace7b3fe8ad7c" - name = "golang.org/x/crypto" - packages = ["ocsp"] - pruneopts = "UT" - revision = "91a49db82a88618983a78a06c1cbd4e00ab749ab" - -[solve-meta] - analyzer-name = "dep" - analyzer-version = 1 - input-imports = [ - "github.com/golang/glog", - "github.com/google/uuid", - "github.com/pkg/browser", - "golang.org/x/crypto/ocsp", - ] - solver-name = "gps-cdcl" - solver-version = 1 diff --git a/vendor/github.com/snowflakedb/gosnowflake/Gopkg.toml b/vendor/github.com/snowflakedb/gosnowflake/Gopkg.toml deleted file mode 100644 index cb98b04..0000000 --- a/vendor/github.com/snowflakedb/gosnowflake/Gopkg.toml +++ /dev/null @@ -1,15 +0,0 @@ -[[constraint]] - branch = "master" - name = "github.com/golang/glog" - -[[constraint]] - branch = "master" - name = "github.com/pkg/browser" - -[[constraint]] - branch = "master" - name = "golang.org/x/crypto" - -[prune] - go-tests = true - unused-packages = true diff --git a/vendor/github.com/snowflakedb/gosnowflake/LICENSE b/vendor/github.com/snowflakedb/gosnowflake/LICENSE deleted file mode 100644 index 01f96dc..0000000 --- a/vendor/github.com/snowflakedb/gosnowflake/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright (c) 2017-2018 Snowflake Computing Inc. All right reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/snowflakedb/gosnowflake/Makefile b/vendor/github.com/snowflakedb/gosnowflake/Makefile deleted file mode 100644 index a98e488..0000000 --- a/vendor/github.com/snowflakedb/gosnowflake/Makefile +++ /dev/null @@ -1,48 +0,0 @@ -NAME:=gosnowflake -VERSION:=$(shell git describe --tags --abbrev=0) -REVISION:=$(shell git rev-parse --short HEAD) -COVFLAGS:= - -## Run fmt, lint and test -all: fmt lint cov - -include gosnowflake.mak - -## Run tests -test: deps - eval $$(jq -r '.testconnection | to_entries | map("export \(.key)=\(.value|tostring)")|.[]' parameters.json) && \ - go test -tags=sfdebug -race $(COVFLAGS) -v . 
# -stderrthreshold=INFO -vmodule=*=2 or -log_dir=$(HOME) -vmodule=connection=2,driver=2
-
-## Run Coverage tests
-cov:
-	make test COVFLAGS="-coverprofile=coverage.txt -covermode=atomic"
-
-## Lint
-lint: clint
-	for c in $$(ls cmd); do \
-		(cd cmd/$$c; make lint); \
-	done
-
-## Format source codes
-fmt: cfmt
-	for c in $$(ls cmd); do \
-		(cd cmd/$$c; make fmt); \
-	done
-
-## Install sample programs
-install:
-	for c in $$(ls cmd); do \
-		(cd cmd/$$c; GOBIN=$$GOPATH/bin go install $$c.go); \
-	done
-
-## Build fuzz tests
-fuzz-build:
-	for c in $$(ls | grep -E "fuzz-*"); do \
-		(cd $$c; make fuzz-build); \
-	done
-
-## Run fuzz-dsn
-fuzz-dsn:
-	(cd fuzz-dsn; go-fuzz -bin=./dsn-fuzz.zip -workdir=.)
-
-.PHONY: setup deps update test lint help fuzz-dsn
diff --git a/vendor/github.com/snowflakedb/gosnowflake/README.rst b/vendor/github.com/snowflakedb/gosnowflake/README.rst
deleted file mode 100644
index ac2a62b..0000000
--- a/vendor/github.com/snowflakedb/gosnowflake/README.rst
+++ /dev/null
@@ -1,108 +0,0 @@
-********************************************************************************
-Go Snowflake Driver
-********************************************************************************
-
-.. image:: https://travis-ci.org/snowflakedb/gosnowflake.svg?branch=master
-    :target: https://travis-ci.org/snowflakedb/gosnowflake
-
-.. image:: https://codecov.io/gh/snowflakedb/gosnowflake/branch/master/graph/badge.svg
-    :target: https://codecov.io/gh/snowflakedb/gosnowflake
-
-.. image:: http://img.shields.io/:license-Apache%202-brightgreen.svg
-    :target: http://www.apache.org/licenses/LICENSE-2.0.txt
-
-.. image:: https://goreportcard.com/badge/github.com/snowflakedb/gosnowflake
-    :target: https://goreportcard.com/report/github.com/snowflakedb/gosnowflake
-
-This topic provides instructions for installing, running, and modifying the Go Snowflake Driver. The driver supports Go's `database/sql <https://golang.org/pkg/database/sql/>`_ package.
-
-Prerequisites
-================================================================================
-
-The following software packages are required to use the Go Snowflake Driver.
-
-Go
-----------------------------------------------------------------------
-
-The driver requires the `Go language <https://golang.org/>`_ 1.8 or higher. The supported operating systems are Linux, Mac OS, and Windows, but you may run the driver on other platforms if the Go language works correctly on those platforms.
-
-
-Installation
-================================================================================
-
-Get the Gosnowflake source code and `dep <https://github.com/golang/dep>`_ (the dependency management tool), if not installed, and ensure the dependent libraries are installed.
-
-.. code-block:: bash
-
-    go get -u github.com/snowflakedb/gosnowflake
-    go get -u github.com/golang/dep/cmd/dep
-    cd $GOPATH/src/github.com/snowflakedb/gosnowflake/
-    dep ensure
-
-Docs
-====
-
-For detailed documentation and basic usage examples, please see the documentation at
-`godoc.org <https://godoc.org/github.com/snowflakedb/gosnowflake>`_.
-
-Sample Programs
-================================================================================
-
-Snowflake provides a set of sample programs to test with. Set the environment variable ``$GOPATH`` to the top directory of your workspace, e.g., ``~/go``, and make certain to
-include ``$GOPATH/bin`` in the environment variable ``$PATH``. Run the ``make`` command to build all sample programs.
-
-.. code-block:: bash
-
-    make install
-
-In the following example, the program ``select1.go`` is built and installed in ``$GOPATH/bin`` and can be run from the command line:
-
-.. code-block:: bash
-
-    SNOWFLAKE_TEST_ACCOUNT=<your_account> \
-    SNOWFLAKE_TEST_USER=<your_user> \
-    SNOWFLAKE_TEST_PASSWORD=<your_password> \
-    select1
-    Congrats! You have successfully run SELECT 1 with Snowflake DB!
-
-Development
-================================================================================
-
-The developer notes are hosted with the source code on `GitHub <https://github.com/snowflakedb/gosnowflake>`_.
-
-Testing Code
-----------------------------------------------------------------------
-
-Set the Snowflake connection info in ``parameters.json``:
-
-.. code-block:: json
-
-    {
-        "testconnection": {
-            "SNOWFLAKE_TEST_USER": "<your_user>",
-            "SNOWFLAKE_TEST_PASSWORD": "<your_password>",
-            "SNOWFLAKE_TEST_ACCOUNT": "<your_account>",
-            "SNOWFLAKE_TEST_WAREHOUSE": "<your_warehouse>",
-            "SNOWFLAKE_TEST_DATABASE": "<your_database>",
-            "SNOWFLAKE_TEST_SCHEMA": "<your_schema>",
-            "SNOWFLAKE_TEST_ROLE": "<your_role>"
-        }
-    }
-
-Install `jq <https://stedolan.github.io/jq/>`_ so that the parameters can get parsed correctly, and run ``make test`` in your Go development environment:
-
-.. code-block:: bash
-
-    make test
-
-Submitting Pull Requests
-----------------------------------------------------------------------
-
-You may use your preferred editor to edit the driver code. Make certain to run ``make fmt lint`` before submitting any pull request to Snowflake. This command formats your source code according to the standard Go style and detects any coding style issues.
-
-Support
-----------------------------------------------------------------------
-
-For official support, contact Snowflake support at:
-https://support.snowflake.net/s/snowflake-support
-
diff --git a/vendor/github.com/snowflakedb/gosnowflake/auth.go b/vendor/github.com/snowflakedb/gosnowflake/auth.go
deleted file mode 100644
index 09a3ca3..0000000
--- a/vendor/github.com/snowflakedb/gosnowflake/auth.go
+++ /dev/null
@@ -1,283 +0,0 @@
-// Copyright (c) 2017-2018 Snowflake Computing Inc. All right reserved.
-
-package gosnowflake
-
-import (
-	"context"
-	"encoding/json"
-	"fmt"
-	"io/ioutil"
-	"net/http"
-	"net/url"
-	"runtime"
-	"strconv"
-	"strings"
-	"time"
-
-	"github.com/google/uuid"
-)
-
-const (
-	clientType = "Go"
-)
-
-const (
-	authenticatorExternalBrowser = "EXTERNALBROWSER"
-	authenticatorOAuth           = "OAUTH"
-	authenticatorSnowflake       = "SNOWFLAKE"
-	authenticatorOkta            = "OKTA"
-)
-
-// platform consists of compiler and architecture type in string
-var platform = fmt.Sprintf("%v-%v", runtime.Compiler, runtime.GOARCH)
-
-// operatingSystem is the runtime operating system.
-var operatingSystem = runtime.GOOS - -// userAgent shows up in User-Agent HTTP header -var userAgent = fmt.Sprintf("%v/%v/%v/%v", clientType, SnowflakeGoDriverVersion, runtime.Version(), platform) - -type authRequestClientEnvironment struct { - Application string `json:"APPLICATION"` - Os string `json:"OS"` - OsVersion string `json:"OS_VERSION"` -} -type authRequestData struct { - ClientAppID string `json:"CLIENT_APP_ID"` - ClientAppVersion string `json:"CLIENT_APP_VERSION"` - SvnRevision string `json:"SVN_REVISION"` - AccountName string `json:"ACCOUNT_NAME"` - LoginName string `json:"LOGIN_NAME,omitempty"` - Password string `json:"PASSWORD,omitempty"` - RawSAMLResponse string `json:"RAW_SAML_RESPONSE,omitempty"` - ExtAuthnDuoMethod string `json:"EXT_AUTHN_DUO_METHOD,omitempty"` - Passcode string `json:"PASSCODE,omitempty"` - Authenticator string `json:"AUTHENTICATOR,omitempty"` - SessionParameters map[string]string `json:"SESSION_PARAMETERS,omitempty"` - ClientEnvironment authRequestClientEnvironment `json:"CLIENT_ENVIRONMENT"` - BrowserModeRedirectPort string `json:"BROWSER_MODE_REDIRECT_PORT,omitempty"` - ProofKey string `json:"PROOF_KEY,omitempty"` - Token string `json:"TOKEN,omitempty"` -} -type authRequest struct { - Data authRequestData `json:"data"` -} - -type nameValueParameter struct { - Name string `json:"name"` - Value interface{} `json:"value"` -} - -type authResponseSessionInfo struct { - DatabaseName string `json:"databaseName"` - SchemaName string `json:"schemaName"` - WarehouseName string `json:"warehouseName"` - RoleName string `json:"roleName"` -} - -type authResponseMain struct { - Token string `json:"token,omitempty"` - ValidityInSeconds time.Duration `json:"validityInSeconds,omitempty"` - MasterToken string `json:"masterToken,omitempty"` - MasterValidityInSeconds time.Duration `json:"masterValidityInSeconds"` - DisplayUserName string `json:"displayUserName"` - ServerVersion string `json:"serverVersion"` - FirstLogin bool `json:"firstLogin"` - RemMeToken string `json:"remMeToken"` - RemMeValidityInSeconds time.Duration `json:"remMeValidityInSeconds"` - HealthCheckInterval time.Duration `json:"healthCheckInterval"` - NewClientForUpgrade string `json:"newClientForUpgrade"` - SessionID int `json:"sessionId"` - Parameters []nameValueParameter `json:"parameters"` - SessionInfo authResponseSessionInfo `json:"sessionInfo"` - TokenURL string `json:"tokenUrl,omitempty"` - SSOURL string `json:"ssoUrl,omitempty"` - ProofKey string `json:"proofKey,omitempty"` -} -type authResponse struct { - Data authResponseMain `json:"data"` - Message string `json:"message"` - Code string `json:"code"` - Success bool `json:"success"` -} - -func postAuth( - sr *snowflakeRestful, - params *url.Values, - headers map[string]string, - body []byte, - timeout time.Duration) ( - data *authResponse, err error) { - params.Add("requestId", uuid.New().String()) - fullURL := fmt.Sprintf( - "%s://%s:%d%s", sr.Protocol, sr.Host, sr.Port, - "/session/v1/login-request?"+params.Encode()) - glog.V(2).Infof("full URL: %v", fullURL) - resp, err := sr.FuncPost(context.TODO(), sr, fullURL, headers, body, timeout, true) - if err != nil { - return nil, err - } - defer resp.Body.Close() - if resp.StatusCode == http.StatusOK { - var respd authResponse - err = json.NewDecoder(resp.Body).Decode(&respd) - if err != nil { - glog.V(1).Infof("failed to decode JSON. 
err: %v", err) - glog.Flush() - return nil, err - } - return &respd, nil - } - switch resp.StatusCode { - case http.StatusBadGateway, http.StatusServiceUnavailable, http.StatusGatewayTimeout: - // service availability or connectivity issue. Most likely server side issue. - return nil, &SnowflakeError{ - Number: ErrCodeServiceUnavailable, - SQLState: SQLStateConnectionWasNotEstablished, - Message: errMsgServiceUnavailable, - MessageArgs: []interface{}{resp.StatusCode, fullURL}, - } - case http.StatusUnauthorized, http.StatusForbidden: - // failed to connect to db. account name may be wrong - return nil, &SnowflakeError{ - Number: ErrCodeFailedToConnect, - SQLState: SQLStateConnectionRejected, - Message: errMsgFailedToConnect, - MessageArgs: []interface{}{resp.StatusCode, fullURL}, - } - } - b, err := ioutil.ReadAll(resp.Body) - if err != nil { - glog.V(1).Infof("failed to extract HTTP response body. err: %v", err) - glog.Flush() - return nil, err - } - glog.V(1).Infof("HTTP: %v, URL: %v, Body: %v", resp.StatusCode, fullURL, b) - glog.V(1).Infof("Header: %v", resp.Header) - glog.Flush() - return nil, &SnowflakeError{ - Number: ErrFailedToAuth, - SQLState: SQLStateConnectionRejected, - Message: errMsgFailedToAuth, - MessageArgs: []interface{}{resp.StatusCode, fullURL}, - } -} - -// Generates a map of headers needed to authenticate -// with Snowflake. -func getHeaders() map[string]string { - headers := make(map[string]string) - headers["Content-Type"] = headerContentTypeApplicationJSON - headers["accept"] = headerAcceptTypeApplicationSnowflake - headers["User-Agent"] = userAgent - return headers -} - -// Used to authenticate the user with Snowflake. -func authenticate( - sc *snowflakeConn, - samlResponse []byte, - proofKey []byte, -) (resp *authResponseMain, err error) { - - headers := getHeaders() - clientEnvironment := authRequestClientEnvironment{ - Application: sc.cfg.Application, - Os: operatingSystem, - OsVersion: platform, - } - - sessionParameters := make(map[string]string) - for k, v := range sc.cfg.Params { - // upper casing to normalize keys - sessionParameters[strings.ToUpper(k)] = *v - } - - requestMain := authRequestData{ - ClientAppID: clientType, - ClientAppVersion: SnowflakeGoDriverVersion, - AccountName: sc.cfg.Account, - SessionParameters: sessionParameters, - ClientEnvironment: clientEnvironment, - } - - authenticator := strings.ToUpper(sc.cfg.Authenticator) - switch authenticator { - case authenticatorExternalBrowser: - requestMain.ProofKey = string(proofKey) - requestMain.Token = string(samlResponse) - requestMain.LoginName = sc.cfg.User - requestMain.Authenticator = authenticatorExternalBrowser - case authenticatorOAuth: - requestMain.LoginName = sc.cfg.User - requestMain.Authenticator = authenticatorOAuth - requestMain.Token = sc.cfg.Token - case authenticatorOkta: - requestMain.RawSAMLResponse = string(samlResponse) - case authenticatorSnowflake: - fallthrough - default: - glog.V(2).Info("Username and password") - requestMain.LoginName = sc.cfg.User - requestMain.Password = sc.cfg.Password - switch { - case sc.cfg.PasscodeInPassword: - requestMain.ExtAuthnDuoMethod = "passcode" - case sc.cfg.Passcode != "": - requestMain.Passcode = sc.cfg.Passcode - requestMain.ExtAuthnDuoMethod = "passcode" - } - } - - authRequest := authRequest{ - Data: requestMain, - } - params := &url.Values{} - if sc.cfg.Database != "" { - params.Add("databaseName", sc.cfg.Database) - } - if sc.cfg.Schema != "" { - params.Add("schemaName", sc.cfg.Schema) - } - if sc.cfg.Warehouse != "" { - 
params.Add("warehouse", sc.cfg.Warehouse) - } - if sc.cfg.Role != "" { - params.Add("roleName", sc.cfg.Role) - } - - jsonBody, err := json.Marshal(authRequest) - if err != nil { - return - } - - glog.V(2).Infof("PARAMS for Auth: %v, %v, %v, %v, %v, %v", - params, sc.rest.Protocol, sc.rest.Host, sc.rest.Port, sc.rest.LoginTimeout, sc.rest.Authenticator) - - respd, err := sc.rest.FuncPostAuth(sc.rest, params, headers, jsonBody, sc.rest.LoginTimeout) - if err != nil { - return nil, err - } - if !respd.Success { - glog.V(1).Infoln("Authentication FAILED") - glog.Flush() - sc.rest.Token = "" - sc.rest.MasterToken = "" - sc.rest.SessionID = -1 - code, err := strconv.Atoi(respd.Code) - if err != nil { - code = -1 - return nil, err - } - return nil, &SnowflakeError{ - Number: code, - SQLState: SQLStateConnectionRejected, - Message: respd.Message, - } - } - glog.V(2).Info("Authentication SUCCESS") - sc.rest.Token = respd.Data.Token - sc.rest.MasterToken = respd.Data.MasterToken - sc.rest.SessionID = respd.Data.SessionID - return &respd.Data, nil -} diff --git a/vendor/github.com/snowflakedb/gosnowflake/authexternalbrowser.go b/vendor/github.com/snowflakedb/gosnowflake/authexternalbrowser.go deleted file mode 100644 index 37b9385..0000000 --- a/vendor/github.com/snowflakedb/gosnowflake/authexternalbrowser.go +++ /dev/null @@ -1,252 +0,0 @@ -package gosnowflake - -// -// Copyright (c) 2018 Snowflake Computing Inc. All right reserved. -// - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "log" - "net" - "net/http" - "net/url" - "strconv" - "strings" - - "github.com/pkg/browser" -) - -const ( - successHTML = ` -SAML Response for Snowflake - -Your identity was confirmed and propagated to Snowflake %v. -You can close this window now and go back where you started from. -` -) - -const ( - bufSize = 8192 -) - -// Builds a response to show to the user after successfully -// getting a response from Snowflake. -func buildResponse(application string) bytes.Buffer { - body := fmt.Sprintf(successHTML, application) - t := &http.Response{ - Status: "200 OK", - StatusCode: 200, - Proto: "HTTP/1.1", - ProtoMajor: 1, - ProtoMinor: 1, - Body: ioutil.NopCloser(bytes.NewBufferString(body)), - ContentLength: int64(len(body)), - Request: nil, - Header: make(http.Header), - } - var b bytes.Buffer - t.Write(&b) - return b -} - -// This opens a socket that listens on all available unicast -// and any anycast IP addresses locally. By specifying "0", we are -// able to bind to a free port. -func bindToPort() (net.Listener, error) { - l, err := net.Listen("tcp", "localhost:0") - if err != nil { - glog.V(1).Infof("unable to bind to a port on localhost, err: %v", err) - return nil, err - } - return l, nil -} - -// Opens a browser window (or new tab) with the configured IDP Url. -// This can / will fail if running inside a shell with no display, ie -// ssh'ing into a box attempting to authenticate via external browser. -func openBrowser(idpURL string) error { - err := browser.OpenURL(idpURL) - if err != nil { - glog.V(1).Infof("failed to open a browser. err: %v", err) - return err - } - return nil -} - -// Gets the IDP Url and Proof Key from Snowflake. -// Note: FuncPostAuthSaml will return a fully qualified error if -// there is something wrong getting data from Snowflake. 
-func getIdpURLProofKey( - sr *snowflakeRestful, - authenticator string, - application string, - account string, - callbackPort int) (string, string, error) { - - headers := make(map[string]string) - headers["Content-Type"] = headerContentTypeApplicationJSON - headers["accept"] = headerContentTypeApplicationJSON - headers["User-Agent"] = userAgent - - clientEnvironment := authRequestClientEnvironment{ - Application: application, - Os: operatingSystem, - OsVersion: platform, - } - - requestMain := authRequestData{ - ClientAppID: clientType, - ClientAppVersion: SnowflakeGoDriverVersion, - AccountName: account, - ClientEnvironment: clientEnvironment, - Authenticator: authenticator, - BrowserModeRedirectPort: strconv.Itoa(callbackPort), - } - - authRequest := authRequest{ - Data: requestMain, - } - - jsonBody, err := json.Marshal(authRequest) - if err != nil { - glog.V(1).Infof("failed to serialize json. err: %v", err) - return "", "", err - } - - respd, err := sr.FuncPostAuthSAML(sr, headers, jsonBody, sr.LoginTimeout) - if err != nil { - return "", "", err - } - return respd.Data.SSOURL, respd.Data.ProofKey, nil -} - -// The response returned from Snowflake looks like so: -// GET /?token=encodedSamlToken -// Host: localhost:54001 -// Connection: keep-alive -// Upgrade-Insecure-Requests: 1 -// User-Agent: userAgentStr -// Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8 -// Referer: https://myaccount.snowflakecomputing.com/fed/login -// Accept-Encoding: gzip, deflate, br -// Accept-Language: en-US,en;q=0.9 -// This extracts the token portion of the response. -func getTokenFromResponse(response string) (string, error) { - start := "GET /?token=" - arr := strings.Split(response, "\r\n") - if !strings.HasPrefix(arr[0], start) { - glog.V(1).Infof("response is malformed. resp: %s", arr[0]) - return "", &SnowflakeError{ - Number: ErrFailedToParseResponse, - SQLState: SQLStateConnectionRejected, - Message: errMsgFailedToParseResponse, - MessageArgs: []interface{}{response}, - } - } - token := strings.TrimLeft(arr[0], start) - token = strings.Split(token, " ")[0] - return token, nil -} - -// Authentication by an external browser takes place via the following: -// - the golang snowflake driver communicates to Snowflake that the user wishes to -// authenticate via external browser -// - snowflake sends back the IDP Url configured at the Snowflake side for the -// provided account -// - the default browser is opened to that URL -// - user authenticates at the IDP, and is redirected to Snowflake -// - Snowflake directs the user back to the driver -// - authenticate is complete! -func authenticateByExternalBrowser( - sr *snowflakeRestful, - authenticator string, - application string, - account string, - user string, - password string, -) ([]byte, []byte, error) { - l, err := bindToPort() - if err != nil { - return nil, nil, err - } - defer l.Close() - - callbackPort := l.Addr().(*net.TCPAddr).Port - idpURL, proofKey, err := getIdpURLProofKey( - sr, authenticator, application, account, callbackPort) - if err != nil { - return nil, nil, err - } - - if err = openBrowser(idpURL); err != nil { - return nil, nil, err - } - - var encodedSamlResponse string - var acceptErr error - var tokenErr error - acceptErr = nil - for { - conn, err := l.Accept() - if err != nil { - glog.V(1).Infof("unable to accept connection. 
err: %v", err) - log.Fatal(err) - } - go func(c net.Conn) { - var buf bytes.Buffer - total := 0 - for { - b := make([]byte, bufSize) - n, err := c.Read(b) - if err != nil { - if err != io.EOF { - glog.V(1).Infof("error reading from socket. err: %v", err) - acceptErr = err - } - break - } - total += n - buf.Write(b) - if n < bufSize { - // We successfully read all data - s := string(buf.Bytes()[:total]) - encodedSamlResponse, tokenErr = getTokenFromResponse(s) - break - } - buf.Grow(bufSize) - } - if encodedSamlResponse != "" { - httpResponse := buildResponse(application) - c.Write(httpResponse.Bytes()) - } - c.Close() - }(conn) - if acceptErr != nil || encodedSamlResponse != "" { - break - } - } - - if tokenErr != nil { - return nil, nil, tokenErr - } - - if acceptErr != nil { - return nil, nil, &SnowflakeError{ - Number: ErrFailedToGetExternalBrowserResponse, - SQLState: SQLStateConnectionRejected, - Message: errMsgFailedToGetExternalBrowserResponse, - MessageArgs: []interface{}{acceptErr}, - } - } - - escapedSamlResponse, err := url.QueryUnescape(encodedSamlResponse) - if err != nil { - glog.V(1).Infof("unable to unescape saml response. err: %v", err) - return nil, nil, err - } - return []byte(escapedSamlResponse), []byte(proofKey), nil -} diff --git a/vendor/github.com/snowflakedb/gosnowflake/authokta.go b/vendor/github.com/snowflakedb/gosnowflake/authokta.go deleted file mode 100644 index 9b13eae..0000000 --- a/vendor/github.com/snowflakedb/gosnowflake/authokta.go +++ /dev/null @@ -1,346 +0,0 @@ -// Copyright (c) 2017-2018 Snowflake Computing Inc. All right reserved. - -package gosnowflake - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "html" - "io/ioutil" - "net/http" - "net/url" - "strconv" - "time" - - "github.com/google/uuid" -) - -type authOKTARequest struct { - Username string `json:"username"` - Password string `json:"password"` -} - -type authOKTAResponse struct { - CookieToken string `json:"cookieToken"` -} - -/* -authenticateBySAML authenticates a user by SAML -SAML Authentication -1. query GS to obtain IDP token and SSO url -2. IMPORTANT Client side validation: - validate both token url and sso url contains same prefix - (protocol + host + port) as the given authenticator url. - Explanation: - This provides a way for the user to 'authenticate' the IDP it is - sending his/her credentials to. Without such a check, the user could - be coerced to provide credentials to an IDP impersonator. -3. query IDP token url to authenticate and retrieve access token -4. given access token, query IDP URL snowflake app to get SAML response -5. IMPORTANT Client side validation: - validate the post back url come back with the SAML response - contains the same prefix as the Snowflake's server url, which is the - intended destination url to Snowflake. -Explanation: - This emulates the behavior of IDP initiated login flow in the user - browser where the IDP instructs the browser to POST the SAML - assertion to the specific SP endpoint. This is critical in - preventing a SAML assertion issued to one SP from being sent to - another SP. 
-*/
-func authenticateBySAML(
-	sr *snowflakeRestful,
-	authenticator string,
-	application string,
-	account string,
-	user string,
-	password string,
-) (samlResponse []byte, err error) {
-	glog.V(2).Info("step 1: query GS to obtain IDP token and SSO url")
-	headers := make(map[string]string)
-	headers["Content-Type"] = headerContentTypeApplicationJSON
-	headers["accept"] = headerContentTypeApplicationJSON
-	headers["User-Agent"] = userAgent
-
-	clientEnvironment := authRequestClientEnvironment{
-		Application: application,
-		Os:          operatingSystem,
-		OsVersion:   platform,
-	}
-	requestMain := authRequestData{
-		ClientAppID:       clientType,
-		ClientAppVersion:  SnowflakeGoDriverVersion,
-		AccountName:       account,
-		ClientEnvironment: clientEnvironment,
-		Authenticator:     authenticator,
-	}
-	authRequest := authRequest{
-		Data: requestMain,
-	}
-	params := &url.Values{}
-	jsonBody, err := json.Marshal(authRequest)
-	if err != nil {
-		return nil, err
-	}
-	glog.V(2).Infof("PARAMS for Auth: %v, %v", params, sr)
-	respd, err := sr.FuncPostAuthSAML(sr, headers, jsonBody, sr.LoginTimeout)
-	if err != nil {
-		return nil, err
-	}
-	if !respd.Success {
-		glog.V(1).Infoln("Authentication FAILED")
-		glog.Flush()
-		sr.Token = ""
-		sr.MasterToken = ""
-		sr.SessionID = -1
-		code, err := strconv.Atoi(respd.Code)
-		if err != nil {
-			code = -1
-			return nil, err
-		}
-		return nil, &SnowflakeError{
-			Number:   code,
-			SQLState: SQLStateConnectionRejected,
-			Message:  respd.Message,
-		}
-	}
-	glog.V(2).Info("step 2: validate Token and SSO URL has the same prefix as authenticator")
-	var b1, b2 bool
-	if b1, err = isPrefixEqual(authenticator, respd.Data.TokenURL); err != nil {
-		return nil, err
-	}
-	if b2, err = isPrefixEqual(authenticator, respd.Data.SSOURL); err != nil {
-		return nil, err
-	}
-	if !b1 || !b2 {
-		return nil, &SnowflakeError{
-			Number:      ErrCodeIdpConnectionError,
-			SQLState:    SQLStateConnectionRejected,
-			Message:     errMsgIdpConnectionError,
-			MessageArgs: []interface{}{authenticator, respd.Data.TokenURL, respd.Data.SSOURL},
-		}
-	}
-	glog.V(2).Info("step 3: query IDP token url to authenticate and retrieve access token")
-	jsonBody, err = json.Marshal(authOKTARequest{
-		Username: user,
-		Password: password,
-	})
-	if err != nil {
-		return nil, err
-	}
-	respa, err := sr.FuncPostAuthOKTA(sr, headers, jsonBody, respd.Data.TokenURL, sr.LoginTimeout)
-	if err != nil {
-		return nil, err
-	}
-
-	glog.V(2).Info("step 4: query IDP URL snowflake app to get SAML response")
-	params = &url.Values{}
-	params.Add("RelayState", "/some/deep/link")
-	params.Add("onetimetoken", respa.CookieToken)
-
-	headers = make(map[string]string)
-	headers["accept"] = "*/*"
-	bd, err := sr.FuncGetSSO(sr, params, headers, respd.Data.SSOURL, sr.LoginTimeout)
-	if err != nil {
-		return nil, err
-	}
-	glog.V(2).Info("step 5: validate post_back_url matches Snowflake URL")
-	tgtURL, err := postBackURL(bd)
-	if err != nil {
-		return nil, err
-	}
-	fullURL := fmt.Sprintf("%s://%s:%d", sr.Protocol, sr.Host, sr.Port)
-	glog.V(2).Infof("tgtURL: %v, origURL: %v", tgtURL, fullURL)
-	if b2, err = isPrefixEqual(tgtURL, fullURL); err != nil {
-		return nil, err
-	}
-	if !b2 {
-		return nil, &SnowflakeError{
-			Number:      ErrCodeSSOURLNotMatch,
-			SQLState:    SQLStateConnectionRejected,
-			Message:     errMsgSSOURLNotMatch,
-			MessageArgs: []interface{}{tgtURL, fullURL},
-		}
-	}
-	return bd, nil
-}
-
-func postBackURL(htmlData []byte) (urlp string, err error) {
-	idx0 := bytes.Index(htmlData, []byte("<form"))
-	if len(parameters) > 0 {
-		req.Bindings = make(map[string]execBindParameter, len(parameters))
- for i, n := 0, len(parameters); i < n; i++ { - t := goTypeToSnowflake(parameters[i].Value, tsmode) - glog.V(2).Infof("tmode: %v\n", t) - if t == "CHANGE_TYPE" { - tsmode, err = dataTypeMode(parameters[i].Value) - if err != nil { - return nil, err - } - } else { - v1, err := valueToString(parameters[i].Value, tsmode) - if err != nil { - return nil, err - } - req.Bindings[strconv.Itoa(idx)] = execBindParameter{ - Type: t, - Value: v1, - } - idx++ - } - } - } - glog.V(2).Infof("bindings: %v", req.Bindings) - - headers := make(map[string]string) - headers["Content-Type"] = headerContentTypeApplicationJSON - headers["accept"] = headerAcceptTypeApplicationSnowflake // TODO v1.1: change to JSON in case of PUT/GET - headers["User-Agent"] = userAgent - - jsonBody, err := json.Marshal(req) - if err != nil { - return nil, err - } - - var data *execResponse - data, err = sc.rest.FuncPostQuery(ctx, sc.rest, &url.Values{}, headers, jsonBody, sc.rest.RequestTimeout) - if err != nil { - return nil, err - } - var code int - if data.Code != "" { - code, err = strconv.Atoi(data.Code) - if err != nil { - code = -1 - return nil, err - } - } else { - code = -1 - } - glog.V(2).Infof("Success: %v, Code: %v", data.Success, code) - if !data.Success { - return nil, &SnowflakeError{ - Number: code, - SQLState: data.Data.SQLState, - Message: data.Message, - QueryID: data.Data.QueryID, - } - } - glog.V(2).Info("Exec/Query SUCCESS") - sc.cfg.Database = data.Data.FinalDatabaseName - sc.cfg.Schema = data.Data.FinalSchemaName - sc.cfg.Role = data.Data.FinalRoleName - sc.cfg.Warehouse = data.Data.FinalWarehouseName - sc.QueryID = data.Data.QueryID - sc.SQLState = data.Data.SQLState - sc.populateSessionParameters(data.Data.Parameters) - return data, err -} - -func (sc *snowflakeConn) Begin() (driver.Tx, error) { - return sc.BeginTx(context.TODO(), driver.TxOptions{}) -} - -func (sc *snowflakeConn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) { - glog.V(2).Info("BeginTx") - if opts.ReadOnly { - return nil, &SnowflakeError{ - Number: ErrNoReadOnlyTransaction, - SQLState: SQLStateFeatureNotSupported, - Message: errMsgNoReadOnlyTransaction, - } - } - if int(opts.Isolation) != int(sql.LevelDefault) { - return nil, &SnowflakeError{ - Number: ErrNoDefaultTransactionIsolationLevel, - SQLState: SQLStateFeatureNotSupported, - Message: errMsgNoDefaultTransactionIsolationLevel, - } - } - if sc.rest == nil { - return nil, driver.ErrBadConn - } - _, err := sc.exec(ctx, "BEGIN", false, false, nil) - if err != nil { - return nil, err - } - return &snowflakeTx{sc}, err -} - -func (sc *snowflakeConn) cleanup() { - glog.Flush() // must flush log buffer while the process is running. 
- sc.rest = nil - sc.cfg = nil -} - -func (sc *snowflakeConn) Close() (err error) { - glog.V(2).Infoln("Close") - sc.stopHeartBeat() - - // ensure transaction is rollbacked - _, err = sc.exec(context.Background(), "ROLLBACK", false, false, nil) - if err != nil { - glog.V(2).Info(err) - } - err = sc.rest.FuncCloseSession(sc.rest) - if err != nil { - glog.V(2).Info(err) - } - sc.cleanup() - return nil -} -func (sc *snowflakeConn) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) { - glog.V(2).Infoln("Prepare") - if sc.rest == nil { - return nil, driver.ErrBadConn - } - stmt := &snowflakeStmt{ - sc: sc, - query: query, - } - return stmt, nil -} - -func (sc *snowflakeConn) Prepare(query string) (driver.Stmt, error) { - return sc.PrepareContext(context.TODO(), query) -} - -func (sc *snowflakeConn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) { - glog.V(2).Infof("Exec: %#v, %v", query, args) - if sc.rest == nil { - return nil, driver.ErrBadConn - } - // TODO: handle noResult and isInternal - data, err := sc.exec(ctx, query, false, false, args) - if err != nil { - return nil, err - } - var updatedRows int64 - if sc.isDml(data.Data.StatementTypeID) { - // collects all values from the returned row sets - updatedRows = 0 - for i, n := 0, len(data.Data.RowType); i < n; i++ { - v, err := strconv.ParseInt(*data.Data.RowSet[0][i], 10, 64) - if err != nil { - return nil, err - } - updatedRows += v - } - glog.V(2).Infof("number of updated rows: %#v", updatedRows) - return &snowflakeResult{ - affectedRows: updatedRows, - insertID: -1}, nil // last insert id is not supported by Snowflake - } - glog.V(2).Info("DDL") - return driver.ResultNoRows, nil -} - -func (sc *snowflakeConn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) { - glog.V(2).Infoln("Query") - if sc.rest == nil { - return nil, driver.ErrBadConn - } - // TODO: handle noResult and isInternal - data, err := sc.exec(ctx, query, false, false, args) - if err != nil { - glog.V(2).Infof("error: %v", err) - return nil, err - } - - rows := new(snowflakeRows) - rows.sc = sc - rows.RowType = data.Data.RowType - rows.ChunkDownloader = &snowflakeChunkDownloader{ - sc: sc, - ctx: ctx, - CurrentChunk: data.Data.RowSet, - ChunkMetas: data.Data.Chunks, - Total: int64(data.Data.Total), - TotalRowIndex: int64(-1), - Qrmk: data.Data.Qrmk, - ChunkHeader: data.Data.ChunkHeaders, - FuncDownload: downloadChunk, - FuncDownloadHelper: downloadChunkHelper, - FuncGet: getChunk, - } - rows.ChunkDownloader.start() - return rows, err -} - -func (sc *snowflakeConn) Exec( - query string, - args []driver.Value) ( - driver.Result, error) { - return sc.ExecContext(context.TODO(), query, toNamedValues(args)) -} - -func (sc *snowflakeConn) Query( - query string, - args []driver.Value) ( - driver.Rows, error) { - return sc.QueryContext(context.TODO(), query, toNamedValues(args)) -} - -func (sc *snowflakeConn) Ping(ctx context.Context) error { - glog.V(2).Infoln("Ping") - if sc.rest == nil { - return driver.ErrBadConn - } - // TODO: handle noResult and isInternal - _, err := sc.exec(ctx, "SELECT 1", false, false, []driver.NamedValue{}) - return err -} - -func (sc *snowflakeConn) populateSessionParameters(parameters []nameValueParameter) { - // other session parameters (not all) - glog.V(2).Infof("params: %#v", parameters) - for _, param := range parameters { - v := "" - switch param.Value.(type) { - case int64: - if vv, ok := param.Value.(int64); ok { - v = 
strconv.FormatInt(vv, 10) - } - case float64: - if vv, ok := param.Value.(float64); ok { - v = strconv.FormatFloat(vv, 'g', -1, 64) - } - case bool: - if vv, ok := param.Value.(bool); ok { - v = strconv.FormatBool(vv) - } - default: - if vv, ok := param.Value.(string); ok { - v = vv - } - } - glog.V(3).Infof("parameter. name: %v, value: %v", param.Name, v) - sc.cfg.Params[strings.ToLower(param.Name)] = &v - } -} - -func (sc *snowflakeConn) isClientSessionKeepAliveEnabled() bool { - v, ok := sc.cfg.Params[sessionClientSessionKeepAlive] - if !ok { - return false - } - return strings.Compare(*v, "true") == 0 -} - -func (sc *snowflakeConn) startHeartBeat() { - if !sc.isClientSessionKeepAliveEnabled() { - return - } - sc.rest.HeartBeat = &heartbeat{ - restful: sc.rest, - } - sc.rest.HeartBeat.start() -} - -func (sc *snowflakeConn) stopHeartBeat() { - if !sc.isClientSessionKeepAliveEnabled() { - return - } - sc.rest.HeartBeat.stop() -} diff --git a/vendor/github.com/snowflakedb/gosnowflake/converter.go b/vendor/github.com/snowflakedb/gosnowflake/converter.go deleted file mode 100644 index 4a88dee..0000000 --- a/vendor/github.com/snowflakedb/gosnowflake/converter.go +++ /dev/null @@ -1,251 +0,0 @@ -// Copyright (c) 2017-2018 Snowflake Computing Inc. All right reserved. - -package gosnowflake - -import ( - "database/sql/driver" - "encoding/hex" - "fmt" - "reflect" - "strconv" - "strings" - "time" -) - -// goTypeToSnowflake translates Go data type to Snowflake data type. -func goTypeToSnowflake(v driver.Value, tsmode string) string { - switch v.(type) { - case int64: - return "FIXED" - case float64: - return "REAL" - case bool: - return "BOOLEAN" - case string: - return "TEXT" - case []byte: - if tsmode == "BINARY" { - return "BINARY" // may be redundant but ensures BINARY type - } - if bd, ok := v.([]byte); ok { - if bd == nil || len(bd) != 1 { - return "TEXT" // invalid byte array. won't take as BINARY - } - _, err := dataTypeMode(v) - if err != nil { - return "TEXT" // not supported dataType - } - return "CHANGE_TYPE" - } - case time.Time: - return tsmode - } - return "TEXT" -} - -// snowflakeTypeToGo translates Snowflake data type to Go data type. -func snowflakeTypeToGo(dbtype string, scale int64) reflect.Type { - switch dbtype { - case "fixed": - if scale == 0 { - return reflect.TypeOf(int64(0)) - } - return reflect.TypeOf(float64(0)) - case "real": - return reflect.TypeOf(float64(0)) - case "text", "variant", "object", "array": - return reflect.TypeOf("") - case "date", "time", "timestamp_ltz", "timestamp_ntz", "timestamp_tz": - return reflect.TypeOf(time.Now()) - case "binary": - return reflect.TypeOf([]byte{}) - case "boolean": - return reflect.TypeOf(true) - } - glog.V(1).Infof("unsupported dbtype is specified. %v", dbtype) - glog.Flush() - return reflect.TypeOf("") -} - -// valueToString converts arbitrary golang type to a string. This is mainly used in binding data with placeholders -// in queries. 
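As a complement to the doc comment above, a simplified sketch of the conversion contract: scalar Go values become their text form and nil becomes a nil pointer (sent as SQL NULL). toBindString is a hypothetical reduction; the real valueToString below additionally handles []byte, slices, maps, and the time.Time placeholder modes:

    package main

    import (
        "fmt"
        "strconv"
    )

    // toBindString mirrors the scalar cases of the driver's conversion only.
    func toBindString(v interface{}) *string {
        var s string
        switch t := v.(type) {
        case nil:
            return nil // bound as SQL NULL
        case bool:
            s = strconv.FormatBool(t)
        case int64:
            s = strconv.FormatInt(t, 10)
        case float64:
            s = strconv.FormatFloat(t, 'g', -1, 64)
        default:
            s = fmt.Sprint(t)
        }
        return &s
    }

    func main() {
        for _, v := range []interface{}{true, int64(42), 3.14, nil} {
            if p := toBindString(v); p != nil {
                fmt.Println(*p)
            } else {
                fmt.Println("NULL")
            }
        }
    }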
-func valueToString(v driver.Value, tsmode string) (*string, error) { - glog.V(2).Infof("TYPE: %v, %v", reflect.TypeOf(v), reflect.ValueOf(v)) - if v == nil { - return nil, nil - } - v1 := reflect.ValueOf(v) - switch v1.Kind() { - case reflect.Bool: - s := strconv.FormatBool(v1.Bool()) - return &s, nil - case reflect.Int64: - s := strconv.FormatInt(v1.Int(), 10) - return &s, nil - case reflect.Float64: - s := strconv.FormatFloat(v1.Float(), 'g', -1, 32) - return &s, nil - case reflect.String: - s := v1.String() - return &s, nil - case reflect.Slice, reflect.Map: - if v1.IsNil() { - return nil, nil - } - if bd, ok := v.([]byte); ok { - if tsmode == "BINARY" { - s := hex.EncodeToString(bd) - return &s, nil - } - } - // TODO: is this good enough? - s := v1.String() - return &s, nil - case reflect.Struct: - if tm, ok := v.(time.Time); ok { - switch tsmode { - case "DATE": - _, offset := tm.Zone() - tm = tm.Add(time.Second * time.Duration(offset)) - s := fmt.Sprintf("%d", tm.Unix()*1000) - return &s, nil - case "TIME": - s := fmt.Sprintf("%d", - (tm.Hour()*3600+tm.Minute()*60+tm.Second())*1e9+tm.Nanosecond()) - return &s, nil - case "TIMESTAMP_NTZ": - s := fmt.Sprintf("%d", tm.UnixNano()) - return &s, nil - case "TIMESTAMP_LTZ": - _, offset := tm.Zone() - tm = tm.Add(time.Second * time.Duration(offset)) - s := fmt.Sprintf("%d", tm.UnixNano()) - return &s, nil - case "TIMESTAMP_TZ": - _, offset := tm.Zone() - s := fmt.Sprintf("%v %v", tm.UnixNano(), offset/60+1440) - return &s, nil - } - } - } - return nil, fmt.Errorf("unsupported type: %v", v1.Kind()) -} - -// extractTimestamp extracts the internal timestamp data to epoch time in seconds and milliseconds -func extractTimestamp(srcValue *string) (sec int64, nsec int64, err error) { - glog.V(2).Infof("SRC: %v", srcValue) - var i int - for i = 0; i < len(*srcValue); i++ { - if (*srcValue)[i] == '.' { - sec, err = strconv.ParseInt((*srcValue)[0:i], 10, 64) - if err != nil { - return 0, 0, err - } - break - } - } - if i == len(*srcValue) { - // no fraction - sec, err = strconv.ParseInt(*srcValue, 10, 64) - if err != nil { - return 0, 0, err - } - nsec = 0 - } else { - s := (*srcValue)[i+1:] - nsec, err = strconv.ParseInt(s+strings.Repeat("0", 9-len(s)), 10, 64) - if err != nil { - return 0, 0, err - } - } - glog.V(2).Infof("sec: %v, nsec: %v", sec, nsec) - return sec, nsec, nil -} - -// stringToValue converts a pointer of string data to an arbitrary golang variable. This is mainly used in fetching -// data. 
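The epoch decoding performed by extractTimestamp above is easy to exercise on its own: Snowflake returns TIMESTAMP_* values as "<seconds>.<fraction>" strings, with the fraction right-padded to nanoseconds. A self-contained sketch under the assumption of well-formed input (parseEpoch is a hypothetical name):

    package main

    import (
        "fmt"
        "strconv"
        "strings"
        "time"
    )

    func parseEpoch(src string) (time.Time, error) {
        sec, frac := src, ""
        if i := strings.IndexByte(src, '.'); i >= 0 {
            sec, frac = src[:i], src[i+1:]
        }
        s, err := strconv.ParseInt(sec, 10, 64)
        if err != nil {
            return time.Time{}, err
        }
        var ns int64
        if frac != "" {
            // Right-pad the fraction to nine digits, as extractTimestamp does.
            ns, err = strconv.ParseInt(frac+strings.Repeat("0", 9-len(frac)), 10, 64)
            if err != nil {
                return time.Time{}, err
            }
        }
        return time.Unix(s, ns).UTC(), nil
    }

    func main() {
        t, _ := parseEpoch("1500000000.123")
        fmt.Println(t) // 2017-07-14 02:40:00.123 +0000 UTC
    }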
-func stringToValue(dest *driver.Value, srcColumnMeta execResponseRowType, srcValue *string) error { - if srcValue == nil { - glog.V(3).Infof("snowflake data type: %v, raw value: nil", srcColumnMeta.Type) - *dest = nil - return nil - } - glog.V(3).Infof("snowflake data type: %v, raw value: %v", srcColumnMeta.Type, *srcValue) - switch srcColumnMeta.Type { - case "text", "fixed", "real", "variant", "object": - *dest = *srcValue - return nil - case "date": - v, err := strconv.ParseInt(*srcValue, 10, 64) - if err != nil { - return err - } - *dest = time.Unix(v*86400, 0).UTC() - return nil - case "time": - sec, nsec, err := extractTimestamp(srcValue) - if err != nil { - return err - } - t0 := time.Time{} - *dest = t0.Add(time.Duration(sec*1e9 + nsec)) - return nil - case "timestamp_ntz": - sec, nsec, err := extractTimestamp(srcValue) - if err != nil { - return err - } - *dest = time.Unix(sec, nsec).UTC() - return nil - case "timestamp_ltz": - sec, nsec, err := extractTimestamp(srcValue) - if err != nil { - return err - } - tt := time.Unix(sec, nsec) - zone, offset := tt.Zone() // get timezone for the given datetime - glog.V(2).Infof("local: %v, %v", zone, offset) - *dest = tt.Add(time.Second * time.Duration(-offset)) - return nil - case "timestamp_tz": - glog.V(2).Infof("tz: %v", *srcValue) - - tm := strings.Split(*srcValue, " ") - if len(tm) != 2 { - return &SnowflakeError{ - Number: ErrInvalidTimestampTz, - SQLState: SQLStateInvalidDataTimeFormat, - Message: fmt.Sprintf("invalid TIMESTAMP_TZ data. The value doesn't consist of two numeric values separated by a space: %v", *srcValue), - } - } - sec, nsec, err := extractTimestamp(&tm[0]) - if err != nil { - return err - } - offset, err := strconv.ParseInt(tm[1], 10, 64) - if err != nil { - return &SnowflakeError{ - Number: ErrInvalidTimestampTz, - SQLState: SQLStateInvalidDataTimeFormat, - Message: fmt.Sprintf("invalid TIMESTAMP_TZ data. The offset value is not integer: %v", tm[1]), - } - } - loc := Location(int(offset) - 1440) - tt := time.Unix(sec, nsec) - *dest = tt.In(loc) - return nil - case "binary": - b, err := hex.DecodeString(*srcValue) - if err != nil { - return &SnowflakeError{ - Number: ErrInvalidBinaryHexForm, - SQLState: SQLStateNumericValueOutOfRange, - Message: err.Error(), - } - } - *dest = b - return nil - } - *dest = *srcValue - return nil -} diff --git a/vendor/github.com/snowflakedb/gosnowflake/datatype.go b/vendor/github.com/snowflakedb/gosnowflake/datatype.go deleted file mode 100644 index 7257d34..0000000 --- a/vendor/github.com/snowflakedb/gosnowflake/datatype.go +++ /dev/null @@ -1,145 +0,0 @@ -// Copyright (c) 2017-2018 Snowflake Computing Inc. All right reserved. - -package gosnowflake - -import ( - "bytes" - "database/sql" - "database/sql/driver" - "fmt" -) - -const ( - fixedType byte = iota - realType - textType - dateType - variantType - timestampLtzType - timestampNtzType - timestampTzType - objectType - arrayType - binaryType - timeType - booleanType -) - -var ( - // DataTypeFixed is a FIXED datatype. - DataTypeFixed = []byte{fixedType} - // DataTypeReal is a REAL datatype. - DataTypeReal = []byte{realType} - // DataTypeText is a TEXT datatype. - DataTypeText = []byte{textType} - // DataTypeDate is a Date datatype. - DataTypeDate = []byte{dateType} - // DataTypeVariant is a TEXT datatype. - DataTypeVariant = []byte{variantType} - // DataTypeTimestampLtz is a TIMESTAMP_LTZ datatype. - DataTypeTimestampLtz = []byte{timestampLtzType} - // DataTypeTimestampNtz is a TIMESTAMP_NTZ datatype. 
- DataTypeTimestampNtz = []byte{timestampNtzType} - // DataTypeTimestampTz is a TIMESTAMP_TZ datatype. - DataTypeTimestampTz = []byte{timestampTzType} - // DataTypeObject is a OBJECT datatype. - DataTypeObject = []byte{objectType} - // DataTypeArray is a ARRAY datatype. - DataTypeArray = []byte{arrayType} - // DataTypeBinary is a BINARY datatype. - DataTypeBinary = []byte{binaryType} - // DataTypeTime is a TIME datatype. - DataTypeTime = []byte{timeType} - // DataTypeBoolean is a BOOLEAN datatype. - DataTypeBoolean = []byte{booleanType} -) - -// dataTypeMode returns the subsequent data type in a string representation. -func dataTypeMode(v driver.Value) (tsmode string, err error) { - if bd, ok := v.([]byte); ok { - switch { - case bytes.Equal(bd, DataTypeDate): - tsmode = "DATE" - case bytes.Equal(bd, DataTypeTime): - tsmode = "TIME" - case bytes.Equal(bd, DataTypeTimestampLtz): - tsmode = "TIMESTAMP_LTZ" - case bytes.Equal(bd, DataTypeTimestampNtz): - tsmode = "TIMESTAMP_NTZ" - case bytes.Equal(bd, DataTypeTimestampTz): - tsmode = "TIMESTAMP_TZ" - case bytes.Equal(bd, DataTypeBinary): - tsmode = "BINARY" - default: - return "", fmt.Errorf(errMsgInvalidByteArray, v) - } - } else { - return "", fmt.Errorf(errMsgInvalidByteArray, v) - } - return tsmode, nil -} - -// SnowflakeParameter includes the columns output from SHOW PARAMETER command. -type SnowflakeParameter struct { - Key string - Value string - Default string - Level string - Description string - SetByUser string - SetInJob string - SetOn string - SetByThreadID string - SetByThreadName string - SetByClass string - ParameterComment string -} - -func populateSnowflakeParameter(colname string, p *SnowflakeParameter) interface{} { - switch colname { - case "key": - return &p.Key - case "value": - return &p.Value - case "default": - return &p.Default - case "level": - return &p.Level - case "description": - return &p.Description - case "set_by_user": - return &p.SetByUser - case "set_in_job": - return &p.SetInJob - case "set_on": - return &p.SetOn - case "set_by_thread_id": - return &p.SetByThreadID - case "set_by_thread_name": - return &p.SetByThreadName - case "set_by_class": - return &p.SetByClass - case "parameter_comment": - return &p.ParameterComment - default: - panic("unknown type " + colname) - } -} - -// ScanSnowflakeParameter binds SnowflakeParameter variable with an array of column buffer. -func ScanSnowflakeParameter(rows *sql.Rows) (*SnowflakeParameter, error) { - var err error - var columns []string - columns, err = rows.Columns() - if err != nil { - return nil, err - } - colNum := len(columns) - p := SnowflakeParameter{} - cols := make([]interface{}, colNum) - for i := 0; i < colNum; i++ { - cols[i] = populateSnowflakeParameter(columns[i], &p) - } - err = rows.Scan(cols...) - return &p, err -} diff --git a/vendor/github.com/snowflakedb/gosnowflake/doc.go b/vendor/github.com/snowflakedb/gosnowflake/doc.go deleted file mode 100644 index 06a644e..0000000 --- a/vendor/github.com/snowflakedb/gosnowflake/doc.go +++ /dev/null @@ -1,255 +0,0 @@ -/* -Package gosnowflake is a pure Go Snowflake driver for the database/sql package. - -Clients can use the database/sql package directly. For example: - - import ( - "database/sql" - - _ "github.com/snowflakedb/gosnowflake" - ) - - func main() { - db, err := sql.Open("snowflake", "user:password@myaccount/mydb") - if err != nil { - log.Fatal(err) - } - defer db.Close() - ... 
-	}
-
-Connection String
-
-Use Open to create a database handle with connection parameters:
-
-	db, err := sql.Open("snowflake", "<DSN string>")
-
-The Go Snowflake Driver supports the following connection syntaxes (or data source name formats):
-
-	* username[:password]@accountname/dbname/schemaname[?param1=value&...&paramN=valueN]
-	* username[:password]@accountname/dbname[?param1=value&...&paramN=valueN]
-	* username[:password]@hostname:port/dbname/schemaname?account=<account_name>[&param1=value&...&paramN=valueN]
-
-where all parameters must be escaped or, alternatively, use `Config` and `DSN` to construct a DSN string.
-
-The following example opens a database handle with the Snowflake account
-myaccount where the username is jsmith, password is mypassword, database is
-mydb, schema is testschema, and warehouse is mywh:
-
-	db, err := sql.Open("snowflake", "jsmith:mypassword@myaccount/mydb/testschema?warehouse=mywh")
-
-Connection Parameters
-
-The following connection parameters are supported:
-
-	* region : Specifies the Snowflake region. By default, the US West region is used.
-	  For the US East region, specify us-east-1.
-	  For the EU (Frankfurt) region, specify eu-central-1.
-	  For the AU (Australia) region, specify ap-southeast-2.
-
-	* account : Specifies the name of your Snowflake account, where <string> is the name
-	  assigned to your account by Snowflake. In the URL you received from
-	  Snowflake, your account name is the first segment in the domain (e.g.
-	  abc123 in https://abc123.snowflakecomputing.com). This parameter is
-	  optional if your account is specified after the @ character.
-
-	* database: Specifies the database to use by default in the client session
-	  (can be changed after login).
-
-	* schema: Specifies the database schema to use by default in the client
-	  session (can be changed after login).
-
-	* warehouse: Specifies the virtual warehouse to use by default for queries,
-	  loading, etc. in the client session (can be changed after login).
-
-	* role: Specifies the role to use by default for accessing Snowflake
-	  objects in the client session (can be changed after login).
-
-	* passcode: Specifies the passcode provided by Duo when using MFA for login.
-
-	* passcodeInPassword: false by default. Set to true if the MFA passcode is
-	  embedded in the login password. Appends the MFA passcode to the end of the
-	  password.
-
-	* loginTimeout: Specifies the timeout, in seconds, for login. The default
-	  is 60 seconds. The login request gives up after the timeout length if the
-	  HTTP response is success.
-
-	* authenticator: Specifies the authenticator to use for authenticating user credentials:
-		- To use the internal Snowflake authenticator, specify snowflake (Default).
-		- To authenticate through Okta, specify https://<okta_account_name>.okta.com (URL prefix for Okta).
-		- To authenticate using your IDP via a browser, specify externalbrowser.
-		- To authenticate via OAuth, specify oauth and provide an OAuth Access Token (see the token parameter below).
-
-	* application: Identifies your application to Snowflake Support.
-
-	* insecureMode: false by default. Set to true to bypass the Online
-	  Certificate Status Protocol (OCSP) certificate revocation check.
-	  IMPORTANT: Change the default value for testing or emergency situations only.
-
-	* token: a token that can be used to authenticate. Should be used in conjunction with the "oauth" authenticator.
-
-	* client_session_keep_alive: Set to true to have a heartbeat in the background every hour to keep the connection alive
-	  such that the connection session will never expire.
-	  Care should be taken in using this option, as it keeps the access open for as long as the process is alive.
-
-
-All other parameters are taken as session parameters. For example, the TIMESTAMP_OUTPUT_FORMAT session parameter can be
-set by adding:
-
-	...&TIMESTAMP_OUTPUT_FORMAT=MM-DD-YYYY...
-
-Proxy
-
-The Go Snowflake Driver honors the environment variables HTTP_PROXY, HTTPS_PROXY and NO_PROXY for the forward proxy setting.
-
-Logging
-
-By default, the driver's builtin logger is NOP; no output is generated. This is
-intentional so that applications using the same set of logger parameters do not
-conflict with glog, which is incorporated in the driver logging framework.
-
-In order to enable debug logging for the driver, add a build tag sfdebug to the
-go tool command lines, for example:
-
-	go build -tags=sfdebug
-
-For tests, run the test command with the tag along with glog parameters. For
-example, the following command will generate all activity logs on standard
-error.
-
-	go test -tags=sfdebug -v . -vmodule=*=2 -stderrthreshold=INFO
-
-Likewise, if you build your application with the tag, you may specify the same
-set of glog parameters.
-
-	your_go_program -vmodule=*=2 -stderrthreshold=INFO
-
-To get the logs for a specific module, use the -vmodule option. For example, to
-retrieve the driver.go and connection.go module logs:
-
-	your_go_program -vmodule=driver=2,connection=2 -stderrthreshold=INFO
-
-Note: If your request retrieves no logs, call db.Close() or glog.flush() to flush the glog buffer.
-
-Note: The logger may be changed in the future for better logging. Currently, if
-the applications use the same parameters as glog, you cannot collect both
-application and driver logs at the same time.
-
-Canceling Query by CtrlC
-
-From 0.5.0, signal handling responsibility has moved to the applications. If you want to cancel a
-query/command by Ctrl+C, add an os.Interrupt trap in context to execute methods that can take the context parameter,
-e.g., QueryContext, ExecContext.
-
-	// handle interrupt signal
-	ctx, cancel := context.WithCancel(context.Background())
-	c := make(chan os.Signal, 1)
-	signal.Notify(c, os.Interrupt)
-	defer func() {
-		signal.Stop(c)
-	}()
-	go func() {
-		<-c
-		log.Println("Caught signal, canceling...")
-		cancel()
-	}()
-	... (connection)
-	// execute a query
-	rows, err := db.QueryContext(ctx, query)
-	... (Ctrl+C to cancel the query)
-
-See cmd/selectmany.go for the full example.
-
-Supported Data Types
-
-Queries return SQL column type information in the ColumnType type. The
-DatabaseTypeName method returns the following strings representing Snowflake
-data types:
-
-	String Representation    Snowflake Data Type
-	FIXED                    NUMBER/INT
-	REAL                     REAL
-	TEXT                     VARCHAR/STRING
-	DATE                     DATE
-	TIME                     TIME
-	TIMESTAMP_LTZ            TIMESTAMP_LTZ
-	TIMESTAMP_NTZ            TIMESTAMP_NTZ
-	TIMESTAMP_TZ             TIMESTAMP_TZ
-	VARIANT                  VARIANT
-	OBJECT                   OBJECT
-	ARRAY                    ARRAY
-	BINARY                   BINARY
-	BOOLEAN                  BOOLEAN
-
-Binding Time Type
-
-Go's database/sql package limits Go's data types to the following for binding and fetching:
-
-	int64
-	float64
-	bool
-	[]byte
-	string
-	time.Time
-
-Fetching data isn't an issue since the database data type is provided along
-with the data so the Go Snowflake Driver can translate Snowflake data types to
-Go native data types.
-
-When the client binds data to send to the server, however, the driver cannot
-determine the date/timestamp data types to associate with binding parameters.
-For example: - - dbt.mustExec("CREATE OR REPLACE TABLE tztest (id int, ntz, timestamp_ntz, ltz timestamp_ltz)") - // ... - stmt, err :=dbt.db.Prepare("INSERT INTO tztest(id,ntz,ltz) VALUES(1, ?, ?)") - // ... - tmValue time.Now() - // ... Is tmValue a TIMESTAMP_NTZ or TIMESTAMP_LTZ? - _, err = stmt.Exec(tmValue, tmValue) - -To resolve this issue, a binding parameter flag is introduced that associates -any subsequent time.Time type to the DATE, TIME, TIMESTAMP_LTZ, TIMESTAMP_NTZ -or BINARY data type. The above example could be rewritten as follows: - - import ( - sf "github.com/snowflakedb/gosnowflake" - ) - dbt.mustExec("CREATE OR REPLACE TABLE tztest (id int, ntz, timestamp_ntz, ltz timestamp_ltz)") - // ... - stmt, err :=dbt.db.Prepare("INSERT INTO tztest(id,ntz,ltz) VALUES(1, ?, ?)") - // ... - tmValue time.Now() - // ... - _, err = stmt.Exec(sf.DataTypeTimestampNtz, tmValue, sf.DataTypeTimestampLtz, tmValue) - -Timestamps with Time Zones - -The driver fetches TIMESTAMP_TZ (timestamp with time zone) data using the -offset-based Location types, which represent a collection of time offsets in -use in a geographical area, such as CET (Central European Time) or UTC -(Coordinated Universal Time). The offset-based Location data is generated and -cached when a Go Snowflake Driver application starts, and if the given offset -is not in the cache, it is generated dynamically. - -Currently, Snowflake doesn't support the name-based Location types, e.g., -America/Los_Angeles. - -For more information about Location types, see the Go documentation for https://golang.org/pkg/time/#Location. - -Binary Data - -Internally, this feature leverages the []byte data type. As a result, BINARY -data cannot be bound without the binding parameter flag. In the following -example, sf is an alias for the gosnowflake package: - - var b = []byte{0x01, 0x02, 0x03} - _, err = stmt.Exec(sf.DataTypeBinary, b) - -Limitations - -Currently, GET and PUT operations are unsupported. -*/ -package gosnowflake diff --git a/vendor/github.com/snowflakedb/gosnowflake/driver.go b/vendor/github.com/snowflakedb/gosnowflake/driver.go deleted file mode 100644 index b8638ef..0000000 --- a/vendor/github.com/snowflakedb/gosnowflake/driver.go +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright (c) 2017-2018 Snowflake Computing Inc. All right reserved. - -package gosnowflake - -import ( - "database/sql" - "database/sql/driver" - "net/http" - "strings" -) - -// SnowflakeDriver is a context of Go Driver -type SnowflakeDriver struct{} - -// Open creates a new connection. -func (d SnowflakeDriver) Open(dsn string) (driver.Conn, error) { - glog.V(2).Info("Open") - var err error - sc := &snowflakeConn{ - SequeceCounter: 0, - } - sc.cfg, err = ParseDSN(dsn) - if err != nil { - sc.cleanup() - return nil, err - } - st := SnowflakeTransport - if sc.cfg.InsecureMode { - // no revocation check with OCSP. Think twice when you want to enable this option. 
- st = snowflakeInsecureTransport - } - if err != nil { - return nil, err - } - // authenticate - sc.rest = &snowflakeRestful{ - Host: sc.cfg.Host, - Port: sc.cfg.Port, - Protocol: sc.cfg.Protocol, - Client: &http.Client{ - Timeout: defaultLoginTimeout, // each request timeout - Transport: st, - }, - Authenticator: sc.cfg.Authenticator, - LoginTimeout: sc.cfg.LoginTimeout, - RequestTimeout: sc.cfg.RequestTimeout, - FuncPost: postRestful, - FuncGet: getRestful, - FuncPostQuery: postRestfulQuery, - FuncPostQueryHelper: postRestfulQueryHelper, - FuncRenewSession: renewRestfulSession, - FuncPostAuth: postAuth, - FuncCloseSession: closeSession, - FuncCancelQuery: cancelQuery, - FuncPostAuthSAML: postAuthSAML, - FuncPostAuthOKTA: postAuthOKTA, - FuncGetSSO: getSSO, - } - var authData *authResponseMain - var samlResponse []byte - var proofKey []byte - - authenticator := strings.ToUpper(sc.cfg.Authenticator) - glog.V(2).Infof("Authenticating via %v", authenticator) - switch authenticator { - case authenticatorExternalBrowser: - samlResponse, proofKey, err = authenticateByExternalBrowser( - sc.rest, - sc.cfg.Authenticator, - sc.cfg.Application, - sc.cfg.Account, - sc.cfg.User, - sc.cfg.Password) - if err != nil { - sc.cleanup() - return nil, err - } - case authenticatorOAuth: - case authenticatorSnowflake: - // Nothing to do, parameters needed for auth should be already set in sc.cfg - break - default: - // this is actually okta, which is something misleading - samlResponse, err = authenticateBySAML( - sc.rest, - sc.cfg.Authenticator, - sc.cfg.Application, - sc.cfg.Account, - sc.cfg.User, - sc.cfg.Password) - if err != nil { - sc.cleanup() - return nil, err - } - } - authData, err = authenticate( - sc, - samlResponse, - proofKey) - if err != nil { - sc.cleanup() - return nil, err - } - err = d.validateDefaultParameters(authData.SessionInfo.DatabaseName, &sc.cfg.Database) - if err != nil { - return nil, err - } - err = d.validateDefaultParameters(authData.SessionInfo.SchemaName, &sc.cfg.Schema) - if err != nil { - return nil, err - } - err = d.validateDefaultParameters(authData.SessionInfo.WarehouseName, &sc.cfg.Warehouse) - if err != nil { - return nil, err - } - err = d.validateDefaultParameters(authData.SessionInfo.RoleName, &sc.cfg.Role) - if err != nil { - return nil, err - } - sc.populateSessionParameters(authData.Parameters) - sc.startHeartBeat() - return sc, nil -} - -func (d SnowflakeDriver) validateDefaultParameters(sessionValue string, defaultValue *string) error { - if *defaultValue != "" && strings.ToLower(*defaultValue) != strings.ToLower(sessionValue) { - return &SnowflakeError{ - Number: ErrCodeObjectNotExists, - SQLState: SQLStateConnectionFailure, - Message: errMsgObjectNotExists, - MessageArgs: []interface{}{*defaultValue}, - } - } - *defaultValue = sessionValue - return nil -} - -func init() { - sql.Register("snowflake", &SnowflakeDriver{}) -} diff --git a/vendor/github.com/snowflakedb/gosnowflake/dsn.go b/vendor/github.com/snowflakedb/gosnowflake/dsn.go deleted file mode 100644 index f739ad1..0000000 --- a/vendor/github.com/snowflakedb/gosnowflake/dsn.go +++ /dev/null @@ -1,452 +0,0 @@ -// Copyright (c) 2017-2018 Snowflake Computing Inc. All right reserved. 
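Since the init function in driver.go registers the driver under the name "snowflake", applications never touch SnowflakeDriver directly; everything flows through database/sql. A minimal usage sketch with placeholder credentials (the DSN shape mirrors the doc.go examples):

    package main

    import (
        "database/sql"
        "log"

        _ "github.com/snowflakedb/gosnowflake" // side effect: registers the "snowflake" driver
    )

    func main() {
        db, err := sql.Open("snowflake", "jsmith:mypassword@myaccount/mydb/testschema?warehouse=mywh")
        if err != nil {
            log.Fatal(err)
        }
        defer db.Close()

        // sql.Open only validates the DSN; the first query triggers Open and
        // the authentication flow described above.
        var one int
        if err := db.QueryRow("SELECT 1").Scan(&one); err != nil {
            log.Fatal(err)
        }
        log.Printf("SELECT 1 returned %d", one)
    }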
- -package gosnowflake - -import ( - "fmt" - "net/url" - "strconv" - "strings" - "time" -) - -const ( - defaultLoginTimeout = 60 * time.Second - defaultRequestTimeout = 0 * time.Second - defaultAuthenticator = "snowflake" - defaultDomain = ".snowflakecomputing.com" -) - -// Config is a set of configuration parameters -type Config struct { - Account string // Account name - User string // Username - Password string // Password (requires User) - Database string // Database name - Schema string // Schema - Warehouse string // Warehouse - Role string // Role - Region string // Region - Params map[string]*string // other connection parameters - - Protocol string // http or https (optional) - Host string // hostname (optional) - Port int // port (optional) - - Authenticator string // snowflake, okta URL, oauth or externalbrowser - Passcode string - PasscodeInPassword bool - - LoginTimeout time.Duration // Login timeout - RequestTimeout time.Duration // request timeout - - Application string // application name. - InsecureMode bool // driver doesn't check certificate revocation status - - Token string // Token to use for OAuth / JWT / other forms of token based auth -} - -// DSN constructs a DSN for Snowflake db. -func DSN(cfg *Config) (dsn string, err error) { - hasHost := true - if cfg.Host == "" { - hasHost = false - if cfg.Region == "" { - cfg.Host = cfg.Account + defaultDomain - } else { - cfg.Host = cfg.Account + "." + cfg.Region + defaultDomain - } - } - // in case account includes region - posDot := strings.Index(cfg.Account, ".") - if posDot > 0 { - cfg.Region = cfg.Account[posDot+1:] - cfg.Account = cfg.Account[:posDot] - } - - err = fillMissingConfigParameters(cfg) - if err != nil { - return "", err - } - params := &url.Values{} - if hasHost && cfg.Account != "" { - // account may not be included in a Host string - params.Add("account", cfg.Account) - } - if cfg.Database != "" { - params.Add("database", cfg.Database) - } - if cfg.Schema != "" { - params.Add("schema", cfg.Schema) - } - if cfg.Warehouse != "" { - params.Add("warehouse", cfg.Warehouse) - } - if cfg.Role != "" { - params.Add("role", cfg.Role) - } - if cfg.Region != "" { - params.Add("region", cfg.Region) - } - if cfg.Authenticator != defaultAuthenticator { - params.Add("authenticator", strings.ToLower(cfg.Authenticator)) - } - if cfg.Passcode != "" { - params.Add("passcode", cfg.Passcode) - } - if cfg.PasscodeInPassword { - params.Add("passcodeInPassword", strconv.FormatBool(cfg.PasscodeInPassword)) - } - if cfg.LoginTimeout != defaultLoginTimeout { - params.Add("loginTimeout", strconv.FormatInt(int64(cfg.LoginTimeout/time.Second), 10)) - } - if cfg.RequestTimeout != defaultRequestTimeout { - params.Add("requestTimeout", strconv.FormatInt(int64(cfg.RequestTimeout/time.Second), 10)) - } - if cfg.Application != clientType { - params.Add("application", cfg.Application) - } - if cfg.Protocol != "" && cfg.Protocol != "https" { - params.Add("protocol", cfg.Protocol) - } - if cfg.Token != "" { - params.Add("token", cfg.Token) - } - if cfg.Params != nil { - for k, v := range cfg.Params { - params.Add(k, *v) - } - } - dsn = fmt.Sprintf("%v:%v@%v:%v", url.QueryEscape(cfg.User), url.QueryEscape(cfg.Password), cfg.Host, cfg.Port) - if params.Encode() != "" { - dsn += "?" + params.Encode() - } - return -} - -// ParseDSN parses the DSN string to a Config. 
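-// It accepts the same layouts that DSN produces. For instance, a hypothetical
-// account-style DSN parses as follows:
-//
-//    cfg, _ := ParseDSN("myuser:mypass@myaccount/mydb/public?role=sysadmin")
-//    // cfg.Account == "myaccount", cfg.Database == "mydb",
-//    // cfg.Schema == "public", cfg.Role == "sysadmin"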
-func ParseDSN(dsn string) (cfg *Config, err error) { - // New config with some default values - cfg = &Config{ - Params: make(map[string]*string), - } - - // user[:password]@account/database/schema[?param1=value1¶mN=valueN] - // or - // user[:password]@account/database[?param1=value1¶mN=valueN] - // or - // user[:password]@host:port/database/schema?account=user_account[?param1=value1¶mN=valueN] - // or - // host:port/database/schema?account=user_account[?param1=value1¶mN=valueN] - - foundSlash := false - secondSlash := false - done := false - var i int - posQuestion := len(dsn) - for i = len(dsn) - 1; i >= 0; i-- { - switch { - case dsn[i] == '/': - foundSlash = true - - // left part is empty if i <= 0 - var j int - posSecondSlash := i - if i > 0 { - for j = i - 1; j >= 0; j-- { - switch { - case dsn[j] == '/': - // second slash - secondSlash = true - posSecondSlash = j - case dsn[j] == '@': - // username[:password]@... - cfg.User, cfg.Password = parseUserPassword(j, dsn) - } - if dsn[j] == '@' { - break - } - } - - // account or host:port - err = parseAccountHostPort(cfg, j, posSecondSlash, dsn) - if err != nil { - return nil, err - } - } - // [?param1=value1&...¶mN=valueN] - // Find the first '?' in dsn[i+1:] - err = parseParams(cfg, i, dsn) - if err != nil { - return - } - if secondSlash { - cfg.Database = dsn[posSecondSlash+1 : i] - cfg.Schema = dsn[i+1 : posQuestion] - } else { - cfg.Database = dsn[posSecondSlash+1 : posQuestion] - } - done = true - case dsn[i] == '?': - posQuestion = i - } - if done { - break - } - } - if !foundSlash { - // no db or schema is specified - var j int - for j = len(dsn) - 1; j >= 0; j-- { - switch { - case dsn[j] == '@': - cfg.User, cfg.Password = parseUserPassword(j, dsn) - case dsn[j] == '?': - posQuestion = j - } - if dsn[j] == '@' { - break - } - } - err = parseAccountHostPort(cfg, j, posQuestion, dsn) - if err != nil { - return nil, err - } - err = parseParams(cfg, posQuestion-1, dsn) - if err != nil { - return - } - } - if cfg.Account == "" && strings.HasSuffix(cfg.Host, defaultDomain) { - posDot := strings.Index(cfg.Host, ".") - if posDot > 0 { - cfg.Account = cfg.Host[:posDot] - } - } - - err = fillMissingConfigParameters(cfg) - if err != nil { - return nil, err - } - - // unescape parameters - var s string - s, err = url.QueryUnescape(cfg.User) - if err != nil { - return nil, err - } - cfg.User = s - s, err = url.QueryUnescape(cfg.Password) - if err != nil { - return nil, err - } - cfg.Password = s - s, err = url.QueryUnescape(cfg.Database) - if err != nil { - return nil, err - } - cfg.Database = s - s, err = url.QueryUnescape(cfg.Schema) - if err != nil { - return nil, err - } - cfg.Schema = s - s, err = url.QueryUnescape(cfg.Role) - if err != nil { - return nil, err - } - cfg.Role = s - s, err = url.QueryUnescape(cfg.Warehouse) - if err != nil { - return nil, err - } - cfg.Warehouse = s - return cfg, nil -} - -func fillMissingConfigParameters(cfg *Config) error { - if strings.Trim(cfg.Authenticator, " ") == "" { - cfg.Authenticator = defaultAuthenticator - } - if strings.Trim(cfg.Account, " ") == "" { - return ErrEmptyAccount - } - authenticator := strings.ToUpper(cfg.Authenticator) - - if authenticator != authenticatorOAuth && strings.Trim(cfg.User, " ") == "" { - // oauth does not require a username - return ErrEmptyUsername - } - - if authenticator != authenticatorExternalBrowser && authenticator != authenticatorOAuth && strings.Trim(cfg.Password, " ") == "" { - // no password parameter is required for EXTERNALBROWSER and OAUTH. 
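-        // (externalbrowser obtains credentials interactively via the IdP,
-        // and oauth supplies them through cfg.Token instead)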
- return ErrEmptyPassword - } - if strings.Trim(cfg.Protocol, " ") == "" { - cfg.Protocol = "https" - } - if cfg.Port == 0 { - cfg.Port = 443 - } - - cfg.Region = strings.Trim(cfg.Region, " ") - if cfg.Region != "" { - // region is specified but not included in Host - i := strings.Index(cfg.Host, defaultDomain) - if i >= 1 { - hostPrefix := cfg.Host[0:i] - if !strings.HasSuffix(hostPrefix, cfg.Region) { - cfg.Host = hostPrefix + "." + cfg.Region + defaultDomain - } - } - } - if cfg.LoginTimeout == 0 { - cfg.LoginTimeout = defaultLoginTimeout - } - if cfg.RequestTimeout == 0 { - cfg.RequestTimeout = defaultRequestTimeout - } - if strings.Trim(cfg.Application, " ") == "" { - cfg.Application = clientType - } - if strings.HasSuffix(cfg.Host, defaultDomain) && len(cfg.Host) == len(defaultDomain) { - return &SnowflakeError{ - Number: ErrCodeFailedToParseHost, - Message: errMsgFailedToParseHost, - MessageArgs: []interface{}{cfg.Host}, - } - } - return nil -} - -// transformAccountToHost transforms host to accout name -func transformAccountToHost(cfg *Config) (err error) { - if cfg.Port == 0 && !strings.HasSuffix(cfg.Host, defaultDomain) && cfg.Host != "" { - // account name is specified instead of host:port - cfg.Account = cfg.Host - cfg.Host = cfg.Account + defaultDomain - cfg.Port = 443 - posDot := strings.Index(cfg.Account, ".") - if posDot > 0 { - cfg.Region = cfg.Account[posDot+1:] - cfg.Account = cfg.Account[:posDot] - } - } - return nil -} - -// parseAccountHostPort parses the DSN string to attempt to get account or host and port. -func parseAccountHostPort(cfg *Config, posAt, posSlash int, dsn string) (err error) { - // account or host:port - var k int - for k = posAt + 1; k < posSlash; k++ { - if dsn[k] == ':' { - cfg.Port, err = strconv.Atoi(dsn[k+1 : posSlash]) - if err != nil { - err = &SnowflakeError{ - Number: ErrCodeFailedToParsePort, - Message: errMsgFailedToParsePort, - MessageArgs: []interface{}{dsn[k+1 : posSlash]}, - } - return - } - break - } - } - cfg.Host = dsn[posAt+1 : k] - return transformAccountToHost(cfg) -} - -// parseUserPassword parses the DSN string for username and password -func parseUserPassword(posAt int, dsn string) (user, password string) { - var k int - for k = 0; k < posAt; k++ { - if dsn[k] == ':' { - password = dsn[k+1 : posAt] - break - } - } - user = dsn[:k] - return -} - -// parseParams parse parameters -func parseParams(cfg *Config, posQuestion int, dsn string) (err error) { - for j := posQuestion + 1; j < len(dsn); j++ { - if dsn[j] == '?' { - if err = parseDSNParams(cfg, dsn[j+1:]); err != nil { - return - } - break - } - } - return -} - -// parseDSNParams parses the DSN "query string". 
-// Values must be url.QueryEscape'ed.
-func parseDSNParams(cfg *Config, params string) (err error) {
-    glog.V(2).Infof("Query String: %v\n", params)
-    for _, v := range strings.Split(params, "&") {
-        param := strings.SplitN(v, "=", 2)
-        if len(param) != 2 {
-            continue
-        }
-        var value string
-        value, err = url.QueryUnescape(param[1])
-        if err != nil {
-            return err
-        }
-        switch param[0] {
-        case "account":
-            cfg.Account = value
-        case "warehouse":
-            cfg.Warehouse = value
-        case "database":
-            cfg.Database = value
-        case "schema":
-            cfg.Schema = value
-        case "role":
-            cfg.Role = value
-        case "region":
-            cfg.Region = value
-        case "protocol":
-            cfg.Protocol = value
-        case "passcode":
-            cfg.Passcode = value
-        case "passcodeInPassword":
-            var vv bool
-            vv, err = strconv.ParseBool(value)
-            if err != nil {
-                return
-            }
-            cfg.PasscodeInPassword = vv
-        case "loginTimeout":
-            var vv int64
-            vv, err = strconv.ParseInt(value, 10, 64)
-            if err != nil {
-                return
-            }
-            cfg.LoginTimeout = time.Duration(vv * int64(time.Second))
-        case "application":
-            cfg.Application = value
-        case "authenticator":
-            cfg.Authenticator = strings.ToLower(value)
-        case "insecureMode":
-            var vv bool
-            vv, err = strconv.ParseBool(value)
-            if err != nil {
-                return
-            }
-            cfg.InsecureMode = vv
-        case "token":
-            cfg.Token = value
-        default:
-            if cfg.Params == nil {
-                cfg.Params = make(map[string]*string)
-            }
-            cfg.Params[param[0]] = &value
-        }
-    }
-    return
-}
diff --git a/vendor/github.com/snowflakedb/gosnowflake/errors.go b/vendor/github.com/snowflakedb/gosnowflake/errors.go
deleted file mode 100644
index d2d5fe3..0000000
--- a/vendor/github.com/snowflakedb/gosnowflake/errors.go
+++ /dev/null
@@ -1,148 +0,0 @@
-// Copyright (c) 2017-2018 Snowflake Computing Inc. All rights reserved.
-
-package gosnowflake
-
-import (
-    "fmt"
-)
-
-// SnowflakeError is an error type including various Snowflake-specific information.
-type SnowflakeError struct {
-    Number         int
-    SQLState       string
-    QueryID        string
-    Message        string
-    MessageArgs    []interface{}
-    IncludeQueryID bool // TODO: populate this in connection
-}
-
-func (se *SnowflakeError) Error() string {
-    message := se.Message
-    if len(se.MessageArgs) > 0 {
-        message = fmt.Sprintf(se.Message, se.MessageArgs...)
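-        // MessageArgs fills the printf-style %v placeholders in the
-        // predefined errMsg templates later in this file.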
-    }
-    if se.SQLState != "" {
-        if se.IncludeQueryID {
-            return fmt.Sprintf("%06d (%s): %s: %s", se.Number, se.SQLState, se.QueryID, message)
-        }
-        return fmt.Sprintf("%06d (%s): %s", se.Number, se.SQLState, message)
-    }
-    if se.IncludeQueryID {
-        return fmt.Sprintf("%06d: %s: %s", se.Number, se.QueryID, message)
-    }
-    return fmt.Sprintf("%06d: %s", se.Number, message)
-}
-
-const (
-    /* connection */
-
-    // ErrCodeEmptyAccountCode is an error code for the case where a DSN doesn't include the account parameter
-    ErrCodeEmptyAccountCode = 260000
-    // ErrCodeEmptyUsernameCode is an error code for the case where a DSN doesn't include the user parameter
-    ErrCodeEmptyUsernameCode = 260001
-    // ErrCodeEmptyPasswordCode is an error code for the case where a DSN doesn't include the password parameter
-    ErrCodeEmptyPasswordCode = 260002
-    // ErrCodeFailedToParseHost is an error code for the case where a DSN includes an invalid host name
-    ErrCodeFailedToParseHost = 260003
-    // ErrCodeFailedToParsePort is an error code for the case where a DSN includes an invalid port number
-    ErrCodeFailedToParsePort = 260004
-    // ErrCodeIdpConnectionError is an error code for the case where an IdP connection failed
-    ErrCodeIdpConnectionError = 260005
-    // ErrCodeSSOURLNotMatch is an error code for the case where an SSO URL doesn't match
-    ErrCodeSSOURLNotMatch = 260006
-    // ErrCodeServiceUnavailable is an error code for the case where the service is unavailable.
-    ErrCodeServiceUnavailable = 260007
-    // ErrCodeFailedToConnect is an error code for the case where a DB connection failed due to a wrong account name
-    ErrCodeFailedToConnect = 260008
-    // ErrCodeObjectNotExists is an error code for the case where the specified database object doesn't exist
-    ErrCodeObjectNotExists = 260009
-
-    /* network */
-
-    // ErrFailedToPostQuery is an error code for the case where an HTTP POST failed.
-    ErrFailedToPostQuery = 261000
-    // ErrFailedToRenewSession is an error code for the case where session renewal failed.
-    ErrFailedToRenewSession = 261001
-    // ErrFailedToCancelQuery is an error code for the case where cancelling a query failed.
-    ErrFailedToCancelQuery = 261002
-    // ErrFailedToCloseSession is an error code for the case where closing the session failed.
-    ErrFailedToCloseSession = 261003
-    // ErrFailedToAuth is an error code for the case where authentication failed for an unknown reason.
-    ErrFailedToAuth = 261004
-    // ErrFailedToAuthSAML is an error code for the case where authentication via SAML failed for an unknown reason.
-    ErrFailedToAuthSAML = 261005
-    // ErrFailedToAuthOKTA is an error code for the case where authentication via OKTA failed for an unknown reason.
-    ErrFailedToAuthOKTA = 261006
-    // ErrFailedToGetSSO is an error code for the case where fetching the SSO URL failed for an unknown reason.
-    ErrFailedToGetSSO = 261007
-    // ErrFailedToParseResponse is an error code for when we cannot parse an external browser response from Snowflake.
-    ErrFailedToParseResponse = 261008
-    // ErrFailedToGetExternalBrowserResponse is an error code for when there's an error reading from the open socket.
-    ErrFailedToGetExternalBrowserResponse = 261009
-    // ErrFailedToHeartbeat is an error code when a heartbeat fails.
-    ErrFailedToHeartbeat = 261010
-
-    /* rows */
-
-    // ErrFailedToGetChunk is an error code for the case where it failed to get a chunk of the result set
-    ErrFailedToGetChunk = 262000
-
-    /* transaction */
-
-    // ErrNoReadOnlyTransaction is an error code for the case where read-only mode is specified.
-    ErrNoReadOnlyTransaction = 263000
-    // ErrNoDefaultTransactionIsolationLevel is an error code for the case where a non-default isolation level is specified.
-    ErrNoDefaultTransactionIsolationLevel = 263001
-
-    /* converter */
-
-    // ErrInvalidTimestampTz is an error code for the case where a returned TIMESTAMP_TZ internal value is invalid
-    ErrInvalidTimestampTz = 268000
-    // ErrInvalidOffsetStr is an error code for the case where an offset string is invalid. The input string must
-    // consist of sHHMI: one sign character '+'/'-' followed by zero-filled hours and minutes
-    ErrInvalidOffsetStr = 268001
-    // ErrInvalidBinaryHexForm is an error code for the case where binary data in hex form is invalid.
-    ErrInvalidBinaryHexForm = 268002
-)
-
-const (
-    errMsgFailedToParseHost                  = "failed to parse a host name. host: %v"
-    errMsgFailedToParsePort                  = "failed to parse a port number. port: %v"
-    errMsgInvalidOffsetStr                   = "offset must be a string consisting of sHHMI: one sign character '+'/'-' followed by zero-filled hours and minutes: %v"
-    errMsgInvalidByteArray                   = "invalid byte array: %v"
-    errMsgIdpConnectionError                 = "failed to verify URLs. authenticator: %v, token URL: %v, SSO URL: %v"
-    errMsgSSOURLNotMatch                     = "SSO URL didn't match. expected: %v, got: %v"
-    errMsgFailedToGetChunk                   = "failed to get a chunk of result sets. idx: %v"
-    errMsgFailedToPostQuery                  = "failed to POST. HTTP: %v, URL: %v"
-    errMsgFailedToRenew                      = "failed to renew session. HTTP: %v, URL: %v"
-    errMsgFailedToCancelQuery                = "failed to cancel query. HTTP: %v, URL: %v"
-    errMsgFailedToCloseSession               = "failed to close session. HTTP: %v, URL: %v"
-    errMsgFailedToAuth                       = "failed to auth for unknown reason. HTTP: %v, URL: %v"
-    errMsgFailedToAuthSAML                   = "failed to auth via SAML for unknown reason. HTTP: %v, URL: %v"
-    errMsgFailedToAuthOKTA                   = "failed to auth via OKTA for unknown reason. HTTP: %v, URL: %v"
-    errMsgFailedToGetSSO                     = "failed to get SSO URL for unknown reason. HTTP: %v, URL: %v"
-    errMsgFailedToParseResponse              = "failed to parse a response from Snowflake. Response: %v"
-    errMsgFailedToGetExternalBrowserResponse = "failed to get an external browser response from Snowflake, err: %s"
-    errMsgNoReadOnlyTransaction              = "read-only transactions are not supported"
-    errMsgNoDefaultTransactionIsolationLevel = "only the default transaction isolation level is supported"
-    errMsgServiceUnavailable                 = "service is unavailable. check your connectivity. you may need a proxy server. HTTP: %v, URL: %v"
-    errMsgFailedToConnect                    = "failed to connect to db. verify account name is correct. HTTP: %v, URL: %v"
-    errMsgObjectNotExists                    = "specified object doesn't exist: %v"
-)
-
-var (
-    // ErrEmptyAccount is returned if a DSN doesn't include the account parameter.
-    ErrEmptyAccount = &SnowflakeError{
-        Number:  ErrCodeEmptyAccountCode,
-        Message: "account is empty",
-    }
-    // ErrEmptyUsername is returned if a DSN doesn't include the user parameter.
-    ErrEmptyUsername = &SnowflakeError{
-        Number:  ErrCodeEmptyUsernameCode,
-        Message: "user is empty",
-    }
-    // ErrEmptyPassword is returned if a DSN doesn't include the password parameter.
-    ErrEmptyPassword = &SnowflakeError{
-        Number:  ErrCodeEmptyPasswordCode,
-        Message: "password is empty"}
-)
diff --git a/vendor/github.com/snowflakedb/gosnowflake/gosnowflake.mak b/vendor/github.com/snowflakedb/gosnowflake/gosnowflake.mak
deleted file mode 100644
index 9d3ff6b..0000000
--- a/vendor/github.com/snowflakedb/gosnowflake/gosnowflake.mak
+++ /dev/null
@@ -1,45 +0,0 @@
-## Setup
-SHELL := /bin/bash
-SRC = $(shell find .
-type f -name '*.go' -not -path "./vendor/*") - -setup: - go get -u github.com/golang/dep/cmd/dep - go get github.com/golang/lint/golint - go get github.com/Songmu/make2help/cmd/make2help - [[ $$(go version | awk '{print $3}' | cut -d'.' -f 2) != "8" ]] && go get honnef.co/go/tools/cmd/megacheck || true - -## Install dependencies -deps: setup - dep ensure - -## Update dependencies -update: setup - dep ensure -update - -## Show help -help: - @make2help $(MAKEFILE_LIST) - -# Format source codes (internally used) -cfmt: setup - gofmt -l -w $(SRC) - -# Lint (internally used) -clint: setup - [[ $$(go version | awk '{print $3}' | cut -d'.' -f 2) != "8" ]] && echo "Running megacheck" && megacheck || echo "No megacheck run, because Go1.8 is not supported." - for pkg in $$(go list ./... | grep -v /vendor/); do \ - echo "Verifying $$pkg"; \ - go vet $$pkg; \ - golint -set_exit_status $$pkg || exit $$?; \ - done - -# Install (internally used) -cinstall: - export GOBIN=$$GOPATH/bin; \ - go install -tags=sfdebug $(CMD_TARGET).go - -# Run (internally used) -crun: install - $(CMD_TARGET) - -.PHONY: setup help cfmt clint cinstall crun diff --git a/vendor/github.com/snowflakedb/gosnowflake/heartbeat.go b/vendor/github.com/snowflakedb/gosnowflake/heartbeat.go deleted file mode 100644 index 5a4fa9d..0000000 --- a/vendor/github.com/snowflakedb/gosnowflake/heartbeat.go +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright (c) 2018 Snowflake Computing Inc. All right reserved. - -package gosnowflake - -import ( - "context" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "net/url" - "time" - - "github.com/google/uuid" -) - -const ( - // One hour interval should be good enough to renew tokens for four hours master token validity - heartBeatInterval = 3600 * time.Second -) - -type heartbeat struct { - restful *snowflakeRestful - shutdownChan chan bool -} - -func (hc *heartbeat) run() { - hbTicker := time.NewTicker(heartBeatInterval) - defer hbTicker.Stop() - for { - select { - case <-hbTicker.C: - err := hc.heartbeatMain() - if err != nil { - glog.V(2).Info("failed to heartbeat") - } - case <-hc.shutdownChan: - glog.V(2).Info("stopping heartbeat") - return - } - } -} - -func (hc *heartbeat) start() { - hc.shutdownChan = make(chan bool) - go hc.run() - glog.V(2).Info("heartbeat started") -} - -func (hc *heartbeat) stop() { - hc.shutdownChan <- true - close(hc.shutdownChan) - glog.V(2).Info("heartbeat stopped") -} - -func (hc *heartbeat) heartbeatMain() error { - glog.V(2).Info("Heartbeating!") - params := &url.Values{} - params.Add("requestId", uuid.New().String()) - fullURL := fmt.Sprintf( - "%s://%s:%d%s", hc.restful.Protocol, hc.restful.Host, hc.restful.Port, "/session/heartbeat?"+params.Encode()) - headers := make(map[string]string) - headers["Content-Type"] = headerContentTypeApplicationJSON - headers["accept"] = headerAcceptTypeApplicationSnowflake - headers["User-Agent"] = userAgent - headers[headerAuthorizationKey] = fmt.Sprintf(headerSnowflakeToken, hc.restful.Token) - - resp, err := hc.restful.FuncPost(context.TODO(), hc.restful, fullURL, headers, nil, hc.restful.RequestTimeout, false) - if err != nil { - return err - } - defer resp.Body.Close() - if resp.StatusCode == http.StatusOK { - glog.V(2).Infof("heartbeatMain: resp: %v", resp) - var respd execResponse - err = json.NewDecoder(resp.Body).Decode(&respd) - if err != nil { - glog.V(1).Infof("failed to decode JSON. 
err: %v", err) - glog.Flush() - return err - } - if respd.Code == sessionExpiredCode { - err = hc.restful.FuncRenewSession(context.TODO(), hc.restful) - if err != nil { - return err - } - } - return nil - } - b, err := ioutil.ReadAll(resp.Body) - if err != nil { - glog.V(1).Infof("failed to extract HTTP response body. err: %v", err) - return err - } - glog.V(1).Infof("HTTP: %v, URL: %v, Body: %v", resp.StatusCode, fullURL, b) - glog.V(1).Infof("Header: %v", resp.Header) - glog.Flush() - return &SnowflakeError{ - Number: ErrFailedToHeartbeat, - SQLState: SQLStateConnectionFailure, - Message: "Failed to heartbeat.", - } -} diff --git a/vendor/github.com/snowflakedb/gosnowflake/location.go b/vendor/github.com/snowflakedb/gosnowflake/location.go deleted file mode 100644 index cc71e5e..0000000 --- a/vendor/github.com/snowflakedb/gosnowflake/location.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright (c) 2017-2018 Snowflake Computing Inc. All right reserved. - -package gosnowflake - -import ( - "fmt" - "strconv" - "sync" - "time" -) - -var timezones map[int]*time.Location -var updateTimezoneMutex *sync.Mutex - -// Location returns an offset (minutes) based Location object for Snowflake database. -func Location(offset int) *time.Location { - updateTimezoneMutex.Lock() - defer updateTimezoneMutex.Unlock() - loc := timezones[offset] - if loc != nil { - return loc - } - loc = genTimezone(offset) - timezones[offset] = loc - return loc -} - -// LocationWithOffsetString returns an offset based Location object. The offset string must consist of sHHMI where one sign -// character '+'/'-' followed by zero filled hours and minutes. -func LocationWithOffsetString(offsets string) (loc *time.Location, err error) { - if len(offsets) != 5 { - return nil, &SnowflakeError{ - Number: ErrInvalidOffsetStr, - SQLState: SQLStateInvalidDataTimeFormat, - Message: errMsgInvalidOffsetStr, - MessageArgs: []interface{}{offsets}, - } - } - if offsets[0] != '-' && offsets[0] != '+' { - return nil, &SnowflakeError{ - Number: ErrInvalidOffsetStr, - SQLState: SQLStateInvalidDataTimeFormat, - Message: errMsgInvalidOffsetStr, - MessageArgs: []interface{}{offsets}, - } - } - s := 1 - if offsets[0] == '-' { - s = -1 - } - var h, m int64 - h, err = strconv.ParseInt(offsets[1:3], 10, 64) - if err != nil { - return - } - m, err = strconv.ParseInt(offsets[3:], 10, 64) - if err != nil { - return - } - offset := s * (int(h)*60 + int(m)) - loc = Location(offset) - return -} - -func genTimezone(offset int) *time.Location { - var offsetSign string - var toffset int - if offset < 0 { - offsetSign = "-" - toffset = -offset - } else { - offsetSign = "+" - toffset = offset - } - glog.V(2).Infof("offset: %v", offset) - return time.FixedZone( - fmt.Sprintf("%v%02d%02d", - offsetSign, toffset/60, toffset%60), int(offset)*60) -} - -func init() { - updateTimezoneMutex = &sync.Mutex{} - timezones = make(map[int]*time.Location, 48) - // pre-generate all common timezones - for i := -720; i <= 720; i += 30 { - glog.V(2).Infof("offset: %v", i) - timezones[i] = genTimezone(i) - } -} diff --git a/vendor/github.com/snowflakedb/gosnowflake/log.go b/vendor/github.com/snowflakedb/gosnowflake/log.go deleted file mode 100644 index 5236a52..0000000 --- a/vendor/github.com/snowflakedb/gosnowflake/log.go +++ /dev/null @@ -1,40 +0,0 @@ -// +build !sfdebug - -// Wrapper for glog to replace direct use, so that glog usage remains optional. -// This file contains the no-op glog wrapper/emulator. 
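-// Building with `go build -tags=sfdebug` selects the real glog-backed wrapper
-// in log_debug.go instead of this no-op version.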
- -package gosnowflake - -// glogWrapper is an empty struct to create a no-op glog wrapper -type glogWrapper struct{} - -// V emulates the glog.V() call -func (glogWrapper) V(int) glogWrapper { - return glogWrapper{} -} - -// Check if the logging is enabled. Returns always False by default -func (glogWrapper) IsEnabled(int) bool { - return false -} - -// Flush emulates the glog.Flush() call -func (glogWrapper) Flush() {} - -// Info emulates the glog.V(?).Info call -func (glogWrapper) Info(...interface{}) {} - -// Infoln emulates the glog.V(?).Infoln call -func (glogWrapper) Infoln(...interface{}) {} - -// Infof emulates the glog.V(?).Infof call -func (glogWrapper) Infof(...interface{}) {} - -// InfoDepth emulates the glog.V(?).InfoDepth call -func (glogWrapper) InfoDepth(...interface{}) {} - -// NOTE: Warning* and Error* methods are not emulated since they are not used. -// NOTE: Fatal* and Exit* methods are not emulated, since they also require additional calls (like os.Exit() and stack traces) to be compatible. - -// glog is our glog emulator -var glog = glogWrapper{} diff --git a/vendor/github.com/snowflakedb/gosnowflake/log_debug.go b/vendor/github.com/snowflakedb/gosnowflake/log_debug.go deleted file mode 100644 index de10f05..0000000 --- a/vendor/github.com/snowflakedb/gosnowflake/log_debug.go +++ /dev/null @@ -1,30 +0,0 @@ -// +build sfdebug - -// Wrapper for glog to replace direct use, so that glog usage remains optional. -// This file contains the actual/operational glog wrapper. - -package gosnowflake - -import logger "github.com/golang/glog" - -// glogWrapper wraps glog's Verbose type, enabling the use of glog.V().* methods directly -type glogWrapper struct { - logger.Verbose -} - -// V provides a wrapper for the glog.V() call -func (l *glogWrapper) V(level int32) glogWrapper { - return glogWrapper{logger.V(logger.Level(level))} -} - -func (l *glogWrapper) IsEnabled(level int32) bool { - return bool(logger.V(logger.Level(level))) -} - -// Flush calls flush on the underlying logger -func (l *glogWrapper) Flush() { - logger.Flush() -} - -// glog is our glog wrapper -var glog = glogWrapper{} diff --git a/vendor/github.com/snowflakedb/gosnowflake/ocsp.go b/vendor/github.com/snowflakedb/gosnowflake/ocsp.go deleted file mode 100644 index a791351..0000000 --- a/vendor/github.com/snowflakedb/gosnowflake/ocsp.go +++ /dev/null @@ -1,604 +0,0 @@ -// Copyright (c) 2017-2018 Snowflake Computing Inc. All right reserved. - -package gosnowflake - -import ( - "context" - "crypto" - "crypto/tls" - "crypto/x509" - "crypto/x509/pkix" - "encoding/asn1" - "encoding/base64" - "encoding/json" - "encoding/pem" - "fmt" - "io/ioutil" - "math/big" - "net/http" - "net/url" - "os" - "path/filepath" - "runtime" - "sync" - "time" - - "golang.org/x/crypto/ocsp" -) - -// caRoot includes the CA certificates. -var caRoot map[string]*x509.Certificate - -// certPOol includes the CA certificates. -var certPool *x509.CertPool - -// cacheDir is the location of OCSP response cache file -var cacheDir = "" - -// cacheFileName is the file name of OCSP response cache file -var cacheFileName = "" - -const ( - // retryOCSPTimeout is the total timeout for OCSP checks. - retryOCSPTimeout = 120 * time.Second - retryOCSPHTTPTimeout = 30 * time.Second -) - -const ( - cacheFileBaseName = "ocsp_response_cache" - // cacheExpire specifies cache data expiration time in seconds. 
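-    // Entries older than this (24 hours) are evicted lazily when
-    // checkOCSPResponseCache next sees them.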
- cacheExpire = float64(24 * 60 * 60) -) - -const ( - tolerableValidityRatio = 100 // buffer for certificate revocation update time - maxClockSkew = 900 * time.Second // buffer for clock skew -) - -type ocspStatusCode int - -type ocspStatus struct { - code ocspStatusCode - err error -} - -const ( - ocspSuccess ocspStatusCode = 0 - ocspNoServer ocspStatusCode = -1 - ocspFailedParseOCSPHost ocspStatusCode = -2 - ocspFailedComposeRequest ocspStatusCode = -3 - ocspFailedDecomposeRequst ocspStatusCode = -4 - ocspFailedEncodeCertID ocspStatusCode = -5 - ocspFailedSubmit ocspStatusCode = -6 - ocspFailedResponse ocspStatusCode = -7 - ocspFailedExtractResponse ocspStatusCode = -8 - ocspFailedParseResponse ocspStatusCode = -9 - ocspInvalidValidity ocspStatusCode = -10 - ocspRevokedOrUnknown ocspStatusCode = -11 - ocspMissedCache ocspStatusCode = -12 - ocspCacheExpired ocspStatusCode = -13 - ocspFailedDecodeResponse ocspStatusCode = -14 -) - -var ( - ocspResponseCache map[string][]interface{} - ocspResponseCacheLock *sync.RWMutex -) - -// copied from crypto/ocsp -var hashOIDs = map[crypto.Hash]asn1.ObjectIdentifier{ - crypto.SHA1: asn1.ObjectIdentifier([]int{1, 3, 14, 3, 2, 26}), - crypto.SHA256: asn1.ObjectIdentifier([]int{2, 16, 840, 1, 101, 3, 4, 2, 1}), - crypto.SHA384: asn1.ObjectIdentifier([]int{2, 16, 840, 1, 101, 3, 4, 2, 2}), - crypto.SHA512: asn1.ObjectIdentifier([]int{2, 16, 840, 1, 101, 3, 4, 2, 3}), -} - -// copied from crypto/ocsp.go -type certID struct { - HashAlgorithm pkix.AlgorithmIdentifier - NameHash []byte - IssuerKeyHash []byte - SerialNumber *big.Int -} - -// copied from crypto/ocsp.go -func getOIDFromHashAlgorithm(target crypto.Hash) asn1.ObjectIdentifier { - for hash, oid := range hashOIDs { - if hash == target { - return oid - } - } - glog.V(0).Infof("no valid OID is found for the hash algorithm. %#v", target) - return nil -} - -// calcTolerableValidity returns the maximum validity buffer -func calcTolerableValidity(thisUpdate, nextUpdate time.Time) time.Duration { - return durationMax(time.Duration(nextUpdate.Sub(thisUpdate)/tolerableValidityRatio), maxClockSkew) -} - -// isInValidityRange checks the validity -func isInValidityRange(currTime, thisUpdate, nextUpdate time.Time) bool { - if currTime.Sub(thisUpdate.Add(-maxClockSkew)) < 0 { - return false - } - if nextUpdate.Add(calcTolerableValidity(thisUpdate, nextUpdate)).Sub(currTime) < 0 { - return false - } - return true -} - -func retryRevocationStatusCheck(totalTimeout *time.Duration, sleepTime time.Duration) (ok bool) { - if *totalTimeout > 0 { - *totalTimeout -= sleepTime - } - if *totalTimeout <= 0 { - return false - } - glog.V(2).Infof("sleeping %v for retryOCSP. to timeout: %v. 
retrying", sleepTime, *totalTimeout) - time.Sleep(sleepTime) - return true -} - -func encodeCertID(ocspReq []byte) ([]byte, *ocspStatus) { - r, err := ocsp.ParseRequest(ocspReq) - if err != nil { - return nil, &ocspStatus{ - code: ocspFailedDecomposeRequst, - err: err, - } - } - - // encode CertID, used as a key in the cache - hashAlg := getOIDFromHashAlgorithm(r.HashAlgorithm) - encodedCertID, err := asn1.Marshal(certID{ - pkix.AlgorithmIdentifier{ - Algorithm: hashAlg, - Parameters: asn1.RawValue{Tag: 5 /* ASN.1 NULL */}, - }, - r.IssuerNameHash, - r.IssuerKeyHash, - r.SerialNumber, - }) - if err != nil { - return nil, &ocspStatus{ - code: ocspFailedEncodeCertID, - err: err, - } - } - return encodedCertID, &ocspStatus{ - code: ocspSuccess, - } -} - -func checkOCSPResponseCache(encodedCertID []byte, subject, issuer *x509.Certificate) *ocspStatus { - encodedCertIDBase64 := base64.StdEncoding.EncodeToString(encodedCertID) - ocspResponseCacheLock.Lock() - gotValueFromCache := ocspResponseCache[encodedCertIDBase64] - ocspResponseCacheLock.Unlock() - if len(gotValueFromCache) != 2 { - return &ocspStatus{ - code: ocspMissedCache, - err: fmt.Errorf("miss cache data. CertID: %v", encodedCertIDBase64), - } - } - glog.V(2).Infof("hit cache: %v, time: %v, value: %v", encodedCertIDBase64, gotValueFromCache[0], gotValueFromCache[1]) - currentTime := float64(time.Now().UTC().Unix()) - if epoch, ok := gotValueFromCache[0].(float64); ok { - if currentTime-epoch >= cacheExpire { - ocspResponseCacheLock.Lock() - delete(ocspResponseCache, encodedCertIDBase64) - ocspResponseCacheLock.Unlock() - return &ocspStatus{ - code: ocspCacheExpired, - err: fmt.Errorf("cache expired. current: %v, cache: %v, CertID: %v", - time.Unix(int64(currentTime), 0).UTC(), time.Unix(int64(epoch), 0).UTC(), encodedCertIDBase64), - } - } - if s, ok := gotValueFromCache[1].(string); ok { - b, err := base64.StdEncoding.DecodeString(s) - if err != nil { - ocspResponseCacheLock.Lock() - delete(ocspResponseCache, encodedCertIDBase64) - ocspResponseCacheLock.Unlock() - return &ocspStatus{ - code: ocspFailedDecodeResponse, - err: fmt.Errorf("failed to decode OCSP Response value in a cache. CertID: %v", encodedCertIDBase64), - } - } - ocspRes, err := ocsp.ParseResponse(b, issuer) - if err != nil { - ocspResponseCacheLock.Lock() - delete(ocspResponseCache, encodedCertIDBase64) - ocspResponseCacheLock.Unlock() - return &ocspStatus{ - code: ocspFailedParseResponse, - err: fmt.Errorf("failed to parse OCSP Respose. CertID: %v", encodedCertIDBase64), - } - } - glog.V(2).Info("using cached OCSP Response") - return validateOCSP(encodedCertIDBase64, ocspRes, subject) - } - } - ocspResponseCacheLock.Lock() - delete(ocspResponseCache, encodedCertIDBase64) // delete invalid cache entry - ocspResponseCacheLock.Unlock() - return &ocspStatus{ - code: ocspMissedCache, - err: fmt.Errorf("missed cache. 
CertID: %v", encodedCertIDBase64), - } -} - -func validateOCSP(encodedCertIDBase64 string, ocspRes *ocsp.Response, subject *x509.Certificate) *ocspStatus { - ocspResponseCacheLock.Lock() - defer ocspResponseCacheLock.Unlock() - curTime := time.Now() - if !isInValidityRange(curTime, ocspRes.ThisUpdate, ocspRes.NextUpdate) { - delete(ocspResponseCache, encodedCertIDBase64) - return &ocspStatus{ - code: ocspInvalidValidity, - err: fmt.Errorf("invalid validity: producedAt: %v, thisUpdate: %v, nextUpdate: %v", ocspRes.ProducedAt, ocspRes.ThisUpdate, ocspRes.NextUpdate), - } - } - if ocspRes.Status != ocsp.Good { - delete(ocspResponseCache, encodedCertIDBase64) - return &ocspStatus{ - code: ocspRevokedOrUnknown, - err: fmt.Errorf("bad revocation status. %v: %v, cert: %v", ocspRes.Status, ocspRes.RevocationReason, subject.Subject), - } - } - return &ocspStatus{ - code: ocspSuccess, - err: nil, - } -} - -// retryOCSP is the second level of retry method if the returned contents are corrupted. It often happens with OCSP -// serer and retry helps. -func retryOCSP( - client clientInterface, - req requestFunc, - ocspHost string, - headers map[string]string, - reqBody []byte, - issuer *x509.Certificate, - totalTimeout time.Duration, - httpTimeout time.Duration) ( - ocspRes *ocsp.Response, - ocspResBytes []byte, - ocspS *ocspStatus) { - retryCounter := 0 - sleepTime := time.Duration(0) - for { - sleepTime = defaultWaitAlgo.decorr(retryCounter, sleepTime) - res, err := retryHTTP(context.TODO(), client, req, "POST", ocspHost, headers, reqBody, httpTimeout, false) - if err != nil { - if ok := retryRevocationStatusCheck(&totalTimeout, sleepTime); ok { - retryCounter++ - continue - } - return ocspRes, ocspResBytes, &ocspStatus{ - code: ocspFailedSubmit, - err: err, - } - } - defer res.Body.Close() - glog.V(2).Infof("StatusCode from OCSP Server: %v\n", res.StatusCode) - if res.StatusCode != http.StatusOK { - if ok := retryRevocationStatusCheck(&totalTimeout, sleepTime); ok { - retryCounter++ - continue - } - return ocspRes, ocspResBytes, &ocspStatus{ - code: ocspFailedResponse, - err: fmt.Errorf("HTTP code is not OK. %v: %v", res.StatusCode, res.Status), - } - } - glog.V(2).Info("reading contents") - ocspResBytes, err = ioutil.ReadAll(res.Body) - if err != nil { - if ok := retryRevocationStatusCheck(&totalTimeout, sleepTime); ok { - retryCounter++ - continue - } - return ocspRes, ocspResBytes, &ocspStatus{ - code: ocspFailedExtractResponse, - err: err, - } - } - glog.V(2).Info("parsing OCSP response") - ocspRes, err = ocsp.ParseResponse(ocspResBytes, issuer) - if err != nil { - if ok := retryRevocationStatusCheck(&totalTimeout, sleepTime); ok { - retryCounter++ - continue - } - return ocspRes, ocspResBytes, &ocspStatus{ - code: ocspFailedParseResponse, - err: err, - } - } - break - } - return ocspRes, ocspResBytes, &ocspStatus{ - code: ocspSuccess, - err: nil, - } -} - -// getRevocationStatus checks the certificate revocation status for subject using issuer certificate. -func getRevocationStatus(wg *sync.WaitGroup, ocspStatusChan chan<- *ocspStatus, subject, issuer *x509.Certificate) { - defer wg.Done() - glog.V(2).Infof("Subject: %v\n", subject.Subject) - glog.V(2).Infof("Issuer: %v\n", issuer.Subject) - glog.V(2).Infof("OCSP Server: %v\n", subject.OCSPServer) - if len(subject.OCSPServer) == 0 { - ocspStatusChan <- &ocspStatus{ - code: ocspNoServer, - err: fmt.Errorf("no OCSP server is attached to the certificate. 
%v", subject.Subject), - } - return - } - ocspHost := subject.OCSPServer[0] - u, err := url.Parse(ocspHost) - if err != nil { - ocspStatusChan <- &ocspStatus{ - code: ocspFailedParseOCSPHost, - err: fmt.Errorf("failed to parse OCSP server host. %v", ocspHost), - } - return - } - ocspReq, err := ocsp.CreateRequest(subject, issuer, &ocsp.RequestOptions{}) - if err != nil { - ocspStatusChan <- &ocspStatus{ - code: ocspFailedComposeRequest, - err: fmt.Errorf("failed to compose OCSP request object. %v", subject.Subject), - } - return - } - - encodedCertID, ocspS := encodeCertID(ocspReq) - if ocspS.code != ocspSuccess { - ocspStatusChan <- ocspS - return - } - - ocspValidatedWithCache := checkOCSPResponseCache(encodedCertID, subject, issuer) - if ocspValidatedWithCache.code == ocspSuccess { - ocspStatusChan <- ocspValidatedWithCache - return - } - glog.V(2).Infof("cache missed: %v\n", ocspValidatedWithCache.err) - - st := snowflakeInsecureTransport - ocspClient := &http.Client{ - Timeout: 30 * time.Second, - Transport: st, - } - headers := make(map[string]string) - headers["Content-Type"] = "application/ocsp-request" - headers["Accept"] = "application/ocsp-response" - headers["Content-Length"] = string(len(ocspReq)) - headers["Host"] = u.Hostname() - ocspRes, ocspResBytes, ocspS := retryOCSP(ocspClient, http.NewRequest, ocspHost, headers, ocspReq, issuer, retryOCSPTimeout, retryOCSPHTTPTimeout) - if ocspS.code != ocspSuccess { - ocspStatusChan <- ocspS - return - } - encodedCertIDBase64 := base64.StdEncoding.EncodeToString(encodedCertID) - ocspStatusChan <- validateOCSP(encodedCertIDBase64, ocspRes, subject) - v := []interface{}{float64(time.Now().UTC().Unix()), base64.StdEncoding.EncodeToString(ocspResBytes)} - ocspResponseCacheLock.Lock() - ocspResponseCache[encodedCertIDBase64] = v - ocspResponseCacheLock.Unlock() -} - -// verifyPeerCertificate verifies all of certificate revocation status -func verifyPeerCertificate(callback func(*sync.WaitGroup, []*x509.Certificate) []*ocspStatus, verifiedChains [][]*x509.Certificate) (err error) { - for i := 0; i < len(verifiedChains); i++ { - var wg sync.WaitGroup - n := len(verifiedChains[i]) - 1 - if !verifiedChains[i][n].IsCA || string(verifiedChains[i][n].RawIssuer) != string(verifiedChains[i][n].RawSubject) { - // if the last certificate is not root CA, add it to the list - rca := caRoot[string(verifiedChains[i][n].RawIssuer)] - if rca == nil { - return fmt.Errorf("failed to find root CA. 
pkix.name: %v", verifiedChains[i][n].Issuer) - } - verifiedChains[i] = append(verifiedChains[i], rca) - n++ - } - wg.Add(n) - results := callback(&wg, verifiedChains[i]) - wg.Wait() - for _, r := range results { - if r.err != nil { - return r.err - } - } - } - writeOCSPCacheFile() - return nil -} - -func getAllRevocationStatusParallel(wg *sync.WaitGroup, verifiedChains []*x509.Certificate) []*ocspStatus { - n := len(verifiedChains) - 1 - ocspStatusChan := make(chan *ocspStatus, n) - for j := 0; j < n; j++ { - go getRevocationStatus(wg, ocspStatusChan, verifiedChains[j], verifiedChains[j+1]) - } - results := make([]*ocspStatus, n) - for j := 0; j < n; j++ { - results[j] = <-ocspStatusChan // will wait for all results back - } - close(ocspStatusChan) - return results -} - -func getAllRevocationStatusSerial(wg *sync.WaitGroup, verifiedChains []*x509.Certificate) []*ocspStatus { - n := len(verifiedChains) - 1 - results := make([]*ocspStatus, n) - for j := 0; j < n; j++ { - ocspStatusChan := make(chan *ocspStatus, 1) - getRevocationStatus(wg, ocspStatusChan, verifiedChains[j], verifiedChains[j+1]) - results[j] = <-ocspStatusChan - close(ocspStatusChan) - } - return results -} - -// verifyPeerCertificateSerial verifies the certificate revocation status in serial. -// This is mainly used by tools that analyzes the OCSP output -func verifyPeerCertificateSerial(_ [][]byte, verifiedChains [][]*x509.Certificate) (err error) { - return verifyPeerCertificate(getAllRevocationStatusSerial, verifiedChains) -} - -// verifyPeerCertificateParallel verifies the certificate revocation status in parallel. -// This is mainly used for general connection -func verifyPeerCertificateParallel(_ [][]byte, verifiedChains [][]*x509.Certificate) (err error) { - return verifyPeerCertificate(getAllRevocationStatusParallel, verifiedChains) -} - -// readOCSPCacheFile reads a OCSP Response cache file. This should be called in init(). -func readOCSPCacheFile() { - ocspResponseCache = make(map[string][]interface{}) - ocspResponseCacheLock = &sync.RWMutex{} - cacheFileName = filepath.Join(cacheDir, cacheFileBaseName) - glog.V(2).Infof("reading OCSP Response cache file. %v\n", cacheFileName) - raw, err := ioutil.ReadFile(cacheFileName) - if err != nil { - glog.V(2).Infof("failed to read OCSP cache file. %v. ignored.\n", err) - } - err = json.Unmarshal(raw, &ocspResponseCache) - if err != nil { - glog.V(2).Infof("failed to read OCSP cache file. %v. ignored\n", err) - } -} - -// writeOCSPCacheFile writes a OCSP Response cache file. This is called if all revocation status is success. -// lock file is used to mitigate race condition with other process. -func writeOCSPCacheFile() { - glog.V(2).Infof("writing OCSP Response cache file. %v\n", cacheFileName) - cacheLockFileName := cacheFileName + ".lck" - statinfo, err := os.Stat(cacheLockFileName) - switch { - case os.IsNotExist(err): - os.OpenFile(cacheLockFileName, os.O_RDONLY|os.O_CREATE, 0644) - case err != nil: - glog.V(2).Infof("failed to write OCSP response cache file. file: %v, err: %v. ignored.\n", cacheFileName, err) - return - default: - if time.Since(statinfo.ModTime()) < time.Hour { - glog.V(2).Infof("other process locks the cache file. %v. ignored.\n", cacheFileName) - return - } - err := os.Remove(cacheLockFileName) - if err != nil { - glog.V(2).Infof("failed to delete lock file. file: %v, err: %v. 
ignored.\n", cacheLockFileName, err) - return - } - os.OpenFile(cacheLockFileName, os.O_RDONLY|os.O_CREATE, 0644) - } - defer os.Remove(cacheLockFileName) - ocspResponseCacheLock.Lock() - defer ocspResponseCacheLock.Unlock() - j, err := json.Marshal(ocspResponseCache) - if err != nil { - glog.V(2).Info("failed to convert OCSP Response cache to JSON. ignored.") - return - } - err = ioutil.WriteFile(cacheFileName, j, 0644) - if err != nil { - glog.V(2).Infof("failed to write OCSP Response cache. err: %v. ignored.\n", err) - } -} - -// readCACerts read a set of root CAs -func readCACerts() { - raw := []byte(caRootPEM) - certPool = x509.NewCertPool() - caRoot = make(map[string]*x509.Certificate) - var p *pem.Block - for { - p, raw = pem.Decode(raw) - if p == nil { - break - } - if p.Type != "CERTIFICATE" { - continue - } - c, err := x509.ParseCertificate(p.Bytes) - if err != nil { - panic("failed to parse CA certificate.") - } - certPool.AddCert(c) - caRoot[string(c.RawSubject)] = c - } -} - -// createOCSPCacheDir creates OCSP response cache directory. If SNOWFLAKE_TEST_WORKSPACE is set, -func createOCSPCacheDir() { - cacheDir = os.Getenv("SNOWFLAKE_TEST_WORKSPACE") - if cacheDir == "" { - switch runtime.GOOS { - case "windows": - cacheDir = filepath.Join(os.Getenv("USERPROFILE"), "AppData", "Local", "Snowflake", "Caches") - case "darwin": - home := os.Getenv("HOME") - if home == "" { - glog.V(2).Info("HOME is blank.") - } - cacheDir = filepath.Join(home, "Library", "Caches", "Snowflake") - default: - home := os.Getenv("HOME") - if home == "" { - glog.V(2).Info("HOME is blank") - } - cacheDir = filepath.Join(home, ".cache", "snowflake") - } - } - if _, err := os.Stat(cacheDir); os.IsNotExist(err) { - err := os.MkdirAll(cacheDir, os.ModePerm) - if err != nil { - glog.V(2).Infof("failed to create cache directory. %v, err: %v. ignored\n", cacheDir, err) - } - } -} - -func init() { - readCACerts() - createOCSPCacheDir() - readOCSPCacheFile() -} - -// snowflakeInsecureTransport is the default tranport object that doesn't do certificate revocation check. -var snowflakeInsecureTransport = &http.Transport{ - MaxIdleConns: 10, - IdleConnTimeout: 30 * time.Minute, - Proxy: http.ProxyFromEnvironment, -} - -// SnowflakeTransport includes the certificate revocation check with OCSP in parallel. By default, the driver uses -// this transport object. -var SnowflakeTransport = &http.Transport{ - TLSClientConfig: &tls.Config{ - RootCAs: certPool, - VerifyPeerCertificate: verifyPeerCertificateParallel, - }, - MaxIdleConns: 10, - IdleConnTimeout: 30 * time.Minute, - Proxy: http.ProxyFromEnvironment, -} - -// SnowflakeTransportSerial includes the certificate revocation check with OCSP in serial. 
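-// It is intended for tools that analyze OCSP output one certificate at a time;
-// general connections use the parallel SnowflakeTransport above.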
-var SnowflakeTransportSerial = &http.Transport{ - TLSClientConfig: &tls.Config{ - RootCAs: certPool, - VerifyPeerCertificate: verifyPeerCertificateSerial, - }, - MaxIdleConns: 10, - IdleConnTimeout: 30 * time.Minute, - Proxy: http.ProxyFromEnvironment, -} - -// SnowflakeTransportTest includes the certificate revocation check in parallel -var SnowflakeTransportTest = SnowflakeTransport diff --git a/vendor/github.com/snowflakedb/gosnowflake/parameters.json.enc b/vendor/github.com/snowflakedb/gosnowflake/parameters.json.enc deleted file mode 100644 index c68f357..0000000 Binary files a/vendor/github.com/snowflakedb/gosnowflake/parameters.json.enc and /dev/null differ diff --git a/vendor/github.com/snowflakedb/gosnowflake/parameters.json.tmpl b/vendor/github.com/snowflakedb/gosnowflake/parameters.json.tmpl deleted file mode 100644 index 8448f60..0000000 --- a/vendor/github.com/snowflakedb/gosnowflake/parameters.json.tmpl +++ /dev/null @@ -1,11 +0,0 @@ -{ - "testconnection": { - "SNOWFLAKE_TEST_USER": "testuser", - "SNOWFLAKE_TEST_PASSWORD": "testpass", - "SNOWFLAKE_TEST_ACCOUNT": "testaccount", - "SNOWFLAKE_TEST_WAREHOUSE": "testwarehouse", - "SNOWFLAKE_TEST_DATABASE": "testdatabase", - "SNOWFLAKE_TEST_SCHEMA": "testschema", - "SNOWFLAKE_TEST_ROLE": "testrole" - } -} diff --git a/vendor/github.com/snowflakedb/gosnowflake/query.go b/vendor/github.com/snowflakedb/gosnowflake/query.go deleted file mode 100644 index 7a34b27..0000000 --- a/vendor/github.com/snowflakedb/gosnowflake/query.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright (c) 2017-2018 Snowflake Computing Inc. All right reserved. - -package gosnowflake - -import ( - "time" -) - -type execBindParameter struct { - Type string `json:"type"` - Value *string `json:"value"` -} - -type execRequest struct { - SQLText string `json:"sqlText"` - AsyncExec bool `json:"asyncExec"` - SequenceID uint64 `json:"sequenceId"` - IsInternal bool `json:"isInternal"` - Parameters map[string]string `json:"parameters,omitempty"` - Bindings map[string]execBindParameter `json:"bindings,omitempty"` -} -type execResponseRowType struct { - Name string `json:"name"` - ByteLength int64 `json:"byteLength"` - Length int64 `json:"length"` - Type string `json:"type"` - Scale int64 `json:"scale"` - Precision int64 `json:"precision"` - Nullable bool `json:"nullable"` -} - -type execResponseChunk struct { - URL string `json:"url"` - RowCount int `json:"rowCount"` - UncompressedSize int64 `json:"uncompressedSize"` - CompressedSize int64 `json:"compressedSize"` -} - -// make all data field optional -type execResponseData struct { - // succeed query response data - Parameters []nameValueParameter `json:"parameters,omitempty"` - RowType []execResponseRowType `json:"rowtype,omitempty"` - RowSet [][]*string `json:"rowset,omitempty"` - Total int64 `json:"total,omitempty"` // java:long - Returned int64 `json:"returned,omitempty"` // java:long - QueryID string `json:"queryId,omitempty"` - SQLState string `json:"sqlState,omitempty"` - DatabaseProvider string `json:"databaseProvider,omitempty"` - FinalDatabaseName string `json:"finalDatabaseName,omitempty"` - FinalSchemaName string `json:"finalSchemaName,omitempty"` - FinalWarehouseName string `json:"finalWarehouseName,omitempty"` - FinalRoleName string `json:"finalRoleName,omitempty"` - NumberOfBinds int `json:"numberOfBinds,omitempty"` // java:int - StatementTypeID int64 `json:"statementTypeId,omitempty"` // java:long - Version int64 `json:"version,omitempty"` // java:long - Chunks []execResponseChunk `json:"chunks,omitempty"` - 
Qrmk string `json:"qrmk,omitempty"` - ChunkHeaders map[string]string `json:"chunkHeaders,omitempty"` - - // ping pong response data - GetResultURL string `json:"getResultUrl,omitempty"` - ProgressDesc string `json:"progressDesc,omitempty"` - QueryAbortsAfterSecs time.Duration `json:"queryAbortsAfterSecs,omitempty"` -} - -type execResponse struct { - Data execResponseData `json:"Data"` - Message string `json:"message"` - Code string `json:"code"` - Success bool `json:"success"` -} diff --git a/vendor/github.com/snowflakedb/gosnowflake/restful.go b/vendor/github.com/snowflakedb/gosnowflake/restful.go deleted file mode 100644 index 170f449..0000000 --- a/vendor/github.com/snowflakedb/gosnowflake/restful.go +++ /dev/null @@ -1,421 +0,0 @@ -// Copyright (c) 2017-2018 Snowflake Computing Inc. All right reserved. - -package gosnowflake - -import ( - "context" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "net/url" - "strconv" - "time" - - "github.com/google/uuid" -) - -const ( - headerSnowflakeToken = "Snowflake Token=\"%v\"" - headerAuthorizationKey = "Authorization" - - headerContentTypeApplicationJSON = "application/json" - headerAcceptTypeApplicationSnowflake = "application/snowflake" - - sessionExpiredCode = "390112" - queryInProgressCode = "333333" - queryInProgressAsyncCode = "333334" -) - -type snowflakeRestful struct { - Host string - Port int - Protocol string - LoginTimeout time.Duration // Login timeout - RequestTimeout time.Duration // request timeout - Authenticator string - - Client *http.Client - Token string - MasterToken string - SessionID int - HeartBeat *heartbeat - - Connection *snowflakeConn - FuncPostQuery func(context.Context, *snowflakeRestful, *url.Values, map[string]string, []byte, time.Duration) (*execResponse, error) - FuncPostQueryHelper func(context.Context, *snowflakeRestful, *url.Values, map[string]string, []byte, time.Duration, string) (*execResponse, error) - FuncPost func(context.Context, *snowflakeRestful, string, map[string]string, []byte, time.Duration, bool) (*http.Response, error) - FuncGet func(context.Context, *snowflakeRestful, string, map[string]string, time.Duration) (*http.Response, error) - FuncRenewSession func(context.Context, *snowflakeRestful) error - FuncPostAuth func(*snowflakeRestful, *url.Values, map[string]string, []byte, time.Duration) (*authResponse, error) - FuncCloseSession func(*snowflakeRestful) error - FuncCancelQuery func(*snowflakeRestful, string) error - - FuncPostAuthSAML func(*snowflakeRestful, map[string]string, []byte, time.Duration) (*authResponse, error) - FuncPostAuthOKTA func(*snowflakeRestful, map[string]string, []byte, string, time.Duration) (*authOKTAResponse, error) - FuncGetSSO func(*snowflakeRestful, *url.Values, map[string]string, string, time.Duration) ([]byte, error) -} - -type renewSessionResponse struct { - Data renewSessionResponseMain `json:"data"` - Message string `json:"message"` - Code string `json:"code"` - Success bool `json:"success"` -} - -type renewSessionResponseMain struct { - SessionToken string `json:"sessionToken"` - ValidityInSecondsST time.Duration `json:"validityInSecondsST"` - MasterToken string `json:"masterToken"` - ValidityInSecondsMT time.Duration `json:"validityInSecondsMT"` - SessionID int `json:"sessionId"` -} - -type cancelQueryResponse struct { - Data interface{} `json:"data"` - Message string `json:"message"` - Code string `json:"code"` - Success bool `json:"success"` -} - -func postRestful( - ctx context.Context, - sr *snowflakeRestful, - fullURL string, - headers 
map[string]string, - body []byte, - timeout time.Duration, - raise4XX bool) ( - *http.Response, error) { - return retryHTTP(ctx, sr.Client, http.NewRequest, "POST", fullURL, headers, body, timeout, raise4XX) -} - -func getRestful( - ctx context.Context, - sr *snowflakeRestful, - fullURL string, - headers map[string]string, - timeout time.Duration) ( - *http.Response, error) { - return retryHTTP(ctx, sr.Client, http.NewRequest, "GET", fullURL, headers, nil, timeout, false) -} - -type execResponseAndErr struct { - resp *execResponse - err error -} - -func postRestfulQuery( - ctx context.Context, - sr *snowflakeRestful, - params *url.Values, - headers map[string]string, - body []byte, - timeout time.Duration) ( - data *execResponse, err error) { - - requestID := uuid.New().String() - execResponseChan := make(chan execResponseAndErr) - - go func() { - data, err := sr.FuncPostQueryHelper(ctx, sr, params, headers, body, timeout, requestID) - execResp := execResponseAndErr{data, err} - execResponseChan <- execResp - close(execResponseChan) - }() - - select { - case <-ctx.Done(): - err := sr.FuncCancelQuery(sr, requestID) - if err != nil { - return nil, err - } - return nil, ctx.Err() - case respAndErr := <-execResponseChan: - return respAndErr.resp, respAndErr.err - } -} - -func postRestfulQueryHelper( - ctx context.Context, - sr *snowflakeRestful, - params *url.Values, - headers map[string]string, - body []byte, - timeout time.Duration, - requestID string) ( - data *execResponse, err error) { - glog.V(2).Infof("params: %v", params) - params.Add("requestId", requestID) - if sr.Token != "" { - headers[headerAuthorizationKey] = fmt.Sprintf(headerSnowflakeToken, sr.Token) - } - fullURL := fmt.Sprintf( - "%s://%s:%d%s", sr.Protocol, sr.Host, sr.Port, - "/queries/v1/query-request?"+params.Encode()) - resp, err := sr.FuncPost(ctx, sr, fullURL, headers, body, timeout, false) - if err != nil { - return nil, err - } - defer resp.Body.Close() - if resp.StatusCode == http.StatusOK { - glog.V(2).Infof("postQuery: resp: %v", resp) - var respd execResponse - err = json.NewDecoder(resp.Body).Decode(&respd) - if err != nil { - glog.V(1).Infof("failed to decode JSON. err: %v", err) - glog.Flush() - return nil, err - } - if respd.Code == sessionExpiredCode { - err = sr.FuncRenewSession(ctx, sr) - if err != nil { - return nil, err - } - return sr.FuncPostQuery(ctx, sr, params, headers, body, timeout) - } - - var resultURL string - isSessionRenewed := false - - for isSessionRenewed || respd.Code == queryInProgressCode || - respd.Code == queryInProgressAsyncCode { - if !isSessionRenewed { - resultURL = respd.Data.GetResultURL - } - - glog.V(2).Info("ping pong") - glog.Flush() - headers[headerAuthorizationKey] = fmt.Sprintf(headerSnowflakeToken, sr.Token) - fullURL := fmt.Sprintf( - "%s://%s:%d%s", sr.Protocol, sr.Host, sr.Port, resultURL) - - resp, err = sr.FuncGet(ctx, sr, fullURL, headers, 0) - respd = execResponse{} // reset the response - err = json.NewDecoder(resp.Body).Decode(&respd) - resp.Body.Close() - if err != nil { - glog.V(1).Infof("failed to decode JSON. err: %v", err) - glog.Flush() - return nil, err - } - if respd.Code == sessionExpiredCode { - err = sr.FuncRenewSession(ctx, sr) - if err != nil { - return nil, err - } - isSessionRenewed = true - } else { - isSessionRenewed = false - } - } - return &respd, nil - } - b, err := ioutil.ReadAll(resp.Body) - if err != nil { - glog.V(1).Infof("failed to extract HTTP response body. 
err: %v", err) - return nil, err - } - glog.V(1).Infof("HTTP: %v, URL: %v, Body: %v", resp.StatusCode, fullURL, b) - glog.V(1).Infof("Header: %v", resp.Header) - glog.Flush() - return nil, &SnowflakeError{ - Number: ErrFailedToPostQuery, - SQLState: SQLStateConnectionFailure, - Message: errMsgFailedToPostQuery, - MessageArgs: []interface{}{resp.StatusCode, fullURL}, - } -} - -func closeSession(sr *snowflakeRestful) error { - glog.V(2).Info("close session") - params := &url.Values{} - params.Add("delete", "true") - params.Add("requestId", uuid.New().String()) - fullURL := fmt.Sprintf( - "%s://%s:%d%s", sr.Protocol, sr.Host, sr.Port, "/session?"+params.Encode()) - - headers := make(map[string]string) - headers["Content-Type"] = headerContentTypeApplicationJSON - headers["accept"] = headerAcceptTypeApplicationSnowflake - headers["User-Agent"] = userAgent - headers[headerAuthorizationKey] = fmt.Sprintf(headerSnowflakeToken, sr.Token) - - resp, err := sr.FuncPost(context.TODO(), sr, fullURL, headers, nil, 5*time.Second, false) - if err != nil { - return err - } - defer resp.Body.Close() - if resp.StatusCode == http.StatusOK { - var respd renewSessionResponse - err = json.NewDecoder(resp.Body).Decode(&respd) - if err != nil { - glog.V(1).Infof("failed to decode JSON. err: %v", err) - glog.Flush() - return err - } - if !respd.Success && respd.Code != sessionExpiredCode { - c, err := strconv.Atoi(respd.Code) - if err != nil { - return err - } - return &SnowflakeError{ - Number: c, - Message: respd.Message, - } - } - return nil - } - b, err := ioutil.ReadAll(resp.Body) - if err != nil { - glog.V(1).Infof("failed to extract HTTP response body. err: %v", err) - glog.Flush() - return err - } - glog.V(1).Infof("HTTP: %v, URL: %v, Body: %v", resp.StatusCode, fullURL, b) - glog.V(1).Infof("Header: %v", resp.Header) - glog.Flush() - return &SnowflakeError{ - Number: ErrFailedToCloseSession, - SQLState: SQLStateConnectionFailure, - Message: errMsgFailedToCloseSession, - MessageArgs: []interface{}{resp.StatusCode, fullURL}, - } -} - -func renewRestfulSession(ctx context.Context, sr *snowflakeRestful) error { - glog.V(2).Info("start renew session") - params := &url.Values{} - params.Add("requestId", uuid.New().String()) - fullURL := fmt.Sprintf( - "%s://%s:%d%s", sr.Protocol, sr.Host, sr.Port, "/session/token-request?"+params.Encode()) - - headers := make(map[string]string) - headers["Content-Type"] = headerContentTypeApplicationJSON - headers["accept"] = headerAcceptTypeApplicationSnowflake - headers["User-Agent"] = userAgent - headers[headerAuthorizationKey] = fmt.Sprintf(headerSnowflakeToken, sr.MasterToken) - - body := make(map[string]string) - body["oldSessionToken"] = sr.Token - body["requestType"] = "RENEW" - - var reqBody []byte - reqBody, err := json.Marshal(body) - if err != nil { - return err - } - - resp, err := sr.FuncPost(ctx, sr, fullURL, headers, reqBody, sr.RequestTimeout, false) - if err != nil { - return err - } - defer resp.Body.Close() - if resp.StatusCode == http.StatusOK { - var respd renewSessionResponse - err = json.NewDecoder(resp.Body).Decode(&respd) - if err != nil { - glog.V(1).Infof("failed to decode JSON. 
err: %v", err) - glog.Flush() - return err - } - if !respd.Success { - c, err := strconv.Atoi(respd.Code) - if err != nil { - return err - } - return &SnowflakeError{ - Number: c, - Message: respd.Message, - } - } - sr.Token = respd.Data.SessionToken - sr.MasterToken = respd.Data.MasterToken - return nil - } - b, err := ioutil.ReadAll(resp.Body) - if err != nil { - glog.V(1).Infof("failed to extract HTTP response body. err: %v", err) - glog.Flush() - return err - } - glog.V(1).Infof("HTTP: %v, URL: %v, Body: %v", resp.StatusCode, fullURL, b) - glog.V(1).Infof("Header: %v", resp.Header) - glog.Flush() - return &SnowflakeError{ - Number: ErrFailedToRenewSession, - SQLState: SQLStateConnectionFailure, - Message: errMsgFailedToRenew, - MessageArgs: []interface{}{resp.StatusCode, fullURL}, - } -} - -func cancelQuery(sr *snowflakeRestful, requestID string) error { - glog.V(2).Info("cancel query") - params := &url.Values{} - params.Add("requestId", uuid.New().String()) - fullURL := fmt.Sprintf( - "%s://%s:%d%s", sr.Protocol, sr.Host, sr.Port, "/queries/v1/abort-request?"+params.Encode()) - - headers := make(map[string]string) - headers["Content-Type"] = headerContentTypeApplicationJSON - headers["accept"] = headerAcceptTypeApplicationSnowflake - headers["User-Agent"] = userAgent - headers[headerAuthorizationKey] = fmt.Sprintf(headerSnowflakeToken, sr.Token) - - req := make(map[string]string) - req["requestId"] = requestID - - reqByte, err := json.Marshal(req) - if err != nil { - return err - } - - resp, err := sr.FuncPost(context.TODO(), sr, fullURL, headers, reqByte, 0, false) - if err != nil { - return err - } - defer resp.Body.Close() - if resp.StatusCode == http.StatusOK { - var respd cancelQueryResponse - err = json.NewDecoder(resp.Body).Decode(&respd) - if err != nil { - glog.V(1).Infof("failed to decode JSON. err: %v", err) - glog.Flush() - return err - } - if !respd.Success && respd.Code == sessionExpiredCode { - err := sr.FuncRenewSession(context.TODO(), sr) - if err != nil { - return err - } - return sr.FuncCancelQuery(sr, requestID) - } else if respd.Success { - return nil - } else { - c, err := strconv.Atoi(respd.Code) - if err != nil { - return err - } - return &SnowflakeError{ - Number: c, - Message: respd.Message, - } - } - } - b, err := ioutil.ReadAll(resp.Body) - if err != nil { - glog.V(1).Infof("failed to extract HTTP response body. err: %v", err) - glog.Flush() - return err - } - glog.V(1).Infof("HTTP: %v, URL: %v, Body: %v", resp.StatusCode, fullURL, b) - glog.V(1).Infof("Header: %v", resp.Header) - glog.Flush() - return &SnowflakeError{ - Number: ErrFailedToCancelQuery, - SQLState: SQLStateConnectionFailure, - Message: errMsgFailedToCancelQuery, - MessageArgs: []interface{}{resp.StatusCode, fullURL}, - } -} diff --git a/vendor/github.com/snowflakedb/gosnowflake/result.go b/vendor/github.com/snowflakedb/gosnowflake/result.go deleted file mode 100644 index a2d978b..0000000 --- a/vendor/github.com/snowflakedb/gosnowflake/result.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright (c) 2017-2018 Snowflake Computing Inc. All right reserved. 
- -package gosnowflake - -type snowflakeResult struct { - affectedRows int64 - insertID int64 // Snowflake doesn't support last insert id -} - -func (res *snowflakeResult) LastInsertId() (int64, error) { - return res.insertID, nil -} - -func (res *snowflakeResult) RowsAffected() (int64, error) { - return res.affectedRows, nil -} diff --git a/vendor/github.com/snowflakedb/gosnowflake/retry.go b/vendor/github.com/snowflakedb/gosnowflake/retry.go deleted file mode 100644 index 6d94ae7..0000000 --- a/vendor/github.com/snowflakedb/gosnowflake/retry.go +++ /dev/null @@ -1,127 +0,0 @@ -// Copyright (c) 2017-2018 Snowflake Computing Inc. All right reserved. - -package gosnowflake - -import ( - "bytes" - "errors" - "fmt" - "io" - "math/rand" - "net/http" - "time" - - "context" - - "sync" -) - -var random *rand.Rand - -func init() { - random = rand.New(rand.NewSource(time.Now().UnixNano())) -} - -type waitAlgo struct { - mutex *sync.Mutex // required for random.Int63n - base time.Duration // base wait time - cap time.Duration // maximum wait time -} - -func randSecondDuration(n time.Duration) time.Duration { - return time.Duration(random.Int63n(int64(n/time.Second))) * time.Second -} - -// decorrelated jitter backoff -func (w *waitAlgo) decorr(attempt int, sleep time.Duration) time.Duration { - w.mutex.Lock() - defer w.mutex.Unlock() - t := 3*sleep - w.base - switch { - case t > 0: - return durationMin(w.cap, randSecondDuration(t)+w.base) - case t < 0: - return durationMin(w.cap, randSecondDuration(-t)+3*sleep) - } - return w.base -} - -var defaultWaitAlgo = &waitAlgo{ - mutex: &sync.Mutex{}, - base: 5 * time.Second, - cap: 160 * time.Second, -} - -type requestFunc func(method, urlStr string, body io.Reader) (*http.Request, error) - -type clientInterface interface { - Do(req *http.Request) (*http.Response, error) -} - -func retryHTTP( - ctx context.Context, - client clientInterface, - req requestFunc, - method string, - fullURL string, - headers map[string]string, - body []byte, - timeout time.Duration, - raise4XX bool) (res *http.Response, err error) { - totalTimeout := timeout - glog.V(2).Infof("retryHTTP.totalTimeout: %v", totalTimeout) - retryCounter := 0 - sleepTime := time.Duration(0) - for { - req, err := req(method, fullURL, bytes.NewReader(body)) - if err != nil { - return nil, err - } - if req != nil { - // req can be nil in tests - req = req.WithContext(ctx) - } - for k, v := range headers { - req.Header.Set(k, v) - } - res, err = client.Do(req) - if err == nil && res.StatusCode == http.StatusOK || err == context.Canceled { - // exit if success or canceled - break - } - if raise4XX && res != nil && res.StatusCode >= 400 && res.StatusCode < 500 { - // abort connection if raise4XX flag is enabled and the range of HTTP status code are 4XX. - // This is currently used for Snowflake login. The caller must generate an error object based on HTTP status. - break - } - // cannot just return 4xx and 5xx status as the error can be sporadic. retry often helps. - if err != nil { - glog.V(2).Infof( - "failed http connection. no response is returned. err: %v. retrying...\n", err) - } else { - glog.V(2).Infof( - "failed http connection. HTTP Status: %v. retrying...\n", res.StatusCode) - } - // uses decorrelated jitter backoff - sleepTime = defaultWaitAlgo.decorr(retryCounter, sleepTime) - - if totalTimeout > 0 { - glog.V(2).Infof("to timeout: %v", totalTimeout) - // if any timeout is set - totalTimeout -= sleepTime - if totalTimeout <= 0 { - if err != nil { - return nil, fmt.Errorf("timeout. err: %v. 
Hanging?", err) - } - if res != nil { - return nil, fmt.Errorf("timeout. HTTP Status: %v. Hanging?", res.StatusCode) - } - return nil, errors.New("timeout. Hanging?") - } - } - retryCounter++ - glog.V(2).Infof("sleeping %v. to timeout: %v. retrying", sleepTime, totalTimeout) - time.Sleep(sleepTime) - } - return res, err -} diff --git a/vendor/github.com/snowflakedb/gosnowflake/rows.go b/vendor/github.com/snowflakedb/gosnowflake/rows.go deleted file mode 100644 index 6902f10..0000000 --- a/vendor/github.com/snowflakedb/gosnowflake/rows.go +++ /dev/null @@ -1,405 +0,0 @@ -// Copyright (c) 2017-2018 Snowflake Computing Inc. All right reserved. - -package gosnowflake - -import ( - "context" - "database/sql/driver" - "encoding/json" - "io" - "io/ioutil" - "net/http" - "reflect" - "strings" - "sync" - "time" -) - -const ( - headerSseCAlgorithm = "x-amz-server-side-encryption-customer-algorithm" - headerSseCKey = "x-amz-server-side-encryption-customer-key" - headerSseCAes = "AES256" -) - -var ( - maxChunkDownloadWorkers = 10 - maxChunkDownloaderErrorCounter = 5 -) - -type snowflakeRows struct { - sc *snowflakeConn - RowType []execResponseRowType - ChunkDownloader *snowflakeChunkDownloader -} - -func (rows *snowflakeRows) Close() (err error) { - glog.V(2).Infoln("Rows.Close") - return nil -} - -type chunkError struct { - Index int - Error error -} - -type snowflakeChunkDownloader struct { - sc *snowflakeConn - ctx context.Context - Total int64 - TotalRowIndex int64 - CurrentChunk [][]*string - CurrentChunkIndex int - CurrentChunkSize int - ChunksMutex *sync.Mutex - ChunkMetas []execResponseChunk - Chunks map[int][][]*string - ChunksChan chan int - ChunksError chan *chunkError - ChunksErrorCounter int - ChunksFinalErrors []*chunkError - Qrmk string - ChunkHeader map[string]string - CurrentIndex int - FuncDownload func(*snowflakeChunkDownloader, int) - FuncDownloadHelper func(context.Context, *snowflakeChunkDownloader, int) - FuncGet func(context.Context, *snowflakeChunkDownloader, string, map[string]string, time.Duration) (*http.Response, error) - DoneDownloadCond *sync.Cond -} - -// ColumnTypeDatabaseTypeName returns the database column name. 
-func (rows *snowflakeRows) ColumnTypeDatabaseTypeName(index int) string { - return strings.ToUpper(rows.RowType[index].Type) -} - -// ColumnTypeLength returns the length of the column -func (rows *snowflakeRows) ColumnTypeLength(index int) (length int64, ok bool) { - if index < 0 || index > len(rows.RowType) { - return 0, false - } - switch rows.RowType[index].Type { - case "text", "variant", "object", "array", "binary": - return rows.RowType[index].Length, true - } - return 0, false -} - -func (rows *snowflakeRows) ColumnTypeNullable(index int) (nullable, ok bool) { - if index < 0 || index > len(rows.RowType) { - return false, false - } - return rows.RowType[index].Nullable, true -} - -func (rows *snowflakeRows) ColumnTypePrecisionScale(index int) (precision, scale int64, ok bool) { - if index < 0 || index > len(rows.RowType) { - return 0, 0, false - } - switch rows.RowType[index].Type { - case "fixed": - return rows.RowType[index].Precision, rows.RowType[index].Scale, true - } - return 0, 0, false -} - -func (rows *snowflakeRows) Columns() []string { - glog.V(3).Infoln("Rows.Columns") - ret := make([]string, len(rows.RowType)) - for i, n := 0, len(rows.RowType); i < n; i++ { - ret[i] = rows.RowType[i].Name - } - return ret -} - -func (rows *snowflakeRows) ColumnTypeScanType(index int) reflect.Type { - return snowflakeTypeToGo(rows.RowType[index].Type, rows.RowType[index].Scale) -} - -func (rows *snowflakeRows) Next(dest []driver.Value) (err error) { - row, err := rows.ChunkDownloader.Next() - if err != nil { - // includes io.EOF - if err == io.EOF { - rows.ChunkDownloader.Chunks = nil // detach all chunks. No way to go backward without reinitialize it. - } - return err - } - for i, n := 0, len(row); i < n; i++ { - // could move to chunk downloader so that each go routine - // can convert data - err := stringToValue(&dest[i], rows.RowType[i], row[i]) - if err != nil { - return err - } - } - return err -} - -func (rows *snowflakeRows) HasNextResultSet() bool { - if len(rows.ChunkDownloader.ChunkMetas) == 0 { - return false // no extra chunk - } - return rows.ChunkDownloader.hasNextResultSet() -} - -func (rows *snowflakeRows) NextResultSet() error { - if len(rows.ChunkDownloader.ChunkMetas) == 0 { - return io.EOF - } - return rows.ChunkDownloader.nextResultSet() -} - -func (scd *snowflakeChunkDownloader) hasNextResultSet() bool { - return scd.CurrentChunkIndex < len(scd.ChunkMetas) -} - -func (scd *snowflakeChunkDownloader) nextResultSet() error { - // no error at all times as the next chunk/resultset is automatically read - if scd.CurrentChunkIndex < len(scd.ChunkMetas) { - return nil - } - return io.EOF -} - -func (scd *snowflakeChunkDownloader) start() error { - scd.CurrentChunkSize = len(scd.CurrentChunk) // cache the size - scd.CurrentIndex = -1 // initial chunks idx - scd.CurrentChunkIndex = -1 // initial chunk - - // start downloading chunks if exists - chunkMetaLen := len(scd.ChunkMetas) - if chunkMetaLen > 0 { - glog.V(2).Infof("chunks: %v", chunkMetaLen) - scd.ChunksMutex = &sync.Mutex{} - scd.DoneDownloadCond = sync.NewCond(scd.ChunksMutex) - scd.Chunks = make(map[int][][]*string) - scd.ChunksChan = make(chan int, chunkMetaLen) - scd.ChunksError = make(chan *chunkError, maxChunkDownloadWorkers) - for i := 0; i < chunkMetaLen; i++ { - glog.V(2).Infof("add chunk to channel ChunksChan: %v", i+1) - scd.ChunksChan <- i - } - for i := 0; i < intMin(maxChunkDownloadWorkers, chunkMetaLen); i++ { - scd.schedule() - } - } - return nil -} - -func (scd *snowflakeChunkDownloader) schedule() 
{ - select { - case nextIdx := <-scd.ChunksChan: - glog.V(2).Infof("schedule chunk: %v", nextIdx+1) - go scd.FuncDownload(scd, nextIdx) - default: - // no more download - glog.V(2).Info("no more download") - } -} - -func (scd *snowflakeChunkDownloader) checkErrorRetry() (err error) { - select { - case errc := <-scd.ChunksError: - if scd.ChunksErrorCounter < maxChunkDownloaderErrorCounter && errc.Error != context.Canceled { - // add the index to the chunks channel so that the download will be retried. - go scd.FuncDownload(scd, errc.Index) - scd.ChunksErrorCounter++ - glog.V(2).Infof("chunk idx: %v, err: %v. retrying (%v/%v)...", - errc.Index, errc.Error, scd.ChunksErrorCounter, maxChunkDownloaderErrorCounter) - } else { - scd.ChunksFinalErrors = append(scd.ChunksFinalErrors, errc) - glog.V(2).Infof("chunk idx: %v, err: %v. no further retry", errc.Index, errc.Error) - return errc.Error - } - default: - glog.V(2).Info("no error is detected.") - } - return nil -} -func (scd *snowflakeChunkDownloader) Next() ([]*string, error) { - for { - scd.CurrentIndex++ - if scd.CurrentIndex < scd.CurrentChunkSize { - return scd.CurrentChunk[scd.CurrentIndex], nil - } - scd.CurrentChunkIndex++ // next chunk - scd.CurrentIndex = -1 // reset - if scd.CurrentChunkIndex >= len(scd.ChunkMetas) { - break - } - - scd.ChunksMutex.Lock() - if scd.CurrentChunkIndex > 1 { - scd.Chunks[scd.CurrentChunkIndex-1] = nil // detach the previously used chunk - } - for scd.Chunks[scd.CurrentChunkIndex] == nil { - glog.V(2).Infof("waiting for chunk idx: %v/%v", - scd.CurrentChunkIndex+1, len(scd.ChunkMetas)) - err := scd.checkErrorRetry() - if err != nil { - scd.ChunksMutex.Unlock() - return nil, err - } - // wait for chunk downloader goroutine to broadcast the event, - // 1) one chunk download finishes or 2) an error occurs. - scd.DoneDownloadCond.Wait() - } - glog.V(2).Infof("ready: chunk %v", scd.CurrentChunkIndex+1) - scd.CurrentChunk = scd.Chunks[scd.CurrentChunkIndex] - scd.ChunksMutex.Unlock() - scd.CurrentChunkSize = len(scd.CurrentChunk) - - // kick off the next download - scd.schedule() - } - - glog.V(2).Infof("no more data") - if len(scd.ChunkMetas) > 0 { - close(scd.ChunksError) - close(scd.ChunksChan) - } - return nil, io.EOF -} - -func getChunk( - ctx context.Context, - scd *snowflakeChunkDownloader, - fullURL string, - headers map[string]string, - timeout time.Duration) ( - *http.Response, error) { - return retryHTTP( - ctx, scd.sc.rest.Client, http.NewRequest, - "GET", fullURL, headers, nil, timeout, false) -} - -/* largeResultSetReader is a reader that wraps the large result set with leading and tailing brackets. 
*/ -type largeResultSetReader struct { - status int - body io.Reader -} - -func (r *largeResultSetReader) Read(p []byte) (n int, err error) { - if r.status == 0 { - p[0] = 0x5b // initial 0x5b ([) - r.status = 1 - return 1, nil - } - if r.status == 1 { - var len int - len, err = r.body.Read(p) - if err == io.EOF { - r.status = 2 - return len, nil - } - if err != nil { - return 0, err - } - return len, nil - } - if r.status == 2 { - p[0] = 0x5d // tail 0x5d (]) - r.status = 3 - return 1, nil - } - // ensure no data and EOF - return 0, io.EOF -} - -func downloadChunk(scd *snowflakeChunkDownloader, idx int) { - glog.V(2).Infof("download start chunk: %v", idx+1) - - execDownloadChan := make(chan struct{}) - - go func() { - scd.FuncDownloadHelper(scd.ctx, scd, idx) - close(execDownloadChan) - }() - - select { - case <-scd.ctx.Done(): - scd.ChunksError <- &chunkError{Index: idx, Error: scd.ctx.Err()} - case <-execDownloadChan: - } -} - -func downloadChunkHelper(ctx context.Context, scd *snowflakeChunkDownloader, idx int) { - - headers := make(map[string]string) - if len(scd.ChunkHeader) > 0 { - glog.V(2).Info("chunk header is provided.") - for k, v := range scd.ChunkHeader { - headers[k] = v - } - } else { - headers[headerSseCAlgorithm] = headerSseCAes - headers[headerSseCKey] = scd.Qrmk - } - - resp, err := scd.FuncGet(ctx, scd, scd.ChunkMetas[idx].URL, headers, 0) - if err != nil { - raiseDownloadError(scd, idx, err) - return - } - defer resp.Body.Close() - glog.V(2).Infof("download finish chunk: %v, resp: %v", idx+1, resp) - if resp.StatusCode == http.StatusOK { - var respd [][]*string - start := time.Now() - st := &largeResultSetReader{ - status: 0, - body: resp.Body, - } - dec := json.NewDecoder(st) - for { - if err := dec.Decode(&respd); err == io.EOF { - break - } else if err != nil { - glog.V(1).Infof( - "failed to extract HTTP response body. URL: %v, err: %v", scd.ChunkMetas[idx].URL, err) - raiseDownloadError(scd, idx, err) - return - } - } - glog.V(2).Infof( - "decoded %d rows w/ %d bytes in %s (chunk %v)", - scd.ChunkMetas[idx].RowCount, - scd.ChunkMetas[idx].UncompressedSize, - time.Since(start), idx+1, - ) - scd.ChunksMutex.Lock() - scd.Chunks[idx] = respd - scd.DoneDownloadCond.Broadcast() - scd.ChunksMutex.Unlock() - } else { - b, err := ioutil.ReadAll(resp.Body) - if err != nil { - glog.V(1).Infof( - "failed to extract HTTP response body. 
URL: %v, err: %v", scd.ChunkMetas[idx].URL, err) - raiseDownloadError(scd, idx, err) - scd.ChunksError <- &chunkError{Index: idx, Error: err} - return - } - glog.V(1).Infof("HTTP: %v, URL: %v, Body: %v", resp.StatusCode, scd.ChunkMetas[idx].URL, b) - glog.V(1).Infof("Header: %v", resp.Header) - glog.Flush() - scd.ChunksMutex.Lock() - scd.ChunksError <- &chunkError{ - Index: idx, - Error: &SnowflakeError{ - Number: ErrFailedToGetChunk, - SQLState: SQLStateConnectionFailure, - Message: errMsgFailedToGetChunk, - MessageArgs: []interface{}{idx}, - }} - scd.DoneDownloadCond.Broadcast() - scd.ChunksMutex.Unlock() - } -} - -func raiseDownloadError(scd *snowflakeChunkDownloader, idx int, err error) { - glog.Flush() - scd.ChunksMutex.Lock() - scd.ChunksError <- &chunkError{Index: idx, Error: err} - scd.DoneDownloadCond.Broadcast() - scd.ChunksMutex.Unlock() -} diff --git a/vendor/github.com/snowflakedb/gosnowflake/sqlstate.go b/vendor/github.com/snowflakedb/gosnowflake/sqlstate.go deleted file mode 100644 index 17c3256..0000000 --- a/vendor/github.com/snowflakedb/gosnowflake/sqlstate.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright (c) 2017-2018 Snowflake Computing Inc. All right reserved. - -package gosnowflake - -const ( - // SQLStateNumericValueOutOfRange is a SQL State code indicating Numeric value is out of range. - SQLStateNumericValueOutOfRange = "22003" - // SQLStateInvalidDataTimeFormat is a SQL State code indicating DataTime format is invalid. - SQLStateInvalidDataTimeFormat = "22007" - // SQLStateConnectionWasNotEstablished is a SQL State code indicating connection was not established. - SQLStateConnectionWasNotEstablished = "08001" - // SQLStateConnectionRejected is a SQL State code indicating connection was rejected. - SQLStateConnectionRejected = "08004" - // SQLStateConnectionFailure is a SQL State code indicating connection failed. - SQLStateConnectionFailure = "08006" - // SQLStateFeatureNotSupported is a SQL State code indicating the feature is not enabled. - SQLStateFeatureNotSupported = "0A000" -) diff --git a/vendor/github.com/snowflakedb/gosnowflake/statement.go b/vendor/github.com/snowflakedb/gosnowflake/statement.go deleted file mode 100644 index 0f70586..0000000 --- a/vendor/github.com/snowflakedb/gosnowflake/statement.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright (c) 2017-2018 Snowflake Computing Inc. All right reserved. - -package gosnowflake - -import ( - "context" - "database/sql/driver" -) - -type snowflakeStmt struct { - sc *snowflakeConn - query string -} - -func (stmt *snowflakeStmt) Close() error { - glog.V(2).Infoln("Stmt.Close") - // noop - return nil -} - -func (stmt *snowflakeStmt) NumInput() int { - glog.V(2).Infoln("Stmt.NumInput") - // Go Snowflake doesn't know the number of binding parameters. 
- return -1 -} - -func (stmt *snowflakeStmt) ExecContext(ctx context.Context, args []driver.NamedValue) (driver.Result, error) { - glog.V(2).Infoln("Stmt.ExecContext") - return stmt.sc.ExecContext(ctx, stmt.query, args) -} - -func (stmt *snowflakeStmt) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) { - glog.V(2).Infoln("Stmt.QueryContext") - return stmt.sc.QueryContext(ctx, stmt.query, args) -} - -func (stmt *snowflakeStmt) Exec(args []driver.Value) (driver.Result, error) { - glog.V(2).Infoln("Stmt.Exec") - return stmt.sc.Exec(stmt.query, args) -} - -func (stmt *snowflakeStmt) Query(args []driver.Value) (driver.Rows, error) { - glog.V(2).Infoln("Stmt.Query") - return stmt.sc.Query(stmt.query, args) -} diff --git a/vendor/github.com/snowflakedb/gosnowflake/transaction.go b/vendor/github.com/snowflakedb/gosnowflake/transaction.go deleted file mode 100644 index df0e55b..0000000 --- a/vendor/github.com/snowflakedb/gosnowflake/transaction.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright (c) 2017-2018 Snowflake Computing Inc. All right reserved. - -package gosnowflake - -import ( - "context" - "database/sql/driver" -) - -type snowflakeTx struct { - sc *snowflakeConn -} - -func (tx *snowflakeTx) Commit() (err error) { - if tx.sc == nil || tx.sc.rest == nil { - return driver.ErrBadConn - } - _, err = tx.sc.exec(context.TODO(), "COMMIT", false, false, nil) - if err != nil { - return - } - tx.sc = nil - return -} - -func (tx *snowflakeTx) Rollback() (err error) { - if tx.sc == nil || tx.sc.rest == nil { - return driver.ErrBadConn - } - _, err = tx.sc.exec(context.TODO(), "ROLLBACK", false, false, nil) - if err != nil { - return - } - tx.sc = nil - return -} diff --git a/vendor/github.com/snowflakedb/gosnowflake/util.go b/vendor/github.com/snowflakedb/gosnowflake/util.go deleted file mode 100644 index 6611e68..0000000 --- a/vendor/github.com/snowflakedb/gosnowflake/util.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (c) 2017-2018 Snowflake Computing Inc. All right reserved. - -package gosnowflake - -import ( - "database/sql/driver" - "time" -) - -// integer min -func intMin(a, b int) int { - if a < b { - return a - } - return b -} - -// integer max -func intMax(a, b int) int { - if a > b { - return a - } - return b -} - -// time.Duration max -func durationMax(d1, d2 time.Duration) time.Duration { - if d1-d2 > 0 { - return d1 - } - return d2 -} - -// time.Duration min -func durationMin(d1, d2 time.Duration) time.Duration { - if d1-d2 < 0 { - return d1 - } - return d2 -} - -// toNamedValues converts a slice of driver.Value to a slice of driver.NamedValue for Go 1.8 SQL package -func toNamedValues(values []driver.Value) []driver.NamedValue { - namedValues := make([]driver.NamedValue, len(values)) - for idx, value := range values { - namedValues[idx] = driver.NamedValue{Name: "", Ordinal: idx + 1, Value: value} - } - return namedValues -} diff --git a/vendor/github.com/snowflakedb/gosnowflake/version.go b/vendor/github.com/snowflakedb/gosnowflake/version.go deleted file mode 100644 index 2cf85e1..0000000 --- a/vendor/github.com/snowflakedb/gosnowflake/version.go +++ /dev/null @@ -1,6 +0,0 @@ -// Copyright (c) 2017-2018 Snowflake Computing Inc. All right reserved. - -package gosnowflake - -// SnowflakeGoDriverVersion is the version of Go Snowflake Driver. 
-const SnowflakeGoDriverVersion = "1.1.9" diff --git a/vendor/github.com/stretchr/testify/LICENSE b/vendor/github.com/stretchr/testify/LICENSE deleted file mode 100644 index 473b670..0000000 --- a/vendor/github.com/stretchr/testify/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -Copyright (c) 2012 - 2013 Mat Ryer and Tyler Bunnell - -Please consider promoting this project if you find it useful. - -Permission is hereby granted, free of charge, to any person -obtaining a copy of this software and associated documentation -files (the "Software"), to deal in the Software without restriction, -including without limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of the Software, -and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included -in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, -DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT -OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE -OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go deleted file mode 100644 index aa1c2b9..0000000 --- a/vendor/github.com/stretchr/testify/assert/assertion_format.go +++ /dev/null @@ -1,484 +0,0 @@ -/* -* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen -* THIS FILE MUST NOT BE EDITED BY HAND - */ - -package assert - -import ( - http "net/http" - url "net/url" - time "time" -) - -// Conditionf uses a Comparison to assert a complex condition. -func Conditionf(t TestingT, comp Comparison, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Condition(t, comp, append([]interface{}{msg}, args...)...) -} - -// Containsf asserts that the specified string, list(array, slice...) or map contains the -// specified substring or element. -// -// assert.Containsf(t, "Hello World", "World", "error message %s", "formatted") -// assert.Containsf(t, ["Hello", "World"], "World", "error message %s", "formatted") -// assert.Containsf(t, {"Hello": "World"}, "Hello", "error message %s", "formatted") -func Containsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Contains(t, s, contains, append([]interface{}{msg}, args...)...) -} - -// DirExistsf checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists. -func DirExistsf(t TestingT, path string, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return DirExists(t, path, append([]interface{}{msg}, args...)...) -} - -// ElementsMatchf asserts that the specified listA(array, slice...) is equal to specified -// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, -// the number of appearances of each of them in both lists should match. 
-// -// assert.ElementsMatchf(t, [1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted") -func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return ElementsMatch(t, listA, listB, append([]interface{}{msg}, args...)...) -} - -// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. -// -// assert.Emptyf(t, obj, "error message %s", "formatted") -func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Empty(t, object, append([]interface{}{msg}, args...)...) -} - -// Equalf asserts that two objects are equal. -// -// assert.Equalf(t, 123, 123, "error message %s", "formatted") -// -// Pointer variable equality is determined based on the equality of the -// referenced values (as opposed to the memory addresses). Function equality -// cannot be determined and will always fail. -func Equalf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Equal(t, expected, actual, append([]interface{}{msg}, args...)...) -} - -// EqualErrorf asserts that a function returned an error (i.e. not `nil`) -// and that it is equal to the provided error. -// -// actualObj, err := SomeFunction() -// assert.EqualErrorf(t, err, expectedErrorString, "error message %s", "formatted") -func EqualErrorf(t TestingT, theError error, errString string, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return EqualError(t, theError, errString, append([]interface{}{msg}, args...)...) -} - -// EqualValuesf asserts that two objects are equal or convertable to the same types -// and equal. -// -// assert.EqualValuesf(t, uint32(123, "error message %s", "formatted"), int32(123)) -func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return EqualValues(t, expected, actual, append([]interface{}{msg}, args...)...) -} - -// Errorf asserts that a function returned an error (i.e. not `nil`). -// -// actualObj, err := SomeFunction() -// if assert.Errorf(t, err, "error message %s", "formatted") { -// assert.Equal(t, expectedErrorf, err) -// } -func Errorf(t TestingT, err error, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Error(t, err, append([]interface{}{msg}, args...)...) -} - -// Exactlyf asserts that two objects are equal in value and type. -// -// assert.Exactlyf(t, int32(123, "error message %s", "formatted"), int64(123)) -func Exactlyf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Exactly(t, expected, actual, append([]interface{}{msg}, args...)...) -} - -// Failf reports a failure through -func Failf(t TestingT, failureMessage string, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Fail(t, failureMessage, append([]interface{}{msg}, args...)...) -} - -// FailNowf fails test -func FailNowf(t TestingT, failureMessage string, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return FailNow(t, failureMessage, append([]interface{}{msg}, args...)...) 
-} - -// Falsef asserts that the specified value is false. -// -// assert.Falsef(t, myBool, "error message %s", "formatted") -func Falsef(t TestingT, value bool, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return False(t, value, append([]interface{}{msg}, args...)...) -} - -// FileExistsf checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file. -func FileExistsf(t TestingT, path string, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return FileExists(t, path, append([]interface{}{msg}, args...)...) -} - -// HTTPBodyContainsf asserts that a specified handler returns a -// body that contains a string. -// -// assert.HTTPBodyContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") -// -// Returns whether the assertion was successful (true) or not (false). -func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return HTTPBodyContains(t, handler, method, url, values, str, append([]interface{}{msg}, args...)...) -} - -// HTTPBodyNotContainsf asserts that a specified handler returns a -// body that does not contain a string. -// -// assert.HTTPBodyNotContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") -// -// Returns whether the assertion was successful (true) or not (false). -func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return HTTPBodyNotContains(t, handler, method, url, values, str, append([]interface{}{msg}, args...)...) -} - -// HTTPErrorf asserts that a specified handler returns an error status code. -// -// assert.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} -// -// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). -func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return HTTPError(t, handler, method, url, values, append([]interface{}{msg}, args...)...) -} - -// HTTPRedirectf asserts that a specified handler returns a redirect status code. -// -// assert.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} -// -// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). -func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return HTTPRedirect(t, handler, method, url, values, append([]interface{}{msg}, args...)...) -} - -// HTTPSuccessf asserts that a specified handler returns a success status code. -// -// assert.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted") -// -// Returns whether the assertion was successful (true) or not (false). 
-func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return HTTPSuccess(t, handler, method, url, values, append([]interface{}{msg}, args...)...) -} - -// Implementsf asserts that an object is implemented by the specified interface. -// -// assert.Implementsf(t, (*MyInterface, "error message %s", "formatted")(nil), new(MyObject)) -func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Implements(t, interfaceObject, object, append([]interface{}{msg}, args...)...) -} - -// InDeltaf asserts that the two numerals are within delta of each other. -// -// assert.InDeltaf(t, math.Pi, (22 / 7.0, "error message %s", "formatted"), 0.01) -func InDeltaf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return InDelta(t, expected, actual, delta, append([]interface{}{msg}, args...)...) -} - -// InDeltaMapValuesf is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys. -func InDeltaMapValuesf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return InDeltaMapValues(t, expected, actual, delta, append([]interface{}{msg}, args...)...) -} - -// InDeltaSlicef is the same as InDelta, except it compares two slices. -func InDeltaSlicef(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return InDeltaSlice(t, expected, actual, delta, append([]interface{}{msg}, args...)...) -} - -// InEpsilonf asserts that expected and actual have a relative error less than epsilon -func InEpsilonf(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return InEpsilon(t, expected, actual, epsilon, append([]interface{}{msg}, args...)...) -} - -// InEpsilonSlicef is the same as InEpsilon, except it compares each value from two slices. -func InEpsilonSlicef(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return InEpsilonSlice(t, expected, actual, epsilon, append([]interface{}{msg}, args...)...) -} - -// IsTypef asserts that the specified objects are of the same type. -func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return IsType(t, expectedType, object, append([]interface{}{msg}, args...)...) -} - -// JSONEqf asserts that two JSON strings are equivalent. -// -// assert.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") -func JSONEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return JSONEq(t, expected, actual, append([]interface{}{msg}, args...)...) -} - -// Lenf asserts that the specified object has specific length. -// Lenf also fails if the object has a type that len() not accept. 
-// -// assert.Lenf(t, mySlice, 3, "error message %s", "formatted") -func Lenf(t TestingT, object interface{}, length int, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Len(t, object, length, append([]interface{}{msg}, args...)...) -} - -// Nilf asserts that the specified object is nil. -// -// assert.Nilf(t, err, "error message %s", "formatted") -func Nilf(t TestingT, object interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Nil(t, object, append([]interface{}{msg}, args...)...) -} - -// NoErrorf asserts that a function returned no error (i.e. `nil`). -// -// actualObj, err := SomeFunction() -// if assert.NoErrorf(t, err, "error message %s", "formatted") { -// assert.Equal(t, expectedObj, actualObj) -// } -func NoErrorf(t TestingT, err error, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return NoError(t, err, append([]interface{}{msg}, args...)...) -} - -// NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the -// specified substring or element. -// -// assert.NotContainsf(t, "Hello World", "Earth", "error message %s", "formatted") -// assert.NotContainsf(t, ["Hello", "World"], "Earth", "error message %s", "formatted") -// assert.NotContainsf(t, {"Hello": "World"}, "Earth", "error message %s", "formatted") -func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return NotContains(t, s, contains, append([]interface{}{msg}, args...)...) -} - -// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. -// -// if assert.NotEmptyf(t, obj, "error message %s", "formatted") { -// assert.Equal(t, "two", obj[1]) -// } -func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return NotEmpty(t, object, append([]interface{}{msg}, args...)...) -} - -// NotEqualf asserts that the specified values are NOT equal. -// -// assert.NotEqualf(t, obj1, obj2, "error message %s", "formatted") -// -// Pointer variable equality is determined based on the equality of the -// referenced values (as opposed to the memory addresses). -func NotEqualf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return NotEqual(t, expected, actual, append([]interface{}{msg}, args...)...) -} - -// NotNilf asserts that the specified object is not nil. -// -// assert.NotNilf(t, err, "error message %s", "formatted") -func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return NotNil(t, object, append([]interface{}{msg}, args...)...) -} - -// NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic. -// -// assert.NotPanicsf(t, func(){ RemainCalm() }, "error message %s", "formatted") -func NotPanicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return NotPanics(t, f, append([]interface{}{msg}, args...)...) -} - -// NotRegexpf asserts that a specified regexp does not match a string. 
-// -// assert.NotRegexpf(t, regexp.MustCompile("starts", "error message %s", "formatted"), "it's starting") -// assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted") -func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return NotRegexp(t, rx, str, append([]interface{}{msg}, args...)...) -} - -// NotSubsetf asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). -// -// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") -func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return NotSubset(t, list, subset, append([]interface{}{msg}, args...)...) -} - -// NotZerof asserts that i is not the zero value for its type. -func NotZerof(t TestingT, i interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return NotZero(t, i, append([]interface{}{msg}, args...)...) -} - -// Panicsf asserts that the code inside the specified PanicTestFunc panics. -// -// assert.Panicsf(t, func(){ GoCrazy() }, "error message %s", "formatted") -func Panicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Panics(t, f, append([]interface{}{msg}, args...)...) -} - -// PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that -// the recovered panic value equals the expected panic value. -// -// assert.PanicsWithValuef(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") -func PanicsWithValuef(t TestingT, expected interface{}, f PanicTestFunc, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return PanicsWithValue(t, expected, f, append([]interface{}{msg}, args...)...) -} - -// Regexpf asserts that a specified regexp matches a string. -// -// assert.Regexpf(t, regexp.MustCompile("start", "error message %s", "formatted"), "it's starting") -// assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted") -func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Regexp(t, rx, str, append([]interface{}{msg}, args...)...) -} - -// Subsetf asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). -// -// assert.Subsetf(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") -func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Subset(t, list, subset, append([]interface{}{msg}, args...)...) -} - -// Truef asserts that the specified value is true. -// -// assert.Truef(t, myBool, "error message %s", "formatted") -func Truef(t TestingT, value bool, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return True(t, value, append([]interface{}{msg}, args...)...) -} - -// WithinDurationf asserts that the two times are within duration delta of each other. 
-// -// assert.WithinDurationf(t, time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") -func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return WithinDuration(t, expected, actual, delta, append([]interface{}{msg}, args...)...) -} - -// Zerof asserts that i is the zero value for its type. -func Zerof(t TestingT, i interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Zero(t, i, append([]interface{}{msg}, args...)...) -} diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl b/vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl deleted file mode 100644 index d2bb0b8..0000000 --- a/vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl +++ /dev/null @@ -1,5 +0,0 @@ -{{.CommentFormat}} -func {{.DocInfo.Name}}f(t TestingT, {{.ParamsFormat}}) bool { - if h, ok := t.(tHelper); ok { h.Helper() } - return {{.DocInfo.Name}}(t, {{.ForwardedParamsFormat}}) -} diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go deleted file mode 100644 index de39f79..0000000 --- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go +++ /dev/null @@ -1,956 +0,0 @@ -/* -* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen -* THIS FILE MUST NOT BE EDITED BY HAND - */ - -package assert - -import ( - http "net/http" - url "net/url" - time "time" -) - -// Condition uses a Comparison to assert a complex condition. -func (a *Assertions) Condition(comp Comparison, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Condition(a.t, comp, msgAndArgs...) -} - -// Conditionf uses a Comparison to assert a complex condition. -func (a *Assertions) Conditionf(comp Comparison, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Conditionf(a.t, comp, msg, args...) -} - -// Contains asserts that the specified string, list(array, slice...) or map contains the -// specified substring or element. -// -// a.Contains("Hello World", "World") -// a.Contains(["Hello", "World"], "World") -// a.Contains({"Hello": "World"}, "Hello") -func (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Contains(a.t, s, contains, msgAndArgs...) -} - -// Containsf asserts that the specified string, list(array, slice...) or map contains the -// specified substring or element. -// -// a.Containsf("Hello World", "World", "error message %s", "formatted") -// a.Containsf(["Hello", "World"], "World", "error message %s", "formatted") -// a.Containsf({"Hello": "World"}, "Hello", "error message %s", "formatted") -func (a *Assertions) Containsf(s interface{}, contains interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Containsf(a.t, s, contains, msg, args...) -} - -// DirExists checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists. -func (a *Assertions) DirExists(path string, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return DirExists(a.t, path, msgAndArgs...) 
-} - -// DirExistsf checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists. -func (a *Assertions) DirExistsf(path string, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return DirExistsf(a.t, path, msg, args...) -} - -// ElementsMatch asserts that the specified listA(array, slice...) is equal to specified -// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, -// the number of appearances of each of them in both lists should match. -// -// a.ElementsMatch([1, 3, 2, 3], [1, 3, 3, 2]) -func (a *Assertions) ElementsMatch(listA interface{}, listB interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return ElementsMatch(a.t, listA, listB, msgAndArgs...) -} - -// ElementsMatchf asserts that the specified listA(array, slice...) is equal to specified -// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, -// the number of appearances of each of them in both lists should match. -// -// a.ElementsMatchf([1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted") -func (a *Assertions) ElementsMatchf(listA interface{}, listB interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return ElementsMatchf(a.t, listA, listB, msg, args...) -} - -// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. -// -// a.Empty(obj) -func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Empty(a.t, object, msgAndArgs...) -} - -// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. -// -// a.Emptyf(obj, "error message %s", "formatted") -func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Emptyf(a.t, object, msg, args...) -} - -// Equal asserts that two objects are equal. -// -// a.Equal(123, 123) -// -// Pointer variable equality is determined based on the equality of the -// referenced values (as opposed to the memory addresses). Function equality -// cannot be determined and will always fail. -func (a *Assertions) Equal(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Equal(a.t, expected, actual, msgAndArgs...) -} - -// EqualError asserts that a function returned an error (i.e. not `nil`) -// and that it is equal to the provided error. -// -// actualObj, err := SomeFunction() -// a.EqualError(err, expectedErrorString) -func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return EqualError(a.t, theError, errString, msgAndArgs...) -} - -// EqualErrorf asserts that a function returned an error (i.e. not `nil`) -// and that it is equal to the provided error. -// -// actualObj, err := SomeFunction() -// a.EqualErrorf(err, expectedErrorString, "error message %s", "formatted") -func (a *Assertions) EqualErrorf(theError error, errString string, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return EqualErrorf(a.t, theError, errString, msg, args...) 
-} - -// EqualValues asserts that two objects are equal or convertable to the same types -// and equal. -// -// a.EqualValues(uint32(123), int32(123)) -func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return EqualValues(a.t, expected, actual, msgAndArgs...) -} - -// EqualValuesf asserts that two objects are equal or convertable to the same types -// and equal. -// -// a.EqualValuesf(uint32(123, "error message %s", "formatted"), int32(123)) -func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return EqualValuesf(a.t, expected, actual, msg, args...) -} - -// Equalf asserts that two objects are equal. -// -// a.Equalf(123, 123, "error message %s", "formatted") -// -// Pointer variable equality is determined based on the equality of the -// referenced values (as opposed to the memory addresses). Function equality -// cannot be determined and will always fail. -func (a *Assertions) Equalf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Equalf(a.t, expected, actual, msg, args...) -} - -// Error asserts that a function returned an error (i.e. not `nil`). -// -// actualObj, err := SomeFunction() -// if a.Error(err) { -// assert.Equal(t, expectedError, err) -// } -func (a *Assertions) Error(err error, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Error(a.t, err, msgAndArgs...) -} - -// Errorf asserts that a function returned an error (i.e. not `nil`). -// -// actualObj, err := SomeFunction() -// if a.Errorf(err, "error message %s", "formatted") { -// assert.Equal(t, expectedErrorf, err) -// } -func (a *Assertions) Errorf(err error, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Errorf(a.t, err, msg, args...) -} - -// Exactly asserts that two objects are equal in value and type. -// -// a.Exactly(int32(123), int64(123)) -func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Exactly(a.t, expected, actual, msgAndArgs...) -} - -// Exactlyf asserts that two objects are equal in value and type. -// -// a.Exactlyf(int32(123, "error message %s", "formatted"), int64(123)) -func (a *Assertions) Exactlyf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Exactlyf(a.t, expected, actual, msg, args...) -} - -// Fail reports a failure through -func (a *Assertions) Fail(failureMessage string, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Fail(a.t, failureMessage, msgAndArgs...) -} - -// FailNow fails test -func (a *Assertions) FailNow(failureMessage string, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return FailNow(a.t, failureMessage, msgAndArgs...) -} - -// FailNowf fails test -func (a *Assertions) FailNowf(failureMessage string, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return FailNowf(a.t, failureMessage, msg, args...) 
-} - -// Failf reports a failure through -func (a *Assertions) Failf(failureMessage string, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Failf(a.t, failureMessage, msg, args...) -} - -// False asserts that the specified value is false. -// -// a.False(myBool) -func (a *Assertions) False(value bool, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return False(a.t, value, msgAndArgs...) -} - -// Falsef asserts that the specified value is false. -// -// a.Falsef(myBool, "error message %s", "formatted") -func (a *Assertions) Falsef(value bool, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Falsef(a.t, value, msg, args...) -} - -// FileExists checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file. -func (a *Assertions) FileExists(path string, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return FileExists(a.t, path, msgAndArgs...) -} - -// FileExistsf checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file. -func (a *Assertions) FileExistsf(path string, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return FileExistsf(a.t, path, msg, args...) -} - -// HTTPBodyContains asserts that a specified handler returns a -// body that contains a string. -// -// a.HTTPBodyContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return HTTPBodyContains(a.t, handler, method, url, values, str, msgAndArgs...) -} - -// HTTPBodyContainsf asserts that a specified handler returns a -// body that contains a string. -// -// a.HTTPBodyContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPBodyContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return HTTPBodyContainsf(a.t, handler, method, url, values, str, msg, args...) -} - -// HTTPBodyNotContains asserts that a specified handler returns a -// body that does not contain a string. -// -// a.HTTPBodyNotContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return HTTPBodyNotContains(a.t, handler, method, url, values, str, msgAndArgs...) -} - -// HTTPBodyNotContainsf asserts that a specified handler returns a -// body that does not contain a string. -// -// a.HTTPBodyNotContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") -// -// Returns whether the assertion was successful (true) or not (false). 
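
The HTTPBody helpers above invoke the handler directly (no live server); a sketch with a hypothetical handler and route:

    package example

    import (
        "fmt"
        "net/http"
        "testing"

        "github.com/stretchr/testify/assert"
    )

    func TestHTTPBodySketch(t *testing.T) {
        a := assert.New(t)

        greet := func(w http.ResponseWriter, r *http.Request) {
            fmt.Fprint(w, "hello world")
        }

        a.HTTPBodyContains(greet, "GET", "/greet", nil, "hello")
        a.HTTPBodyNotContains(greet, "GET", "/greet", nil, "goodbye")
    }
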
-func (a *Assertions) HTTPBodyNotContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return HTTPBodyNotContainsf(a.t, handler, method, url, values, str, msg, args...) -} - -// HTTPError asserts that a specified handler returns an error status code. -// -// a.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return HTTPError(a.t, handler, method, url, values, msgAndArgs...) -} - -// HTTPErrorf asserts that a specified handler returns an error status code. -// -// a.HTTPErrorf(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} -// -// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). -func (a *Assertions) HTTPErrorf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return HTTPErrorf(a.t, handler, method, url, values, msg, args...) -} - -// HTTPRedirect asserts that a specified handler returns a redirect status code. -// -// a.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return HTTPRedirect(a.t, handler, method, url, values, msgAndArgs...) -} - -// HTTPRedirectf asserts that a specified handler returns a redirect status code. -// -// a.HTTPRedirectf(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} -// -// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). -func (a *Assertions) HTTPRedirectf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return HTTPRedirectf(a.t, handler, method, url, values, msg, args...) -} - -// HTTPSuccess asserts that a specified handler returns a success status code. -// -// a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil) -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return HTTPSuccess(a.t, handler, method, url, values, msgAndArgs...) -} - -// HTTPSuccessf asserts that a specified handler returns a success status code. -// -// a.HTTPSuccessf(myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPSuccessf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return HTTPSuccessf(a.t, handler, method, url, values, msg, args...) -} - -// Implements asserts that an object is implemented by the specified interface. 
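
The status-code variants follow the same pattern, treating 2xx as success and 4xx/5xx as error; another hypothetical sketch:

    package example

    import (
        "net/http"
        "testing"

        "github.com/stretchr/testify/assert"
    )

    func TestHTTPStatusSketch(t *testing.T) {
        a := assert.New(t)

        ok := func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) }
        gone := func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusNotFound) }

        a.HTTPSuccess(ok, "GET", "/health", nil)  // 2xx counts as success
        a.HTTPError(gone, "GET", "/missing", nil) // 4xx/5xx counts as error
    }
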
-// -// a.Implements((*MyInterface)(nil), new(MyObject)) -func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Implements(a.t, interfaceObject, object, msgAndArgs...) -} - -// Implementsf asserts that an object is implemented by the specified interface. -// -// a.Implementsf((*MyInterface, "error message %s", "formatted")(nil), new(MyObject)) -func (a *Assertions) Implementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Implementsf(a.t, interfaceObject, object, msg, args...) -} - -// InDelta asserts that the two numerals are within delta of each other. -// -// a.InDelta(math.Pi, (22 / 7.0), 0.01) -func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return InDelta(a.t, expected, actual, delta, msgAndArgs...) -} - -// InDeltaMapValues is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys. -func (a *Assertions) InDeltaMapValues(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return InDeltaMapValues(a.t, expected, actual, delta, msgAndArgs...) -} - -// InDeltaMapValuesf is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys. -func (a *Assertions) InDeltaMapValuesf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return InDeltaMapValuesf(a.t, expected, actual, delta, msg, args...) -} - -// InDeltaSlice is the same as InDelta, except it compares two slices. -func (a *Assertions) InDeltaSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return InDeltaSlice(a.t, expected, actual, delta, msgAndArgs...) -} - -// InDeltaSlicef is the same as InDelta, except it compares two slices. -func (a *Assertions) InDeltaSlicef(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return InDeltaSlicef(a.t, expected, actual, delta, msg, args...) -} - -// InDeltaf asserts that the two numerals are within delta of each other. -// -// a.InDeltaf(math.Pi, (22 / 7.0, "error message %s", "formatted"), 0.01) -func (a *Assertions) InDeltaf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return InDeltaf(a.t, expected, actual, delta, msg, args...) -} - -// InEpsilon asserts that expected and actual have a relative error less than epsilon -func (a *Assertions) InEpsilon(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return InEpsilon(a.t, expected, actual, epsilon, msgAndArgs...) -} - -// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices. 
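
For the InDelta family above, the bound is absolute, and InDeltaMapValues additionally requires identical key sets; a small sketch:

    package example

    import (
        "math"
        "testing"

        "github.com/stretchr/testify/assert"
    )

    func TestApproximateSketch(t *testing.T) {
        a := assert.New(t)

        // Passes when |expected - actual| <= delta.
        a.InDelta(math.Pi, 22.0/7.0, 0.01)

        // Both maps must share exactly the same keys; values compared with InDelta.
        a.InDeltaMapValues(
            map[string]float64{"x": 1.0, "y": 2.0},
            map[string]float64{"x": 1.001, "y": 1.999},
            0.01,
        )
    }
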
-func (a *Assertions) InEpsilonSlice(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return InEpsilonSlice(a.t, expected, actual, epsilon, msgAndArgs...) -} - -// InEpsilonSlicef is the same as InEpsilon, except it compares each value from two slices. -func (a *Assertions) InEpsilonSlicef(expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return InEpsilonSlicef(a.t, expected, actual, epsilon, msg, args...) -} - -// InEpsilonf asserts that expected and actual have a relative error less than epsilon -func (a *Assertions) InEpsilonf(expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return InEpsilonf(a.t, expected, actual, epsilon, msg, args...) -} - -// IsType asserts that the specified objects are of the same type. -func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return IsType(a.t, expectedType, object, msgAndArgs...) -} - -// IsTypef asserts that the specified objects are of the same type. -func (a *Assertions) IsTypef(expectedType interface{}, object interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return IsTypef(a.t, expectedType, object, msg, args...) -} - -// JSONEq asserts that two JSON strings are equivalent. -// -// a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) -func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return JSONEq(a.t, expected, actual, msgAndArgs...) -} - -// JSONEqf asserts that two JSON strings are equivalent. -// -// a.JSONEqf(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") -func (a *Assertions) JSONEqf(expected string, actual string, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return JSONEqf(a.t, expected, actual, msg, args...) -} - -// Len asserts that the specified object has specific length. -// Len also fails if the object has a type that len() not accept. -// -// a.Len(mySlice, 3) -func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Len(a.t, object, length, msgAndArgs...) -} - -// Lenf asserts that the specified object has specific length. -// Lenf also fails if the object has a type that len() not accept. -// -// a.Lenf(mySlice, 3, "error message %s", "formatted") -func (a *Assertions) Lenf(object interface{}, length int, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Lenf(a.t, object, length, msg, args...) -} - -// Nil asserts that the specified object is nil. -// -// a.Nil(err) -func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Nil(a.t, object, msgAndArgs...) -} - -// Nilf asserts that the specified object is nil. 
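
JSONEq compares parsed JSON rather than raw strings, and Len fails outright for types the built-in len() does not accept; for illustration:

    package example

    import (
        "testing"

        "github.com/stretchr/testify/assert"
    )

    func TestShapeSketch(t *testing.T) {
        a := assert.New(t)

        // Both strings are parsed, so key order and whitespace are irrelevant.
        a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)

        a.Len([]string{"a", "b", "c"}, 3)

        var err error
        a.Nil(err)
    }
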
-// -// a.Nilf(err, "error message %s", "formatted") -func (a *Assertions) Nilf(object interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Nilf(a.t, object, msg, args...) -} - -// NoError asserts that a function returned no error (i.e. `nil`). -// -// actualObj, err := SomeFunction() -// if a.NoError(err) { -// assert.Equal(t, expectedObj, actualObj) -// } -func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NoError(a.t, err, msgAndArgs...) -} - -// NoErrorf asserts that a function returned no error (i.e. `nil`). -// -// actualObj, err := SomeFunction() -// if a.NoErrorf(err, "error message %s", "formatted") { -// assert.Equal(t, expectedObj, actualObj) -// } -func (a *Assertions) NoErrorf(err error, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NoErrorf(a.t, err, msg, args...) -} - -// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the -// specified substring or element. -// -// a.NotContains("Hello World", "Earth") -// a.NotContains(["Hello", "World"], "Earth") -// a.NotContains({"Hello": "World"}, "Earth") -func (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotContains(a.t, s, contains, msgAndArgs...) -} - -// NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the -// specified substring or element. -// -// a.NotContainsf("Hello World", "Earth", "error message %s", "formatted") -// a.NotContainsf(["Hello", "World"], "Earth", "error message %s", "formatted") -// a.NotContainsf({"Hello": "World"}, "Earth", "error message %s", "formatted") -func (a *Assertions) NotContainsf(s interface{}, contains interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotContainsf(a.t, s, contains, msg, args...) -} - -// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. -// -// if a.NotEmpty(obj) { -// assert.Equal(t, "two", obj[1]) -// } -func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotEmpty(a.t, object, msgAndArgs...) -} - -// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. -// -// if a.NotEmptyf(obj, "error message %s", "formatted") { -// assert.Equal(t, "two", obj[1]) -// } -func (a *Assertions) NotEmptyf(object interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotEmptyf(a.t, object, msg, args...) -} - -// NotEqual asserts that the specified values are NOT equal. -// -// a.NotEqual(obj1, obj2) -// -// Pointer variable equality is determined based on the equality of the -// referenced values (as opposed to the memory addresses). -func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotEqual(a.t, expected, actual, msgAndArgs...) -} - -// NotEqualf asserts that the specified values are NOT equal. 
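
A compact sketch of the negated assertions above (hypothetical values):

    package example

    import (
        "testing"

        "github.com/stretchr/testify/assert"
    )

    func TestNegationSketch(t *testing.T) {
        a := assert.New(t)

        a.NoError(nil) // nil error means success
        a.NotContains("Hello World", "Earth")
        a.NotEmpty([]int{1})
        a.NotEqual("foo", "bar")
    }
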
-// -// a.NotEqualf(obj1, obj2, "error message %s", "formatted") -// -// Pointer variable equality is determined based on the equality of the -// referenced values (as opposed to the memory addresses). -func (a *Assertions) NotEqualf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotEqualf(a.t, expected, actual, msg, args...) -} - -// NotNil asserts that the specified object is not nil. -// -// a.NotNil(err) -func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotNil(a.t, object, msgAndArgs...) -} - -// NotNilf asserts that the specified object is not nil. -// -// a.NotNilf(err, "error message %s", "formatted") -func (a *Assertions) NotNilf(object interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotNilf(a.t, object, msg, args...) -} - -// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. -// -// a.NotPanics(func(){ RemainCalm() }) -func (a *Assertions) NotPanics(f PanicTestFunc, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotPanics(a.t, f, msgAndArgs...) -} - -// NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic. -// -// a.NotPanicsf(func(){ RemainCalm() }, "error message %s", "formatted") -func (a *Assertions) NotPanicsf(f PanicTestFunc, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotPanicsf(a.t, f, msg, args...) -} - -// NotRegexp asserts that a specified regexp does not match a string. -// -// a.NotRegexp(regexp.MustCompile("starts"), "it's starting") -// a.NotRegexp("^start", "it's not starting") -func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotRegexp(a.t, rx, str, msgAndArgs...) -} - -// NotRegexpf asserts that a specified regexp does not match a string. -// -// a.NotRegexpf(regexp.MustCompile("starts", "error message %s", "formatted"), "it's starting") -// a.NotRegexpf("^start", "it's not starting", "error message %s", "formatted") -func (a *Assertions) NotRegexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotRegexpf(a.t, rx, str, msg, args...) -} - -// NotSubset asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). -// -// a.NotSubset([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") -func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotSubset(a.t, list, subset, msgAndArgs...) -} - -// NotSubsetf asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). -// -// a.NotSubsetf([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") -func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotSubsetf(a.t, list, subset, msg, args...) -} - -// NotZero asserts that i is not the zero value for its type. 
-func (a *Assertions) NotZero(i interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotZero(a.t, i, msgAndArgs...) -} - -// NotZerof asserts that i is not the zero value for its type. -func (a *Assertions) NotZerof(i interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotZerof(a.t, i, msg, args...) -} - -// Panics asserts that the code inside the specified PanicTestFunc panics. -// -// a.Panics(func(){ GoCrazy() }) -func (a *Assertions) Panics(f PanicTestFunc, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Panics(a.t, f, msgAndArgs...) -} - -// PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that -// the recovered panic value equals the expected panic value. -// -// a.PanicsWithValue("crazy error", func(){ GoCrazy() }) -func (a *Assertions) PanicsWithValue(expected interface{}, f PanicTestFunc, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return PanicsWithValue(a.t, expected, f, msgAndArgs...) -} - -// PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that -// the recovered panic value equals the expected panic value. -// -// a.PanicsWithValuef("crazy error", func(){ GoCrazy() }, "error message %s", "formatted") -func (a *Assertions) PanicsWithValuef(expected interface{}, f PanicTestFunc, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return PanicsWithValuef(a.t, expected, f, msg, args...) -} - -// Panicsf asserts that the code inside the specified PanicTestFunc panics. -// -// a.Panicsf(func(){ GoCrazy() }, "error message %s", "formatted") -func (a *Assertions) Panicsf(f PanicTestFunc, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Panicsf(a.t, f, msg, args...) -} - -// Regexp asserts that a specified regexp matches a string. -// -// a.Regexp(regexp.MustCompile("start"), "it's starting") -// a.Regexp("start...$", "it's not starting") -func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Regexp(a.t, rx, str, msgAndArgs...) -} - -// Regexpf asserts that a specified regexp matches a string. -// -// a.Regexpf(regexp.MustCompile("start", "error message %s", "formatted"), "it's starting") -// a.Regexpf("start...$", "it's not starting", "error message %s", "formatted") -func (a *Assertions) Regexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Regexpf(a.t, rx, str, msg, args...) -} - -// Subset asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). -// -// a.Subset([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") -func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Subset(a.t, list, subset, msgAndArgs...) -} - -// Subsetf asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). 
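
And the panic, regexp and subset forwarders above, sketched with hypothetical inputs:

    package example

    import (
        "regexp"
        "testing"

        "github.com/stretchr/testify/assert"
    )

    func TestPanicsAndSubsetsSketch(t *testing.T) {
        a := assert.New(t)

        a.Panics(func() { panic("boom") })
        a.Regexp(regexp.MustCompile("^start"), "starting now")
        a.Subset([]int{1, 2, 3}, []int{1, 2}) // every element of the 2nd list is in the 1st
        a.NotZero(42)
    }
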
-// -// a.Subsetf([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") -func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Subsetf(a.t, list, subset, msg, args...) -} - -// True asserts that the specified value is true. -// -// a.True(myBool) -func (a *Assertions) True(value bool, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return True(a.t, value, msgAndArgs...) -} - -// Truef asserts that the specified value is true. -// -// a.Truef(myBool, "error message %s", "formatted") -func (a *Assertions) Truef(value bool, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Truef(a.t, value, msg, args...) -} - -// WithinDuration asserts that the two times are within duration delta of each other. -// -// a.WithinDuration(time.Now(), time.Now(), 10*time.Second) -func (a *Assertions) WithinDuration(expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return WithinDuration(a.t, expected, actual, delta, msgAndArgs...) -} - -// WithinDurationf asserts that the two times are within duration delta of each other. -// -// a.WithinDurationf(time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") -func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return WithinDurationf(a.t, expected, actual, delta, msg, args...) -} - -// Zero asserts that i is the zero value for its type. -func (a *Assertions) Zero(i interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Zero(a.t, i, msgAndArgs...) -} - -// Zerof asserts that i is the zero value for its type. -func (a *Assertions) Zerof(i interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Zerof(a.t, i, msg, args...) -} diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl b/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl deleted file mode 100644 index 188bb9e..0000000 --- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl +++ /dev/null @@ -1,5 +0,0 @@ -{{.CommentWithoutT "a"}} -func (a *Assertions) {{.DocInfo.Name}}({{.Params}}) bool { - if h, ok := a.t.(tHelper); ok { h.Helper() } - return {{.DocInfo.Name}}(a.t, {{.ForwardedParams}}) -} diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go deleted file mode 100644 index 5bdec56..0000000 --- a/vendor/github.com/stretchr/testify/assert/assertions.go +++ /dev/null @@ -1,1394 +0,0 @@ -package assert - -import ( - "bufio" - "bytes" - "encoding/json" - "errors" - "fmt" - "math" - "os" - "reflect" - "regexp" - "runtime" - "strings" - "time" - "unicode" - "unicode/utf8" - - "github.com/davecgh/go-spew/spew" - "github.com/pmezard/go-difflib/difflib" -) - -//go:generate go run ../_codegen/main.go -output-package=assert -template=assertion_format.go.tmpl - -// TestingT is an interface wrapper around *testing.T -type TestingT interface { - Errorf(format string, args ...interface{}) -} - -// ComparisonAssertionFunc is a common function prototype when comparing two values. 
Can be useful -// for table driven tests. -type ComparisonAssertionFunc func(TestingT, interface{}, interface{}, ...interface{}) bool - -// ValueAssertionFunc is a common function prototype when validating a single value. Can be useful -// for table driven tests. -type ValueAssertionFunc func(TestingT, interface{}, ...interface{}) bool - -// BoolAssertionFunc is a common function prototype when validating a bool value. Can be useful -// for table driven tests. -type BoolAssertionFunc func(TestingT, bool, ...interface{}) bool - -// ValuesAssertionFunc is a common function prototype when validating an error value. Can be useful -// for table driven tests. -type ErrorAssertionFunc func(TestingT, error, ...interface{}) bool - -// Comparison a custom function that returns true on success and false on failure -type Comparison func() (success bool) - -/* - Helper functions -*/ - -// ObjectsAreEqual determines if two objects are considered equal. -// -// This function does no assertion of any kind. -func ObjectsAreEqual(expected, actual interface{}) bool { - if expected == nil || actual == nil { - return expected == actual - } - - exp, ok := expected.([]byte) - if !ok { - return reflect.DeepEqual(expected, actual) - } - - act, ok := actual.([]byte) - if !ok { - return false - } - if exp == nil || act == nil { - return exp == nil && act == nil - } - return bytes.Equal(exp, act) -} - -// ObjectsAreEqualValues gets whether two objects are equal, or if their -// values are equal. -func ObjectsAreEqualValues(expected, actual interface{}) bool { - if ObjectsAreEqual(expected, actual) { - return true - } - - actualType := reflect.TypeOf(actual) - if actualType == nil { - return false - } - expectedValue := reflect.ValueOf(expected) - if expectedValue.IsValid() && expectedValue.Type().ConvertibleTo(actualType) { - // Attempt comparison after type conversion - return reflect.DeepEqual(expectedValue.Convert(actualType).Interface(), actual) - } - - return false -} - -/* CallerInfo is necessary because the assert functions use the testing object -internally, causing it to print the file:line of the assert method, rather than where -the problem actually occurred in calling code.*/ - -// CallerInfo returns an array of strings containing the file and line number -// of each stack frame leading from the current test to the assert call that -// failed. -func CallerInfo() []string { - - pc := uintptr(0) - file := "" - line := 0 - ok := false - name := "" - - callers := []string{} - for i := 0; ; i++ { - pc, file, line, ok = runtime.Caller(i) - if !ok { - // The breaks below failed to terminate the loop, and we ran off the - // end of the call stack. - break - } - - // This is a huge edge case, but it will panic if this is the case, see #180 - if file == "" { - break - } - - f := runtime.FuncForPC(pc) - if f == nil { - break - } - name = f.Name() - - // testing.tRunner is the standard library function that calls - // tests. Subtests are called directly by tRunner, without going through - // the Test/Benchmark/Example function that contains the t.Run calls, so - // with subtests we should break when we hit tRunner, without adding it - // to the list of callers. 
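
The *AssertionFunc types removed above exist chiefly for table-driven tests; a hypothetical example of that pattern:

    package example

    import (
        "testing"

        "github.com/stretchr/testify/assert"
    )

    func TestTableDrivenSketch(t *testing.T) {
        tests := []struct {
            name      string
            value     bool
            assertion assert.BoolAssertionFunc
        }{
            {"arithmetic holds", 1+1 == 2, assert.True},
            {"arithmetic fails", 1+1 == 3, assert.False},
        }
        for _, tt := range tests {
            t.Run(tt.name, func(t *testing.T) {
                // *testing.T satisfies assert.TestingT, which only needs Errorf.
                tt.assertion(t, tt.value)
            })
        }
    }
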
- if name == "testing.tRunner" { - break - } - - parts := strings.Split(file, "/") - file = parts[len(parts)-1] - if len(parts) > 1 { - dir := parts[len(parts)-2] - if (dir != "assert" && dir != "mock" && dir != "require") || file == "mock_test.go" { - callers = append(callers, fmt.Sprintf("%s:%d", file, line)) - } - } - - // Drop the package - segments := strings.Split(name, ".") - name = segments[len(segments)-1] - if isTest(name, "Test") || - isTest(name, "Benchmark") || - isTest(name, "Example") { - break - } - } - - return callers -} - -// Stolen from the `go test` tool. -// isTest tells whether name looks like a test (or benchmark, according to prefix). -// It is a Test (say) if there is a character after Test that is not a lower-case letter. -// We don't want TesticularCancer. -func isTest(name, prefix string) bool { - if !strings.HasPrefix(name, prefix) { - return false - } - if len(name) == len(prefix) { // "Test" is ok - return true - } - rune, _ := utf8.DecodeRuneInString(name[len(prefix):]) - return !unicode.IsLower(rune) -} - -func messageFromMsgAndArgs(msgAndArgs ...interface{}) string { - if len(msgAndArgs) == 0 || msgAndArgs == nil { - return "" - } - if len(msgAndArgs) == 1 { - return msgAndArgs[0].(string) - } - if len(msgAndArgs) > 1 { - return fmt.Sprintf(msgAndArgs[0].(string), msgAndArgs[1:]...) - } - return "" -} - -// Aligns the provided message so that all lines after the first line start at the same location as the first line. -// Assumes that the first line starts at the correct location (after carriage return, tab, label, spacer and tab). -// The longestLabelLen parameter specifies the length of the longest label in the output (required becaues this is the -// basis on which the alignment occurs). -func indentMessageLines(message string, longestLabelLen int) string { - outBuf := new(bytes.Buffer) - - for i, scanner := 0, bufio.NewScanner(strings.NewReader(message)); scanner.Scan(); i++ { - // no need to align first line because it starts at the correct location (after the label) - if i != 0 { - // append alignLen+1 spaces to align with "{{longestLabel}}:" before adding tab - outBuf.WriteString("\n\t" + strings.Repeat(" ", longestLabelLen+1) + "\t") - } - outBuf.WriteString(scanner.Text()) - } - - return outBuf.String() -} - -type failNower interface { - FailNow() -} - -// FailNow fails test -func FailNow(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - Fail(t, failureMessage, msgAndArgs...) - - // We cannot extend TestingT with FailNow() and - // maintain backwards compatibility, so we fallback - // to panicking when FailNow is not available in - // TestingT. - // See issue #263 - - if t, ok := t.(failNower); ok { - t.FailNow() - } else { - panic("test failed and t is missing `FailNow()`") - } - return false -} - -// Fail reports a failure through -func Fail(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - content := []labeledContent{ - {"Error Trace", strings.Join(CallerInfo(), "\n\t\t\t")}, - {"Error", failureMessage}, - } - - // Add test name if the Go version supports it - if n, ok := t.(interface { - Name() string - }); ok { - content = append(content, labeledContent{"Test", n.Name()}) - } - - message := messageFromMsgAndArgs(msgAndArgs...) 
- if len(message) > 0 { - content = append(content, labeledContent{"Messages", message}) - } - - t.Errorf("\n%s", ""+labeledOutput(content...)) - - return false -} - -type labeledContent struct { - label string - content string -} - -// labeledOutput returns a string consisting of the provided labeledContent. Each labeled output is appended in the following manner: -// -// \t{{label}}:{{align_spaces}}\t{{content}}\n -// -// The initial carriage return is required to undo/erase any padding added by testing.T.Errorf. The "\t{{label}}:" is for the label. -// If a label is shorter than the longest label provided, padding spaces are added to make all the labels match in length. Once this -// alignment is achieved, "\t{{content}}\n" is added for the output. -// -// If the content of the labeledOutput contains line breaks, the subsequent lines are aligned so that they start at the same location as the first line. -func labeledOutput(content ...labeledContent) string { - longestLabel := 0 - for _, v := range content { - if len(v.label) > longestLabel { - longestLabel = len(v.label) - } - } - var output string - for _, v := range content { - output += "\t" + v.label + ":" + strings.Repeat(" ", longestLabel-len(v.label)) + "\t" + indentMessageLines(v.content, longestLabel) + "\n" - } - return output -} - -// Implements asserts that an object is implemented by the specified interface. -// -// assert.Implements(t, (*MyInterface)(nil), new(MyObject)) -func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - interfaceType := reflect.TypeOf(interfaceObject).Elem() - - if object == nil { - return Fail(t, fmt.Sprintf("Cannot check if nil implements %v", interfaceType), msgAndArgs...) - } - if !reflect.TypeOf(object).Implements(interfaceType) { - return Fail(t, fmt.Sprintf("%T must implement %v", object, interfaceType), msgAndArgs...) - } - - return true -} - -// IsType asserts that the specified objects are of the same type. -func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - if !ObjectsAreEqual(reflect.TypeOf(object), reflect.TypeOf(expectedType)) { - return Fail(t, fmt.Sprintf("Object expected to be of type %v, but was %v", reflect.TypeOf(expectedType), reflect.TypeOf(object)), msgAndArgs...) - } - - return true -} - -// Equal asserts that two objects are equal. -// -// assert.Equal(t, 123, 123) -// -// Pointer variable equality is determined based on the equality of the -// referenced values (as opposed to the memory addresses). Function equality -// cannot be determined and will always fail. -func Equal(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if err := validateEqualArgs(expected, actual); err != nil { - return Fail(t, fmt.Sprintf("Invalid operation: %#v == %#v (%s)", - expected, actual, err), msgAndArgs...) - } - - if !ObjectsAreEqual(expected, actual) { - diff := diff(expected, actual) - expected, actual = formatUnequalValues(expected, actual) - return Fail(t, fmt.Sprintf("Not equal: \n"+ - "expected: %s\n"+ - "actual : %s%s", expected, actual, diff), msgAndArgs...) - } - - return true - -} - -// formatUnequalValues takes two values of arbitrary types and returns string -// representations appropriate to be presented to the user. 
-// -// If the values are not of like type, the returned strings will be prefixed -// with the type name, and the value will be enclosed in parenthesis similar -// to a type conversion in the Go grammar. -func formatUnequalValues(expected, actual interface{}) (e string, a string) { - if reflect.TypeOf(expected) != reflect.TypeOf(actual) { - return fmt.Sprintf("%T(%#v)", expected, expected), - fmt.Sprintf("%T(%#v)", actual, actual) - } - - return fmt.Sprintf("%#v", expected), - fmt.Sprintf("%#v", actual) -} - -// EqualValues asserts that two objects are equal or convertable to the same types -// and equal. -// -// assert.EqualValues(t, uint32(123), int32(123)) -func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - if !ObjectsAreEqualValues(expected, actual) { - diff := diff(expected, actual) - expected, actual = formatUnequalValues(expected, actual) - return Fail(t, fmt.Sprintf("Not equal: \n"+ - "expected: %s\n"+ - "actual : %s%s", expected, actual, diff), msgAndArgs...) - } - - return true - -} - -// Exactly asserts that two objects are equal in value and type. -// -// assert.Exactly(t, int32(123), int64(123)) -func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - aType := reflect.TypeOf(expected) - bType := reflect.TypeOf(actual) - - if aType != bType { - return Fail(t, fmt.Sprintf("Types expected to match exactly\n\t%v != %v", aType, bType), msgAndArgs...) - } - - return Equal(t, expected, actual, msgAndArgs...) - -} - -// NotNil asserts that the specified object is not nil. -// -// assert.NotNil(t, err) -func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if !isNil(object) { - return true - } - return Fail(t, "Expected value not to be nil.", msgAndArgs...) -} - -// isNil checks if a specified object is nil or not, without Failing. -func isNil(object interface{}) bool { - if object == nil { - return true - } - - value := reflect.ValueOf(object) - kind := value.Kind() - if kind >= reflect.Chan && kind <= reflect.Slice && value.IsNil() { - return true - } - - return false -} - -// Nil asserts that the specified object is nil. -// -// assert.Nil(t, err) -func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if isNil(object) { - return true - } - return Fail(t, fmt.Sprintf("Expected nil, but got: %#v", object), msgAndArgs...) -} - -// isEmpty gets whether the specified object is considered empty or not. -func isEmpty(object interface{}) bool { - - // get nil case out of the way - if object == nil { - return true - } - - objValue := reflect.ValueOf(object) - - switch objValue.Kind() { - // collection types are empty when they have no element - case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice: - return objValue.Len() == 0 - // pointers are empty if nil or if the value they point to is empty - case reflect.Ptr: - if objValue.IsNil() { - return true - } - deref := objValue.Elem().Interface() - return isEmpty(deref) - // for all other types, compare against the zero value - default: - zero := reflect.Zero(objValue.Type()) - return reflect.DeepEqual(object, zero.Interface()) - } -} - -// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. 
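
One subtlety in the isNil helper above: it reflects on the value, so a typed nil pointer still satisfies Nil even though a plain interface comparison would not. A sketch:

    package example

    import (
        "testing"

        "github.com/stretchr/testify/assert"
    )

    func TestTypedNilSketch(t *testing.T) {
        a := assert.New(t)

        var p *int // typed nil pointer
        // interface{}(p) != nil in plain Go, but isNil calls value.IsNil()
        // for the nillable reflect kinds, so this assertion passes.
        a.Nil(p)
    }
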
-// -// assert.Empty(t, obj) -func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - pass := isEmpty(object) - if !pass { - Fail(t, fmt.Sprintf("Should be empty, but was %v", object), msgAndArgs...) - } - - return pass - -} - -// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. -// -// if assert.NotEmpty(t, obj) { -// assert.Equal(t, "two", obj[1]) -// } -func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - pass := !isEmpty(object) - if !pass { - Fail(t, fmt.Sprintf("Should NOT be empty, but was %v", object), msgAndArgs...) - } - - return pass - -} - -// getLen try to get length of object. -// return (false, 0) if impossible. -func getLen(x interface{}) (ok bool, length int) { - v := reflect.ValueOf(x) - defer func() { - if e := recover(); e != nil { - ok = false - } - }() - return true, v.Len() -} - -// Len asserts that the specified object has specific length. -// Len also fails if the object has a type that len() not accept. -// -// assert.Len(t, mySlice, 3) -func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - ok, l := getLen(object) - if !ok { - return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", object), msgAndArgs...) - } - - if l != length { - return Fail(t, fmt.Sprintf("\"%s\" should have %d item(s), but has %d", object, length, l), msgAndArgs...) - } - return true -} - -// True asserts that the specified value is true. -// -// assert.True(t, myBool) -func True(t TestingT, value bool, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if h, ok := t.(interface { - Helper() - }); ok { - h.Helper() - } - - if value != true { - return Fail(t, "Should be true", msgAndArgs...) - } - - return true - -} - -// False asserts that the specified value is false. -// -// assert.False(t, myBool) -func False(t TestingT, value bool, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - if value != false { - return Fail(t, "Should be false", msgAndArgs...) - } - - return true - -} - -// NotEqual asserts that the specified values are NOT equal. -// -// assert.NotEqual(t, obj1, obj2) -// -// Pointer variable equality is determined based on the equality of the -// referenced values (as opposed to the memory addresses). -func NotEqual(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if err := validateEqualArgs(expected, actual); err != nil { - return Fail(t, fmt.Sprintf("Invalid operation: %#v != %#v (%s)", - expected, actual, err), msgAndArgs...) - } - - if ObjectsAreEqual(expected, actual) { - return Fail(t, fmt.Sprintf("Should not be: %#v\n", actual), msgAndArgs...) - } - - return true - -} - -// containsElement try loop over the list check if the list includes the element. -// return (false, false) if impossible. -// return (true, false) if element was not found. -// return (true, true) if element was found. 
-func includeElement(list interface{}, element interface{}) (ok, found bool) { - - listValue := reflect.ValueOf(list) - elementValue := reflect.ValueOf(element) - defer func() { - if e := recover(); e != nil { - ok = false - found = false - } - }() - - if reflect.TypeOf(list).Kind() == reflect.String { - return true, strings.Contains(listValue.String(), elementValue.String()) - } - - if reflect.TypeOf(list).Kind() == reflect.Map { - mapKeys := listValue.MapKeys() - for i := 0; i < len(mapKeys); i++ { - if ObjectsAreEqual(mapKeys[i].Interface(), element) { - return true, true - } - } - return true, false - } - - for i := 0; i < listValue.Len(); i++ { - if ObjectsAreEqual(listValue.Index(i).Interface(), element) { - return true, true - } - } - return true, false - -} - -// Contains asserts that the specified string, list(array, slice...) or map contains the -// specified substring or element. -// -// assert.Contains(t, "Hello World", "World") -// assert.Contains(t, ["Hello", "World"], "World") -// assert.Contains(t, {"Hello": "World"}, "Hello") -func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - ok, found := includeElement(s, contains) - if !ok { - return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...) - } - if !found { - return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", s, contains), msgAndArgs...) - } - - return true - -} - -// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the -// specified substring or element. -// -// assert.NotContains(t, "Hello World", "Earth") -// assert.NotContains(t, ["Hello", "World"], "Earth") -// assert.NotContains(t, {"Hello": "World"}, "Earth") -func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - ok, found := includeElement(s, contains) - if !ok { - return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...) - } - if found { - return Fail(t, fmt.Sprintf("\"%s\" should not contain \"%s\"", s, contains), msgAndArgs...) - } - - return true - -} - -// Subset asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). -// -// assert.Subset(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") -func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if subset == nil { - return true // we consider nil to be equal to the nil set - } - - subsetValue := reflect.ValueOf(subset) - defer func() { - if e := recover(); e != nil { - ok = false - } - }() - - listKind := reflect.TypeOf(list).Kind() - subsetKind := reflect.TypeOf(subset).Kind() - - if listKind != reflect.Array && listKind != reflect.Slice { - return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...) - } - - if subsetKind != reflect.Array && subsetKind != reflect.Slice { - return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) - } - - for i := 0; i < subsetValue.Len(); i++ { - element := subsetValue.Index(i).Interface() - ok, found := includeElement(list, element) - if !ok { - return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...) - } - if !found { - return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", list, element), msgAndArgs...) 
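
Worth noting from includeElement above: for maps it iterates MapKeys, so Contains matches keys, never values, while strings fall back to strings.Contains. For illustration (package-level form):

    package example

    import (
        "testing"

        "github.com/stretchr/testify/assert"
    )

    func TestContainsSemanticsSketch(t *testing.T) {
        m := map[string]string{"Hello": "World"}

        assert.Contains(t, m, "Hello")              // matches a key
        assert.NotContains(t, m, "World")           // values are not searched
        assert.Contains(t, "Hello World", "World") // strings use strings.Contains
    }
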
- } - } - - return true -} - -// NotSubset asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). -// -// assert.NotSubset(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") -func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if subset == nil { - return Fail(t, fmt.Sprintf("nil is the empty set which is a subset of every set"), msgAndArgs...) - } - - subsetValue := reflect.ValueOf(subset) - defer func() { - if e := recover(); e != nil { - ok = false - } - }() - - listKind := reflect.TypeOf(list).Kind() - subsetKind := reflect.TypeOf(subset).Kind() - - if listKind != reflect.Array && listKind != reflect.Slice { - return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...) - } - - if subsetKind != reflect.Array && subsetKind != reflect.Slice { - return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) - } - - for i := 0; i < subsetValue.Len(); i++ { - element := subsetValue.Index(i).Interface() - ok, found := includeElement(list, element) - if !ok { - return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...) - } - if !found { - return true - } - } - - return Fail(t, fmt.Sprintf("%q is a subset of %q", subset, list), msgAndArgs...) -} - -// ElementsMatch asserts that the specified listA(array, slice...) is equal to specified -// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, -// the number of appearances of each of them in both lists should match. -// -// assert.ElementsMatch(t, [1, 3, 2, 3], [1, 3, 3, 2]) -func ElementsMatch(t TestingT, listA, listB interface{}, msgAndArgs ...interface{}) (ok bool) { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if isEmpty(listA) && isEmpty(listB) { - return true - } - - aKind := reflect.TypeOf(listA).Kind() - bKind := reflect.TypeOf(listB).Kind() - - if aKind != reflect.Array && aKind != reflect.Slice { - return Fail(t, fmt.Sprintf("%q has an unsupported type %s", listA, aKind), msgAndArgs...) - } - - if bKind != reflect.Array && bKind != reflect.Slice { - return Fail(t, fmt.Sprintf("%q has an unsupported type %s", listB, bKind), msgAndArgs...) - } - - aValue := reflect.ValueOf(listA) - bValue := reflect.ValueOf(listB) - - aLen := aValue.Len() - bLen := bValue.Len() - - if aLen != bLen { - return Fail(t, fmt.Sprintf("lengths don't match: %d != %d", aLen, bLen), msgAndArgs...) - } - - // Mark indexes in bValue that we already used - visited := make([]bool, bLen) - for i := 0; i < aLen; i++ { - element := aValue.Index(i).Interface() - found := false - for j := 0; j < bLen; j++ { - if visited[j] { - continue - } - if ObjectsAreEqual(bValue.Index(j).Interface(), element) { - visited[j] = true - found = true - break - } - } - if !found { - return Fail(t, fmt.Sprintf("element %s appears more times in %s than in %s", element, aValue, bValue), msgAndArgs...) - } - } - - return true -} - -// Condition uses a Comparison to assert a complex condition. -func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - result := comp() - if !result { - Fail(t, "Condition failed!", msgAndArgs...) 
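
Condition, whose body is split across this hunk, simply evaluates the Comparison type defined earlier; a minimal sketch:

    package example

    import (
        "testing"

        "github.com/stretchr/testify/assert"
    )

    func TestConditionSketch(t *testing.T) {
        // Comparison is just func() (success bool); Condition fails the
        // test when the closure returns false.
        assert.Condition(t, func() bool { return len("abc") == 3 })
    }
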
- } - return result -} - -// PanicTestFunc defines a func that should be passed to the assert.Panics and assert.NotPanics -// methods, and represents a simple func that takes no arguments, and returns nothing. -type PanicTestFunc func() - -// didPanic returns true if the function passed to it panics. Otherwise, it returns false. -func didPanic(f PanicTestFunc) (bool, interface{}) { - - didPanic := false - var message interface{} - func() { - - defer func() { - if message = recover(); message != nil { - didPanic = true - } - }() - - // call the target function - f() - - }() - - return didPanic, message - -} - -// Panics asserts that the code inside the specified PanicTestFunc panics. -// -// assert.Panics(t, func(){ GoCrazy() }) -func Panics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - if funcDidPanic, panicValue := didPanic(f); !funcDidPanic { - return Fail(t, fmt.Sprintf("func %#v should panic\n\tPanic value:\t%#v", f, panicValue), msgAndArgs...) - } - - return true -} - -// PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that -// the recovered panic value equals the expected panic value. -// -// assert.PanicsWithValue(t, "crazy error", func(){ GoCrazy() }) -func PanicsWithValue(t TestingT, expected interface{}, f PanicTestFunc, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - funcDidPanic, panicValue := didPanic(f) - if !funcDidPanic { - return Fail(t, fmt.Sprintf("func %#v should panic\n\tPanic value:\t%#v", f, panicValue), msgAndArgs...) - } - if panicValue != expected { - return Fail(t, fmt.Sprintf("func %#v should panic with value:\t%#v\n\tPanic value:\t%#v", f, expected, panicValue), msgAndArgs...) - } - - return true -} - -// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. -// -// assert.NotPanics(t, func(){ RemainCalm() }) -func NotPanics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - if funcDidPanic, panicValue := didPanic(f); funcDidPanic { - return Fail(t, fmt.Sprintf("func %#v should not panic\n\tPanic value:\t%v", f, panicValue), msgAndArgs...) - } - - return true -} - -// WithinDuration asserts that the two times are within duration delta of each other. -// -// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second) -func WithinDuration(t TestingT, expected, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - dt := expected.Sub(actual) - if dt < -delta || dt > delta { - return Fail(t, fmt.Sprintf("Max difference between %v and %v allowed is %v, but difference was %v", expected, actual, delta, dt), msgAndArgs...) - } - - return true -} - -func toFloat(x interface{}) (float64, bool) { - var xf float64 - xok := true - - switch xn := x.(type) { - case uint8: - xf = float64(xn) - case uint16: - xf = float64(xn) - case uint32: - xf = float64(xn) - case uint64: - xf = float64(xn) - case int: - xf = float64(xn) - case int8: - xf = float64(xn) - case int16: - xf = float64(xn) - case int32: - xf = float64(xn) - case int64: - xf = float64(xn) - case float32: - xf = float64(xn) - case float64: - xf = float64(xn) - case time.Duration: - xf = float64(xn) - default: - xok = false - } - - return xf, xok -} - -// InDelta asserts that the two numerals are within delta of each other. 
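
PanicsWithValue above compares the recovered value with ==, so error values must be the same instance, and WithinDuration accepts a deviation in either direction; a hypothetical sketch:

    package example

    import (
        "errors"
        "testing"
        "time"

        "github.com/stretchr/testify/assert"
    )

    func TestPanicValueAndTimeSketch(t *testing.T) {
        a := assert.New(t)

        sentinel := errors.New("boom")
        // The same instance must be recovered, since comparison uses ==.
        a.PanicsWithValue(sentinel, func() { panic(sentinel) })

        now := time.Now()
        a.WithinDuration(now, now.Add(5*time.Second), 10*time.Second)
    }
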
-// -// assert.InDelta(t, math.Pi, (22 / 7.0), 0.01) -func InDelta(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - af, aok := toFloat(expected) - bf, bok := toFloat(actual) - - if !aok || !bok { - return Fail(t, fmt.Sprintf("Parameters must be numerical"), msgAndArgs...) - } - - if math.IsNaN(af) { - return Fail(t, fmt.Sprintf("Expected must not be NaN"), msgAndArgs...) - } - - if math.IsNaN(bf) { - return Fail(t, fmt.Sprintf("Expected %v with delta %v, but was NaN", expected, delta), msgAndArgs...) - } - - dt := af - bf - if dt < -delta || dt > delta { - return Fail(t, fmt.Sprintf("Max difference between %v and %v allowed is %v, but difference was %v", expected, actual, delta, dt), msgAndArgs...) - } - - return true -} - -// InDeltaSlice is the same as InDelta, except it compares two slices. -func InDeltaSlice(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if expected == nil || actual == nil || - reflect.TypeOf(actual).Kind() != reflect.Slice || - reflect.TypeOf(expected).Kind() != reflect.Slice { - return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...) - } - - actualSlice := reflect.ValueOf(actual) - expectedSlice := reflect.ValueOf(expected) - - for i := 0; i < actualSlice.Len(); i++ { - result := InDelta(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), delta, msgAndArgs...) - if !result { - return result - } - } - - return true -} - -// InDeltaMapValues is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys. -func InDeltaMapValues(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if expected == nil || actual == nil || - reflect.TypeOf(actual).Kind() != reflect.Map || - reflect.TypeOf(expected).Kind() != reflect.Map { - return Fail(t, "Arguments must be maps", msgAndArgs...) - } - - expectedMap := reflect.ValueOf(expected) - actualMap := reflect.ValueOf(actual) - - if expectedMap.Len() != actualMap.Len() { - return Fail(t, "Arguments must have the same number of keys", msgAndArgs...) - } - - for _, k := range expectedMap.MapKeys() { - ev := expectedMap.MapIndex(k) - av := actualMap.MapIndex(k) - - if !ev.IsValid() { - return Fail(t, fmt.Sprintf("missing key %q in expected map", k), msgAndArgs...) - } - - if !av.IsValid() { - return Fail(t, fmt.Sprintf("missing key %q in actual map", k), msgAndArgs...) 
- } - - if !InDelta( - t, - ev.Interface(), - av.Interface(), - delta, - msgAndArgs..., - ) { - return false - } - } - - return true -} - -func calcRelativeError(expected, actual interface{}) (float64, error) { - af, aok := toFloat(expected) - if !aok { - return 0, fmt.Errorf("expected value %q cannot be converted to float", expected) - } - if af == 0 { - return 0, fmt.Errorf("expected value must have a value other than zero to calculate the relative error") - } - bf, bok := toFloat(actual) - if !bok { - return 0, fmt.Errorf("actual value %q cannot be converted to float", actual) - } - - return math.Abs(af-bf) / math.Abs(af), nil -} - -// InEpsilon asserts that expected and actual have a relative error less than epsilon -func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - actualEpsilon, err := calcRelativeError(expected, actual) - if err != nil { - return Fail(t, err.Error(), msgAndArgs...) - } - if actualEpsilon > epsilon { - return Fail(t, fmt.Sprintf("Relative error is too high: %#v (expected)\n"+ - " < %#v (actual)", epsilon, actualEpsilon), msgAndArgs...) - } - - return true -} - -// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices. -func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if expected == nil || actual == nil || - reflect.TypeOf(actual).Kind() != reflect.Slice || - reflect.TypeOf(expected).Kind() != reflect.Slice { - return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...) - } - - actualSlice := reflect.ValueOf(actual) - expectedSlice := reflect.ValueOf(expected) - - for i := 0; i < actualSlice.Len(); i++ { - result := InEpsilon(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), epsilon) - if !result { - return result - } - } - - return true -} - -/* - Errors -*/ - -// NoError asserts that a function returned no error (i.e. `nil`). -// -// actualObj, err := SomeFunction() -// if assert.NoError(t, err) { -// assert.Equal(t, expectedObj, actualObj) -// } -func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if err != nil { - return Fail(t, fmt.Sprintf("Received unexpected error:\n%+v", err), msgAndArgs...) - } - - return true -} - -// Error asserts that a function returned an error (i.e. not `nil`). -// -// actualObj, err := SomeFunction() -// if assert.Error(t, err) { -// assert.Equal(t, expectedError, err) -// } -func Error(t TestingT, err error, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - if err == nil { - return Fail(t, "An error is expected but got nil.", msgAndArgs...) - } - - return true -} - -// EqualError asserts that a function returned an error (i.e. not `nil`) -// and that it is equal to the provided error. -// -// actualObj, err := SomeFunction() -// assert.EqualError(t, err, expectedErrorString) -func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if !Error(t, theError, msgAndArgs...) 
{ - return false - } - expected := errString - actual := theError.Error() - // don't need to use deep equals here, we know they are both strings - if expected != actual { - return Fail(t, fmt.Sprintf("Error message not equal:\n"+ - "expected: %q\n"+ - "actual : %q", expected, actual), msgAndArgs...) - } - return true -} - -// matchRegexp returns true if a specified regexp matches a string. -func matchRegexp(rx interface{}, str interface{}) bool { - - var r *regexp.Regexp - if rr, ok := rx.(*regexp.Regexp); ok { - r = rr - } else { - r = regexp.MustCompile(fmt.Sprint(rx)) - } - - return (r.FindStringIndex(fmt.Sprint(str)) != nil) - -} - -// Regexp asserts that a specified regexp matches a string. -// -// assert.Regexp(t, regexp.MustCompile("start"), "it's starting") -// assert.Regexp(t, "start...$", "it's not starting") -func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - match := matchRegexp(rx, str) - - if !match { - Fail(t, fmt.Sprintf("Expect \"%v\" to match \"%v\"", str, rx), msgAndArgs...) - } - - return match -} - -// NotRegexp asserts that a specified regexp does not match a string. -// -// assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting") -// assert.NotRegexp(t, "^start", "it's not starting") -func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - match := matchRegexp(rx, str) - - if match { - Fail(t, fmt.Sprintf("Expect \"%v\" to NOT match \"%v\"", str, rx), msgAndArgs...) - } - - return !match - -} - -// Zero asserts that i is the zero value for its type. -func Zero(t TestingT, i interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if i != nil && !reflect.DeepEqual(i, reflect.Zero(reflect.TypeOf(i)).Interface()) { - return Fail(t, fmt.Sprintf("Should be zero, but was %v", i), msgAndArgs...) - } - return true -} - -// NotZero asserts that i is not the zero value for its type. -func NotZero(t TestingT, i interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if i == nil || reflect.DeepEqual(i, reflect.Zero(reflect.TypeOf(i)).Interface()) { - return Fail(t, fmt.Sprintf("Should not be zero, but was %v", i), msgAndArgs...) - } - return true -} - -// FileExists checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file. -func FileExists(t TestingT, path string, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - info, err := os.Lstat(path) - if err != nil { - if os.IsNotExist(err) { - return Fail(t, fmt.Sprintf("unable to find file %q", path), msgAndArgs...) - } - return Fail(t, fmt.Sprintf("error when running os.Lstat(%q): %s", path, err), msgAndArgs...) - } - if info.IsDir() { - return Fail(t, fmt.Sprintf("%q is a directory", path), msgAndArgs...) - } - return true -} - -// DirExists checks whether a directory exists in the given path. It also fails if the path is a file rather than a directory or there is an error checking whether it exists. -func DirExists(t TestingT, path string, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - info, err := os.Lstat(path) - if err != nil { - if os.IsNotExist(err) { - return Fail(t, fmt.Sprintf("unable to find file %q", path), msgAndArgs...)
- } - return Fail(t, fmt.Sprintf("error when running os.Lstat(%q): %s", path, err), msgAndArgs...) - } - if !info.IsDir() { - return Fail(t, fmt.Sprintf("%q is a file", path), msgAndArgs...) - } - return true -} - -// JSONEq asserts that two JSON strings are equivalent. -// -// assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) -func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - var expectedJSONAsInterface, actualJSONAsInterface interface{} - - if err := json.Unmarshal([]byte(expected), &expectedJSONAsInterface); err != nil { - return Fail(t, fmt.Sprintf("Expected value ('%s') is not valid json.\nJSON parsing error: '%s'", expected, err.Error()), msgAndArgs...) - } - - if err := json.Unmarshal([]byte(actual), &actualJSONAsInterface); err != nil { - return Fail(t, fmt.Sprintf("Input ('%s') needs to be valid json.\nJSON parsing error: '%s'", actual, err.Error()), msgAndArgs...) - } - - return Equal(t, expectedJSONAsInterface, actualJSONAsInterface, msgAndArgs...) -} - -func typeAndKind(v interface{}) (reflect.Type, reflect.Kind) { - t := reflect.TypeOf(v) - k := t.Kind() - - if k == reflect.Ptr { - t = t.Elem() - k = t.Kind() - } - return t, k -} - -// diff returns a diff of both values as long as both are of the same type and -// are a struct, map, slice or array. Otherwise it returns an empty string. -func diff(expected interface{}, actual interface{}) string { - if expected == nil || actual == nil { - return "" - } - - et, ek := typeAndKind(expected) - at, _ := typeAndKind(actual) - - if et != at { - return "" - } - - if ek != reflect.Struct && ek != reflect.Map && ek != reflect.Slice && ek != reflect.Array && ek != reflect.String { - return "" - } - - var e, a string - if ek != reflect.String { - e = spewConfig.Sdump(expected) - a = spewConfig.Sdump(actual) - } else { - e = expected.(string) - a = actual.(string) - } - - diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{ - A: difflib.SplitLines(e), - B: difflib.SplitLines(a), - FromFile: "Expected", - FromDate: "", - ToFile: "Actual", - ToDate: "", - Context: 1, - }) - - return "\n\nDiff:\n" + diff -} - -// validateEqualArgs checks whether provided arguments can be safely used in the -// Equal/NotEqual functions. -func validateEqualArgs(expected, actual interface{}) error { - if isFunction(expected) || isFunction(actual) { - return errors.New("cannot take func type as argument") - } - return nil -} - -func isFunction(arg interface{}) bool { - if arg == nil { - return false - } - return reflect.TypeOf(arg).Kind() == reflect.Func -} - -var spewConfig = spew.ConfigState{ - Indent: " ", - DisablePointerAddresses: true, - DisableCapacities: true, - SortKeys: true, -} - -type tHelper interface { - Helper() -} diff --git a/vendor/github.com/stretchr/testify/assert/doc.go b/vendor/github.com/stretchr/testify/assert/doc.go deleted file mode 100644 index c9dccc4..0000000 --- a/vendor/github.com/stretchr/testify/assert/doc.go +++ /dev/null @@ -1,45 +0,0 @@ -// Package assert provides a set of comprehensive testing tools for use with the normal Go testing system. 
-// -// Example Usage -// -// The following is a complete example using assert in a standard test function: -// import ( -// "testing" -// "github.com/stretchr/testify/assert" -// ) -// -// func TestSomething(t *testing.T) { -// -// var a string = "Hello" -// var b string = "Hello" -// -// assert.Equal(t, a, b, "The two words should be the same.") -// -// } -// -// if you assert many times, use the format below: -// -// import ( -// "testing" -// "github.com/stretchr/testify/assert" -// ) -// -// func TestSomething(t *testing.T) { -// assert := assert.New(t) -// -// var a string = "Hello" -// var b string = "Hello" -// -// assert.Equal(a, b, "The two words should be the same.") -// } -// -// Assertions -// -// Assertions allow you to easily write test code, and are global funcs in the `assert` package. -// All assertion functions take, as the first argument, the `*testing.T` object provided by the -// testing framework. This allows the assertion funcs to write the failings and other details to -// the correct place. -// -// Every assertion function also takes an optional string message as the final argument, -// allowing custom error messages to be appended to the message the assertion method outputs. -package assert diff --git a/vendor/github.com/stretchr/testify/assert/errors.go b/vendor/github.com/stretchr/testify/assert/errors.go deleted file mode 100644 index ac9dc9d..0000000 --- a/vendor/github.com/stretchr/testify/assert/errors.go +++ /dev/null @@ -1,10 +0,0 @@ -package assert - -import ( - "errors" -) - -// AnError is an error instance useful for testing. If the code does not care -// about error specifics, and only needs to return the error for example, this -// error should be used to make the test code more readable. -var AnError = errors.New("assert.AnError general error for testing") diff --git a/vendor/github.com/stretchr/testify/assert/forward_assertions.go b/vendor/github.com/stretchr/testify/assert/forward_assertions.go deleted file mode 100644 index 9ad5685..0000000 --- a/vendor/github.com/stretchr/testify/assert/forward_assertions.go +++ /dev/null @@ -1,16 +0,0 @@ -package assert - -// Assertions provides assertion methods around the -// TestingT interface. -type Assertions struct { - t TestingT -} - -// New makes a new Assertions object for the specified TestingT. -func New(t TestingT) *Assertions { - return &Assertions{ - t: t, - } -} - -//go:generate go run ../_codegen/main.go -output-package=assert -template=assertion_forward.go.tmpl -include-format-funcs diff --git a/vendor/github.com/stretchr/testify/assert/http_assertions.go b/vendor/github.com/stretchr/testify/assert/http_assertions.go deleted file mode 100644 index df46fa7..0000000 --- a/vendor/github.com/stretchr/testify/assert/http_assertions.go +++ /dev/null @@ -1,143 +0,0 @@ -package assert - -import ( - "fmt" - "net/http" - "net/http/httptest" - "net/url" - "strings" -) - -// httpCode is a helper that returns HTTP code of the response. It returns -1 and -// an error if building a new request fails. -func httpCode(handler http.HandlerFunc, method, url string, values url.Values) (int, error) { - w := httptest.NewRecorder() - req, err := http.NewRequest(method, url, nil) - if err != nil { - return -1, err - } - req.URL.RawQuery = values.Encode() - handler(w, req) - return w.Code, nil -} - -// HTTPSuccess asserts that a specified handler returns a success status code. 
-// -// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil) -// -// Returns whether the assertion was successful (true) or not (false). -func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - code, err := httpCode(handler, method, url, values) - if err != nil { - Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) - return false - } - - isSuccessCode := code >= http.StatusOK && code <= http.StatusPartialContent - if !isSuccessCode { - Fail(t, fmt.Sprintf("Expected HTTP success status code for %q but received %d", url+"?"+values.Encode(), code)) - } - - return isSuccessCode -} - -// HTTPRedirect asserts that a specified handler returns a redirect status code. -// -// assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}) -// -// Returns whether the assertion was successful (true) or not (false). -func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - code, err := httpCode(handler, method, url, values) - if err != nil { - Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) - return false - } - - isRedirectCode := code >= http.StatusMultipleChoices && code <= http.StatusTemporaryRedirect - if !isRedirectCode { - Fail(t, fmt.Sprintf("Expected HTTP redirect status code for %q but received %d", url+"?"+values.Encode(), code)) - } - - return isRedirectCode -} - -// HTTPError asserts that a specified handler returns an error status code. -// -// assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}) -// -// Returns whether the assertion was successful (true) or not (false). -func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - code, err := httpCode(handler, method, url, values) - if err != nil { - Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) - return false - } - - isErrorCode := code >= http.StatusBadRequest - if !isErrorCode { - Fail(t, fmt.Sprintf("Expected HTTP error status code for %q but received %d", url+"?"+values.Encode(), code)) - } - - return isErrorCode -} - -// HTTPBody is a helper that returns the HTTP body of the response. It returns -// an empty string if building a new request fails. -func HTTPBody(handler http.HandlerFunc, method, url string, values url.Values) string { - w := httptest.NewRecorder() - req, err := http.NewRequest(method, url+"?"+values.Encode(), nil) - if err != nil { - return "" - } - handler(w, req) - return w.Body.String() -} - -// HTTPBodyContains asserts that a specified handler returns a -// body that contains a string. -// -// assert.HTTPBodyContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") -// -// Returns whether the assertion was successful (true) or not (false).
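These HTTP assertions drive the handler through net/http/httptest recorders rather than a live server, so no listener is needed. A hedged usage sketch (healthHandler is a made-up handler, not part of this repository):

```Go
package example_test

import (
	"fmt"
	"net/http"
	"net/url"
	"testing"

	"github.com/stretchr/testify/assert"
)

// healthHandler is a stand-in handler used only for illustration.
func healthHandler(w http.ResponseWriter, r *http.Request) {
	fmt.Fprintf(w, "ok name=%s", r.URL.Query().Get("name"))
}

func TestHealthSketch(t *testing.T) {
	// httpCode runs the handler against an httptest.NewRecorder,
	// so both checks complete without opening a socket.
	assert.HTTPSuccess(t, healthHandler, "GET", "/health", nil)
	assert.HTTPBodyContains(t, healthHandler, "GET", "/health",
		url.Values{"name": []string{"sql-runner"}}, "name=sql-runner")
}
```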
-func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - body := HTTPBody(handler, method, url, values) - - contains := strings.Contains(body, fmt.Sprint(str)) - if !contains { - Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body)) - } - - return contains -} - -// HTTPBodyNotContains asserts that a specified handler returns a -// body that does not contain a string. -// -// assert.HTTPBodyNotContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") -// -// Returns whether the assertion was successful (true) or not (false). -func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - body := HTTPBody(handler, method, url, values) - - contains := strings.Contains(body, fmt.Sprint(str)) - if contains { - Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body)) - } - - return !contains -} diff --git a/vendor/golang.org/x/crypto/AUTHORS b/vendor/golang.org/x/crypto/AUTHORS deleted file mode 100644 index 2b00ddb..0000000 --- a/vendor/golang.org/x/crypto/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at https://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/crypto/CONTRIBUTORS b/vendor/golang.org/x/crypto/CONTRIBUTORS deleted file mode 100644 index 1fbd3e9..0000000 --- a/vendor/golang.org/x/crypto/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at https://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/crypto/LICENSE b/vendor/golang.org/x/crypto/LICENSE deleted file mode 100644 index 6a66aea..0000000 --- a/vendor/golang.org/x/crypto/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/crypto/PATENTS b/vendor/golang.org/x/crypto/PATENTS deleted file mode 100644 index 7330990..0000000 --- a/vendor/golang.org/x/crypto/PATENTS +++ /dev/null @@ -1,22 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the Go project. - -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of Go, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of Go. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of Go or any code incorporated within this -implementation of Go constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of Go -shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/crypto/ocsp/ocsp.go b/vendor/golang.org/x/crypto/ocsp/ocsp.go deleted file mode 100644 index 5edc9c9..0000000 --- a/vendor/golang.org/x/crypto/ocsp/ocsp.go +++ /dev/null @@ -1,781 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package ocsp parses OCSP responses as specified in RFC 2560. OCSP responses -// are signed messages attesting to the validity of a certificate for a small -// period of time. This is used to manage revocation for X.509 certificates. -package ocsp // import "golang.org/x/crypto/ocsp" - -import ( - "crypto" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/rsa" - _ "crypto/sha1" - _ "crypto/sha256" - _ "crypto/sha512" - "crypto/x509" - "crypto/x509/pkix" - "encoding/asn1" - "errors" - "fmt" - "math/big" - "strconv" - "time" -) - -var idPKIXOCSPBasic = asn1.ObjectIdentifier([]int{1, 3, 6, 1, 5, 5, 7, 48, 1, 1}) - -// ResponseStatus contains the result of an OCSP request. See -// https://tools.ietf.org/html/rfc6960#section-2.3 -type ResponseStatus int - -const ( - Success ResponseStatus = 0 - Malformed ResponseStatus = 1 - InternalError ResponseStatus = 2 - TryLater ResponseStatus = 3 - // Status code four is unused in OCSP. 
See - // https://tools.ietf.org/html/rfc6960#section-4.2.1 - SignatureRequired ResponseStatus = 5 - Unauthorized ResponseStatus = 6 -) - -func (r ResponseStatus) String() string { - switch r { - case Success: - return "success" - case Malformed: - return "malformed" - case InternalError: - return "internal error" - case TryLater: - return "try later" - case SignatureRequired: - return "signature required" - case Unauthorized: - return "unauthorized" - default: - return "unknown OCSP status: " + strconv.Itoa(int(r)) - } -} - -// ResponseError is an error that may be returned by ParseResponse to indicate -// that the response itself is an error, not just that it's indicating that a -// certificate is revoked, unknown, etc. -type ResponseError struct { - Status ResponseStatus -} - -func (r ResponseError) Error() string { - return "ocsp: error from server: " + r.Status.String() -} - -// These are internal structures that reflect the ASN.1 structure of an OCSP -// response. See RFC 2560, section 4.2. - -type certID struct { - HashAlgorithm pkix.AlgorithmIdentifier - NameHash []byte - IssuerKeyHash []byte - SerialNumber *big.Int -} - -// https://tools.ietf.org/html/rfc2560#section-4.1.1 -type ocspRequest struct { - TBSRequest tbsRequest -} - -type tbsRequest struct { - Version int `asn1:"explicit,tag:0,default:0,optional"` - RequestorName pkix.RDNSequence `asn1:"explicit,tag:1,optional"` - RequestList []request -} - -type request struct { - Cert certID -} - -type responseASN1 struct { - Status asn1.Enumerated - Response responseBytes `asn1:"explicit,tag:0,optional"` -} - -type responseBytes struct { - ResponseType asn1.ObjectIdentifier - Response []byte -} - -type basicResponse struct { - TBSResponseData responseData - SignatureAlgorithm pkix.AlgorithmIdentifier - Signature asn1.BitString - Certificates []asn1.RawValue `asn1:"explicit,tag:0,optional"` -} - -type responseData struct { - Raw asn1.RawContent - Version int `asn1:"optional,default:0,explicit,tag:0"` - RawResponderID asn1.RawValue - ProducedAt time.Time `asn1:"generalized"` - Responses []singleResponse -} - -type singleResponse struct { - CertID certID - Good asn1.Flag `asn1:"tag:0,optional"` - Revoked revokedInfo `asn1:"tag:1,optional"` - Unknown asn1.Flag `asn1:"tag:2,optional"` - ThisUpdate time.Time `asn1:"generalized"` - NextUpdate time.Time `asn1:"generalized,explicit,tag:0,optional"` - SingleExtensions []pkix.Extension `asn1:"explicit,tag:1,optional"` -} - -type revokedInfo struct { - RevocationTime time.Time `asn1:"generalized"` - Reason asn1.Enumerated `asn1:"explicit,tag:0,optional"` -} - -var ( - oidSignatureMD2WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 2} - oidSignatureMD5WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 4} - oidSignatureSHA1WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 5} - oidSignatureSHA256WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 11} - oidSignatureSHA384WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 12} - oidSignatureSHA512WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 13} - oidSignatureDSAWithSHA1 = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 3} - oidSignatureDSAWithSHA256 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 3, 2} - oidSignatureECDSAWithSHA1 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 1} - oidSignatureECDSAWithSHA256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 2} - oidSignatureECDSAWithSHA384 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 3} - oidSignatureECDSAWithSHA512 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3,
4} -) - -var hashOIDs = map[crypto.Hash]asn1.ObjectIdentifier{ - crypto.SHA1: asn1.ObjectIdentifier([]int{1, 3, 14, 3, 2, 26}), - crypto.SHA256: asn1.ObjectIdentifier([]int{2, 16, 840, 1, 101, 3, 4, 2, 1}), - crypto.SHA384: asn1.ObjectIdentifier([]int{2, 16, 840, 1, 101, 3, 4, 2, 2}), - crypto.SHA512: asn1.ObjectIdentifier([]int{2, 16, 840, 1, 101, 3, 4, 2, 3}), -} - -// TODO(rlb): This is also from crypto/x509, so same comment as AGL's below -var signatureAlgorithmDetails = []struct { - algo x509.SignatureAlgorithm - oid asn1.ObjectIdentifier - pubKeyAlgo x509.PublicKeyAlgorithm - hash crypto.Hash -}{ - {x509.MD2WithRSA, oidSignatureMD2WithRSA, x509.RSA, crypto.Hash(0) /* no value for MD2 */}, - {x509.MD5WithRSA, oidSignatureMD5WithRSA, x509.RSA, crypto.MD5}, - {x509.SHA1WithRSA, oidSignatureSHA1WithRSA, x509.RSA, crypto.SHA1}, - {x509.SHA256WithRSA, oidSignatureSHA256WithRSA, x509.RSA, crypto.SHA256}, - {x509.SHA384WithRSA, oidSignatureSHA384WithRSA, x509.RSA, crypto.SHA384}, - {x509.SHA512WithRSA, oidSignatureSHA512WithRSA, x509.RSA, crypto.SHA512}, - {x509.DSAWithSHA1, oidSignatureDSAWithSHA1, x509.DSA, crypto.SHA1}, - {x509.DSAWithSHA256, oidSignatureDSAWithSHA256, x509.DSA, crypto.SHA256}, - {x509.ECDSAWithSHA1, oidSignatureECDSAWithSHA1, x509.ECDSA, crypto.SHA1}, - {x509.ECDSAWithSHA256, oidSignatureECDSAWithSHA256, x509.ECDSA, crypto.SHA256}, - {x509.ECDSAWithSHA384, oidSignatureECDSAWithSHA384, x509.ECDSA, crypto.SHA384}, - {x509.ECDSAWithSHA512, oidSignatureECDSAWithSHA512, x509.ECDSA, crypto.SHA512}, -} - -// TODO(rlb): This is also from crypto/x509, so same comment as AGL's below -func signingParamsForPublicKey(pub interface{}, requestedSigAlgo x509.SignatureAlgorithm) (hashFunc crypto.Hash, sigAlgo pkix.AlgorithmIdentifier, err error) { - var pubType x509.PublicKeyAlgorithm - - switch pub := pub.(type) { - case *rsa.PublicKey: - pubType = x509.RSA - hashFunc = crypto.SHA256 - sigAlgo.Algorithm = oidSignatureSHA256WithRSA - sigAlgo.Parameters = asn1.RawValue{ - Tag: 5, - } - - case *ecdsa.PublicKey: - pubType = x509.ECDSA - - switch pub.Curve { - case elliptic.P224(), elliptic.P256(): - hashFunc = crypto.SHA256 - sigAlgo.Algorithm = oidSignatureECDSAWithSHA256 - case elliptic.P384(): - hashFunc = crypto.SHA384 - sigAlgo.Algorithm = oidSignatureECDSAWithSHA384 - case elliptic.P521(): - hashFunc = crypto.SHA512 - sigAlgo.Algorithm = oidSignatureECDSAWithSHA512 - default: - err = errors.New("x509: unknown elliptic curve") - } - - default: - err = errors.New("x509: only RSA and ECDSA keys supported") - } - - if err != nil { - return - } - - if requestedSigAlgo == 0 { - return - } - - found := false - for _, details := range signatureAlgorithmDetails { - if details.algo == requestedSigAlgo { - if details.pubKeyAlgo != pubType { - err = errors.New("x509: requested SignatureAlgorithm does not match private key type") - return - } - sigAlgo.Algorithm, hashFunc = details.oid, details.hash - if hashFunc == 0 { - err = errors.New("x509: cannot sign with hash function requested") - return - } - found = true - break - } - } - - if !found { - err = errors.New("x509: unknown SignatureAlgorithm") - } - - return -} - -// TODO(agl): this is taken from crypto/x509 and so should probably be exported -// from crypto/x509 or crypto/x509/pkix. 
-func getSignatureAlgorithmFromOID(oid asn1.ObjectIdentifier) x509.SignatureAlgorithm { - for _, details := range signatureAlgorithmDetails { - if oid.Equal(details.oid) { - return details.algo - } - } - return x509.UnknownSignatureAlgorithm -} - -// TODO(rlb): This is not taken from crypto/x509, but it's of the same general form. -func getHashAlgorithmFromOID(target asn1.ObjectIdentifier) crypto.Hash { - for hash, oid := range hashOIDs { - if oid.Equal(target) { - return hash - } - } - return crypto.Hash(0) -} - -func getOIDFromHashAlgorithm(target crypto.Hash) asn1.ObjectIdentifier { - for hash, oid := range hashOIDs { - if hash == target { - return oid - } - } - return nil -} - -// This is the exposed reflection of the internal OCSP structures. - -// The status values that can be expressed in OCSP. See RFC 6960. -const ( - // Good means that the certificate is valid. - Good = iota - // Revoked means that the certificate has been deliberately revoked. - Revoked - // Unknown means that the OCSP responder doesn't know about the certificate. - Unknown - // ServerFailed is unused and was never used (see - // https://go-review.googlesource.com/#/c/18944). ParseResponse will - // return a ResponseError when an error response is parsed. - ServerFailed -) - -// The enumerated reasons for revoking a certificate. See RFC 5280. -const ( - Unspecified = 0 - KeyCompromise = 1 - CACompromise = 2 - AffiliationChanged = 3 - Superseded = 4 - CessationOfOperation = 5 - CertificateHold = 6 - - RemoveFromCRL = 8 - PrivilegeWithdrawn = 9 - AACompromise = 10 -) - -// Request represents an OCSP request. See RFC 6960. -type Request struct { - HashAlgorithm crypto.Hash - IssuerNameHash []byte - IssuerKeyHash []byte - SerialNumber *big.Int -} - -// Marshal marshals the OCSP request to ASN.1 DER encoded form. -func (req *Request) Marshal() ([]byte, error) { - hashAlg := getOIDFromHashAlgorithm(req.HashAlgorithm) - if hashAlg == nil { - return nil, errors.New("Unknown hash algorithm") - } - return asn1.Marshal(ocspRequest{ - tbsRequest{ - Version: 0, - RequestList: []request{ - { - Cert: certID{ - pkix.AlgorithmIdentifier{ - Algorithm: hashAlg, - Parameters: asn1.RawValue{Tag: 5 /* ASN.1 NULL */}, - }, - req.IssuerNameHash, - req.IssuerKeyHash, - req.SerialNumber, - }, - }, - }, - }, - }) -} - -// Response represents an OCSP response containing a single SingleResponse. See -// RFC 6960. -type Response struct { - // Status is one of {Good, Revoked, Unknown} - Status int - SerialNumber *big.Int - ProducedAt, ThisUpdate, NextUpdate, RevokedAt time.Time - RevocationReason int - Certificate *x509.Certificate - // TBSResponseData contains the raw bytes of the signed response. If - // Certificate is nil then this can be used to verify Signature. - TBSResponseData []byte - Signature []byte - SignatureAlgorithm x509.SignatureAlgorithm - - // IssuerHash is the hash used to compute the IssuerNameHash and IssuerKeyHash. - // Valid values are crypto.SHA1, crypto.SHA256, crypto.SHA384, and crypto.SHA512. - // If zero, the default is crypto.SHA1. - IssuerHash crypto.Hash - - // RawResponderName optionally contains the DER-encoded subject of the - // responder certificate. Exactly one of RawResponderName and - // ResponderKeyHash is set. - RawResponderName []byte - // ResponderKeyHash optionally contains the SHA-1 hash of the - // responder's public key. Exactly one of RawResponderName and - // ResponderKeyHash is set. 
- ResponderKeyHash []byte - - // Extensions contains raw X.509 extensions from the singleExtensions field - // of the OCSP response. When parsing certificates, this can be used to - // extract non-critical extensions that are not parsed by this package. When - // marshaling OCSP responses, the Extensions field is ignored, see - // ExtraExtensions. - Extensions []pkix.Extension - - // ExtraExtensions contains extensions to be copied, raw, into any marshaled - // OCSP response (in the singleExtensions field). Values override any - // extensions that would otherwise be produced based on the other fields. The - // ExtraExtensions field is not populated when parsing certificates, see - // Extensions. - ExtraExtensions []pkix.Extension -} - -// These are pre-serialized error responses for the various non-success codes -// defined by OCSP. The Unauthorized code in particular can be used by an OCSP -// responder that supports only pre-signed responses as a response to requests -// for certificates with unknown status. See RFC 5019. -var ( - MalformedRequestErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x01} - InternalErrorErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x02} - TryLaterErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x03} - SigRequredErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x05} - UnauthorizedErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x06} -) - -// CheckSignatureFrom checks that the signature in resp is a valid signature -// from issuer. This should only be used if resp.Certificate is nil. Otherwise, -// the OCSP response contained an intermediate certificate that created the -// signature. That signature is checked by ParseResponse and only -// resp.Certificate remains to be validated. -func (resp *Response) CheckSignatureFrom(issuer *x509.Certificate) error { - return issuer.CheckSignature(resp.SignatureAlgorithm, resp.TBSResponseData, resp.Signature) -} - -// ParseError results from an invalid OCSP response. -type ParseError string - -func (p ParseError) Error() string { - return string(p) -} - -// ParseRequest parses an OCSP request in DER form. It only supports -// requests for a single certificate. Signed requests are not supported. -// If a request includes a signature, it will result in a ParseError. -func ParseRequest(bytes []byte) (*Request, error) { - var req ocspRequest - rest, err := asn1.Unmarshal(bytes, &req) - if err != nil { - return nil, err - } - if len(rest) > 0 { - return nil, ParseError("trailing data in OCSP request") - } - - if len(req.TBSRequest.RequestList) == 0 { - return nil, ParseError("OCSP request contains no request body") - } - innerRequest := req.TBSRequest.RequestList[0] - - hashFunc := getHashAlgorithmFromOID(innerRequest.Cert.HashAlgorithm.Algorithm) - if hashFunc == crypto.Hash(0) { - return nil, ParseError("OCSP request uses unknown hash function") - } - - return &Request{ - HashAlgorithm: hashFunc, - IssuerNameHash: innerRequest.Cert.NameHash, - IssuerKeyHash: innerRequest.Cert.IssuerKeyHash, - SerialNumber: innerRequest.Cert.SerialNumber, - }, nil -} - -// ParseResponse parses an OCSP response in DER form. It only supports -// responses for a single certificate. If the response contains a certificate -// then the signature over the response is checked. If issuer is not nil then -// it will be used to validate the signature or embedded certificate. -// -// Invalid responses and parse failures will result in a ParseError. -// Error responses will result in a ResponseError. 
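For orientation, the parse side of this API is usually driven as below; the file name is a placeholder, and a nil issuer simply skips signature verification:

```Go
package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"golang.org/x/crypto/ocsp"
)

func main() {
	// der.resp stands in for a DER-encoded OCSP response obtained
	// elsewhere (for example, POSTed back from a responder).
	der, err := ioutil.ReadFile("der.resp")
	if err != nil {
		log.Fatal(err)
	}

	// With a nil issuer the signature is not verified; pass the issuing
	// certificate to have ParseResponse check it.
	resp, err := ocsp.ParseResponse(der, nil)
	if err != nil {
		log.Fatal(err)
	}
	if resp.Status == ocsp.Good {
		fmt.Println("certificate good until", resp.NextUpdate)
	}
}
```

The request side is symmetric: ocsp.CreateRequest(cert, issuer, nil), defined further down in this file, produces the DER bytes to send to the responder.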
-func ParseResponse(bytes []byte, issuer *x509.Certificate) (*Response, error) { - return ParseResponseForCert(bytes, nil, issuer) -} - -// ParseResponseForCert parses an OCSP response in DER form and searches for a -// Response relating to cert. If such a Response is found and the OCSP response -// contains a certificate then the signature over the response is checked. If -// issuer is not nil then it will be used to validate the signature or embedded -// certificate. -// -// Invalid responses and parse failures will result in a ParseError. -// Error responses will result in a ResponseError. -func ParseResponseForCert(bytes []byte, cert, issuer *x509.Certificate) (*Response, error) { - var resp responseASN1 - rest, err := asn1.Unmarshal(bytes, &resp) - if err != nil { - return nil, err - } - if len(rest) > 0 { - return nil, ParseError("trailing data in OCSP response") - } - - if status := ResponseStatus(resp.Status); status != Success { - return nil, ResponseError{status} - } - - if !resp.Response.ResponseType.Equal(idPKIXOCSPBasic) { - return nil, ParseError("bad OCSP response type") - } - - var basicResp basicResponse - rest, err = asn1.Unmarshal(resp.Response.Response, &basicResp) - if err != nil { - return nil, err - } - - if n := len(basicResp.TBSResponseData.Responses); n == 0 || cert == nil && n > 1 { - return nil, ParseError("OCSP response contains bad number of responses") - } - - var singleResp singleResponse - if cert == nil { - singleResp = basicResp.TBSResponseData.Responses[0] - } else { - match := false - for _, resp := range basicResp.TBSResponseData.Responses { - if cert.SerialNumber.Cmp(resp.CertID.SerialNumber) == 0 { - singleResp = resp - match = true - break - } - } - if !match { - return nil, ParseError("no response matching the supplied certificate") - } - } - - ret := &Response{ - TBSResponseData: basicResp.TBSResponseData.Raw, - Signature: basicResp.Signature.RightAlign(), - SignatureAlgorithm: getSignatureAlgorithmFromOID(basicResp.SignatureAlgorithm.Algorithm), - Extensions: singleResp.SingleExtensions, - SerialNumber: singleResp.CertID.SerialNumber, - ProducedAt: basicResp.TBSResponseData.ProducedAt, - ThisUpdate: singleResp.ThisUpdate, - NextUpdate: singleResp.NextUpdate, - } - - // Handle the ResponderID CHOICE tag. ResponderID can be flattened into - // TBSResponseData once https://go-review.googlesource.com/34503 has been - // released. - rawResponderID := basicResp.TBSResponseData.RawResponderID - switch rawResponderID.Tag { - case 1: // Name - var rdn pkix.RDNSequence - if rest, err := asn1.Unmarshal(rawResponderID.Bytes, &rdn); err != nil || len(rest) != 0 { - return nil, ParseError("invalid responder name") - } - ret.RawResponderName = rawResponderID.Bytes - case 2: // KeyHash - if rest, err := asn1.Unmarshal(rawResponderID.Bytes, &ret.ResponderKeyHash); err != nil || len(rest) != 0 { - return nil, ParseError("invalid responder key hash") - } - default: - return nil, ParseError("invalid responder id tag") - } - - if len(basicResp.Certificates) > 0 { - // Responders should only send a single certificate (if they - // send any) that connects the responder's certificate to the - // original issuer. We accept responses with multiple - // certificates due to a number of responders sending them[1], but - // ignore all but the first.
- // - // [1] https://github.com/golang/go/issues/21527 - ret.Certificate, err = x509.ParseCertificate(basicResp.Certificates[0].FullBytes) - if err != nil { - return nil, err - } - - if err := ret.CheckSignatureFrom(ret.Certificate); err != nil { - return nil, ParseError("bad signature on embedded certificate: " + err.Error()) - } - - if issuer != nil { - if err := issuer.CheckSignature(ret.Certificate.SignatureAlgorithm, ret.Certificate.RawTBSCertificate, ret.Certificate.Signature); err != nil { - return nil, ParseError("bad OCSP signature: " + err.Error()) - } - } - } else if issuer != nil { - if err := ret.CheckSignatureFrom(issuer); err != nil { - return nil, ParseError("bad OCSP signature: " + err.Error()) - } - } - - for _, ext := range singleResp.SingleExtensions { - if ext.Critical { - return nil, ParseError("unsupported critical extension") - } - } - - for h, oid := range hashOIDs { - if singleResp.CertID.HashAlgorithm.Algorithm.Equal(oid) { - ret.IssuerHash = h - break - } - } - if ret.IssuerHash == 0 { - return nil, ParseError("unsupported issuer hash algorithm") - } - - switch { - case bool(singleResp.Good): - ret.Status = Good - case bool(singleResp.Unknown): - ret.Status = Unknown - default: - ret.Status = Revoked - ret.RevokedAt = singleResp.Revoked.RevocationTime - ret.RevocationReason = int(singleResp.Revoked.Reason) - } - - return ret, nil -} - -// RequestOptions contains options for constructing OCSP requests. -type RequestOptions struct { - // Hash contains the hash function that should be used when - // constructing the OCSP request. If zero, SHA-1 will be used. - Hash crypto.Hash -} - -func (opts *RequestOptions) hash() crypto.Hash { - if opts == nil || opts.Hash == 0 { - // SHA-1 is nearly universally used in OCSP. - return crypto.SHA1 - } - return opts.Hash -} - -// CreateRequest returns a DER-encoded OCSP request for the status of cert. If -// opts is nil then sensible defaults are used. -func CreateRequest(cert, issuer *x509.Certificate, opts *RequestOptions) ([]byte, error) { - hashFunc := opts.hash() - - // OCSP seems to be the only place where these raw hash identifiers are - // used. I took the following from - // http://msdn.microsoft.com/en-us/library/ff635603.aspx - _, ok := hashOIDs[hashFunc] - if !ok { - return nil, x509.ErrUnsupportedAlgorithm - } - - if !hashFunc.Available() { - return nil, x509.ErrUnsupportedAlgorithm - } - h := opts.hash().New() - - var publicKeyInfo struct { - Algorithm pkix.AlgorithmIdentifier - PublicKey asn1.BitString - } - if _, err := asn1.Unmarshal(issuer.RawSubjectPublicKeyInfo, &publicKeyInfo); err != nil { - return nil, err - } - - h.Write(publicKeyInfo.PublicKey.RightAlign()) - issuerKeyHash := h.Sum(nil) - - h.Reset() - h.Write(issuer.RawSubject) - issuerNameHash := h.Sum(nil) - - req := &Request{ - HashAlgorithm: hashFunc, - IssuerNameHash: issuerNameHash, - IssuerKeyHash: issuerKeyHash, - SerialNumber: cert.SerialNumber, - } - return req.Marshal() -} - -// CreateResponse returns a DER-encoded OCSP response with the specified contents. -// The fields in the response are populated as follows: -// -// The responder cert is used to populate the responder's name field, and the -// certificate itself is provided alongside the OCSP response signature. -// -// The issuer cert is used to populate the IssuerNameHash and IssuerKeyHash fields. -// -// The template is used to populate the SerialNumber, Status, RevokedAt, -// RevocationReason, ThisUpdate, and NextUpdate fields.
-// -// If template.IssuerHash is not set, SHA1 will be used. -// -// The ProducedAt date is automatically set to the current date, to the nearest minute. -func CreateResponse(issuer, responderCert *x509.Certificate, template Response, priv crypto.Signer) ([]byte, error) { - var publicKeyInfo struct { - Algorithm pkix.AlgorithmIdentifier - PublicKey asn1.BitString - } - if _, err := asn1.Unmarshal(issuer.RawSubjectPublicKeyInfo, &publicKeyInfo); err != nil { - return nil, err - } - - if template.IssuerHash == 0 { - template.IssuerHash = crypto.SHA1 - } - hashOID := getOIDFromHashAlgorithm(template.IssuerHash) - if hashOID == nil { - return nil, errors.New("unsupported issuer hash algorithm") - } - - if !template.IssuerHash.Available() { - return nil, fmt.Errorf("issuer hash algorithm %v not linked into binary", template.IssuerHash) - } - h := template.IssuerHash.New() - h.Write(publicKeyInfo.PublicKey.RightAlign()) - issuerKeyHash := h.Sum(nil) - - h.Reset() - h.Write(issuer.RawSubject) - issuerNameHash := h.Sum(nil) - - innerResponse := singleResponse{ - CertID: certID{ - HashAlgorithm: pkix.AlgorithmIdentifier{ - Algorithm: hashOID, - Parameters: asn1.RawValue{Tag: 5 /* ASN.1 NULL */}, - }, - NameHash: issuerNameHash, - IssuerKeyHash: issuerKeyHash, - SerialNumber: template.SerialNumber, - }, - ThisUpdate: template.ThisUpdate.UTC(), - NextUpdate: template.NextUpdate.UTC(), - SingleExtensions: template.ExtraExtensions, - } - - switch template.Status { - case Good: - innerResponse.Good = true - case Unknown: - innerResponse.Unknown = true - case Revoked: - innerResponse.Revoked = revokedInfo{ - RevocationTime: template.RevokedAt.UTC(), - Reason: asn1.Enumerated(template.RevocationReason), - } - } - - rawResponderID := asn1.RawValue{ - Class: 2, // context-specific - Tag: 1, // Name (explicit tag) - IsCompound: true, - Bytes: responderCert.RawSubject, - } - tbsResponseData := responseData{ - Version: 0, - RawResponderID: rawResponderID, - ProducedAt: time.Now().Truncate(time.Minute).UTC(), - Responses: []singleResponse{innerResponse}, - } - - tbsResponseDataDER, err := asn1.Marshal(tbsResponseData) - if err != nil { - return nil, err - } - - hashFunc, signatureAlgorithm, err := signingParamsForPublicKey(priv.Public(), template.SignatureAlgorithm) - if err != nil { - return nil, err - } - - responseHash := hashFunc.New() - responseHash.Write(tbsResponseDataDER) - signature, err := priv.Sign(rand.Reader, responseHash.Sum(nil), hashFunc) - if err != nil { - return nil, err - } - - response := basicResponse{ - TBSResponseData: tbsResponseData, - SignatureAlgorithm: signatureAlgorithm, - Signature: asn1.BitString{ - Bytes: signature, - BitLength: 8 * len(signature), - }, - } - if template.Certificate != nil { - response.Certificates = []asn1.RawValue{ - {FullBytes: template.Certificate.Raw}, - } - } - responseDER, err := asn1.Marshal(response) - if err != nil { - return nil, err - } - - return asn1.Marshal(responseASN1{ - Status: asn1.Enumerated(Success), - Response: responseBytes{ - ResponseType: idPKIXOCSPBasic, - Response: responseDER, - }, - }) -} diff --git a/vendor/gopkg.in/yaml.v1/LICENSE b/vendor/gopkg.in/yaml.v1/LICENSE deleted file mode 100644 index a68e67f..0000000 --- a/vendor/gopkg.in/yaml.v1/LICENSE +++ /dev/null @@ -1,188 +0,0 @@ - -Copyright (c) 2011-2014 - Canonical Inc. - -This software is licensed under the LGPLv3, included below. 
- -As a special exception to the GNU Lesser General Public License version 3 -("LGPL3"), the copyright holders of this Library give you permission to -convey to a third party a Combined Work that links statically or dynamically -to this Library without providing any Minimal Corresponding Source or -Minimal Application Code as set out in 4d or providing the installation -information set out in section 4e, provided that you comply with the other -provisions of LGPL3 and provided that you meet, for the Application the -terms and conditions of the license(s) which apply to the Application. - -Except as stated in this special exception, the provisions of LGPL3 will -continue to comply in full to this Library. If you modify this Library, you -may apply this exception to your version of this Library, but you are not -obliged to do so. If you do not wish to do so, delete this exception -statement from your version. This exception does not (and cannot) modify any -license terms which apply to the Application, with which you must still -comply. - - - GNU LESSER GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - - This version of the GNU Lesser General Public License incorporates -the terms and conditions of version 3 of the GNU General Public -License, supplemented by the additional permissions listed below. - - 0. Additional Definitions. - - As used herein, "this License" refers to version 3 of the GNU Lesser -General Public License, and the "GNU GPL" refers to version 3 of the GNU -General Public License. - - "The Library" refers to a covered work governed by this License, -other than an Application or a Combined Work as defined below. - - An "Application" is any work that makes use of an interface provided -by the Library, but which is not otherwise based on the Library. -Defining a subclass of a class defined by the Library is deemed a mode -of using an interface provided by the Library. - - A "Combined Work" is a work produced by combining or linking an -Application with the Library. The particular version of the Library -with which the Combined Work was made is also called the "Linked -Version". - - The "Minimal Corresponding Source" for a Combined Work means the -Corresponding Source for the Combined Work, excluding any source code -for portions of the Combined Work that, considered in isolation, are -based on the Application, and not on the Linked Version. - - The "Corresponding Application Code" for a Combined Work means the -object code and/or source code for the Application, including any data -and utility programs needed for reproducing the Combined Work from the -Application, but excluding the System Libraries of the Combined Work. - - 1. Exception to Section 3 of the GNU GPL. - - You may convey a covered work under sections 3 and 4 of this License -without being bound by section 3 of the GNU GPL. - - 2. Conveying Modified Versions. 
- - If you modify a copy of the Library, and, in your modifications, a -facility refers to a function or data to be supplied by an Application -that uses the facility (other than as an argument passed when the -facility is invoked), then you may convey a copy of the modified -version: - - a) under this License, provided that you make a good faith effort to - ensure that, in the event an Application does not supply the - function or data, the facility still operates, and performs - whatever part of its purpose remains meaningful, or - - b) under the GNU GPL, with none of the additional permissions of - this License applicable to that copy. - - 3. Object Code Incorporating Material from Library Header Files. - - The object code form of an Application may incorporate material from -a header file that is part of the Library. You may convey such object -code under terms of your choice, provided that, if the incorporated -material is not limited to numerical parameters, data structure -layouts and accessors, or small macros, inline functions and templates -(ten or fewer lines in length), you do both of the following: - - a) Give prominent notice with each copy of the object code that the - Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the object code with a copy of the GNU GPL and this license - document. - - 4. Combined Works. - - You may convey a Combined Work under terms of your choice that, -taken together, effectively do not restrict modification of the -portions of the Library contained in the Combined Work and reverse -engineering for debugging such modifications, if you also do each of -the following: - - a) Give prominent notice with each copy of the Combined Work that - the Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the Combined Work with a copy of the GNU GPL and this license - document. - - c) For a Combined Work that displays copyright notices during - execution, include the copyright notice for the Library among - these notices, as well as a reference directing the user to the - copies of the GNU GPL and this license document. - - d) Do one of the following: - - 0) Convey the Minimal Corresponding Source under the terms of this - License, and the Corresponding Application Code in a form - suitable for, and under terms that permit, the user to - recombine or relink the Application with a modified version of - the Linked Version to produce a modified Combined Work, in the - manner specified by section 6 of the GNU GPL for conveying - Corresponding Source. - - 1) Use a suitable shared library mechanism for linking with the - Library. A suitable mechanism is one that (a) uses at run time - a copy of the Library already present on the user's computer - system, and (b) will operate properly with a modified version - of the Library that is interface-compatible with the Linked - Version. - - e) Provide Installation Information, but only if you would otherwise - be required to provide such information under section 6 of the - GNU GPL, and only to the extent that such information is - necessary to install and execute a modified version of the - Combined Work produced by recombining or relinking the - Application with a modified version of the Linked Version. (If - you use option 4d0, the Installation Information must accompany - the Minimal Corresponding Source and Corresponding Application - Code. 
If you use option 4d1, you must provide the Installation - Information in the manner specified by section 6 of the GNU GPL - for conveying Corresponding Source.) - - 5. Combined Libraries. - - You may place library facilities that are a work based on the -Library side by side in a single library together with other library -facilities that are not Applications and are not covered by this -License, and convey such a combined library under terms of your -choice, if you do both of the following: - - a) Accompany the combined library with a copy of the same work based - on the Library, uncombined with any other library facilities, - conveyed under the terms of this License. - - b) Give prominent notice with the combined library that part of it - is a work based on the Library, and explaining where to find the - accompanying uncombined form of the same work. - - 6. Revised Versions of the GNU Lesser General Public License. - - The Free Software Foundation may publish revised and/or new versions -of the GNU Lesser General Public License from time to time. Such new -versions will be similar in spirit to the present version, but may -differ in detail to address new problems or concerns. - - Each version is given a distinguishing version number. If the -Library as you received it specifies that a certain numbered version -of the GNU Lesser General Public License "or any later version" -applies to it, you have the option of following the terms and -conditions either of that published version or of any later version -published by the Free Software Foundation. If the Library as you -received it does not specify a version number of the GNU Lesser -General Public License, you may choose any version of the GNU Lesser -General Public License ever published by the Free Software Foundation. - - If the Library as you received it specifies that a proxy can decide -whether future versions of the GNU Lesser General Public License shall -apply, that proxy's public statement of acceptance of any version is -permanent authorization for you to choose that version for the -Library. diff --git a/vendor/gopkg.in/yaml.v1/LICENSE.libyaml b/vendor/gopkg.in/yaml.v1/LICENSE.libyaml deleted file mode 100644 index 8da58fb..0000000 --- a/vendor/gopkg.in/yaml.v1/LICENSE.libyaml +++ /dev/null @@ -1,31 +0,0 @@ -The following files were ported to Go from C files of libyaml, and thus -are still covered by their original copyright and license: - - apic.go - emitterc.go - parserc.go - readerc.go - scannerc.go - writerc.go - yamlh.go - yamlprivateh.go - -Copyright (c) 2006 Kirill Simonov - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/gopkg.in/yaml.v1/README.md b/vendor/gopkg.in/yaml.v1/README.md deleted file mode 100644 index af07056..0000000 --- a/vendor/gopkg.in/yaml.v1/README.md +++ /dev/null @@ -1,128 +0,0 @@ -# YAML support for the Go language - -Introduction ------------- - -The yaml package enables Go programs to comfortably encode and decode YAML -values. It was developed within [Canonical](https://www.canonical.com) as -part of the [juju](https://juju.ubuntu.com) project, and is based on a -pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) -C library to parse and generate YAML data quickly and reliably. - -Compatibility -------------- - -The yaml package supports most of YAML 1.1 and 1.2, including support for -anchors, tags, map merging, etc. Multi-document unmarshalling is not yet -implemented, and base-60 floats from YAML 1.1 are purposefully not -supported since they're a poor design and are gone in YAML 1.2. - -Installation and usage ----------------------- - -The import path for the package is *gopkg.in/yaml.v1*. - -To install it, run: - - go get gopkg.in/yaml.v1 - -API documentation ------------------ - -If opened in a browser, the import path itself leads to the API documentation: - - * [https://gopkg.in/yaml.v1](https://gopkg.in/yaml.v1) - -API stability -------------- - -The package API for yaml v1 will remain stable as described in [gopkg.in](https://gopkg.in). - - -License -------- - -The yaml package is licensed under the LGPL with an exception that allows it to be linked statically. Please see the LICENSE file for details. - - -Example -------- - -```Go -package main - -import ( - "fmt" - "log" - - "gopkg.in/yaml.v1" -) - -var data = ` -a: Easy! -b: - c: 2 - d: [3, 4] -` - -type T struct { - A string - B struct{C int; D []int ",flow"} -} - -func main() { - t := T{} - - err := yaml.Unmarshal([]byte(data), &t) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- t:\n%v\n\n", t) - - d, err := yaml.Marshal(&t) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- t dump:\n%s\n\n", string(d)) - - m := make(map[interface{}]interface{}) - - err = yaml.Unmarshal([]byte(data), &m) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- m:\n%v\n\n", m) - - d, err = yaml.Marshal(&m) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- m dump:\n%s\n\n", string(d)) -} -``` - -This example will generate the following output: - -``` ---- t: -{Easy! {2 [3 4]}} - ---- t dump: -a: Easy! -b: - c: 2 - d: [3, 4] - - ---- m: -map[a:Easy! b:map[c:2 d:[3 4]]] - ---- m dump: -a: Easy! -b: - c: 2 - d: - - 3 - - 4 -``` - diff --git a/vendor/gopkg.in/yaml.v1/apic.go b/vendor/gopkg.in/yaml.v1/apic.go deleted file mode 100644 index 95ec014..0000000 --- a/vendor/gopkg.in/yaml.v1/apic.go +++ /dev/null @@ -1,742 +0,0 @@ -package yaml - -import ( - "io" - "os" -) - -func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { - //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) - - // Check if we can move the queue at the beginning of the buffer. 
- if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { - if parser.tokens_head != len(parser.tokens) { - copy(parser.tokens, parser.tokens[parser.tokens_head:]) - } - parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] - parser.tokens_head = 0 - } - parser.tokens = append(parser.tokens, *token) - if pos < 0 { - return - } - copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) - parser.tokens[parser.tokens_head+pos] = *token -} - -// Create a new parser object. -func yaml_parser_initialize(parser *yaml_parser_t) bool { - *parser = yaml_parser_t{ - raw_buffer: make([]byte, 0, input_raw_buffer_size), - buffer: make([]byte, 0, input_buffer_size), - } - return true -} - -// Destroy a parser object. -func yaml_parser_delete(parser *yaml_parser_t) { - *parser = yaml_parser_t{} -} - -// String read handler. -func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { - if parser.input_pos == len(parser.input) { - return 0, io.EOF - } - n = copy(buffer, parser.input[parser.input_pos:]) - parser.input_pos += n - return n, nil -} - -// File read handler. -func yaml_file_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { - return parser.input_file.Read(buffer) -} - -// Set a string input. -func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { - if parser.read_handler != nil { - panic("must set the input source only once") - } - parser.read_handler = yaml_string_read_handler - parser.input = input - parser.input_pos = 0 -} - -// Set a file input. -func yaml_parser_set_input_file(parser *yaml_parser_t, file *os.File) { - if parser.read_handler != nil { - panic("must set the input source only once") - } - parser.read_handler = yaml_file_read_handler - parser.input_file = file -} - -// Set the source encoding. -func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { - if parser.encoding != yaml_ANY_ENCODING { - panic("must set the encoding only once") - } - parser.encoding = encoding -} - -// Create a new emitter object. -func yaml_emitter_initialize(emitter *yaml_emitter_t) bool { - *emitter = yaml_emitter_t{ - buffer: make([]byte, output_buffer_size), - raw_buffer: make([]byte, 0, output_raw_buffer_size), - states: make([]yaml_emitter_state_t, 0, initial_stack_size), - events: make([]yaml_event_t, 0, initial_queue_size), - } - return true -} - -// Destroy an emitter object. -func yaml_emitter_delete(emitter *yaml_emitter_t) { - *emitter = yaml_emitter_t{} -} - -// String write handler. -func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { - *emitter.output_buffer = append(*emitter.output_buffer, buffer...) - return nil -} - -// File write handler. -func yaml_file_write_handler(emitter *yaml_emitter_t, buffer []byte) error { - _, err := emitter.output_file.Write(buffer) - return err -} - -// Set a string output. -func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) { - if emitter.write_handler != nil { - panic("must set the output target only once") - } - emitter.write_handler = yaml_string_write_handler - emitter.output_buffer = output_buffer -} - -// Set a file output. -func yaml_emitter_set_output_file(emitter *yaml_emitter_t, file io.Writer) { - if emitter.write_handler != nil { - panic("must set the output target only once") - } - emitter.write_handler = yaml_file_write_handler - emitter.output_file = file -} - -// Set the output encoding. 
-func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { - if emitter.encoding != yaml_ANY_ENCODING { - panic("must set the output encoding only once") - } - emitter.encoding = encoding -} - -// Set the canonical output style. -func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { - emitter.canonical = canonical -} - -//// Set the indentation increment. -func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { - if indent < 2 || indent > 9 { - indent = 2 - } - emitter.best_indent = indent -} - -// Set the preferred line width. -func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { - if width < 0 { - width = -1 - } - emitter.best_width = width -} - -// Set if unescaped non-ASCII characters are allowed. -func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { - emitter.unicode = unicode -} - -// Set the preferred line break character. -func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { - emitter.line_break = line_break -} - -///* -// * Destroy a token object. -// */ -// -//YAML_DECLARE(void) -//yaml_token_delete(yaml_token_t *token) -//{ -// assert(token); // Non-NULL token object expected. -// -// switch (token.type) -// { -// case YAML_TAG_DIRECTIVE_TOKEN: -// yaml_free(token.data.tag_directive.handle); -// yaml_free(token.data.tag_directive.prefix); -// break; -// -// case YAML_ALIAS_TOKEN: -// yaml_free(token.data.alias.value); -// break; -// -// case YAML_ANCHOR_TOKEN: -// yaml_free(token.data.anchor.value); -// break; -// -// case YAML_TAG_TOKEN: -// yaml_free(token.data.tag.handle); -// yaml_free(token.data.tag.suffix); -// break; -// -// case YAML_SCALAR_TOKEN: -// yaml_free(token.data.scalar.value); -// break; -// -// default: -// break; -// } -// -// memset(token, 0, sizeof(yaml_token_t)); -//} -// -///* -// * Check if a string is a valid UTF-8 sequence. -// * -// * Check 'reader.c' for more details on UTF-8 encoding. -// */ -// -//static int -//yaml_check_utf8(yaml_char_t *start, size_t length) -//{ -// yaml_char_t *end = start+length; -// yaml_char_t *pointer = start; -// -// while (pointer < end) { -// unsigned char octet; -// unsigned int width; -// unsigned int value; -// size_t k; -// -// octet = pointer[0]; -// width = (octet & 0x80) == 0x00 ? 1 : -// (octet & 0xE0) == 0xC0 ? 2 : -// (octet & 0xF0) == 0xE0 ? 3 : -// (octet & 0xF8) == 0xF0 ? 4 : 0; -// value = (octet & 0x80) == 0x00 ? octet & 0x7F : -// (octet & 0xE0) == 0xC0 ? octet & 0x1F : -// (octet & 0xF0) == 0xE0 ? octet & 0x0F : -// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0; -// if (!width) return 0; -// if (pointer+width > end) return 0; -// for (k = 1; k < width; k ++) { -// octet = pointer[k]; -// if ((octet & 0xC0) != 0x80) return 0; -// value = (value << 6) + (octet & 0x3F); -// } -// if (!((width == 1) || -// (width == 2 && value >= 0x80) || -// (width == 3 && value >= 0x800) || -// (width == 4 && value >= 0x10000))) return 0; -// -// pointer += width; -// } -// -// return 1; -//} -// - -// Create STREAM-START. -func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) bool { - *event = yaml_event_t{ - typ: yaml_STREAM_START_EVENT, - encoding: encoding, - } - return true -} - -// Create STREAM-END. -func yaml_stream_end_event_initialize(event *yaml_event_t) bool { - *event = yaml_event_t{ - typ: yaml_STREAM_END_EVENT, - } - return true -} - -// Create DOCUMENT-START. 
-func yaml_document_start_event_initialize(event *yaml_event_t, version_directive *yaml_version_directive_t, - tag_directives []yaml_tag_directive_t, implicit bool) bool { - *event = yaml_event_t{ - typ: yaml_DOCUMENT_START_EVENT, - version_directive: version_directive, - tag_directives: tag_directives, - implicit: implicit, - } - return true -} - -// Create DOCUMENT-END. -func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) bool { - *event = yaml_event_t{ - typ: yaml_DOCUMENT_END_EVENT, - implicit: implicit, - } - return true -} - -///* -// * Create ALIAS. -// */ -// -//YAML_DECLARE(int) -//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t) -//{ -// mark yaml_mark_t = { 0, 0, 0 } -// anchor_copy *yaml_char_t = NULL -// -// assert(event) // Non-NULL event object is expected. -// assert(anchor) // Non-NULL anchor is expected. -// -// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0 -// -// anchor_copy = yaml_strdup(anchor) -// if (!anchor_copy) -// return 0 -// -// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark) -// -// return 1 -//} - -// Create SCALAR. -func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { - *event = yaml_event_t{ - typ: yaml_SCALAR_EVENT, - anchor: anchor, - tag: tag, - value: value, - implicit: plain_implicit, - quoted_implicit: quoted_implicit, - style: yaml_style_t(style), - } - return true -} - -// Create SEQUENCE-START. -func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { - *event = yaml_event_t{ - typ: yaml_SEQUENCE_START_EVENT, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(style), - } - return true -} - -// Create SEQUENCE-END. -func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { - *event = yaml_event_t{ - typ: yaml_SEQUENCE_END_EVENT, - } - return true -} - -// Create MAPPING-START. -func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) bool { - *event = yaml_event_t{ - typ: yaml_MAPPING_START_EVENT, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(style), - } - return true -} - -// Create MAPPING-END. -func yaml_mapping_end_event_initialize(event *yaml_event_t) bool { - *event = yaml_event_t{ - typ: yaml_MAPPING_END_EVENT, - } - return true -} - -// Destroy an event object. -func yaml_event_delete(event *yaml_event_t) { - *event = yaml_event_t{} -} - -///* -// * Create a document object. -// */ -// -//YAML_DECLARE(int) -//yaml_document_initialize(document *yaml_document_t, -// version_directive *yaml_version_directive_t, -// tag_directives_start *yaml_tag_directive_t, -// tag_directives_end *yaml_tag_directive_t, -// start_implicit int, end_implicit int) -//{ -// struct { -// error yaml_error_type_t -// } context -// struct { -// start *yaml_node_t -// end *yaml_node_t -// top *yaml_node_t -// } nodes = { NULL, NULL, NULL } -// version_directive_copy *yaml_version_directive_t = NULL -// struct { -// start *yaml_tag_directive_t -// end *yaml_tag_directive_t -// top *yaml_tag_directive_t -// } tag_directives_copy = { NULL, NULL, NULL } -// value yaml_tag_directive_t = { NULL, NULL } -// mark yaml_mark_t = { 0, 0, 0 } -// -// assert(document) // Non-NULL document object is expected. 
-// assert((tag_directives_start && tag_directives_end) || -// (tag_directives_start == tag_directives_end)) -// // Valid tag directives are expected. -// -// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error -// -// if (version_directive) { -// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) -// if (!version_directive_copy) goto error -// version_directive_copy.major = version_directive.major -// version_directive_copy.minor = version_directive.minor -// } -// -// if (tag_directives_start != tag_directives_end) { -// tag_directive *yaml_tag_directive_t -// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) -// goto error -// for (tag_directive = tag_directives_start -// tag_directive != tag_directives_end; tag_directive ++) { -// assert(tag_directive.handle) -// assert(tag_directive.prefix) -// if (!yaml_check_utf8(tag_directive.handle, -// strlen((char *)tag_directive.handle))) -// goto error -// if (!yaml_check_utf8(tag_directive.prefix, -// strlen((char *)tag_directive.prefix))) -// goto error -// value.handle = yaml_strdup(tag_directive.handle) -// value.prefix = yaml_strdup(tag_directive.prefix) -// if (!value.handle || !value.prefix) goto error -// if (!PUSH(&context, tag_directives_copy, value)) -// goto error -// value.handle = NULL -// value.prefix = NULL -// } -// } -// -// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, -// tag_directives_copy.start, tag_directives_copy.top, -// start_implicit, end_implicit, mark, mark) -// -// return 1 -// -//error: -// STACK_DEL(&context, nodes) -// yaml_free(version_directive_copy) -// while (!STACK_EMPTY(&context, tag_directives_copy)) { -// value yaml_tag_directive_t = POP(&context, tag_directives_copy) -// yaml_free(value.handle) -// yaml_free(value.prefix) -// } -// STACK_DEL(&context, tag_directives_copy) -// yaml_free(value.handle) -// yaml_free(value.prefix) -// -// return 0 -//} -// -///* -// * Destroy a document object. -// */ -// -//YAML_DECLARE(void) -//yaml_document_delete(document *yaml_document_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// tag_directive *yaml_tag_directive_t -// -// context.error = YAML_NO_ERROR // Eliminate a compliler warning. -// -// assert(document) // Non-NULL document object is expected. -// -// while (!STACK_EMPTY(&context, document.nodes)) { -// node yaml_node_t = POP(&context, document.nodes) -// yaml_free(node.tag) -// switch (node.type) { -// case YAML_SCALAR_NODE: -// yaml_free(node.data.scalar.value) -// break -// case YAML_SEQUENCE_NODE: -// STACK_DEL(&context, node.data.sequence.items) -// break -// case YAML_MAPPING_NODE: -// STACK_DEL(&context, node.data.mapping.pairs) -// break -// default: -// assert(0) // Should not happen. -// } -// } -// STACK_DEL(&context, document.nodes) -// -// yaml_free(document.version_directive) -// for (tag_directive = document.tag_directives.start -// tag_directive != document.tag_directives.end -// tag_directive++) { -// yaml_free(tag_directive.handle) -// yaml_free(tag_directive.prefix) -// } -// yaml_free(document.tag_directives.start) -// -// memset(document, 0, sizeof(yaml_document_t)) -//} -// -///** -// * Get a document node. -// */ -// -//YAML_DECLARE(yaml_node_t *) -//yaml_document_get_node(document *yaml_document_t, index int) -//{ -// assert(document) // Non-NULL document object is expected. 
-// -// if (index > 0 && document.nodes.start + index <= document.nodes.top) { -// return document.nodes.start + index - 1 -// } -// return NULL -//} -// -///** -// * Get the root object. -// */ -// -//YAML_DECLARE(yaml_node_t *) -//yaml_document_get_root_node(document *yaml_document_t) -//{ -// assert(document) // Non-NULL document object is expected. -// -// if (document.nodes.top != document.nodes.start) { -// return document.nodes.start -// } -// return NULL -//} -// -///* -// * Add a scalar node to a document. -// */ -// -//YAML_DECLARE(int) -//yaml_document_add_scalar(document *yaml_document_t, -// tag *yaml_char_t, value *yaml_char_t, length int, -// style yaml_scalar_style_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// mark yaml_mark_t = { 0, 0, 0 } -// tag_copy *yaml_char_t = NULL -// value_copy *yaml_char_t = NULL -// node yaml_node_t -// -// assert(document) // Non-NULL document object is expected. -// assert(value) // Non-NULL value is expected. -// -// if (!tag) { -// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG -// } -// -// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error -// tag_copy = yaml_strdup(tag) -// if (!tag_copy) goto error -// -// if (length < 0) { -// length = strlen((char *)value) -// } -// -// if (!yaml_check_utf8(value, length)) goto error -// value_copy = yaml_malloc(length+1) -// if (!value_copy) goto error -// memcpy(value_copy, value, length) -// value_copy[length] = '\0' -// -// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) -// if (!PUSH(&context, document.nodes, node)) goto error -// -// return document.nodes.top - document.nodes.start -// -//error: -// yaml_free(tag_copy) -// yaml_free(value_copy) -// -// return 0 -//} -// -///* -// * Add a sequence node to a document. -// */ -// -//YAML_DECLARE(int) -//yaml_document_add_sequence(document *yaml_document_t, -// tag *yaml_char_t, style yaml_sequence_style_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// mark yaml_mark_t = { 0, 0, 0 } -// tag_copy *yaml_char_t = NULL -// struct { -// start *yaml_node_item_t -// end *yaml_node_item_t -// top *yaml_node_item_t -// } items = { NULL, NULL, NULL } -// node yaml_node_t -// -// assert(document) // Non-NULL document object is expected. -// -// if (!tag) { -// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG -// } -// -// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error -// tag_copy = yaml_strdup(tag) -// if (!tag_copy) goto error -// -// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error -// -// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, -// style, mark, mark) -// if (!PUSH(&context, document.nodes, node)) goto error -// -// return document.nodes.top - document.nodes.start -// -//error: -// STACK_DEL(&context, items) -// yaml_free(tag_copy) -// -// return 0 -//} -// -///* -// * Add a mapping node to a document. -// */ -// -//YAML_DECLARE(int) -//yaml_document_add_mapping(document *yaml_document_t, -// tag *yaml_char_t, style yaml_mapping_style_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// mark yaml_mark_t = { 0, 0, 0 } -// tag_copy *yaml_char_t = NULL -// struct { -// start *yaml_node_pair_t -// end *yaml_node_pair_t -// top *yaml_node_pair_t -// } pairs = { NULL, NULL, NULL } -// node yaml_node_t -// -// assert(document) // Non-NULL document object is expected. 
-// -// if (!tag) { -// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG -// } -// -// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error -// tag_copy = yaml_strdup(tag) -// if (!tag_copy) goto error -// -// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error -// -// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, -// style, mark, mark) -// if (!PUSH(&context, document.nodes, node)) goto error -// -// return document.nodes.top - document.nodes.start -// -//error: -// STACK_DEL(&context, pairs) -// yaml_free(tag_copy) -// -// return 0 -//} -// -///* -// * Append an item to a sequence node. -// */ -// -//YAML_DECLARE(int) -//yaml_document_append_sequence_item(document *yaml_document_t, -// sequence int, item int) -//{ -// struct { -// error yaml_error_type_t -// } context -// -// assert(document) // Non-NULL document is required. -// assert(sequence > 0 -// && document.nodes.start + sequence <= document.nodes.top) -// // Valid sequence id is required. -// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) -// // A sequence node is required. -// assert(item > 0 && document.nodes.start + item <= document.nodes.top) -// // Valid item id is required. -// -// if (!PUSH(&context, -// document.nodes.start[sequence-1].data.sequence.items, item)) -// return 0 -// -// return 1 -//} -// -///* -// * Append a pair of a key and a value to a mapping node. -// */ -// -//YAML_DECLARE(int) -//yaml_document_append_mapping_pair(document *yaml_document_t, -// mapping int, key int, value int) -//{ -// struct { -// error yaml_error_type_t -// } context -// -// pair yaml_node_pair_t -// -// assert(document) // Non-NULL document is required. -// assert(mapping > 0 -// && document.nodes.start + mapping <= document.nodes.top) -// // Valid mapping id is required. -// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) -// // A mapping node is required. -// assert(key > 0 && document.nodes.start + key <= document.nodes.top) -// // Valid key id is required. -// assert(value > 0 && document.nodes.start + value <= document.nodes.top) -// // Valid value id is required. -// -// pair.key = key -// pair.value = value -// -// if (!PUSH(&context, -// document.nodes.start[mapping-1].data.mapping.pairs, pair)) -// return 0 -// -// return 1 -//} -// -// diff --git a/vendor/gopkg.in/yaml.v1/decode.go b/vendor/gopkg.in/yaml.v1/decode.go deleted file mode 100644 index a098626..0000000 --- a/vendor/gopkg.in/yaml.v1/decode.go +++ /dev/null @@ -1,566 +0,0 @@ -package yaml - -import ( - "encoding/base64" - "fmt" - "reflect" - "strconv" - "time" -) - -const ( - documentNode = 1 << iota - mappingNode - sequenceNode - scalarNode - aliasNode -) - -type node struct { - kind int - line, column int - tag string - value string - implicit bool - children []*node - anchors map[string]*node -} - -// ---------------------------------------------------------------------------- -// Parser, produces a node tree out of a libyaml event stream. 
- -type parser struct { - parser yaml_parser_t - event yaml_event_t - doc *node -} - -func newParser(b []byte) *parser { - p := parser{} - if !yaml_parser_initialize(&p.parser) { - panic("Failed to initialize YAML emitter") - } - - if len(b) == 0 { - b = []byte{'\n'} - } - - yaml_parser_set_input_string(&p.parser, b) - - p.skip() - if p.event.typ != yaml_STREAM_START_EVENT { - panic("Expected stream start event, got " + strconv.Itoa(int(p.event.typ))) - } - p.skip() - return &p -} - -func (p *parser) destroy() { - if p.event.typ != yaml_NO_EVENT { - yaml_event_delete(&p.event) - } - yaml_parser_delete(&p.parser) -} - -func (p *parser) skip() { - if p.event.typ != yaml_NO_EVENT { - if p.event.typ == yaml_STREAM_END_EVENT { - fail("Attempted to go past the end of stream. Corrupted value?") - } - yaml_event_delete(&p.event) - } - if !yaml_parser_parse(&p.parser, &p.event) { - p.fail() - } -} - -func (p *parser) fail() { - var where string - var line int - if p.parser.problem_mark.line != 0 { - line = p.parser.problem_mark.line - } else if p.parser.context_mark.line != 0 { - line = p.parser.context_mark.line - } - if line != 0 { - where = "line " + strconv.Itoa(line) + ": " - } - var msg string - if len(p.parser.problem) > 0 { - msg = p.parser.problem - } else { - msg = "Unknown problem parsing YAML content" - } - fail(where + msg) -} - -func (p *parser) anchor(n *node, anchor []byte) { - if anchor != nil { - p.doc.anchors[string(anchor)] = n - } -} - -func (p *parser) parse() *node { - switch p.event.typ { - case yaml_SCALAR_EVENT: - return p.scalar() - case yaml_ALIAS_EVENT: - return p.alias() - case yaml_MAPPING_START_EVENT: - return p.mapping() - case yaml_SEQUENCE_START_EVENT: - return p.sequence() - case yaml_DOCUMENT_START_EVENT: - return p.document() - case yaml_STREAM_END_EVENT: - // Happens when attempting to decode an empty buffer. - return nil - default: - panic("Attempted to parse unknown event: " + strconv.Itoa(int(p.event.typ))) - } - panic("unreachable") -} - -func (p *parser) node(kind int) *node { - return &node{ - kind: kind, - line: p.event.start_mark.line, - column: p.event.start_mark.column, - } -} - -func (p *parser) document() *node { - n := p.node(documentNode) - n.anchors = make(map[string]*node) - p.doc = n - p.skip() - n.children = append(n.children, p.parse()) - if p.event.typ != yaml_DOCUMENT_END_EVENT { - panic("Expected end of document event but got " + strconv.Itoa(int(p.event.typ))) - } - p.skip() - return n -} - -func (p *parser) alias() *node { - n := p.node(aliasNode) - n.value = string(p.event.anchor) - p.skip() - return n -} - -func (p *parser) scalar() *node { - n := p.node(scalarNode) - n.value = string(p.event.value) - n.tag = string(p.event.tag) - n.implicit = p.event.implicit - p.anchor(n, p.event.anchor) - p.skip() - return n -} - -func (p *parser) sequence() *node { - n := p.node(sequenceNode) - p.anchor(n, p.event.anchor) - p.skip() - for p.event.typ != yaml_SEQUENCE_END_EVENT { - n.children = append(n.children, p.parse()) - } - p.skip() - return n -} - -func (p *parser) mapping() *node { - n := p.node(mappingNode) - p.anchor(n, p.event.anchor) - p.skip() - for p.event.typ != yaml_MAPPING_END_EVENT { - n.children = append(n.children, p.parse(), p.parse()) - } - p.skip() - return n -} - -// ---------------------------------------------------------------------------- -// Decoder, unmarshals a node into a provided value. 
- -type decoder struct { - doc *node - aliases map[string]bool -} - -func newDecoder() *decoder { - d := &decoder{} - d.aliases = make(map[string]bool) - return d -} - -// d.setter deals with setters and pointer dereferencing and initialization. -// -// It's a slightly convoluted case to handle properly: -// -// - nil pointers should be initialized, unless being set to nil -// - we don't know at this point yet what's the value to SetYAML() with. -// - we can't separate pointer deref/init and setter checking, because -// a setter may be found while going down a pointer chain. -// -// Thus, here is how it takes care of it: -// -// - out is provided as a pointer, so that it can be replaced. -// - when looking at a non-setter ptr, *out=ptr.Elem(), unless tag=!!null -// - when a setter is found, *out=interface{}, and a set() function is -// returned to call SetYAML() with the value of *out once it's defined. -// -func (d *decoder) setter(tag string, out *reflect.Value, good *bool) (set func()) { - if (*out).Kind() != reflect.Ptr && (*out).CanAddr() { - setter, _ := (*out).Addr().Interface().(Setter) - if setter != nil { - var arg interface{} - *out = reflect.ValueOf(&arg).Elem() - return func() { - *good = setter.SetYAML(shortTag(tag), arg) - } - } - } - again := true - for again { - again = false - setter, _ := (*out).Interface().(Setter) - if tag != yaml_NULL_TAG || setter != nil { - if pv := (*out); pv.Kind() == reflect.Ptr { - if pv.IsNil() { - *out = reflect.New(pv.Type().Elem()).Elem() - pv.Set((*out).Addr()) - } else { - *out = pv.Elem() - } - setter, _ = pv.Interface().(Setter) - again = true - } - } - if setter != nil { - var arg interface{} - *out = reflect.ValueOf(&arg).Elem() - return func() { - *good = setter.SetYAML(shortTag(tag), arg) - } - } - } - return nil -} - -func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) { - switch n.kind { - case documentNode: - good = d.document(n, out) - case scalarNode: - good = d.scalar(n, out) - case aliasNode: - good = d.alias(n, out) - case mappingNode: - good = d.mapping(n, out) - case sequenceNode: - good = d.sequence(n, out) - default: - panic("Internal error: unknown node kind: " + strconv.Itoa(n.kind)) - } - return -} - -func (d *decoder) document(n *node, out reflect.Value) (good bool) { - if len(n.children) == 1 { - d.doc = n - d.unmarshal(n.children[0], out) - return true - } - return false -} - -func (d *decoder) alias(n *node, out reflect.Value) (good bool) { - an, ok := d.doc.anchors[n.value] - if !ok { - fail("Unknown anchor '" + n.value + "' referenced") - } - if d.aliases[n.value] { - fail("Anchor '" + n.value + "' value contains itself") - } - d.aliases[n.value] = true - good = d.unmarshal(an, out) - delete(d.aliases, n.value) - return good -} - -var zeroValue reflect.Value - -func resetMap(out reflect.Value) { - for _, k := range out.MapKeys() { - out.SetMapIndex(k, zeroValue) - } -} - -var durationType = reflect.TypeOf(time.Duration(0)) - -func (d *decoder) scalar(n *node, out reflect.Value) (good bool) { - var tag string - var resolved interface{} - if n.tag == "" && !n.implicit { - tag = yaml_STR_TAG - resolved = n.value - } else { - tag, resolved = resolve(n.tag, n.value) - if tag == yaml_BINARY_TAG { - data, err := base64.StdEncoding.DecodeString(resolved.(string)) - if err != nil { - fail("!!binary value contains invalid base64 data") - } - resolved = string(data) - } - } - if set := d.setter(tag, &out, &good); set != nil { - defer set() - } - if resolved == nil { - if out.Kind() == reflect.Map && 
!out.CanAddr() { - resetMap(out) - } else { - out.Set(reflect.Zero(out.Type())) - } - good = true - return - } - switch out.Kind() { - case reflect.String: - if tag == yaml_BINARY_TAG { - out.SetString(resolved.(string)) - good = true - } else if resolved != nil { - out.SetString(n.value) - good = true - } - case reflect.Interface: - if resolved == nil { - out.Set(reflect.Zero(out.Type())) - } else { - out.Set(reflect.ValueOf(resolved)) - } - good = true - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - switch resolved := resolved.(type) { - case int: - if !out.OverflowInt(int64(resolved)) { - out.SetInt(int64(resolved)) - good = true - } - case int64: - if !out.OverflowInt(resolved) { - out.SetInt(resolved) - good = true - } - case float64: - if resolved < 1<<63-1 && !out.OverflowInt(int64(resolved)) { - out.SetInt(int64(resolved)) - good = true - } - case string: - if out.Type() == durationType { - d, err := time.ParseDuration(resolved) - if err == nil { - out.SetInt(int64(d)) - good = true - } - } - } - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - switch resolved := resolved.(type) { - case int: - if resolved >= 0 { - out.SetUint(uint64(resolved)) - good = true - } - case int64: - if resolved >= 0 { - out.SetUint(uint64(resolved)) - good = true - } - case float64: - if resolved < 1<<64-1 && !out.OverflowUint(uint64(resolved)) { - out.SetUint(uint64(resolved)) - good = true - } - } - case reflect.Bool: - switch resolved := resolved.(type) { - case bool: - out.SetBool(resolved) - good = true - } - case reflect.Float32, reflect.Float64: - switch resolved := resolved.(type) { - case int: - out.SetFloat(float64(resolved)) - good = true - case int64: - out.SetFloat(float64(resolved)) - good = true - case float64: - out.SetFloat(resolved) - good = true - } - case reflect.Ptr: - if out.Type().Elem() == reflect.TypeOf(resolved) { - elem := reflect.New(out.Type().Elem()) - elem.Elem().Set(reflect.ValueOf(resolved)) - out.Set(elem) - good = true - } - } - return good -} - -func settableValueOf(i interface{}) reflect.Value { - v := reflect.ValueOf(i) - sv := reflect.New(v.Type()).Elem() - sv.Set(v) - return sv -} - -func (d *decoder) sequence(n *node, out reflect.Value) (good bool) { - if set := d.setter(yaml_SEQ_TAG, &out, &good); set != nil { - defer set() - } - var iface reflect.Value - if out.Kind() == reflect.Interface { - // No type hints. Will have to use a generic sequence. - iface = out - out = settableValueOf(make([]interface{}, 0)) - } - - if out.Kind() != reflect.Slice { - return false - } - et := out.Type().Elem() - - l := len(n.children) - for i := 0; i < l; i++ { - e := reflect.New(et).Elem() - if ok := d.unmarshal(n.children[i], e); ok { - out.Set(reflect.Append(out, e)) - } - } - if iface.IsValid() { - iface.Set(out) - } - return true -} - -func (d *decoder) mapping(n *node, out reflect.Value) (good bool) { - if set := d.setter(yaml_MAP_TAG, &out, &good); set != nil { - defer set() - } - if out.Kind() == reflect.Struct { - return d.mappingStruct(n, out) - } - - if out.Kind() == reflect.Interface { - // No type hints. Will have to use a generic map. 
- iface := out - out = settableValueOf(make(map[interface{}]interface{})) - iface.Set(out) - } - - if out.Kind() != reflect.Map { - return false - } - outt := out.Type() - kt := outt.Key() - et := outt.Elem() - - if out.IsNil() { - out.Set(reflect.MakeMap(outt)) - } - l := len(n.children) - for i := 0; i < l; i += 2 { - if isMerge(n.children[i]) { - d.merge(n.children[i+1], out) - continue - } - k := reflect.New(kt).Elem() - if d.unmarshal(n.children[i], k) { - kkind := k.Kind() - if kkind == reflect.Interface { - kkind = k.Elem().Kind() - } - if kkind == reflect.Map || kkind == reflect.Slice { - fail(fmt.Sprintf("invalid map key: %#v", k.Interface())) - } - e := reflect.New(et).Elem() - if d.unmarshal(n.children[i+1], e) { - out.SetMapIndex(k, e) - } - } - } - return true -} - -func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) { - sinfo, err := getStructInfo(out.Type()) - if err != nil { - panic(err) - } - name := settableValueOf("") - l := len(n.children) - for i := 0; i < l; i += 2 { - ni := n.children[i] - if isMerge(ni) { - d.merge(n.children[i+1], out) - continue - } - if !d.unmarshal(ni, name) { - continue - } - if info, ok := sinfo.FieldsMap[name.String()]; ok { - var field reflect.Value - if info.Inline == nil { - field = out.Field(info.Num) - } else { - field = out.FieldByIndex(info.Inline) - } - d.unmarshal(n.children[i+1], field) - } - } - return true -} - -func (d *decoder) merge(n *node, out reflect.Value) { - const wantMap = "map merge requires map or sequence of maps as the value" - switch n.kind { - case mappingNode: - d.unmarshal(n, out) - case aliasNode: - an, ok := d.doc.anchors[n.value] - if ok && an.kind != mappingNode { - fail(wantMap) - } - d.unmarshal(n, out) - case sequenceNode: - // Step backwards as earlier nodes take precedence. - for i := len(n.children) - 1; i >= 0; i-- { - ni := n.children[i] - if ni.kind == aliasNode { - an, ok := d.doc.anchors[ni.value] - if ok && an.kind != mappingNode { - fail(wantMap) - } - } else if ni.kind != mappingNode { - fail(wantMap) - } - d.unmarshal(ni, out) - } - default: - fail(wantMap) - } -} - -func isMerge(n *node) bool { - return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == yaml_MERGE_TAG) -} diff --git a/vendor/gopkg.in/yaml.v1/emitterc.go b/vendor/gopkg.in/yaml.v1/emitterc.go deleted file mode 100644 index 9b3dc4a..0000000 --- a/vendor/gopkg.in/yaml.v1/emitterc.go +++ /dev/null @@ -1,1685 +0,0 @@ -package yaml - -import ( - "bytes" -) - -// Flush the buffer if needed. -func flush(emitter *yaml_emitter_t) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) { - return yaml_emitter_flush(emitter) - } - return true -} - -// Put a character to the output buffer. -func put(emitter *yaml_emitter_t, value byte) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { - return false - } - emitter.buffer[emitter.buffer_pos] = value - emitter.buffer_pos++ - emitter.column++ - return true -} - -// Put a line break to the output buffer. 
-func put_break(emitter *yaml_emitter_t) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { - return false - } - switch emitter.line_break { - case yaml_CR_BREAK: - emitter.buffer[emitter.buffer_pos] = '\r' - emitter.buffer_pos += 1 - case yaml_LN_BREAK: - emitter.buffer[emitter.buffer_pos] = '\n' - emitter.buffer_pos += 1 - case yaml_CRLN_BREAK: - emitter.buffer[emitter.buffer_pos+0] = '\r' - emitter.buffer[emitter.buffer_pos+1] = '\n' - emitter.buffer_pos += 2 - default: - panic("unknown line break setting") - } - emitter.column = 0 - emitter.line++ - return true -} - -// Copy a character from a string into buffer. -func write(emitter *yaml_emitter_t, s []byte, i *int) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { - return false - } - p := emitter.buffer_pos - w := width(s[*i]) - switch w { - case 4: - emitter.buffer[p+3] = s[*i+3] - fallthrough - case 3: - emitter.buffer[p+2] = s[*i+2] - fallthrough - case 2: - emitter.buffer[p+1] = s[*i+1] - fallthrough - case 1: - emitter.buffer[p+0] = s[*i+0] - default: - panic("unknown character width") - } - emitter.column++ - emitter.buffer_pos += w - *i += w - return true -} - -// Write a whole string into buffer. -func write_all(emitter *yaml_emitter_t, s []byte) bool { - for i := 0; i < len(s); { - if !write(emitter, s, &i) { - return false - } - } - return true -} - -// Copy a line break character from a string into buffer. -func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool { - if s[*i] == '\n' { - if !put_break(emitter) { - return false - } - *i++ - } else { - if !write(emitter, s, i) { - return false - } - emitter.column = 0 - emitter.line++ - } - return true -} - -// Set an emitter error and return false. -func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool { - emitter.error = yaml_EMITTER_ERROR - emitter.problem = problem - return false -} - -// Emit an event. -func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { - emitter.events = append(emitter.events, *event) - for !yaml_emitter_need_more_events(emitter) { - event := &emitter.events[emitter.events_head] - if !yaml_emitter_analyze_event(emitter, event) { - return false - } - if !yaml_emitter_state_machine(emitter, event) { - return false - } - yaml_event_delete(event) - emitter.events_head++ - } - return true -} - -// Check if we need to accumulate more events before emitting. 
-// -// We accumulate extra -// - 1 event for DOCUMENT-START -// - 2 events for SEQUENCE-START -// - 3 events for MAPPING-START -// -func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { - if emitter.events_head == len(emitter.events) { - return true - } - var accumulate int - switch emitter.events[emitter.events_head].typ { - case yaml_DOCUMENT_START_EVENT: - accumulate = 1 - break - case yaml_SEQUENCE_START_EVENT: - accumulate = 2 - break - case yaml_MAPPING_START_EVENT: - accumulate = 3 - break - default: - return false - } - if len(emitter.events)-emitter.events_head > accumulate { - return false - } - var level int - for i := emitter.events_head; i < len(emitter.events); i++ { - switch emitter.events[i].typ { - case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT: - level++ - case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT: - level-- - } - if level == 0 { - return false - } - } - return true -} - -// Append a directive to the directives stack. -func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool { - for i := 0; i < len(emitter.tag_directives); i++ { - if bytes.Equal(value.handle, emitter.tag_directives[i].handle) { - if allow_duplicates { - return true - } - return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive") - } - } - - // [Go] Do we actually need to copy this given garbage collection - // and the lack of deallocating destructors? - tag_copy := yaml_tag_directive_t{ - handle: make([]byte, len(value.handle)), - prefix: make([]byte, len(value.prefix)), - } - copy(tag_copy.handle, value.handle) - copy(tag_copy.prefix, value.prefix) - emitter.tag_directives = append(emitter.tag_directives, tag_copy) - return true -} - -// Increase the indentation level. -func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool { - emitter.indents = append(emitter.indents, emitter.indent) - if emitter.indent < 0 { - if flow { - emitter.indent = emitter.best_indent - } else { - emitter.indent = 0 - } - } else if !indentless { - emitter.indent += emitter.best_indent - } - return true -} - -// State dispatcher. 
-func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool { - switch emitter.state { - default: - case yaml_EMIT_STREAM_START_STATE: - return yaml_emitter_emit_stream_start(emitter, event) - - case yaml_EMIT_FIRST_DOCUMENT_START_STATE: - return yaml_emitter_emit_document_start(emitter, event, true) - - case yaml_EMIT_DOCUMENT_START_STATE: - return yaml_emitter_emit_document_start(emitter, event, false) - - case yaml_EMIT_DOCUMENT_CONTENT_STATE: - return yaml_emitter_emit_document_content(emitter, event) - - case yaml_EMIT_DOCUMENT_END_STATE: - return yaml_emitter_emit_document_end(emitter, event) - - case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE: - return yaml_emitter_emit_flow_sequence_item(emitter, event, true) - - case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE: - return yaml_emitter_emit_flow_sequence_item(emitter, event, false) - - case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE: - return yaml_emitter_emit_flow_mapping_key(emitter, event, true) - - case yaml_EMIT_FLOW_MAPPING_KEY_STATE: - return yaml_emitter_emit_flow_mapping_key(emitter, event, false) - - case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE: - return yaml_emitter_emit_flow_mapping_value(emitter, event, true) - - case yaml_EMIT_FLOW_MAPPING_VALUE_STATE: - return yaml_emitter_emit_flow_mapping_value(emitter, event, false) - - case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE: - return yaml_emitter_emit_block_sequence_item(emitter, event, true) - - case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE: - return yaml_emitter_emit_block_sequence_item(emitter, event, false) - - case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE: - return yaml_emitter_emit_block_mapping_key(emitter, event, true) - - case yaml_EMIT_BLOCK_MAPPING_KEY_STATE: - return yaml_emitter_emit_block_mapping_key(emitter, event, false) - - case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE: - return yaml_emitter_emit_block_mapping_value(emitter, event, true) - - case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE: - return yaml_emitter_emit_block_mapping_value(emitter, event, false) - - case yaml_EMIT_END_STATE: - return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END") - } - panic("invalid emitter state") -} - -// Expect STREAM-START. -func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if event.typ != yaml_STREAM_START_EVENT { - return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START") - } - if emitter.encoding == yaml_ANY_ENCODING { - emitter.encoding = event.encoding - if emitter.encoding == yaml_ANY_ENCODING { - emitter.encoding = yaml_UTF8_ENCODING - } - } - if emitter.best_indent < 2 || emitter.best_indent > 9 { - emitter.best_indent = 2 - } - if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 { - emitter.best_width = 80 - } - if emitter.best_width < 0 { - emitter.best_width = 1<<31 - 1 - } - if emitter.line_break == yaml_ANY_BREAK { - emitter.line_break = yaml_LN_BREAK - } - - emitter.indent = -1 - emitter.line = 0 - emitter.column = 0 - emitter.whitespace = true - emitter.indention = true - - if emitter.encoding != yaml_UTF8_ENCODING { - if !yaml_emitter_write_bom(emitter) { - return false - } - } - emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE - return true -} - -// Expect DOCUMENT-START or STREAM-END. 
-func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - - if event.typ == yaml_DOCUMENT_START_EVENT { - - if event.version_directive != nil { - if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) { - return false - } - } - - for i := 0; i < len(event.tag_directives); i++ { - tag_directive := &event.tag_directives[i] - if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) { - return false - } - if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) { - return false - } - } - - for i := 0; i < len(default_tag_directives); i++ { - tag_directive := &default_tag_directives[i] - if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) { - return false - } - } - - implicit := event.implicit - if !first || emitter.canonical { - implicit = false - } - - if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) { - if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - - if event.version_directive != nil { - implicit = false - if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - - if len(event.tag_directives) > 0 { - implicit = false - for i := 0; i < len(event.tag_directives); i++ { - tag_directive := &event.tag_directives[i] - if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) { - return false - } - if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) { - return false - } - if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - } - - if yaml_emitter_check_empty_document(emitter) { - implicit = false - } - if !implicit { - if !yaml_emitter_write_indent(emitter) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) { - return false - } - if emitter.canonical { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - } - - emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE - return true - } - - if event.typ == yaml_STREAM_END_EVENT { - if emitter.open_ended { - if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_flush(emitter) { - return false - } - emitter.state = yaml_EMIT_END_STATE - return true - } - - return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END") -} - -// Expect the root node. -func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { - emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) - return yaml_emitter_emit_node(emitter, event, true, false, false, false) -} - -// Expect DOCUMENT-END. -func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if event.typ != yaml_DOCUMENT_END_EVENT { - return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END") - } - if !yaml_emitter_write_indent(emitter) { - return false - } - if !event.implicit { - // [Go] Allocate the slice elsewhere. 
- if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_flush(emitter) { - return false - } - emitter.state = yaml_EMIT_DOCUMENT_START_STATE - emitter.tag_directives = emitter.tag_directives[:0] - return true -} - -// Expect a flow item node. -func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - if first { - if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) { - return false - } - if !yaml_emitter_increase_indent(emitter, true, false) { - return false - } - emitter.flow_level++ - } - - if event.typ == yaml_SEQUENCE_END_EVENT { - emitter.flow_level-- - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - if emitter.canonical && !first { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) { - return false - } - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - - return true - } - - if !first { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - } - - if emitter.canonical || emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) - return yaml_emitter_emit_node(emitter, event, false, true, false, false) -} - -// Expect a flow key node. -func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - if first { - if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) { - return false - } - if !yaml_emitter_increase_indent(emitter, true, false) { - return false - } - emitter.flow_level++ - } - - if event.typ == yaml_MAPPING_END_EVENT { - emitter.flow_level-- - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - if emitter.canonical && !first { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) { - return false - } - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true - } - - if !first { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - } - if emitter.canonical || emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - - if !emitter.canonical && yaml_emitter_check_simple_key(emitter) { - emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, true) - } - if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) { - return false - } - emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, false) -} - -// Expect a flow value node. 
-func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { - if simple { - if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { - return false - } - } else { - if emitter.canonical || emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) { - return false - } - } - emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, false) -} - -// Expect a block item node. -func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - if first { - if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) { - return false - } - } - if event.typ == yaml_SEQUENCE_END_EVENT { - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true - } - if !yaml_emitter_write_indent(emitter) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) { - return false - } - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE) - return yaml_emitter_emit_node(emitter, event, false, true, false, false) -} - -// Expect a block key node. -func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - if first { - if !yaml_emitter_increase_indent(emitter, false, false) { - return false - } - } - if event.typ == yaml_MAPPING_END_EVENT { - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true - } - if !yaml_emitter_write_indent(emitter) { - return false - } - if yaml_emitter_check_simple_key(emitter) { - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, true) - } - if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) { - return false - } - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, false) -} - -// Expect a block value node. -func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { - if simple { - if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { - return false - } - } else { - if !yaml_emitter_write_indent(emitter) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) { - return false - } - } - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, false) -} - -// Expect a node. 
-func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, - root bool, sequence bool, mapping bool, simple_key bool) bool { - - emitter.root_context = root - emitter.sequence_context = sequence - emitter.mapping_context = mapping - emitter.simple_key_context = simple_key - - switch event.typ { - case yaml_ALIAS_EVENT: - return yaml_emitter_emit_alias(emitter, event) - case yaml_SCALAR_EVENT: - return yaml_emitter_emit_scalar(emitter, event) - case yaml_SEQUENCE_START_EVENT: - return yaml_emitter_emit_sequence_start(emitter, event) - case yaml_MAPPING_START_EVENT: - return yaml_emitter_emit_mapping_start(emitter, event) - default: - return yaml_emitter_set_emitter_error(emitter, - "expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS") - } - return false -} - -// Expect ALIAS. -func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_process_anchor(emitter) { - return false - } - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true -} - -// Expect SCALAR. -func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_select_scalar_style(emitter, event) { - return false - } - if !yaml_emitter_process_anchor(emitter) { - return false - } - if !yaml_emitter_process_tag(emitter) { - return false - } - if !yaml_emitter_increase_indent(emitter, true, false) { - return false - } - if !yaml_emitter_process_scalar(emitter) { - return false - } - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true -} - -// Expect SEQUENCE-START. -func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_process_anchor(emitter) { - return false - } - if !yaml_emitter_process_tag(emitter) { - return false - } - if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || - yaml_emitter_check_empty_sequence(emitter) { - emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE - } else { - emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE - } - return true -} - -// Expect MAPPING-START. -func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_process_anchor(emitter) { - return false - } - if !yaml_emitter_process_tag(emitter) { - return false - } - if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE || - yaml_emitter_check_empty_mapping(emitter) { - emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE - } else { - emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE - } - return true -} - -// Check if the document content is an empty scalar. -func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool { - return false // [Go] Huh? -} - -// Check if the next events represent an empty sequence. -func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool { - if len(emitter.events)-emitter.events_head < 2 { - return false - } - return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT && - emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT -} - -// Check if the next events represent an empty mapping. 
-func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool { - if len(emitter.events)-emitter.events_head < 2 { - return false - } - return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT && - emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT -} - -// Check if the next node can be expressed as a simple key. -func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool { - length := 0 - switch emitter.events[emitter.events_head].typ { - case yaml_ALIAS_EVENT: - length += len(emitter.anchor_data.anchor) - case yaml_SCALAR_EVENT: - if emitter.scalar_data.multiline { - return false - } - length += len(emitter.anchor_data.anchor) + - len(emitter.tag_data.handle) + - len(emitter.tag_data.suffix) + - len(emitter.scalar_data.value) - case yaml_SEQUENCE_START_EVENT: - if !yaml_emitter_check_empty_sequence(emitter) { - return false - } - length += len(emitter.anchor_data.anchor) + - len(emitter.tag_data.handle) + - len(emitter.tag_data.suffix) - case yaml_MAPPING_START_EVENT: - if !yaml_emitter_check_empty_mapping(emitter) { - return false - } - length += len(emitter.anchor_data.anchor) + - len(emitter.tag_data.handle) + - len(emitter.tag_data.suffix) - default: - return false - } - return length <= 128 -} - -// Determine an acceptable scalar style. -func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool { - - no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 - if no_tag && !event.implicit && !event.quoted_implicit { - return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified") - } - - style := event.scalar_style() - if style == yaml_ANY_SCALAR_STYLE { - style = yaml_PLAIN_SCALAR_STYLE - } - if emitter.canonical { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - if emitter.simple_key_context && emitter.scalar_data.multiline { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - - if style == yaml_PLAIN_SCALAR_STYLE { - if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed || - emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed { - style = yaml_SINGLE_QUOTED_SCALAR_STYLE - } - if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) { - style = yaml_SINGLE_QUOTED_SCALAR_STYLE - } - if no_tag && !event.implicit { - style = yaml_SINGLE_QUOTED_SCALAR_STYLE - } - } - if style == yaml_SINGLE_QUOTED_SCALAR_STYLE { - if !emitter.scalar_data.single_quoted_allowed { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - } - if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE { - if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - } - - if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE { - emitter.tag_data.handle = []byte{'!'} - } - emitter.scalar_data.style = style - return true -} - -// Write an achor. -func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool { - if emitter.anchor_data.anchor == nil { - return true - } - c := []byte{'&'} - if emitter.anchor_data.alias { - c[0] = '*' - } - if !yaml_emitter_write_indicator(emitter, c, true, false, false) { - return false - } - return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor) -} - -// Write a tag. 
-func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool { - if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 { - return true - } - if len(emitter.tag_data.handle) > 0 { - if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) { - return false - } - if len(emitter.tag_data.suffix) > 0 { - if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { - return false - } - } - } else { - // [Go] Allocate these slices elsewhere. - if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) { - return false - } - if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) { - return false - } - } - return true -} - -// Write a scalar. -func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool { - switch emitter.scalar_data.style { - case yaml_PLAIN_SCALAR_STYLE: - return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) - - case yaml_SINGLE_QUOTED_SCALAR_STYLE: - return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) - - case yaml_DOUBLE_QUOTED_SCALAR_STYLE: - return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) - - case yaml_LITERAL_SCALAR_STYLE: - return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value) - - case yaml_FOLDED_SCALAR_STYLE: - return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value) - } - panic("unknown scalar style") -} - -// Check if a %YAML directive is valid. -func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool { - if version_directive.major != 1 || version_directive.minor != 1 { - return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive") - } - return true -} - -// Check if a %TAG directive is valid. -func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool { - handle := tag_directive.handle - prefix := tag_directive.prefix - if len(handle) == 0 { - return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty") - } - if handle[0] != '!' { - return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'") - } - if handle[len(handle)-1] != '!' { - return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'") - } - for i := 1; i < len(handle)-1; i += width(handle[i]) { - if !is_alpha(handle, i) { - return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only") - } - } - if len(prefix) == 0 { - return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty") - } - return true -} - -// Check if an anchor is valid. 
-func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool { - if len(anchor) == 0 { - problem := "anchor value must not be empty" - if alias { - problem = "alias value must not be empty" - } - return yaml_emitter_set_emitter_error(emitter, problem) - } - for i := 0; i < len(anchor); i += width(anchor[i]) { - if !is_alpha(anchor, i) { - problem := "anchor value must contain alphanumerical characters only" - if alias { - problem = "alias value must contain alphanumerical characters only" - } - return yaml_emitter_set_emitter_error(emitter, problem) - } - } - emitter.anchor_data.anchor = anchor - emitter.anchor_data.alias = alias - return true -} - -// Check if a tag is valid. -func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool { - if len(tag) == 0 { - return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty") - } - for i := 0; i < len(emitter.tag_directives); i++ { - tag_directive := &emitter.tag_directives[i] - if bytes.HasPrefix(tag, tag_directive.prefix) { - emitter.tag_data.handle = tag_directive.handle - emitter.tag_data.suffix = tag[len(tag_directive.prefix):] - return true - } - } - emitter.tag_data.suffix = tag - return true -} - -// Check if a scalar is valid. -func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { - var ( - block_indicators = false - flow_indicators = false - line_breaks = false - special_characters = false - - leading_space = false - leading_break = false - trailing_space = false - trailing_break = false - break_space = false - space_break = false - - preceeded_by_whitespace = false - followed_by_whitespace = false - previous_space = false - previous_break = false - ) - - emitter.scalar_data.value = value - - if len(value) == 0 { - emitter.scalar_data.multiline = false - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = true - emitter.scalar_data.single_quoted_allowed = true - emitter.scalar_data.block_allowed = false - return true - } - - if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' 
&& value[2] == '.')) { - block_indicators = true - flow_indicators = true - } - - preceeded_by_whitespace = true - for i, w := 0, 0; i < len(value); i += w { - w = width(value[0]) - followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w) - - if i == 0 { - switch value[i] { - case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`': - flow_indicators = true - block_indicators = true - case '?', ':': - flow_indicators = true - if followed_by_whitespace { - block_indicators = true - } - case '-': - if followed_by_whitespace { - flow_indicators = true - block_indicators = true - } - } - } else { - switch value[i] { - case ',', '?', '[', ']', '{', '}': - flow_indicators = true - case ':': - flow_indicators = true - if followed_by_whitespace { - block_indicators = true - } - case '#': - if preceeded_by_whitespace { - flow_indicators = true - block_indicators = true - } - } - } - - if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode { - special_characters = true - } - if is_space(value, i) { - if i == 0 { - leading_space = true - } - if i+width(value[i]) == len(value) { - trailing_space = true - } - if previous_break { - break_space = true - } - previous_space = true - previous_break = false - } else if is_break(value, i) { - line_breaks = true - if i == 0 { - leading_break = true - } - if i+width(value[i]) == len(value) { - trailing_break = true - } - if previous_space { - space_break = true - } - previous_space = false - previous_break = true - } else { - previous_space = false - previous_break = false - } - - // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition. - preceeded_by_whitespace = is_blankz(value, i) - } - - emitter.scalar_data.multiline = line_breaks - emitter.scalar_data.flow_plain_allowed = true - emitter.scalar_data.block_plain_allowed = true - emitter.scalar_data.single_quoted_allowed = true - emitter.scalar_data.block_allowed = true - - if leading_space || leading_break || trailing_space || trailing_break { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - } - if trailing_space { - emitter.scalar_data.block_allowed = false - } - if break_space { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - emitter.scalar_data.single_quoted_allowed = false - } - if space_break || special_characters { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - emitter.scalar_data.single_quoted_allowed = false - emitter.scalar_data.block_allowed = false - } - if line_breaks { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - } - if flow_indicators { - emitter.scalar_data.flow_plain_allowed = false - } - if block_indicators { - emitter.scalar_data.block_plain_allowed = false - } - return true -} - -// Check if the event data is valid. 
-func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { - - emitter.anchor_data.anchor = nil - emitter.tag_data.handle = nil - emitter.tag_data.suffix = nil - emitter.scalar_data.value = nil - - switch event.typ { - case yaml_ALIAS_EVENT: - if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) { - return false - } - - case yaml_SCALAR_EVENT: - if len(event.anchor) > 0 { - if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { - return false - } - } - if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) { - if !yaml_emitter_analyze_tag(emitter, event.tag) { - return false - } - } - if !yaml_emitter_analyze_scalar(emitter, event.value) { - return false - } - - case yaml_SEQUENCE_START_EVENT: - if len(event.anchor) > 0 { - if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { - return false - } - } - if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { - if !yaml_emitter_analyze_tag(emitter, event.tag) { - return false - } - } - - case yaml_MAPPING_START_EVENT: - if len(event.anchor) > 0 { - if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { - return false - } - } - if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { - if !yaml_emitter_analyze_tag(emitter, event.tag) { - return false - } - } - } - return true -} - -// Write the BOM character. -func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool { - if !flush(emitter) { - return false - } - pos := emitter.buffer_pos - emitter.buffer[pos+0] = '\xEF' - emitter.buffer[pos+1] = '\xBB' - emitter.buffer[pos+2] = '\xBF' - emitter.buffer_pos += 3 - return true -} - -func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { - indent := emitter.indent - if indent < 0 { - indent = 0 - } - if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) { - if !put_break(emitter) { - return false - } - } - for emitter.column < indent { - if !put(emitter, ' ') { - return false - } - } - emitter.whitespace = true - emitter.indention = true - return true -} - -func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool { - if need_whitespace && !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - if !write_all(emitter, indicator) { - return false - } - emitter.whitespace = is_whitespace - emitter.indention = (emitter.indention && is_indention) - emitter.open_ended = false - return true -} - -func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool { - if !write_all(emitter, value) { - return false - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool { - if !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - if !write_all(emitter, value) { - return false - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool { - if need_whitespace && !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - for i := 0; i < len(value); { - var must_write bool - switch value[i] { - case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']': - must_write = true - default: - must_write = is_alpha(value, i) - } - if must_write { - if !write(emitter, value, &i) { - return false - } - } 
else { - w := width(value[i]) - for k := 0; k < w; k++ { - octet := value[i] - i++ - if !put(emitter, '%') { - return false - } - - c := octet >> 4 - if c < 10 { - c += '0' - } else { - c += 'A' - 10 - } - if !put(emitter, c) { - return false - } - - c = octet & 0x0f - if c < 10 { - c += '0' - } else { - c += 'A' - 10 - } - if !put(emitter, c) { - return false - } - } - } - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { - if !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - - spaces := false - breaks := false - for i := 0; i < len(value); { - if is_space(value, i) { - if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) { - if !yaml_emitter_write_indent(emitter) { - return false - } - i += width(value[i]) - } else { - if !write(emitter, value, &i) { - return false - } - } - spaces = true - } else if is_break(value, i) { - if !breaks && value[i] == '\n' { - if !put_break(emitter) { - return false - } - } - if !write_break(emitter, value, &i) { - return false - } - emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !write(emitter, value, &i) { - return false - } - emitter.indention = false - spaces = false - breaks = false - } - } - - emitter.whitespace = false - emitter.indention = false - if emitter.root_context { - emitter.open_ended = true - } - - return true -} - -func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { - - if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) { - return false - } - - spaces := false - breaks := false - for i := 0; i < len(value); { - if is_space(value, i) { - if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) { - if !yaml_emitter_write_indent(emitter) { - return false - } - i += width(value[i]) - } else { - if !write(emitter, value, &i) { - return false - } - } - spaces = true - } else if is_break(value, i) { - if !breaks && value[i] == '\n' { - if !put_break(emitter) { - return false - } - } - if !write_break(emitter, value, &i) { - return false - } - emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if value[i] == '\'' { - if !put(emitter, '\'') { - return false - } - } - if !write(emitter, value, &i) { - return false - } - emitter.indention = false - spaces = false - breaks = false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) { - return false - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { - spaces := false - if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) { - return false - } - - for i := 0; i < len(value); { - if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) || - is_bom(value, i) || is_break(value, i) || - value[i] == '"' || value[i] == '\\' { - - octet := value[i] - - var w int - var v rune - switch { - case octet&0x80 == 0x00: - w, v = 1, rune(octet&0x7F) - case octet&0xE0 == 0xC0: - w, v = 2, rune(octet&0x1F) - case octet&0xF0 == 0xE0: - w, v = 3, rune(octet&0x0F) - case octet&0xF8 == 0xF0: - w, v = 4, 
rune(octet&0x07) - } - for k := 1; k < w; k++ { - octet = value[i+k] - v = (v << 6) + (rune(octet) & 0x3F) - } - i += w - - if !put(emitter, '\\') { - return false - } - - var ok bool - switch v { - case 0x00: - ok = put(emitter, '0') - case 0x07: - ok = put(emitter, 'a') - case 0x08: - ok = put(emitter, 'b') - case 0x09: - ok = put(emitter, 't') - case 0x0A: - ok = put(emitter, 'n') - case 0x0b: - ok = put(emitter, 'v') - case 0x0c: - ok = put(emitter, 'f') - case 0x0d: - ok = put(emitter, 'r') - case 0x1b: - ok = put(emitter, 'e') - case 0x22: - ok = put(emitter, '"') - case 0x5c: - ok = put(emitter, '\\') - case 0x85: - ok = put(emitter, 'N') - case 0xA0: - ok = put(emitter, '_') - case 0x2028: - ok = put(emitter, 'L') - case 0x2029: - ok = put(emitter, 'P') - default: - if v <= 0xFF { - ok = put(emitter, 'x') - w = 2 - } else if v <= 0xFFFF { - ok = put(emitter, 'u') - w = 4 - } else { - ok = put(emitter, 'U') - w = 8 - } - for k := (w - 1) * 4; ok && k >= 0; k -= 4 { - digit := byte((v >> uint(k)) & 0x0F) - if digit < 10 { - ok = put(emitter, digit+'0') - } else { - ok = put(emitter, digit+'A'-10) - } - } - } - if !ok { - return false - } - spaces = false - } else if is_space(value, i) { - if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 { - if !yaml_emitter_write_indent(emitter) { - return false - } - if is_space(value, i+1) { - if !put(emitter, '\\') { - return false - } - } - i += width(value[i]) - } else if !write(emitter, value, &i) { - return false - } - spaces = true - } else { - if !write(emitter, value, &i) { - return false - } - spaces = false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) { - return false - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool { - if is_space(value, 0) || is_break(value, 0) { - indent_hint := []byte{'0' + byte(emitter.best_indent)} - if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) { - return false - } - } - - emitter.open_ended = false - - var chomp_hint [1]byte - if len(value) == 0 { - chomp_hint[0] = '-' - } else { - i := len(value) - 1 - for value[i]&0xC0 == 0x80 { - i-- - } - if !is_break(value, i) { - chomp_hint[0] = '-' - } else if i == 0 { - chomp_hint[0] = '+' - emitter.open_ended = true - } else { - i-- - for value[i]&0xC0 == 0x80 { - i-- - } - if is_break(value, i) { - chomp_hint[0] = '+' - emitter.open_ended = true - } - } - } - if chomp_hint[0] != 0 { - if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { - return false - } - } - return true -} - -func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { - if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) { - return false - } - if !yaml_emitter_write_block_scalar_hints(emitter, value) { - return false - } - if !put_break(emitter) { - return false - } - emitter.indention = true - emitter.whitespace = true - breaks := true - for i := 0; i < len(value); { - if is_break(value, i) { - if !write_break(emitter, value, &i) { - return false - } - emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !write(emitter, value, &i) { - return false - } - emitter.indention = false - breaks = false - } - } - - return true -} - -func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { - if 
!yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) { - return false - } - if !yaml_emitter_write_block_scalar_hints(emitter, value) { - return false - } - - if !put_break(emitter) { - return false - } - emitter.indention = true - emitter.whitespace = true - - breaks := true - leading_spaces := true - for i := 0; i < len(value); { - if is_break(value, i) { - if !breaks && !leading_spaces && value[i] == '\n' { - k := 0 - for is_break(value, k) { - k += width(value[k]) - } - if !is_blankz(value, k) { - if !put_break(emitter) { - return false - } - } - } - if !write_break(emitter, value, &i) { - return false - } - emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - leading_spaces = is_blank(value, i) - } - if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - i += width(value[i]) - } else { - if !write(emitter, value, &i) { - return false - } - } - emitter.indention = false - breaks = false - } - } - return true -} diff --git a/vendor/gopkg.in/yaml.v1/encode.go b/vendor/gopkg.in/yaml.v1/encode.go deleted file mode 100644 index 0b9048d..0000000 --- a/vendor/gopkg.in/yaml.v1/encode.go +++ /dev/null @@ -1,265 +0,0 @@ -package yaml - -import ( - "reflect" - "regexp" - "sort" - "strconv" - "strings" - "time" -) - -type encoder struct { - emitter yaml_emitter_t - event yaml_event_t - out []byte - flow bool -} - -func newEncoder() (e *encoder) { - e = &encoder{} - e.must(yaml_emitter_initialize(&e.emitter)) - yaml_emitter_set_output_string(&e.emitter, &e.out) - e.must(yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)) - e.emit() - e.must(yaml_document_start_event_initialize(&e.event, nil, nil, true)) - e.emit() - return e -} - -func (e *encoder) finish() { - e.must(yaml_document_end_event_initialize(&e.event, true)) - e.emit() - e.emitter.open_ended = false - e.must(yaml_stream_end_event_initialize(&e.event)) - e.emit() -} - -func (e *encoder) destroy() { - yaml_emitter_delete(&e.emitter) -} - -func (e *encoder) emit() { - // This will internally delete the e.event value. 
- if !yaml_emitter_emit(&e.emitter, &e.event) && e.event.typ != yaml_DOCUMENT_END_EVENT && e.event.typ != yaml_STREAM_END_EVENT { - e.must(false) - } -} - -func (e *encoder) must(ok bool) { - if !ok { - msg := e.emitter.problem - if msg == "" { - msg = "Unknown problem generating YAML content" - } - fail(msg) - } -} - -func (e *encoder) marshal(tag string, in reflect.Value) { - if !in.IsValid() { - e.nilv() - return - } - var value interface{} - if getter, ok := in.Interface().(Getter); ok { - tag, value = getter.GetYAML() - tag = longTag(tag) - if value == nil { - e.nilv() - return - } - in = reflect.ValueOf(value) - } - switch in.Kind() { - case reflect.Interface: - if in.IsNil() { - e.nilv() - } else { - e.marshal(tag, in.Elem()) - } - case reflect.Map: - e.mapv(tag, in) - case reflect.Ptr: - if in.IsNil() { - e.nilv() - } else { - e.marshal(tag, in.Elem()) - } - case reflect.Struct: - e.structv(tag, in) - case reflect.Slice: - e.slicev(tag, in) - case reflect.String: - e.stringv(tag, in) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - if in.Type() == durationType { - e.stringv(tag, reflect.ValueOf(in.Interface().(time.Duration).String())) - } else { - e.intv(tag, in) - } - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - e.uintv(tag, in) - case reflect.Float32, reflect.Float64: - e.floatv(tag, in) - case reflect.Bool: - e.boolv(tag, in) - default: - panic("Can't marshal type: " + in.Type().String()) - } -} - -func (e *encoder) mapv(tag string, in reflect.Value) { - e.mappingv(tag, func() { - keys := keyList(in.MapKeys()) - sort.Sort(keys) - for _, k := range keys { - e.marshal("", k) - e.marshal("", in.MapIndex(k)) - } - }) -} - -func (e *encoder) structv(tag string, in reflect.Value) { - sinfo, err := getStructInfo(in.Type()) - if err != nil { - panic(err) - } - e.mappingv(tag, func() { - for _, info := range sinfo.FieldsList { - var value reflect.Value - if info.Inline == nil { - value = in.Field(info.Num) - } else { - value = in.FieldByIndex(info.Inline) - } - if info.OmitEmpty && isZero(value) { - continue - } - e.marshal("", reflect.ValueOf(info.Key)) - e.flow = info.Flow - e.marshal("", value) - } - }) -} - -func (e *encoder) mappingv(tag string, f func()) { - implicit := tag == "" - style := yaml_BLOCK_MAPPING_STYLE - if e.flow { - e.flow = false - style = yaml_FLOW_MAPPING_STYLE - } - e.must(yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) - e.emit() - f() - e.must(yaml_mapping_end_event_initialize(&e.event)) - e.emit() -} - -func (e *encoder) slicev(tag string, in reflect.Value) { - implicit := tag == "" - style := yaml_BLOCK_SEQUENCE_STYLE - if e.flow { - e.flow = false - style = yaml_FLOW_SEQUENCE_STYLE - } - e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) - e.emit() - n := in.Len() - for i := 0; i < n; i++ { - e.marshal("", in.Index(i)) - } - e.must(yaml_sequence_end_event_initialize(&e.event)) - e.emit() -} - -// isBase60 returns whether s is in base 60 notation as defined in YAML 1.1. -// -// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported -// in YAML 1.2 and by this package, but these should be marshalled quoted for -// the time being for compatibility with other parsers. -func isBase60Float(s string) (result bool) { - // Fast path. 
- if s == "" { - return false - } - c := s[0] - if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 { - return false - } - // Do the full match. - return base60float.MatchString(s) -} - -// From http://yaml.org/type/float.html, except the regular expression there -// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix. -var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`) - -func (e *encoder) stringv(tag string, in reflect.Value) { - var style yaml_scalar_style_t - s := in.String() - rtag, rs := resolve("", s) - if rtag == yaml_BINARY_TAG { - if tag == "" || tag == yaml_STR_TAG { - tag = rtag - s = rs.(string) - } else if tag == yaml_BINARY_TAG { - fail("explicitly tagged !!binary data must be base64-encoded") - } else { - fail("cannot marshal invalid UTF-8 data as " + shortTag(tag)) - } - } - if tag == "" && (rtag != yaml_STR_TAG || isBase60Float(s)) { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } else if strings.Contains(s, "\n") { - style = yaml_LITERAL_SCALAR_STYLE - } else { - style = yaml_PLAIN_SCALAR_STYLE - } - e.emitScalar(s, "", tag, style) -} - -func (e *encoder) boolv(tag string, in reflect.Value) { - var s string - if in.Bool() { - s = "true" - } else { - s = "false" - } - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) -} - -func (e *encoder) intv(tag string, in reflect.Value) { - s := strconv.FormatInt(in.Int(), 10) - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) -} - -func (e *encoder) uintv(tag string, in reflect.Value) { - s := strconv.FormatUint(in.Uint(), 10) - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) -} - -func (e *encoder) floatv(tag string, in reflect.Value) { - // FIXME: Handle 64 bits here. - s := strconv.FormatFloat(float64(in.Float()), 'g', -1, 32) - switch s { - case "+Inf": - s = ".inf" - case "-Inf": - s = "-.inf" - case "NaN": - s = ".nan" - } - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) -} - -func (e *encoder) nilv() { - e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE) -} - -func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) { - implicit := tag == "" - e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style)) - e.emit() -} diff --git a/vendor/gopkg.in/yaml.v1/parserc.go b/vendor/gopkg.in/yaml.v1/parserc.go deleted file mode 100644 index 0a7037a..0000000 --- a/vendor/gopkg.in/yaml.v1/parserc.go +++ /dev/null @@ -1,1096 +0,0 @@ -package yaml - -import ( - "bytes" -) - -// The parser implements the following grammar: -// -// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END -// implicit_document ::= block_node DOCUMENT-END* -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* -// block_node_or_indentless_sequence ::= -// ALIAS -// | properties (block_content | indentless_block_sequence)? -// | block_content -// | indentless_block_sequence -// block_node ::= ALIAS -// | properties block_content? -// | block_content -// flow_node ::= ALIAS -// | properties flow_content? -// | flow_content -// properties ::= TAG ANCHOR? | ANCHOR TAG? 
-// block_content ::= block_collection | flow_collection | SCALAR -// flow_content ::= flow_collection | SCALAR -// block_collection ::= block_sequence | block_mapping -// flow_collection ::= flow_sequence | flow_mapping -// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END -// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ -// block_mapping ::= BLOCK-MAPPING_START -// ((KEY block_node_or_indentless_sequence?)? -// (VALUE block_node_or_indentless_sequence?)?)* -// BLOCK-END -// flow_sequence ::= FLOW-SEQUENCE-START -// (flow_sequence_entry FLOW-ENTRY)* -// flow_sequence_entry? -// FLOW-SEQUENCE-END -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// flow_mapping ::= FLOW-MAPPING-START -// (flow_mapping_entry FLOW-ENTRY)* -// flow_mapping_entry? -// FLOW-MAPPING-END -// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? - -// Peek the next token in the token queue. -func peek_token(parser *yaml_parser_t) *yaml_token_t { - if parser.token_available || yaml_parser_fetch_more_tokens(parser) { - return &parser.tokens[parser.tokens_head] - } - return nil -} - -// Remove the next token from the queue (must be called after peek_token). -func skip_token(parser *yaml_parser_t) { - parser.token_available = false - parser.tokens_parsed++ - parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN - parser.tokens_head++ -} - -// Get the next event. -func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool { - // Erase the event object. - *event = yaml_event_t{} - - // No events after the end of the stream or error. - if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE { - return true - } - - // Generate the next event. - return yaml_parser_state_machine(parser, event) -} - -// Set parser error. -func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool { - parser.error = yaml_PARSER_ERROR - parser.problem = problem - parser.problem_mark = problem_mark - return false -} - -func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool { - parser.error = yaml_PARSER_ERROR - parser.context = context - parser.context_mark = context_mark - parser.problem = problem - parser.problem_mark = problem_mark - return false -} - -// State dispatcher. 
-func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { - //trace("yaml_parser_state_machine", "state:", parser.state.String()) - - switch parser.state { - case yaml_PARSE_STREAM_START_STATE: - return yaml_parser_parse_stream_start(parser, event) - - case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: - return yaml_parser_parse_document_start(parser, event, true) - - case yaml_PARSE_DOCUMENT_START_STATE: - return yaml_parser_parse_document_start(parser, event, false) - - case yaml_PARSE_DOCUMENT_CONTENT_STATE: - return yaml_parser_parse_document_content(parser, event) - - case yaml_PARSE_DOCUMENT_END_STATE: - return yaml_parser_parse_document_end(parser, event) - - case yaml_PARSE_BLOCK_NODE_STATE: - return yaml_parser_parse_node(parser, event, true, false) - - case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: - return yaml_parser_parse_node(parser, event, true, true) - - case yaml_PARSE_FLOW_NODE_STATE: - return yaml_parser_parse_node(parser, event, false, false) - - case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: - return yaml_parser_parse_block_sequence_entry(parser, event, true) - - case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: - return yaml_parser_parse_block_sequence_entry(parser, event, false) - - case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: - return yaml_parser_parse_indentless_sequence_entry(parser, event) - - case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: - return yaml_parser_parse_block_mapping_key(parser, event, true) - - case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: - return yaml_parser_parse_block_mapping_key(parser, event, false) - - case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: - return yaml_parser_parse_block_mapping_value(parser, event) - - case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: - return yaml_parser_parse_flow_sequence_entry(parser, event, true) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: - return yaml_parser_parse_flow_sequence_entry(parser, event, false) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: - return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: - return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: - return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event) - - case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: - return yaml_parser_parse_flow_mapping_key(parser, event, true) - - case yaml_PARSE_FLOW_MAPPING_KEY_STATE: - return yaml_parser_parse_flow_mapping_key(parser, event, false) - - case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: - return yaml_parser_parse_flow_mapping_value(parser, event, false) - - case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: - return yaml_parser_parse_flow_mapping_value(parser, event, true) - - default: - panic("invalid parser state") - } - return false -} - -// Parse the production: -// stream ::= STREAM-START implicit_document? 
explicit_document* STREAM-END -// ************ -func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_STREAM_START_TOKEN { - return yaml_parser_set_parser_error(parser, "did not find expected ", token.start_mark) - } - parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE - *event = yaml_event_t{ - typ: yaml_STREAM_START_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - encoding: token.encoding, - } - skip_token(parser) - return true -} - -// Parse the productions: -// implicit_document ::= block_node DOCUMENT-END* -// * -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* -// ************************* -func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool { - - token := peek_token(parser) - if token == nil { - return false - } - - // Parse extra document end indicators. - if !implicit { - for token.typ == yaml_DOCUMENT_END_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - } - - if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN && - token.typ != yaml_TAG_DIRECTIVE_TOKEN && - token.typ != yaml_DOCUMENT_START_TOKEN && - token.typ != yaml_STREAM_END_TOKEN { - // Parse an implicit document. - if !yaml_parser_process_directives(parser, nil, nil) { - return false - } - parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) - parser.state = yaml_PARSE_BLOCK_NODE_STATE - - *event = yaml_event_t{ - typ: yaml_DOCUMENT_START_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - - } else if token.typ != yaml_STREAM_END_TOKEN { - // Parse an explicit document. - var version_directive *yaml_version_directive_t - var tag_directives []yaml_tag_directive_t - start_mark := token.start_mark - if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) { - return false - } - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_DOCUMENT_START_TOKEN { - yaml_parser_set_parser_error(parser, - "did not find expected ", token.start_mark) - return false - } - parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) - parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE - end_mark := token.end_mark - - *event = yaml_event_t{ - typ: yaml_DOCUMENT_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - version_directive: version_directive, - tag_directives: tag_directives, - implicit: false, - } - skip_token(parser) - - } else { - // Parse the stream end. - parser.state = yaml_PARSE_END_STATE - *event = yaml_event_t{ - typ: yaml_STREAM_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - skip_token(parser) - } - - return true -} - -// Parse the productions: -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? 
DOCUMENT-END* -// *********** -// -func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || - token.typ == yaml_TAG_DIRECTIVE_TOKEN || - token.typ == yaml_DOCUMENT_START_TOKEN || - token.typ == yaml_DOCUMENT_END_TOKEN || - token.typ == yaml_STREAM_END_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - return yaml_parser_process_empty_scalar(parser, event, - token.start_mark) - } - return yaml_parser_parse_node(parser, event, true, false) -} - -// Parse the productions: -// implicit_document ::= block_node DOCUMENT-END* -// ************* -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* -// -func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - - start_mark := token.start_mark - end_mark := token.start_mark - - implicit := true - if token.typ == yaml_DOCUMENT_END_TOKEN { - end_mark = token.end_mark - skip_token(parser) - implicit = false - } - - parser.tag_directives = parser.tag_directives[:0] - - parser.state = yaml_PARSE_DOCUMENT_START_STATE - *event = yaml_event_t{ - typ: yaml_DOCUMENT_END_EVENT, - start_mark: start_mark, - end_mark: end_mark, - implicit: implicit, - } - return true -} - -// Parse the productions: -// block_node_or_indentless_sequence ::= -// ALIAS -// ***** -// | properties (block_content | indentless_block_sequence)? -// ********** * -// | block_content | indentless_block_sequence -// * -// block_node ::= ALIAS -// ***** -// | properties block_content? -// ********** * -// | block_content -// * -// flow_node ::= ALIAS -// ***** -// | properties flow_content? -// ********** * -// | flow_content -// * -// properties ::= TAG ANCHOR? | ANCHOR TAG? 
-// ************************* -// block_content ::= block_collection | flow_collection | SCALAR -// ****** -// flow_content ::= flow_collection | SCALAR -// ****** -func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { - //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() - - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ == yaml_ALIAS_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - *event = yaml_event_t{ - typ: yaml_ALIAS_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - anchor: token.value, - } - skip_token(parser) - return true - } - - start_mark := token.start_mark - end_mark := token.start_mark - - var tag_token bool - var tag_handle, tag_suffix, anchor []byte - var tag_mark yaml_mark_t - if token.typ == yaml_ANCHOR_TOKEN { - anchor = token.value - start_mark = token.start_mark - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_TAG_TOKEN { - tag_token = true - tag_handle = token.value - tag_suffix = token.suffix - tag_mark = token.start_mark - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - } else if token.typ == yaml_TAG_TOKEN { - tag_token = true - tag_handle = token.value - tag_suffix = token.suffix - start_mark = token.start_mark - tag_mark = token.start_mark - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_ANCHOR_TOKEN { - anchor = token.value - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - } - - var tag []byte - if tag_token { - if len(tag_handle) == 0 { - tag = tag_suffix - tag_suffix = nil - } else { - for i := range parser.tag_directives { - if bytes.Equal(parser.tag_directives[i].handle, tag_handle) { - tag = append([]byte(nil), parser.tag_directives[i].prefix...) - tag = append(tag, tag_suffix...) 
- break - } - } - if len(tag) == 0 { - yaml_parser_set_parser_error_context(parser, - "while parsing a node", start_mark, - "found undefined tag handle", tag_mark) - return false - } - } - } - - implicit := len(tag) == 0 - if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE - *event = yaml_event_t{ - typ: yaml_SEQUENCE_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), - } - return true - } - if token.typ == yaml_SCALAR_TOKEN { - var plain_implicit, quoted_implicit bool - end_mark = token.end_mark - if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') { - plain_implicit = true - } else if len(tag) == 0 { - quoted_implicit = true - } - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - - *event = yaml_event_t{ - typ: yaml_SCALAR_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - value: token.value, - implicit: plain_implicit, - quoted_implicit: quoted_implicit, - style: yaml_style_t(token.style), - } - skip_token(parser) - return true - } - if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN { - // [Go] Some of the events below can be merged as they differ only on style. - end_mark = token.end_mark - parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE - *event = yaml_event_t{ - typ: yaml_SEQUENCE_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), - } - return true - } - if token.typ == yaml_FLOW_MAPPING_START_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE - *event = yaml_event_t{ - typ: yaml_MAPPING_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), - } - return true - } - if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE - *event = yaml_event_t{ - typ: yaml_SEQUENCE_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), - } - return true - } - if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE - *event = yaml_event_t{ - typ: yaml_MAPPING_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), - } - return true - } - if len(anchor) > 0 || len(tag) > 0 { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - - *event = yaml_event_t{ - typ: yaml_SCALAR_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - quoted_implicit: false, - style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), - } - return true - } - - context := "while parsing a flow node" - if block { - context = "while parsing a block node" - } - yaml_parser_set_parser_error_context(parser, context, start_mark, - "did not find expected node content", token.start_mark) - return false -} - -// Parse the productions: -// 
block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END -// ******************** *********** * ********* -// -func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ == yaml_BLOCK_ENTRY_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) - return yaml_parser_parse_node(parser, event, true, false) - } else { - parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - } - if token.typ == yaml_BLOCK_END_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - - *event = yaml_event_t{ - typ: yaml_SEQUENCE_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - - skip_token(parser) - return true - } - - context_mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yaml_parser_set_parser_error_context(parser, - "while parsing a block collection", context_mark, - "did not find expected '-' indicator", token.start_mark) -} - -// Parse the productions: -// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ -// *********** * -func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ == yaml_BLOCK_ENTRY_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_BLOCK_ENTRY_TOKEN && - token.typ != yaml_KEY_TOKEN && - token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) - return yaml_parser_parse_node(parser, event, true, false) - } - parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - - *event = yaml_event_t{ - typ: yaml_SEQUENCE_END_EVENT, - start_mark: token.start_mark, - end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark? - } - return true -} - -// Parse the productions: -// block_mapping ::= BLOCK-MAPPING_START -// ******************* -// ((KEY block_node_or_indentless_sequence?)? 
-// *** * -// (VALUE block_node_or_indentless_sequence?)?)* -// -// BLOCK-END -// ********* -// -func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ == yaml_KEY_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_KEY_TOKEN && - token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) - return yaml_parser_parse_node(parser, event, true, true) - } else { - parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - } else if token.typ == yaml_BLOCK_END_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - *event = yaml_event_t{ - typ: yaml_MAPPING_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - skip_token(parser) - return true - } - - context_mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yaml_parser_set_parser_error_context(parser, - "while parsing a block mapping", context_mark, - "did not find expected key", token.start_mark) -} - -// Parse the productions: -// block_mapping ::= BLOCK-MAPPING_START -// -// ((KEY block_node_or_indentless_sequence?)? -// -// (VALUE block_node_or_indentless_sequence?)?)* -// ***** * -// BLOCK-END -// -// -func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_VALUE_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_KEY_TOKEN && - token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) - return yaml_parser_parse_node(parser, event, true, true) - } - parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) -} - -// Parse the productions: -// flow_sequence ::= FLOW-SEQUENCE-START -// ******************* -// (flow_sequence_entry FLOW-ENTRY)* -// * ********** -// flow_sequence_entry? -// * -// FLOW-SEQUENCE-END -// ***************** -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
-// * -// -func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - token := peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { - if !first { - if token.typ == yaml_FLOW_ENTRY_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } else { - context_mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yaml_parser_set_parser_error_context(parser, - "while parsing a flow sequence", context_mark, - "did not find expected ',' or ']'", token.start_mark) - } - } - - if token.typ == yaml_KEY_TOKEN { - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE - *event = yaml_event_t{ - typ: yaml_MAPPING_START_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - implicit: true, - style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), - } - skip_token(parser) - return true - } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - - *event = yaml_event_t{ - typ: yaml_SEQUENCE_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - - skip_token(parser) - return true -} - -// -// Parse the productions: -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// *** * -// -func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_FLOW_ENTRY_TOKEN && - token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - mark := token.end_mark - skip_token(parser) - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) -} - -// Parse the productions: -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// ***** * -// -func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_VALUE_TOKEN { - skip_token(parser) - token := peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) -} - -// Parse the productions: -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
-// * -// -func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE - *event = yaml_event_t{ - typ: yaml_MAPPING_END_EVENT, - start_mark: token.start_mark, - end_mark: token.start_mark, // [Go] Shouldn't this be end_mark? - } - return true -} - -// Parse the productions: -// flow_mapping ::= FLOW-MAPPING-START -// ****************** -// (flow_mapping_entry FLOW-ENTRY)* -// * ********** -// flow_mapping_entry? -// ****************** -// FLOW-MAPPING-END -// **************** -// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// * *** * -// -func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ != yaml_FLOW_MAPPING_END_TOKEN { - if !first { - if token.typ == yaml_FLOW_ENTRY_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } else { - context_mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yaml_parser_set_parser_error_context(parser, - "while parsing a flow mapping", context_mark, - "did not find expected ',' or '}'", token.start_mark) - } - } - - if token.typ == yaml_KEY_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_FLOW_ENTRY_TOKEN && - token.typ != yaml_FLOW_MAPPING_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } else { - parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) - } - } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - *event = yaml_event_t{ - typ: yaml_MAPPING_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - skip_token(parser) - return true -} - -// Parse the productions: -// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// * ***** * -// -func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { - token := peek_token(parser) - if token == nil { - return false - } - if empty { - parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) - } - if token.typ == yaml_VALUE_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) -} - -// Generate an empty scalar event. 
-func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool { - *event = yaml_event_t{ - typ: yaml_SCALAR_EVENT, - start_mark: mark, - end_mark: mark, - value: nil, // Empty - implicit: true, - style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), - } - return true -} - -var default_tag_directives = []yaml_tag_directive_t{ - {[]byte("!"), []byte("!")}, - {[]byte("!!"), []byte("tag:yaml.org,2002:")}, -} - -// Parse directives. -func yaml_parser_process_directives(parser *yaml_parser_t, - version_directive_ref **yaml_version_directive_t, - tag_directives_ref *[]yaml_tag_directive_t) bool { - - var version_directive *yaml_version_directive_t - var tag_directives []yaml_tag_directive_t - - token := peek_token(parser) - if token == nil { - return false - } - - for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN { - if token.typ == yaml_VERSION_DIRECTIVE_TOKEN { - if version_directive != nil { - yaml_parser_set_parser_error(parser, - "found duplicate %YAML directive", token.start_mark) - return false - } - if token.major != 1 || token.minor != 1 { - yaml_parser_set_parser_error(parser, - "found incompatible YAML document", token.start_mark) - return false - } - version_directive = &yaml_version_directive_t{ - major: token.major, - minor: token.minor, - } - } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN { - value := yaml_tag_directive_t{ - handle: token.value, - prefix: token.prefix, - } - if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) { - return false - } - tag_directives = append(tag_directives, value) - } - - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - - for i := range default_tag_directives { - if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) { - return false - } - } - - if version_directive_ref != nil { - *version_directive_ref = version_directive - } - if tag_directives_ref != nil { - *tag_directives_ref = tag_directives - } - return true -} - -// Append a tag directive to the directives stack. -func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool { - for i := range parser.tag_directives { - if bytes.Equal(value.handle, parser.tag_directives[i].handle) { - if allow_duplicates { - return true - } - return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark) - } - } - - // [Go] I suspect the copy is unnecessary. This was likely done - // because there was no way to track ownership of the data. - value_copy := yaml_tag_directive_t{ - handle: make([]byte, len(value.handle)), - prefix: make([]byte, len(value.prefix)), - } - copy(value_copy.handle, value.handle) - copy(value_copy.prefix, value.prefix) - parser.tag_directives = append(parser.tag_directives, value_copy) - return true -} diff --git a/vendor/gopkg.in/yaml.v1/readerc.go b/vendor/gopkg.in/yaml.v1/readerc.go deleted file mode 100644 index d5fb097..0000000 --- a/vendor/gopkg.in/yaml.v1/readerc.go +++ /dev/null @@ -1,391 +0,0 @@ -package yaml - -import ( - "io" -) - -// Set the reader error and return 0. -func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool { - parser.error = yaml_READER_ERROR - parser.problem = problem - parser.problem_offset = offset - parser.problem_value = value - return false -} - -// Byte order marks. 
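
// The duplicate check in yaml_parser_append_tag_directive above is the whole
// mechanism that makes declaring the same %TAG handle twice in one document
// illegal, while the built-in defaults may be re-added freely. A standalone
// sketch of that rule (names here are illustrative, not the library's API):

package main

import (
	"bytes"
	"fmt"
)

type tagDirective struct {
	handle, prefix []byte
}

func appendTagDirective(dirs []tagDirective, d tagDirective, allowDuplicates bool) ([]tagDirective, error) {
	for _, existing := range dirs {
		if bytes.Equal(existing.handle, d.handle) {
			if allowDuplicates {
				return dirs, nil // default directives may repeat silently
			}
			return dirs, fmt.Errorf("found duplicate %%TAG directive for %q", d.handle)
		}
	}
	return append(dirs, d), nil
}

func main() {
	dirs, _ := appendTagDirective(nil, tagDirective{[]byte("!!"), []byte("tag:yaml.org,2002:")}, false)
	_, err := appendTagDirective(dirs, tagDirective{[]byte("!!"), []byte("!other!")}, false)
	fmt.Println(err) // found duplicate %TAG directive for "!!"
}
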
-const ( - bom_UTF8 = "\xef\xbb\xbf" - bom_UTF16LE = "\xff\xfe" - bom_UTF16BE = "\xfe\xff" -) - -// Determine the input stream encoding by checking the BOM symbol. If no BOM is -// found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure. -func yaml_parser_determine_encoding(parser *yaml_parser_t) bool { - // Ensure that we had enough bytes in the raw buffer. - for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 { - if !yaml_parser_update_raw_buffer(parser) { - return false - } - } - - // Determine the encoding. - buf := parser.raw_buffer - pos := parser.raw_buffer_pos - avail := len(buf) - pos - if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] { - parser.encoding = yaml_UTF16LE_ENCODING - parser.raw_buffer_pos += 2 - parser.offset += 2 - } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] { - parser.encoding = yaml_UTF16BE_ENCODING - parser.raw_buffer_pos += 2 - parser.offset += 2 - } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] { - parser.encoding = yaml_UTF8_ENCODING - parser.raw_buffer_pos += 3 - parser.offset += 3 - } else { - parser.encoding = yaml_UTF8_ENCODING - } - return true -} - -// Update the raw buffer. -func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool { - size_read := 0 - - // Return if the raw buffer is full. - if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) { - return true - } - - // Return on EOF. - if parser.eof { - return true - } - - // Move the remaining bytes in the raw buffer to the beginning. - if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) { - copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:]) - } - parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos] - parser.raw_buffer_pos = 0 - - // Call the read handler to fill the buffer. - size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)]) - parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read] - if err == io.EOF { - parser.eof = true - } else if err != nil { - return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1) - } - return true -} - -// Ensure that the buffer contains at least `length` characters. -// Return true on success, false on failure. -// -// The length is supposed to be significantly less that the buffer size. -func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { - if parser.read_handler == nil { - panic("read handler must be set") - } - - // If the EOF flag is set and the raw buffer is empty, do nothing. - if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) { - return true - } - - // Return if the buffer contains enough characters. - if parser.unread >= length { - return true - } - - // Determine the input encoding if it is not known yet. - if parser.encoding == yaml_ANY_ENCODING { - if !yaml_parser_determine_encoding(parser) { - return false - } - } - - // Move the unread characters to the beginning of the buffer. - buffer_len := len(parser.buffer) - if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len { - copy(parser.buffer, parser.buffer[parser.buffer_pos:]) - buffer_len -= parser.buffer_pos - parser.buffer_pos = 0 - } else if parser.buffer_pos == buffer_len { - buffer_len = 0 - parser.buffer_pos = 0 - } - - // Open the whole buffer for writing, and cut it before returning. 
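
// The BOM sniffing in yaml_parser_determine_encoding above reduces to a
// prefix check over at most three bytes, defaulting to UTF-8 when no BOM is
// present. A standalone sketch:

package main

import (
	"bytes"
	"fmt"
)

func detectEncoding(input []byte) (encoding string, bomLen int) {
	switch {
	case bytes.HasPrefix(input, []byte("\xff\xfe")):
		return "UTF-16LE", 2
	case bytes.HasPrefix(input, []byte("\xfe\xff")):
		return "UTF-16BE", 2
	case bytes.HasPrefix(input, []byte("\xef\xbb\xbf")):
		return "UTF-8", 3
	default:
		return "UTF-8", 0 // no BOM: assume UTF-8, consume nothing
	}
}

func main() {
	enc, n := detectEncoding([]byte("\xef\xbb\xbfkey: value"))
	fmt.Println(enc, n) // UTF-8 3
}
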
- parser.buffer = parser.buffer[:cap(parser.buffer)] - - // Fill the buffer until it has enough characters. - first := true - for parser.unread < length { - - // Fill the raw buffer if necessary. - if !first || parser.raw_buffer_pos == len(parser.raw_buffer) { - if !yaml_parser_update_raw_buffer(parser) { - parser.buffer = parser.buffer[:buffer_len] - return false - } - } - first = false - - // Decode the raw buffer. - inner: - for parser.raw_buffer_pos != len(parser.raw_buffer) { - var value rune - var width int - - raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos - - // Decode the next character. - switch parser.encoding { - case yaml_UTF8_ENCODING: - // Decode a UTF-8 character. Check RFC 3629 - // (http://www.ietf.org/rfc/rfc3629.txt) for more details. - // - // The following table (taken from the RFC) is used for - // decoding. - // - // Char. number range | UTF-8 octet sequence - // (hexadecimal) | (binary) - // --------------------+------------------------------------ - // 0000 0000-0000 007F | 0xxxxxxx - // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx - // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx - // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx - // - // Additionally, the characters in the range 0xD800-0xDFFF - // are prohibited as they are reserved for use with UTF-16 - // surrogate pairs. - - // Determine the length of the UTF-8 sequence. - octet := parser.raw_buffer[parser.raw_buffer_pos] - switch { - case octet&0x80 == 0x00: - width = 1 - case octet&0xE0 == 0xC0: - width = 2 - case octet&0xF0 == 0xE0: - width = 3 - case octet&0xF8 == 0xF0: - width = 4 - default: - // The leading octet is invalid. - return yaml_parser_set_reader_error(parser, - "invalid leading UTF-8 octet", - parser.offset, int(octet)) - } - - // Check if the raw buffer contains an incomplete character. - if width > raw_unread { - if parser.eof { - return yaml_parser_set_reader_error(parser, - "incomplete UTF-8 octet sequence", - parser.offset, -1) - } - break inner - } - - // Decode the leading octet. - switch { - case octet&0x80 == 0x00: - value = rune(octet & 0x7F) - case octet&0xE0 == 0xC0: - value = rune(octet & 0x1F) - case octet&0xF0 == 0xE0: - value = rune(octet & 0x0F) - case octet&0xF8 == 0xF0: - value = rune(octet & 0x07) - default: - value = 0 - } - - // Check and decode the trailing octets. - for k := 1; k < width; k++ { - octet = parser.raw_buffer[parser.raw_buffer_pos+k] - - // Check if the octet is valid. - if (octet & 0xC0) != 0x80 { - return yaml_parser_set_reader_error(parser, - "invalid trailing UTF-8 octet", - parser.offset+k, int(octet)) - } - - // Decode the octet. - value = (value << 6) + rune(octet&0x3F) - } - - // Check the length of the sequence against the value. - switch { - case width == 1: - case width == 2 && value >= 0x80: - case width == 3 && value >= 0x800: - case width == 4 && value >= 0x10000: - default: - return yaml_parser_set_reader_error(parser, - "invalid length of a UTF-8 sequence", - parser.offset, -1) - } - - // Check the range of the value. - if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { - return yaml_parser_set_reader_error(parser, - "invalid Unicode character", - parser.offset, int(value)) - } - - case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING: - var low, high int - if parser.encoding == yaml_UTF16LE_ENCODING { - low, high = 0, 1 - } else { - high, low = 1, 0 - } - - // The UTF-16 encoding is not as simple as one might - // naively think. Check RFC 2781 - // (http://www.ietf.org/rfc/rfc2781.txt). 
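
// The UTF-8 arm just above derives the sequence width from the leading
// octet, then rejects bad trailing octets, overlong encodings, and values in
// the surrogate range or beyond U+10FFFF. The same checks, condensed into a
// standalone sketch:

package main

import "fmt"

func decodeUTF8(b []byte) (rune, int, error) {
	if len(b) == 0 {
		return 0, 0, fmt.Errorf("empty input")
	}
	octet := b[0]
	var width int
	var value rune
	switch {
	case octet&0x80 == 0x00:
		width, value = 1, rune(octet&0x7F)
	case octet&0xE0 == 0xC0:
		width, value = 2, rune(octet&0x1F)
	case octet&0xF0 == 0xE0:
		width, value = 3, rune(octet&0x0F)
	case octet&0xF8 == 0xF0:
		width, value = 4, rune(octet&0x07)
	default:
		return 0, 0, fmt.Errorf("invalid leading UTF-8 octet 0x%02x", octet)
	}
	if len(b) < width {
		return 0, 0, fmt.Errorf("incomplete UTF-8 octet sequence")
	}
	for k := 1; k < width; k++ {
		if b[k]&0xC0 != 0x80 {
			return 0, 0, fmt.Errorf("invalid trailing UTF-8 octet 0x%02x", b[k])
		}
		value = value<<6 + rune(b[k]&0x3F)
	}
	// Minimum value per width catches overlong encodings.
	min := [5]rune{0, 0, 0x80, 0x800, 0x10000}
	if value < min[width] || (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF {
		return 0, 0, fmt.Errorf("invalid Unicode character U+%04X", value)
	}
	return value, width, nil
}

func main() {
	r, w, err := decodeUTF8([]byte("é")) // 0xC3 0xA9
	fmt.Println(r, w, err)               // 233 2 <nil>
}
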
- // - // Normally, two subsequent bytes describe a Unicode - // character. However a special technique (called a - // surrogate pair) is used for specifying character - // values larger than 0xFFFF. - // - // A surrogate pair consists of two pseudo-characters: - // high surrogate area (0xD800-0xDBFF) - // low surrogate area (0xDC00-0xDFFF) - // - // The following formulas are used for decoding - // and encoding characters using surrogate pairs: - // - // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) - // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) - // W1 = 110110yyyyyyyyyy - // W2 = 110111xxxxxxxxxx - // - // where U is the character value, W1 is the high surrogate - // area, W2 is the low surrogate area. - - // Check for incomplete UTF-16 character. - if raw_unread < 2 { - if parser.eof { - return yaml_parser_set_reader_error(parser, - "incomplete UTF-16 character", - parser.offset, -1) - } - break inner - } - - // Get the character. - value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + - (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) - - // Check for unexpected low surrogate area. - if value&0xFC00 == 0xDC00 { - return yaml_parser_set_reader_error(parser, - "unexpected low surrogate area", - parser.offset, int(value)) - } - - // Check for a high surrogate area. - if value&0xFC00 == 0xD800 { - width = 4 - - // Check for incomplete surrogate pair. - if raw_unread < 4 { - if parser.eof { - return yaml_parser_set_reader_error(parser, - "incomplete UTF-16 surrogate pair", - parser.offset, -1) - } - break inner - } - - // Get the next character. - value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + - (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) - - // Check for a low surrogate area. - if value2&0xFC00 != 0xDC00 { - return yaml_parser_set_reader_error(parser, - "expected low surrogate area", - parser.offset+2, int(value2)) - } - - // Generate the value of the surrogate pair. - value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) - } else { - width = 2 - } - - default: - panic("impossible") - } - - // Check if the character is in the allowed range: - // #x9 | #xA | #xD | [#x20-#x7E] (8 bit) - // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) - // | [#x10000-#x10FFFF] (32 bit) - switch { - case value == 0x09: - case value == 0x0A: - case value == 0x0D: - case value >= 0x20 && value <= 0x7E: - case value == 0x85: - case value >= 0xA0 && value <= 0xD7FF: - case value >= 0xE000 && value <= 0xFFFD: - case value >= 0x10000 && value <= 0x10FFFF: - default: - return yaml_parser_set_reader_error(parser, - "control characters are not allowed", - parser.offset, int(value)) - } - - // Move the raw pointers. - parser.raw_buffer_pos += width - parser.offset += width - - // Finally put the character into the buffer. - if value <= 0x7F { - // 0000 0000-0000 007F . 0xxxxxxx - parser.buffer[buffer_len+0] = byte(value) - } else if value <= 0x7FF { - // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx - parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6)) - parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F)) - } else if value <= 0xFFFF { - // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx - parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12)) - parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F)) - parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F)) - } else { - // 0001 0000-0010 FFFF . 
11110xxx 10xxxxxx 10xxxxxx 10xxxxxx - parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18)) - parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F)) - parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F)) - parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F)) - } - buffer_len += width - - parser.unread++ - } - - // On EOF, put NUL into the buffer and return. - if parser.eof { - parser.buffer[buffer_len] = 0 - buffer_len++ - parser.unread++ - break - } - } - parser.buffer = parser.buffer[:buffer_len] - return true -} diff --git a/vendor/gopkg.in/yaml.v1/resolve.go b/vendor/gopkg.in/yaml.v1/resolve.go deleted file mode 100644 index 06c698a..0000000 --- a/vendor/gopkg.in/yaml.v1/resolve.go +++ /dev/null @@ -1,190 +0,0 @@ -package yaml - -import ( - "encoding/base64" - "fmt" - "math" - "strconv" - "strings" - "unicode/utf8" -) - -// TODO: merge, timestamps, base 60 floats, omap. - -type resolveMapItem struct { - value interface{} - tag string -} - -var resolveTable = make([]byte, 256) -var resolveMap = make(map[string]resolveMapItem) - -func init() { - t := resolveTable - t[int('+')] = 'S' // Sign - t[int('-')] = 'S' - for _, c := range "0123456789" { - t[int(c)] = 'D' // Digit - } - for _, c := range "yYnNtTfFoO~" { - t[int(c)] = 'M' // In map - } - t[int('.')] = '.' // Float (potentially in map) - - var resolveMapList = []struct { - v interface{} - tag string - l []string - }{ - {true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}}, - {true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}}, - {true, yaml_BOOL_TAG, []string{"on", "On", "ON"}}, - {false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}}, - {false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}}, - {false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}}, - {nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}}, - {math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}}, - {math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}}, - {math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}}, - {math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}}, - {"<<", yaml_MERGE_TAG, []string{"<<"}}, - } - - m := resolveMap - for _, item := range resolveMapList { - for _, s := range item.l { - m[s] = resolveMapItem{item.v, item.tag} - } - } -} - -const longTagPrefix = "tag:yaml.org,2002:" - -func shortTag(tag string) string { - // TODO This can easily be made faster and produce less garbage. - if strings.HasPrefix(tag, longTagPrefix) { - return "!!" + tag[len(longTagPrefix):] - } - return tag -} - -func longTag(tag string) string { - if strings.HasPrefix(tag, "!!") { - return longTagPrefix + tag[2:] - } - return tag -} - -func resolvableTag(tag string) bool { - switch tag { - case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG: - return true - } - return false -} - -func resolve(tag string, in string) (rtag string, out interface{}) { - if !resolvableTag(tag) { - return tag, in - } - - defer func() { - switch tag { - case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG: - return - } - fail(fmt.Sprintf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag))) - }() - - // Any data is accepted as a !!str or !!binary. - // Otherwise, the prefix is enough of a hint about what it might be. - hint := byte('N') - if in != "" { - hint = resolveTable[in[0]] - } - if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG { - // Handle things we can lookup in a map. 
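
// The table-driven fast path above: the first byte of a plain scalar selects
// a hint ('M' for map candidates, 'D'/'S' for numbers, '.' for floats), and
// exact matches are then settled by one map lookup. A condensed sketch of
// that lookup stage over an illustrative subset of the table (not the
// library's API):

package main

import "fmt"

var plainResolve = map[string]interface{}{
	"": nil, "~": nil, "null": nil,
	"true": true, "yes": true, "on": true,
	"false": false, "no": false, "off": false,
}

func resolvePlain(s string) (interface{}, bool) {
	v, ok := plainResolve[s]
	return v, ok
}

func main() {
	for _, s := range []string{"yes", "~", "maybe"} {
		v, ok := resolvePlain(s)
		fmt.Println(s, "=>", v, ok)
	}
	// yes => true true; ~ => <nil> true; maybe => <nil> false (stays a string)
}
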
- if item, ok := resolveMap[in]; ok { - return item.tag, item.value - } - - // Base 60 floats are a bad idea, were dropped in YAML 1.2, and - // are purposefully unsupported here. They're still quoted on - // the way out for compatibility with other parser, though. - - switch hint { - case 'M': - // We've already checked the map above. - - case '.': - // Not in the map, so maybe a normal float. - floatv, err := strconv.ParseFloat(in, 64) - if err == nil { - return yaml_FLOAT_TAG, floatv - } - - case 'D', 'S': - // Int, float, or timestamp. - plain := strings.Replace(in, "_", "", -1) - intv, err := strconv.ParseInt(plain, 0, 64) - if err == nil { - if intv == int64(int(intv)) { - return yaml_INT_TAG, int(intv) - } else { - return yaml_INT_TAG, intv - } - } - floatv, err := strconv.ParseFloat(plain, 64) - if err == nil { - return yaml_FLOAT_TAG, floatv - } - if strings.HasPrefix(plain, "0b") { - intv, err := strconv.ParseInt(plain[2:], 2, 64) - if err == nil { - return yaml_INT_TAG, int(intv) - } - } else if strings.HasPrefix(plain, "-0b") { - intv, err := strconv.ParseInt(plain[3:], 2, 64) - if err == nil { - return yaml_INT_TAG, -int(intv) - } - } - // XXX Handle timestamps here. - - default: - panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")") - } - } - if tag == yaml_BINARY_TAG { - return yaml_BINARY_TAG, in - } - if utf8.ValidString(in) { - return yaml_STR_TAG, in - } - return yaml_BINARY_TAG, encodeBase64(in) -} - -// encodeBase64 encodes s as base64 that is broken up into multiple lines -// as appropriate for the resulting length. -func encodeBase64(s string) string { - const lineLen = 70 - encLen := base64.StdEncoding.EncodedLen(len(s)) - lines := encLen/lineLen + 1 - buf := make([]byte, encLen*2+lines) - in := buf[0:encLen] - out := buf[encLen:] - base64.StdEncoding.Encode(in, []byte(s)) - k := 0 - for i := 0; i < len(in); i += lineLen { - j := i + lineLen - if j > len(in) { - j = len(in) - } - k += copy(out[k:], in[i:j]) - if lines > 1 { - out[k] = '\n' - k++ - } - } - return string(out[:k]) -} diff --git a/vendor/gopkg.in/yaml.v1/scannerc.go b/vendor/gopkg.in/yaml.v1/scannerc.go deleted file mode 100644 index fe93b19..0000000 --- a/vendor/gopkg.in/yaml.v1/scannerc.go +++ /dev/null @@ -1,2710 +0,0 @@ -package yaml - -import ( - "bytes" - "fmt" -) - -// Introduction -// ************ -// -// The following notes assume that you are familiar with the YAML specification -// (http://yaml.org/spec/cvs/current.html). We mostly follow it, although in -// some cases we are less restrictive that it requires. -// -// The process of transforming a YAML stream into a sequence of events is -// divided on two steps: Scanning and Parsing. -// -// The Scanner transforms the input stream into a sequence of tokens, while the -// parser transform the sequence of tokens produced by the Scanner into a -// sequence of parsing events. -// -// The Scanner is rather clever and complicated. The Parser, on the contrary, -// is a straightforward implementation of a recursive-descendant parser (or, -// LL(1) parser, as it is usually called). -// -// Actually there are two issues of Scanning that might be called "clever", the -// rest is quite straightforward. The issues are "block collection start" and -// "simple keys". Both issues are explained below in details. -// -// Here the Scanning step is explained and implemented. We start with the list -// of all the tokens produced by the Scanner together with short descriptions. 
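
// encodeBase64 above (at the end of resolve.go) wraps its output at 70
// columns so long binary scalars stay line-friendly. The same effect with
// the standard library, as a sketch:

package main

import (
	"encoding/base64"
	"fmt"
	"strings"
)

func wrapBase64(s string, lineLen int) string {
	enc := base64.StdEncoding.EncodeToString([]byte(s))
	var b strings.Builder
	for i := 0; i < len(enc); i += lineLen {
		j := i + lineLen
		if j > len(enc) {
			j = len(enc)
		}
		b.WriteString(enc[i:j])
		b.WriteByte('\n')
	}
	return b.String()
}

func main() {
	// 80 invalid-UTF-8 bytes encode to 108 base64 characters: two lines.
	fmt.Print(wrapBase64(strings.Repeat("\xff", 80), 70))
}
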
-// -// Now, tokens: -// -// STREAM-START(encoding) # The stream start. -// STREAM-END # The stream end. -// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive. -// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive. -// DOCUMENT-START # '---' -// DOCUMENT-END # '...' -// BLOCK-SEQUENCE-START # Indentation increase denoting a block -// BLOCK-MAPPING-START # sequence or a block mapping. -// BLOCK-END # Indentation decrease. -// FLOW-SEQUENCE-START # '[' -// FLOW-SEQUENCE-END # ']' -// BLOCK-SEQUENCE-START # '{' -// BLOCK-SEQUENCE-END # '}' -// BLOCK-ENTRY # '-' -// FLOW-ENTRY # ',' -// KEY # '?' or nothing (simple keys). -// VALUE # ':' -// ALIAS(anchor) # '*anchor' -// ANCHOR(anchor) # '&anchor' -// TAG(handle,suffix) # '!handle!suffix' -// SCALAR(value,style) # A scalar. -// -// The following two tokens are "virtual" tokens denoting the beginning and the -// end of the stream: -// -// STREAM-START(encoding) -// STREAM-END -// -// We pass the information about the input stream encoding with the -// STREAM-START token. -// -// The next two tokens are responsible for tags: -// -// VERSION-DIRECTIVE(major,minor) -// TAG-DIRECTIVE(handle,prefix) -// -// Example: -// -// %YAML 1.1 -// %TAG ! !foo -// %TAG !yaml! tag:yaml.org,2002: -// --- -// -// The correspoding sequence of tokens: -// -// STREAM-START(utf-8) -// VERSION-DIRECTIVE(1,1) -// TAG-DIRECTIVE("!","!foo") -// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:") -// DOCUMENT-START -// STREAM-END -// -// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole -// line. -// -// The document start and end indicators are represented by: -// -// DOCUMENT-START -// DOCUMENT-END -// -// Note that if a YAML stream contains an implicit document (without '---' -// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be -// produced. -// -// In the following examples, we present whole documents together with the -// produced tokens. -// -// 1. An implicit document: -// -// 'a scalar' -// -// Tokens: -// -// STREAM-START(utf-8) -// SCALAR("a scalar",single-quoted) -// STREAM-END -// -// 2. An explicit document: -// -// --- -// 'a scalar' -// ... -// -// Tokens: -// -// STREAM-START(utf-8) -// DOCUMENT-START -// SCALAR("a scalar",single-quoted) -// DOCUMENT-END -// STREAM-END -// -// 3. Several documents in a stream: -// -// 'a scalar' -// --- -// 'another scalar' -// --- -// 'yet another scalar' -// -// Tokens: -// -// STREAM-START(utf-8) -// SCALAR("a scalar",single-quoted) -// DOCUMENT-START -// SCALAR("another scalar",single-quoted) -// DOCUMENT-START -// SCALAR("yet another scalar",single-quoted) -// STREAM-END -// -// We have already introduced the SCALAR token above. The following tokens are -// used to describe aliases, anchors, tag, and scalars: -// -// ALIAS(anchor) -// ANCHOR(anchor) -// TAG(handle,suffix) -// SCALAR(value,style) -// -// The following series of examples illustrate the usage of these tokens: -// -// 1. A recursive sequence: -// -// &A [ *A ] -// -// Tokens: -// -// STREAM-START(utf-8) -// ANCHOR("A") -// FLOW-SEQUENCE-START -// ALIAS("A") -// FLOW-SEQUENCE-END -// STREAM-END -// -// 2. A tagged scalar: -// -// !!float "3.14" # A good approximation. -// -// Tokens: -// -// STREAM-START(utf-8) -// TAG("!!","float") -// SCALAR("3.14",double-quoted) -// STREAM-END -// -// 3. Various scalar styles: -// -// --- # Implicit empty plain scalars do not produce tokens. 
-// --- a plain scalar -// --- 'a single-quoted scalar' -// --- "a double-quoted scalar" -// --- |- -// a literal scalar -// --- >- -// a folded -// scalar -// -// Tokens: -// -// STREAM-START(utf-8) -// DOCUMENT-START -// DOCUMENT-START -// SCALAR("a plain scalar",plain) -// DOCUMENT-START -// SCALAR("a single-quoted scalar",single-quoted) -// DOCUMENT-START -// SCALAR("a double-quoted scalar",double-quoted) -// DOCUMENT-START -// SCALAR("a literal scalar",literal) -// DOCUMENT-START -// SCALAR("a folded scalar",folded) -// STREAM-END -// -// Now it's time to review collection-related tokens. We will start with -// flow collections: -// -// FLOW-SEQUENCE-START -// FLOW-SEQUENCE-END -// FLOW-MAPPING-START -// FLOW-MAPPING-END -// FLOW-ENTRY -// KEY -// VALUE -// -// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and -// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}' -// correspondingly. FLOW-ENTRY represent the ',' indicator. Finally the -// indicators '?' and ':', which are used for denoting mapping keys and values, -// are represented by the KEY and VALUE tokens. -// -// The following examples show flow collections: -// -// 1. A flow sequence: -// -// [item 1, item 2, item 3] -// -// Tokens: -// -// STREAM-START(utf-8) -// FLOW-SEQUENCE-START -// SCALAR("item 1",plain) -// FLOW-ENTRY -// SCALAR("item 2",plain) -// FLOW-ENTRY -// SCALAR("item 3",plain) -// FLOW-SEQUENCE-END -// STREAM-END -// -// 2. A flow mapping: -// -// { -// a simple key: a value, # Note that the KEY token is produced. -// ? a complex key: another value, -// } -// -// Tokens: -// -// STREAM-START(utf-8) -// FLOW-MAPPING-START -// KEY -// SCALAR("a simple key",plain) -// VALUE -// SCALAR("a value",plain) -// FLOW-ENTRY -// KEY -// SCALAR("a complex key",plain) -// VALUE -// SCALAR("another value",plain) -// FLOW-ENTRY -// FLOW-MAPPING-END -// STREAM-END -// -// A simple key is a key which is not denoted by the '?' indicator. Note that -// the Scanner still produce the KEY token whenever it encounters a simple key. -// -// For scanning block collections, the following tokens are used (note that we -// repeat KEY and VALUE here): -// -// BLOCK-SEQUENCE-START -// BLOCK-MAPPING-START -// BLOCK-END -// BLOCK-ENTRY -// KEY -// VALUE -// -// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation -// increase that precedes a block collection (cf. the INDENT token in Python). -// The token BLOCK-END denote indentation decrease that ends a block collection -// (cf. the DEDENT token in Python). However YAML has some syntax pecularities -// that makes detections of these tokens more complex. -// -// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators -// '-', '?', and ':' correspondingly. -// -// The following examples show how the tokens BLOCK-SEQUENCE-START, -// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner: -// -// 1. 
Block sequences: -// -// - item 1 -// - item 2 -// - -// - item 3.1 -// - item 3.2 -// - -// key 1: value 1 -// key 2: value 2 -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-ENTRY -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 3.1",plain) -// BLOCK-ENTRY -// SCALAR("item 3.2",plain) -// BLOCK-END -// BLOCK-ENTRY -// BLOCK-MAPPING-START -// KEY -// SCALAR("key 1",plain) -// VALUE -// SCALAR("value 1",plain) -// KEY -// SCALAR("key 2",plain) -// VALUE -// SCALAR("value 2",plain) -// BLOCK-END -// BLOCK-END -// STREAM-END -// -// 2. Block mappings: -// -// a simple key: a value # The KEY token is produced here. -// ? a complex key -// : another value -// a mapping: -// key 1: value 1 -// key 2: value 2 -// a sequence: -// - item 1 -// - item 2 -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-MAPPING-START -// KEY -// SCALAR("a simple key",plain) -// VALUE -// SCALAR("a value",plain) -// KEY -// SCALAR("a complex key",plain) -// VALUE -// SCALAR("another value",plain) -// KEY -// SCALAR("a mapping",plain) -// BLOCK-MAPPING-START -// KEY -// SCALAR("key 1",plain) -// VALUE -// SCALAR("value 1",plain) -// KEY -// SCALAR("key 2",plain) -// VALUE -// SCALAR("value 2",plain) -// BLOCK-END -// KEY -// SCALAR("a sequence",plain) -// VALUE -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-END -// BLOCK-END -// STREAM-END -// -// YAML does not always require to start a new block collection from a new -// line. If the current line contains only '-', '?', and ':' indicators, a new -// block collection may start at the current line. The following examples -// illustrate this case: -// -// 1. Collections in a sequence: -// -// - - item 1 -// - item 2 -// - key 1: value 1 -// key 2: value 2 -// - ? complex key -// : complex value -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-END -// BLOCK-ENTRY -// BLOCK-MAPPING-START -// KEY -// SCALAR("key 1",plain) -// VALUE -// SCALAR("value 1",plain) -// KEY -// SCALAR("key 2",plain) -// VALUE -// SCALAR("value 2",plain) -// BLOCK-END -// BLOCK-ENTRY -// BLOCK-MAPPING-START -// KEY -// SCALAR("complex key") -// VALUE -// SCALAR("complex value") -// BLOCK-END -// BLOCK-END -// STREAM-END -// -// 2. Collections in a mapping: -// -// ? a sequence -// : - item 1 -// - item 2 -// ? a mapping -// : key 1: value 1 -// key 2: value 2 -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-MAPPING-START -// KEY -// SCALAR("a sequence",plain) -// VALUE -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-END -// KEY -// SCALAR("a mapping",plain) -// VALUE -// BLOCK-MAPPING-START -// KEY -// SCALAR("key 1",plain) -// VALUE -// SCALAR("value 1",plain) -// KEY -// SCALAR("key 2",plain) -// VALUE -// SCALAR("value 2",plain) -// BLOCK-END -// BLOCK-END -// STREAM-END -// -// YAML also permits non-indented sequences if they are included into a block -// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced: -// -// key: -// - item 1 # BLOCK-SEQUENCE-START is NOT produced here. 
-// - item 2 -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-MAPPING-START -// KEY -// SCALAR("key",plain) -// VALUE -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-END -// - -// Ensure that the buffer contains the required number of characters. -// Return true on success, false on failure (reader error or memory error). -func cache(parser *yaml_parser_t, length int) bool { - // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B) - return parser.unread >= length || yaml_parser_update_buffer(parser, length) -} - -// Advance the buffer pointer. -func skip(parser *yaml_parser_t) { - parser.mark.index++ - parser.mark.column++ - parser.unread-- - parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) -} - -func skip_line(parser *yaml_parser_t) { - if is_crlf(parser.buffer, parser.buffer_pos) { - parser.mark.index += 2 - parser.mark.column = 0 - parser.mark.line++ - parser.unread -= 2 - parser.buffer_pos += 2 - } else if is_break(parser.buffer, parser.buffer_pos) { - parser.mark.index++ - parser.mark.column = 0 - parser.mark.line++ - parser.unread-- - parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) - } -} - -// Copy a character to a string buffer and advance pointers. -func read(parser *yaml_parser_t, s []byte) []byte { - w := width(parser.buffer[parser.buffer_pos]) - if w == 0 { - panic("invalid character sequence") - } - if len(s) == 0 { - s = make([]byte, 0, 32) - } - if w == 1 && len(s)+w <= cap(s) { - s = s[:len(s)+1] - s[len(s)-1] = parser.buffer[parser.buffer_pos] - parser.buffer_pos++ - } else { - s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...) - parser.buffer_pos += w - } - parser.mark.index++ - parser.mark.column++ - parser.unread-- - return s -} - -// Copy a line break character to a string buffer and advance pointers. -func read_line(parser *yaml_parser_t, s []byte) []byte { - buf := parser.buffer - pos := parser.buffer_pos - switch { - case buf[pos] == '\r' && buf[pos+1] == '\n': - // CR LF . LF - s = append(s, '\n') - parser.buffer_pos += 2 - parser.mark.index++ - parser.unread-- - case buf[pos] == '\r' || buf[pos] == '\n': - // CR|LF . LF - s = append(s, '\n') - parser.buffer_pos += 1 - case buf[pos] == '\xC2' && buf[pos+1] == '\x85': - // NEL . LF - s = append(s, '\n') - parser.buffer_pos += 2 - case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'): - // LS|PS . LS|PS - s = append(s, buf[parser.buffer_pos:pos+3]...) - parser.buffer_pos += 3 - default: - return s - } - parser.mark.index++ - parser.mark.column = 0 - parser.mark.line++ - parser.unread-- - return s -} - -// Get the next token. -func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool { - // Erase the token object. - *token = yaml_token_t{} // [Go] Is this necessary? - - // No tokens after STREAM-END or error. - if parser.stream_end_produced || parser.error != yaml_NO_ERROR { - return true - } - - // Ensure that the tokens queue contains enough tokens. - if !parser.token_available { - if !yaml_parser_fetch_more_tokens(parser) { - return false - } - } - - // Fetch the next token from the queue. - *token = parser.tokens[parser.tokens_head] - parser.tokens_head++ - parser.tokens_parsed++ - parser.token_available = false - - if token.typ == yaml_STREAM_END_TOKEN { - parser.stream_end_produced = true - } - return true -} - -// Set the scanner error and return false. 
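
// read_line above folds the break forms YAML accepts (CR LF, lone CR or LF,
// NEL) into '\n' and passes LS/PS through unchanged. A standalone sketch of
// that normalization (the caller must pass a valid position):

package main

import "fmt"

// normalizeBreak reports the normalized break and how many input bytes it
// consumed; width 0 means no break at this position.
func normalizeBreak(buf []byte, pos int) (out string, width int) {
	switch {
	case pos+1 < len(buf) && buf[pos] == '\r' && buf[pos+1] == '\n':
		return "\n", 2 // CR LF -> LF
	case buf[pos] == '\r' || buf[pos] == '\n':
		return "\n", 1 // lone CR or LF -> LF
	case pos+1 < len(buf) && buf[pos] == 0xC2 && buf[pos+1] == 0x85:
		return "\n", 2 // NEL -> LF
	case pos+2 < len(buf) && buf[pos] == 0xE2 && buf[pos+1] == 0x80 &&
		(buf[pos+2] == 0xA8 || buf[pos+2] == 0xA9):
		return string(buf[pos : pos+3]), 3 // LS|PS kept as-is
	default:
		return "", 0
	}
}

func main() {
	out, w := normalizeBreak([]byte("\r\nnext"), 0)
	fmt.Printf("%q %d\n", out, w) // "\n" 2
}
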
-func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool { - parser.error = yaml_SCANNER_ERROR - parser.context = context - parser.context_mark = context_mark - parser.problem = problem - parser.problem_mark = parser.mark - return false -} - -func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool { - context := "while parsing a tag" - if directive { - context = "while parsing a %TAG directive" - } - return yaml_parser_set_scanner_error(parser, context, context_mark, "did not find URI escaped octet") -} - -func trace(args ...interface{}) func() { - pargs := append([]interface{}{"+++"}, args...) - fmt.Println(pargs...) - pargs = append([]interface{}{"---"}, args...) - return func() { fmt.Println(pargs...) } -} - -// Ensure that the tokens queue contains at least one token which can be -// returned to the Parser. -func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { - // While we need more tokens to fetch, do it. - for { - // Check if we really need to fetch more tokens. - need_more_tokens := false - - if parser.tokens_head == len(parser.tokens) { - // Queue is empty. - need_more_tokens = true - } else { - // Check if any potential simple key may occupy the head position. - if !yaml_parser_stale_simple_keys(parser) { - return false - } - - for i := range parser.simple_keys { - simple_key := &parser.simple_keys[i] - if simple_key.possible && simple_key.token_number == parser.tokens_parsed { - need_more_tokens = true - break - } - } - } - - // We are finished. - if !need_more_tokens { - break - } - // Fetch the next token. - if !yaml_parser_fetch_next_token(parser) { - return false - } - } - - parser.token_available = true - return true -} - -// The dispatcher for token fetchers. -func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool { - // Ensure that the buffer is initialized. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - // Check if we just started scanning. Fetch STREAM-START then. - if !parser.stream_start_produced { - return yaml_parser_fetch_stream_start(parser) - } - - // Eat whitespaces and comments until we reach the next token. - if !yaml_parser_scan_to_next_token(parser) { - return false - } - - // Remove obsolete potential simple keys. - if !yaml_parser_stale_simple_keys(parser) { - return false - } - - // Check the indentation level against the current column. - if !yaml_parser_unroll_indent(parser, parser.mark.column) { - return false - } - - // Ensure that the buffer contains at least 4 characters. 4 is the length - // of the longest indicators ('--- ' and '... '). - if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { - return false - } - - // Is it the end of the stream? - if is_z(parser.buffer, parser.buffer_pos) { - return yaml_parser_fetch_stream_end(parser) - } - - // Is it a directive? - if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' { - return yaml_parser_fetch_directive(parser) - } - - buf := parser.buffer - pos := parser.buffer_pos - - // Is it the document start indicator? - if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) { - return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN) - } - - // Is it the document end indicator? - if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' 
&& is_blankz(buf, pos+3) { - return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN) - } - - // Is it the flow sequence start indicator? - if buf[pos] == '[' { - return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN) - } - - // Is it the flow mapping start indicator? - if parser.buffer[parser.buffer_pos] == '{' { - return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN) - } - - // Is it the flow sequence end indicator? - if parser.buffer[parser.buffer_pos] == ']' { - return yaml_parser_fetch_flow_collection_end(parser, - yaml_FLOW_SEQUENCE_END_TOKEN) - } - - // Is it the flow mapping end indicator? - if parser.buffer[parser.buffer_pos] == '}' { - return yaml_parser_fetch_flow_collection_end(parser, - yaml_FLOW_MAPPING_END_TOKEN) - } - - // Is it the flow entry indicator? - if parser.buffer[parser.buffer_pos] == ',' { - return yaml_parser_fetch_flow_entry(parser) - } - - // Is it the block entry indicator? - if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) { - return yaml_parser_fetch_block_entry(parser) - } - - // Is it the key indicator? - if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { - return yaml_parser_fetch_key(parser) - } - - // Is it the value indicator? - if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { - return yaml_parser_fetch_value(parser) - } - - // Is it an alias? - if parser.buffer[parser.buffer_pos] == '*' { - return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) - } - - // Is it an anchor? - if parser.buffer[parser.buffer_pos] == '&' { - return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) - } - - // Is it a tag? - if parser.buffer[parser.buffer_pos] == '!' { - return yaml_parser_fetch_tag(parser) - } - - // Is it a literal scalar? - if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 { - return yaml_parser_fetch_block_scalar(parser, true) - } - - // Is it a folded scalar? - if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 { - return yaml_parser_fetch_block_scalar(parser, false) - } - - // Is it a single-quoted scalar? - if parser.buffer[parser.buffer_pos] == '\'' { - return yaml_parser_fetch_flow_scalar(parser, true) - } - - // Is it a double-quoted scalar? - if parser.buffer[parser.buffer_pos] == '"' { - return yaml_parser_fetch_flow_scalar(parser, false) - } - - // Is it a plain scalar? - // - // A plain scalar may start with any non-blank characters except - // - // '-', '?', ':', ',', '[', ']', '{', '}', - // '#', '&', '*', '!', '|', '>', '\'', '\"', - // '%', '@', '`'. - // - // In the block context (and, for the '-' indicator, in the flow context - // too), it may also start with the characters - // - // '-', '?', ':' - // - // if it is followed by a non-space character. - // - // The last rule is more restrictive than the specification requires. - // [Go] Make this logic more reasonable. - //switch parser.buffer[parser.buffer_pos] { - //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': - //} - if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' || - parser.buffer[parser.buffer_pos] == '?' 
|| parser.buffer[parser.buffer_pos] == ':' || - parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' || - parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || - parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' || - parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' || - parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' || - parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' || - parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' || - parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') || - (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) || - (parser.flow_level == 0 && - (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') && - !is_blankz(parser.buffer, parser.buffer_pos+1)) { - return yaml_parser_fetch_plain_scalar(parser) - } - - // If we don't determine the token type so far, it is an error. - return yaml_parser_set_scanner_error(parser, - "while scanning for the next token", parser.mark, - "found character that cannot start any token") -} - -// Check the list of potential simple keys and remove the positions that -// cannot contain simple keys anymore. -func yaml_parser_stale_simple_keys(parser *yaml_parser_t) bool { - // Check for a potential simple key for each flow level. - for i := range parser.simple_keys { - simple_key := &parser.simple_keys[i] - - // The specification requires that a simple key - // - // - is limited to a single line, - // - is shorter than 1024 characters. - if simple_key.possible && (simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index) { - - // Check if the potential simple key to be removed is required. - if simple_key.required { - return yaml_parser_set_scanner_error(parser, - "while scanning a simple key", simple_key.mark, - "could not find expected ':'") - } - simple_key.possible = false - } - } - return true -} - -// Check if a simple key may start at the current position and add it if -// needed. -func yaml_parser_save_simple_key(parser *yaml_parser_t) bool { - // A simple key is required at the current position if the scanner is in - // the block context and the current column coincides with the indentation - // level. - - required := parser.flow_level == 0 && parser.indent == parser.mark.column - - // A simple key is required only when it is the first token in the current - // line. Therefore it is always allowed. But we add a check anyway. - if required && !parser.simple_key_allowed { - panic("should not happen") - } - - // - // If the current position may start a simple key, save it. - // - if parser.simple_key_allowed { - simple_key := yaml_simple_key_t{ - possible: true, - required: required, - token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), - } - simple_key.mark = parser.mark - - if !yaml_parser_remove_simple_key(parser) { - return false - } - parser.simple_keys[len(parser.simple_keys)-1] = simple_key - } - return true -} - -// Remove a potential simple key at the current flow level. -func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool { - i := len(parser.simple_keys) - 1 - if parser.simple_keys[i].possible { - // If the key is required, it is an error. 
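
// The staleness rule in yaml_parser_stale_simple_keys above is the scanner's
// only bound on lookahead: a potential simple key dies once the scan leaves
// its line or moves more than 1024 characters past it. A condensed sketch of
// that predicate:

package main

import "fmt"

type mark struct{ index, line int }

// simpleKeyStale mirrors the condition above: the key was registered at
// keyMark and the scanner now stands at cur.
func simpleKeyStale(keyMark, cur mark) bool {
	return keyMark.line < cur.line || keyMark.index+1024 < cur.index
}

func main() {
	key := mark{index: 10, line: 3}
	fmt.Println(simpleKeyStale(key, mark{index: 500, line: 3}))  // false: same line, close enough
	fmt.Println(simpleKeyStale(key, mark{index: 500, line: 4}))  // true: next line
	fmt.Println(simpleKeyStale(key, mark{index: 2000, line: 3})) // true: too far away
}
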
- if parser.simple_keys[i].required { - return yaml_parser_set_scanner_error(parser, - "while scanning a simple key", parser.simple_keys[i].mark, - "could not find expected ':'") - } - } - // Remove the key from the stack. - parser.simple_keys[i].possible = false - return true -} - -// Increase the flow level and resize the simple key list if needed. -func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool { - // Reset the simple key on the next level. - parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) - - // Increase the flow level. - parser.flow_level++ - return true -} - -// Decrease the flow level. -func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool { - if parser.flow_level > 0 { - parser.flow_level-- - parser.simple_keys = parser.simple_keys[:len(parser.simple_keys)-1] - } - return true -} - -// Push the current indentation level to the stack and set the new level -// the current column is greater than the indentation level. In this case, -// append or insert the specified token into the token queue. -func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool { - // In the flow context, do nothing. - if parser.flow_level > 0 { - return true - } - - if parser.indent < column { - // Push the current indentation level to the stack and set the new - // indentation level. - parser.indents = append(parser.indents, parser.indent) - parser.indent = column - - // Create a token and insert it into the queue. - token := yaml_token_t{ - typ: typ, - start_mark: mark, - end_mark: mark, - } - if number > -1 { - number -= parser.tokens_parsed - } - yaml_insert_token(parser, number, &token) - } - return true -} - -// Pop indentation levels from the indents stack until the current level -// becomes less or equal to the column. For each intendation level, append -// the BLOCK-END token. -func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool { - // In the flow context, do nothing. - if parser.flow_level > 0 { - return true - } - - // Loop through the intendation levels in the stack. - for parser.indent > column { - // Create a token and append it to the queue. - token := yaml_token_t{ - typ: yaml_BLOCK_END_TOKEN, - start_mark: parser.mark, - end_mark: parser.mark, - } - yaml_insert_token(parser, -1, &token) - - // Pop the indentation level. - parser.indent = parser.indents[len(parser.indents)-1] - parser.indents = parser.indents[:len(parser.indents)-1] - } - return true -} - -// Initialize the scanner and produce the STREAM-START token. -func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool { - - // Set the initial indentation. - parser.indent = -1 - - // Initialize the simple key stack. - parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) - - // A simple key is allowed at the beginning of the stream. - parser.simple_key_allowed = true - - // We have started. - parser.stream_start_produced = true - - // Create the STREAM-START token and append it to the queue. - token := yaml_token_t{ - typ: yaml_STREAM_START_TOKEN, - start_mark: parser.mark, - end_mark: parser.mark, - encoding: parser.encoding, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the STREAM-END token and shut down the scanner. -func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool { - - // Force new line. - if parser.mark.column != 0 { - parser.mark.column = 0 - parser.mark.line++ - } - - // Reset the indentation level. 
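
// yaml_parser_roll_indent and yaml_parser_unroll_indent above implement the
// Python-style INDENT/DEDENT scheme: a stack of indentation columns, with
// one BLOCK-END emitted per popped level. A minimal stack sketch of the same
// idea (illustrative types, not the library's):

package main

import "fmt"

type indenter struct {
	indent  int
	indents []int
}

// roll pushes a new level when the column increases; it reports whether a
// block start token would be emitted.
func (in *indenter) roll(column int) bool {
	if in.indent < column {
		in.indents = append(in.indents, in.indent)
		in.indent = column
		return true
	}
	return false
}

// unroll pops levels until indent <= column, returning how many BLOCK-END
// tokens that produces.
func (in *indenter) unroll(column int) (blockEnds int) {
	for in.indent > column {
		in.indent = in.indents[len(in.indents)-1]
		in.indents = in.indents[:len(in.indents)-1]
		blockEnds++
	}
	return blockEnds
}

func main() {
	in := &indenter{indent: -1}
	fmt.Println(in.roll(0), in.roll(2)) // true true: two nested blocks opened
	fmt.Println(in.unroll(-1))          // 2: both closed at end of stream
}
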
- if !yaml_parser_unroll_indent(parser, -1) { - return false - } - - // Reset simple keys. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - parser.simple_key_allowed = false - - // Create the STREAM-END token and append it to the queue. - token := yaml_token_t{ - typ: yaml_STREAM_END_TOKEN, - start_mark: parser.mark, - end_mark: parser.mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token. -func yaml_parser_fetch_directive(parser *yaml_parser_t) bool { - // Reset the indentation level. - if !yaml_parser_unroll_indent(parser, -1) { - return false - } - - // Reset simple keys. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - parser.simple_key_allowed = false - - // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token. - token := yaml_token_t{} - if !yaml_parser_scan_directive(parser, &token) { - return false - } - // Append the token to the queue. - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the DOCUMENT-START or DOCUMENT-END token. -func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool { - // Reset the indentation level. - if !yaml_parser_unroll_indent(parser, -1) { - return false - } - - // Reset simple keys. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - parser.simple_key_allowed = false - - // Consume the token. - start_mark := parser.mark - - skip(parser) - skip(parser) - skip(parser) - - end_mark := parser.mark - - // Create the DOCUMENT-START or DOCUMENT-END token. - token := yaml_token_t{ - typ: typ, - start_mark: start_mark, - end_mark: end_mark, - } - // Append the token to the queue. - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token. -func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool { - // The indicators '[' and '{' may start a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // Increase the flow level. - if !yaml_parser_increase_flow_level(parser) { - return false - } - - // A simple key may follow the indicators '[' and '{'. - parser.simple_key_allowed = true - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the FLOW-SEQUENCE-START of FLOW-MAPPING-START token. - token := yaml_token_t{ - typ: typ, - start_mark: start_mark, - end_mark: end_mark, - } - // Append the token to the queue. - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token. -func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool { - // Reset any potential simple key on the current flow level. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // Decrease the flow level. - if !yaml_parser_decrease_flow_level(parser) { - return false - } - - // No simple keys after the indicators ']' and '}'. - parser.simple_key_allowed = false - - // Consume the token. - - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the FLOW-SEQUENCE-END of FLOW-MAPPING-END token. - token := yaml_token_t{ - typ: typ, - start_mark: start_mark, - end_mark: end_mark, - } - // Append the token to the queue. - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the FLOW-ENTRY token. 
-func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool { - // Reset any potential simple keys on the current flow level. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // Simple keys are allowed after ','. - parser.simple_key_allowed = true - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the FLOW-ENTRY token and append it to the queue. - token := yaml_token_t{ - typ: yaml_FLOW_ENTRY_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the BLOCK-ENTRY token. -func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool { - // Check if the scanner is in the block context. - if parser.flow_level == 0 { - // Check if we are allowed to start a new entry. - if !parser.simple_key_allowed { - return yaml_parser_set_scanner_error(parser, "", parser.mark, - "block sequence entries are not allowed in this context") - } - // Add the BLOCK-SEQUENCE-START token if needed. - if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) { - return false - } - } else { - // It is an error for the '-' indicator to occur in the flow context, - // but we let the Parser detect and report about it because the Parser - // is able to point to the context. - } - - // Reset any potential simple keys on the current flow level. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // Simple keys are allowed after '-'. - parser.simple_key_allowed = true - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the BLOCK-ENTRY token and append it to the queue. - token := yaml_token_t{ - typ: yaml_BLOCK_ENTRY_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the KEY token. -func yaml_parser_fetch_key(parser *yaml_parser_t) bool { - - // In the block context, additional checks are required. - if parser.flow_level == 0 { - // Check if we are allowed to start a new key (not nessesary simple). - if !parser.simple_key_allowed { - return yaml_parser_set_scanner_error(parser, "", parser.mark, - "mapping keys are not allowed in this context") - } - // Add the BLOCK-MAPPING-START token if needed. - if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { - return false - } - } - - // Reset any potential simple keys on the current flow level. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // Simple keys are allowed after '?' in the block context. - parser.simple_key_allowed = parser.flow_level == 0 - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the KEY token and append it to the queue. - token := yaml_token_t{ - typ: yaml_KEY_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the VALUE token. -func yaml_parser_fetch_value(parser *yaml_parser_t) bool { - - simple_key := &parser.simple_keys[len(parser.simple_keys)-1] - - // Have we found a simple key? - if simple_key.possible { - // Create the KEY token and insert it into the queue. 
- token := yaml_token_t{ - typ: yaml_KEY_TOKEN, - start_mark: simple_key.mark, - end_mark: simple_key.mark, - } - yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token) - - // In the block context, we may need to add the BLOCK-MAPPING-START token. - if !yaml_parser_roll_indent(parser, simple_key.mark.column, - simple_key.token_number, - yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) { - return false - } - - // Remove the simple key. - simple_key.possible = false - - // A simple key cannot follow another simple key. - parser.simple_key_allowed = false - - } else { - // The ':' indicator follows a complex key. - - // In the block context, extra checks are required. - if parser.flow_level == 0 { - - // Check if we are allowed to start a complex value. - if !parser.simple_key_allowed { - return yaml_parser_set_scanner_error(parser, "", parser.mark, - "mapping values are not allowed in this context") - } - - // Add the BLOCK-MAPPING-START token if needed. - if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { - return false - } - } - - // Simple keys after ':' are allowed in the block context. - parser.simple_key_allowed = parser.flow_level == 0 - } - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the VALUE token and append it to the queue. - token := yaml_token_t{ - typ: yaml_VALUE_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the ALIAS or ANCHOR token. -func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool { - // An anchor or an alias could be a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // A simple key cannot follow an anchor or an alias. - parser.simple_key_allowed = false - - // Create the ALIAS or ANCHOR token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_anchor(parser, &token, typ) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the TAG token. -func yaml_parser_fetch_tag(parser *yaml_parser_t) bool { - // A tag could be a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // A simple key cannot follow a tag. - parser.simple_key_allowed = false - - // Create the TAG token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_tag(parser, &token) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens. -func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool { - // Remove any potential simple keys. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // A simple key may follow a block scalar. - parser.simple_key_allowed = true - - // Create the SCALAR token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_block_scalar(parser, &token, literal) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens. -func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool { - // A plain scalar could be a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // A simple key cannot follow a flow scalar. - parser.simple_key_allowed = false - - // Create the SCALAR token and append it to the queue. 
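
// The trick in yaml_parser_fetch_value above: only when ':' arrives does the
// scanner learn that the preceding scalar was a key, so it inserts a KEY
// token *behind* tokens already queued, at the position saved with the
// simple key. A toy queue showing that back-insertion (illustrative types,
// not the library's):

package main

import "fmt"

func insertToken(queue []string, pos int, tok string) []string {
	queue = append(queue, "")
	copy(queue[pos+1:], queue[pos:])
	queue[pos] = tok
	return queue
}

func main() {
	// The scalar "name" was queued before we knew it was a key.
	queue := []string{"SCALAR(name)"}
	// On seeing ':', insert KEY at the saved position, then append VALUE.
	queue = insertToken(queue, 0, "KEY")
	queue = append(queue, "VALUE")
	fmt.Println(queue) // [KEY SCALAR(name) VALUE]
}
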
- var token yaml_token_t - if !yaml_parser_scan_flow_scalar(parser, &token, single) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the SCALAR(...,plain) token. -func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool { - // A plain scalar could be a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // A simple key cannot follow a flow scalar. - parser.simple_key_allowed = false - - // Create the SCALAR token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_plain_scalar(parser, &token) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Eat whitespaces and comments until the next token is found. -func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool { - - // Until the next token is not found. - for { - // Allow the BOM mark to start a line. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) { - skip(parser) - } - - // Eat whitespaces. - // Tabs are allowed: - // - in the flow context - // - in the block context, but not at the beginning of the line or - // after '-', '?', or ':' (complex value). - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Eat a comment until a line break. - if parser.buffer[parser.buffer_pos] == '#' { - for !is_breakz(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - } - - // If it is a line break, eat it. - if is_break(parser.buffer, parser.buffer_pos) { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - skip_line(parser) - - // In the block context, a new line may start a simple key. - if parser.flow_level == 0 { - parser.simple_key_allowed = true - } - } else { - break // We have found a token. - } - } - - return true -} - -// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. -// -// Scope: -// %YAML 1.1 # a comment \n -// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -// %TAG !yaml! tag:yaml.org,2002: \n -// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -// -func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool { - // Eat '%'. - start_mark := parser.mark - skip(parser) - - // Scan the directive name. - var name []byte - if !yaml_parser_scan_directive_name(parser, start_mark, &name) { - return false - } - - // Is it a YAML directive? - if bytes.Equal(name, []byte("YAML")) { - // Scan the VERSION directive value. - var major, minor int8 - if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) { - return false - } - end_mark := parser.mark - - // Create a VERSION-DIRECTIVE token. - *token = yaml_token_t{ - typ: yaml_VERSION_DIRECTIVE_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - major: major, - minor: minor, - } - - // Is it a TAG directive? - } else if bytes.Equal(name, []byte("TAG")) { - // Scan the TAG directive value. - var handle, prefix []byte - if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) { - return false - } - end_mark := parser.mark - - // Create a TAG-DIRECTIVE token. 
-        *token = yaml_token_t{
-            typ:        yaml_TAG_DIRECTIVE_TOKEN,
-            start_mark: start_mark,
-            end_mark:   end_mark,
-            value:      handle,
-            prefix:     prefix,
-        }
-
-        // Unknown directive.
-    } else {
-        yaml_parser_set_scanner_error(parser, "while scanning a directive",
-            start_mark, "found unknown directive name")
-        return false
-    }
-
-    // Eat the rest of the line including any comments.
-    if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-        return false
-    }
-
-    for is_blank(parser.buffer, parser.buffer_pos) {
-        skip(parser)
-        if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-            return false
-        }
-    }
-
-    if parser.buffer[parser.buffer_pos] == '#' {
-        for !is_breakz(parser.buffer, parser.buffer_pos) {
-            skip(parser)
-            if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-                return false
-            }
-        }
-    }
-
-    // Check if we are at the end of the line.
-    if !is_breakz(parser.buffer, parser.buffer_pos) {
-        yaml_parser_set_scanner_error(parser, "while scanning a directive",
-            start_mark, "did not find expected comment or line break")
-        return false
-    }
-
-    // Eat a line break.
-    if is_break(parser.buffer, parser.buffer_pos) {
-        if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
-            return false
-        }
-        skip_line(parser)
-    }
-
-    return true
-}
-
-// Scan the directive name.
-//
-// Scope:
-//      %YAML   1.1     # a comment \n
-//       ^^^^
-//      %TAG    !yaml!  tag:yaml.org,2002:  \n
-//       ^^^
-//
-func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool {
-    // Consume the directive name.
-    if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-        return false
-    }
-
-    var s []byte
-    for is_alpha(parser.buffer, parser.buffer_pos) {
-        s = read(parser, s)
-        if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-            return false
-        }
-    }
-
-    // Check if the name is empty.
-    if len(s) == 0 {
-        yaml_parser_set_scanner_error(parser, "while scanning a directive",
-            start_mark, "could not find expected directive name")
-        return false
-    }
-
-    // Check for a blank character after the name.
-    if !is_blankz(parser.buffer, parser.buffer_pos) {
-        yaml_parser_set_scanner_error(parser, "while scanning a directive",
-            start_mark, "found unexpected non-alphabetical character")
-        return false
-    }
-    *name = s
-    return true
-}
-
-// Scan the value of VERSION-DIRECTIVE.
-//
-// Scope:
-//      %YAML   1.1     # a comment \n
-//           ^^^^^^
-func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool {
-    // Eat whitespaces.
-    if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-        return false
-    }
-    for is_blank(parser.buffer, parser.buffer_pos) {
-        skip(parser)
-        if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-            return false
-        }
-    }
-
-    // Consume the major version number.
-    if !yaml_parser_scan_version_directive_number(parser, start_mark, major) {
-        return false
-    }
-
-    // Eat '.'.
-    if parser.buffer[parser.buffer_pos] != '.' {
-        return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
-            start_mark, "did not find expected digit or '.' character")
-    }
-
-    skip(parser)
-
-    // Consume the minor version number.
-    if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) {
-        return false
-    }
-    return true
-}
-
-const max_number_length = 2
-
-// Scan the version number of VERSION-DIRECTIVE.
-// -// Scope: -// %YAML 1.1 # a comment \n -// ^ -// %YAML 1.1 # a comment \n -// ^ -func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool { - - // Repeat while the next character is digit. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - var value, length int8 - for is_digit(parser.buffer, parser.buffer_pos) { - // Check if the number is too long. - length++ - if length > max_number_length { - return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", - start_mark, "found extremely long version number") - } - value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos)) - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check if the number was present. - if length == 0 { - return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", - start_mark, "did not find expected version number") - } - *number = value - return true -} - -// Scan the value of a TAG-DIRECTIVE token. -// -// Scope: -// %TAG !yaml! tag:yaml.org,2002: \n -// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -// -func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool { - var handle_value, prefix_value []byte - - // Eat whitespaces. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Scan a handle. - if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { - return false - } - - // Expect a whitespace. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if !is_blank(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", - start_mark, "did not find expected whitespace") - return false - } - - // Eat whitespaces. - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Scan a prefix. - if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { - return false - } - - // Expect a whitespace or line break. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if !is_blankz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", - start_mark, "did not find expected whitespace or line break") - return false - } - - *handle = handle_value - *prefix = prefix_value - return true -} - -func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool { - var s []byte - - // Eat the indicator character. - start_mark := parser.mark - skip(parser) - - // Consume the value. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for is_alpha(parser.buffer, parser.buffer_pos) { - s = read(parser, s) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - end_mark := parser.mark - - /* - * Check if length of the anchor is greater than 0 and it is followed by - * a whitespace character or one of the indicators: - * - * '?', ':', ',', ']', '}', '%', '@', '`'. - */ - - if len(s) == 0 || - !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' 
|| - parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' || - parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' || - parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' || - parser.buffer[parser.buffer_pos] == '`') { - context := "while scanning an alias" - if typ == yaml_ANCHOR_TOKEN { - context = "while scanning an anchor" - } - yaml_parser_set_scanner_error(parser, context, start_mark, - "did not find expected alphabetic or numeric character") - return false - } - - // Create a token. - *token = yaml_token_t{ - typ: typ, - start_mark: start_mark, - end_mark: end_mark, - value: s, - } - - return true -} - -/* - * Scan a TAG token. - */ - -func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool { - var handle, suffix []byte - - start_mark := parser.mark - - // Check if the tag is in the canonical form. - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - - if parser.buffer[parser.buffer_pos+1] == '<' { - // Keep the handle as '' - - // Eat '!<' - skip(parser) - skip(parser) - - // Consume the tag value. - if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { - return false - } - - // Check for '>' and eat it. - if parser.buffer[parser.buffer_pos] != '>' { - yaml_parser_set_scanner_error(parser, "while scanning a tag", - start_mark, "did not find the expected '>'") - return false - } - - skip(parser) - } else { - // The tag has either the '!suffix' or the '!handle!suffix' form. - - // First, try to scan a handle. - if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) { - return false - } - - // Check if it is, indeed, handle. - if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' { - // Scan the suffix now. - if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { - return false - } - } else { - // It wasn't a handle after all. Scan the rest of the tag. - if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) { - return false - } - - // Set the handle to '!'. - handle = []byte{'!'} - - // A special case: the '!' tag. Set the handle to '' and the - // suffix to '!'. - if len(suffix) == 0 { - handle, suffix = suffix, handle - } - } - } - - // Check the character which ends the tag. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if !is_blankz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a tag", - start_mark, "did not find expected whitespace or line break") - return false - } - - end_mark := parser.mark - - // Create a token. - *token = yaml_token_t{ - typ: yaml_TAG_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: handle, - suffix: suffix, - } - return true -} - -// Scan a tag handle. -func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool { - // Check the initial '!' character. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if parser.buffer[parser.buffer_pos] != '!' { - yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find expected '!'") - return false - } - - var s []byte - - // Copy the '!' character. - s = read(parser, s) - - // Copy all subsequent alphabetical and numerical characters. 
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - for is_alpha(parser.buffer, parser.buffer_pos) { - s = read(parser, s) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check if the trailing character is '!' and copy it. - if parser.buffer[parser.buffer_pos] == '!' { - s = read(parser, s) - } else { - // It's either the '!' tag or not really a tag handle. If it's a %TAG - // directive, it's an error. If it's a tag token, it must be a part of URI. - if directive && !(s[0] == '!' && s[1] == 0) { - yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find expected '!'") - return false - } - } - - *handle = s - return true -} - -// Scan a tag. -func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool { - //size_t length = head ? strlen((char *)head) : 0 - var s []byte - - // Copy the head if needed. - // - // Note that we don't copy the leading '!' character. - if len(head) > 1 { - s = append(s, head[1:]...) - } - - // Scan the tag. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - // The set of characters that may appear in URI is as follows: - // - // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', - // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', - // '%'. - // [Go] Convert this into more reasonable logic. - for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' || - parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' || - parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' || - parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' || - parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' || - parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' || - parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' || - parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' || - parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' || - parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' || - parser.buffer[parser.buffer_pos] == '%' { - // Check if it is a URI-escape sequence. - if parser.buffer[parser.buffer_pos] == '%' { - if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) { - return false - } - } else { - s = read(parser, s) - } - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check if the tag is non-empty. - if len(s) == 0 { - yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find expected tag URI") - return false - } - *uri = s - return true -} - -// Decode an URI-escape sequence corresponding to a single UTF-8 character. -func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool { - - // Decode the required number of characters. - w := 1024 - for w > 0 { - // Check for a URI-escaped octet. 
-        if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
-            return false
-        }
-
-        if !(parser.buffer[parser.buffer_pos] == '%' &&
-            is_hex(parser.buffer, parser.buffer_pos+1) &&
-            is_hex(parser.buffer, parser.buffer_pos+2)) {
-            return yaml_parser_set_scanner_tag_error(parser, directive,
-                start_mark, "did not find URI escaped octet")
-        }
-
-        // Get the octet.
-        octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2))
-
-        // If it is the leading octet, determine the length of the UTF-8 sequence.
-        if w == 1024 {
-            w = width(octet)
-            if w == 0 {
-                return yaml_parser_set_scanner_tag_error(parser, directive,
-                    start_mark, "found an incorrect leading UTF-8 octet")
-            }
-        } else {
-            // Check if the trailing octet is correct.
-            if octet&0xC0 != 0x80 {
-                return yaml_parser_set_scanner_tag_error(parser, directive,
-                    start_mark, "found an incorrect trailing UTF-8 octet")
-            }
-        }
-
-        // Copy the octet and move the pointers.
-        *s = append(*s, octet)
-        skip(parser)
-        skip(parser)
-        skip(parser)
-        w--
-    }
-    return true
-}
-
-// Scan a block scalar.
-func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool {
-    // Eat the indicator '|' or '>'.
-    start_mark := parser.mark
-    skip(parser)
-
-    // Scan the additional block scalar indicators.
-    if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-        return false
-    }
-
-    // Check for a chomping indicator.
-    var chomping, increment int
-    if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
-        // Set the chomping method and eat the indicator.
-        if parser.buffer[parser.buffer_pos] == '+' {
-            chomping = +1
-        } else {
-            chomping = -1
-        }
-        skip(parser)
-
-        // Check for an indentation indicator.
-        if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-            return false
-        }
-        if is_digit(parser.buffer, parser.buffer_pos) {
-            // Check that the indentation is greater than 0.
-            if parser.buffer[parser.buffer_pos] == '0' {
-                yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
-                    start_mark, "found an indentation indicator equal to 0")
-                return false
-            }
-
-            // Get the indentation level and eat the indicator.
-            increment = as_digit(parser.buffer, parser.buffer_pos)
-            skip(parser)
-        }
-
-    } else if is_digit(parser.buffer, parser.buffer_pos) {
-        // Do the same as above, but in the opposite order.
-
-        if parser.buffer[parser.buffer_pos] == '0' {
-            yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
-                start_mark, "found an indentation indicator equal to 0")
-            return false
-        }
-        increment = as_digit(parser.buffer, parser.buffer_pos)
-        skip(parser)
-
-        if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-            return false
-        }
-        if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
-            if parser.buffer[parser.buffer_pos] == '+' {
-                chomping = +1
-            } else {
-                chomping = -1
-            }
-            skip(parser)
-        }
-    }
-
-    // Eat whitespaces and comments to the end of the line.
-    if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-        return false
-    }
-    for is_blank(parser.buffer, parser.buffer_pos) {
-        skip(parser)
-        if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-            return false
-        }
-    }
-    if parser.buffer[parser.buffer_pos] == '#' {
-        for !is_breakz(parser.buffer, parser.buffer_pos) {
-            skip(parser)
-            if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-                return false
-            }
-        }
-    }
-
-    // Check if we are at the end of the line.
-    if !is_breakz(parser.buffer, parser.buffer_pos) {
-        yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
-            start_mark, "did not find expected comment or line break")
-        return false
-    }
-
-    // Eat a line break.
-    if is_break(parser.buffer, parser.buffer_pos) {
-        if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
-            return false
-        }
-        skip_line(parser)
-    }
-
-    end_mark := parser.mark
-
-    // Set the indentation level if it was specified.
-    var indent int
-    if increment > 0 {
-        if parser.indent >= 0 {
-            indent = parser.indent + increment
-        } else {
-            indent = increment
-        }
-    }
-
-    // Scan the leading line breaks and determine the indentation level if needed.
-    var s, leading_break, trailing_breaks []byte
-    if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
-        return false
-    }
-
-    // Scan the block scalar content.
-    if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-        return false
-    }
-    var leading_blank, trailing_blank bool
-    for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) {
-        // We are at the beginning of a non-empty line.
-
-        // Is it a trailing whitespace?
-        trailing_blank = is_blank(parser.buffer, parser.buffer_pos)
-
-        // Check if we need to fold the leading line break.
-        if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' {
-            // Do we need to join the lines by space?
-            if len(trailing_breaks) == 0 {
-                s = append(s, ' ')
-            }
-        } else {
-            s = append(s, leading_break...)
-        }
-        leading_break = leading_break[:0]
-
-        // Append the remaining line breaks.
-        s = append(s, trailing_breaks...)
-        trailing_breaks = trailing_breaks[:0]
-
-        // Is it a leading whitespace?
-        leading_blank = is_blank(parser.buffer, parser.buffer_pos)
-
-        // Consume the current line.
-        for !is_breakz(parser.buffer, parser.buffer_pos) {
-            s = read(parser, s)
-            if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-                return false
-            }
-        }
-
-        // Consume the line break.
-        if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
-            return false
-        }
-
-        leading_break = read_line(parser, leading_break)
-
-        // Eat the following indentation spaces and line breaks.
-        if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
-            return false
-        }
-    }
-
-    // Chomp the tail.
-    if chomping != -1 {
-        s = append(s, leading_break...)
-    }
-    if chomping == 1 {
-        s = append(s, trailing_breaks...)
-    }
-
-    // Create a token.
-    *token = yaml_token_t{
-        typ:        yaml_SCALAR_TOKEN,
-        start_mark: start_mark,
-        end_mark:   end_mark,
-        value:      s,
-        style:      yaml_LITERAL_SCALAR_STYLE,
-    }
-    if !literal {
-        token.style = yaml_FOLDED_SCALAR_STYLE
-    }
-    return true
-}
-
-// Scan indentation spaces and line breaks for a block scalar. Determine the
-// indentation level if needed.
-func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool {
-    *end_mark = parser.mark
-
-    // Eat the indentation spaces and line breaks.
-    max_indent := 0
-    for {
-        // Eat the indentation spaces.
-        if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-            return false
-        }
-        for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) {
-            skip(parser)
-            if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-                return false
-            }
-        }
-        if parser.mark.column > max_indent {
-            max_indent = parser.mark.column
-        }
-
-        // Check for a tab character messing up the indentation.
-        if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) {
-            return yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
-                start_mark, "found a tab character where an indentation space is expected")
-        }
-
-        // Have we found a non-empty line?
-        if !is_break(parser.buffer, parser.buffer_pos) {
-            break
-        }
-
-        // Consume the line break.
-        if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
-            return false
-        }
-        // [Go] Should really be returning breaks instead.
-        *breaks = read_line(parser, *breaks)
-        *end_mark = parser.mark
-    }
-
-    // Determine the indentation level if needed.
-    if *indent == 0 {
-        *indent = max_indent
-        if *indent < parser.indent+1 {
-            *indent = parser.indent + 1
-        }
-        if *indent < 1 {
-            *indent = 1
-        }
-    }
-    return true
-}
-
-// Scan a quoted scalar.
-func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool {
-    // Eat the left quote.
-    start_mark := parser.mark
-    skip(parser)
-
-    // Consume the content of the quoted scalar.
-    var s, leading_break, trailing_breaks, whitespaces []byte
-    for {
-        // Check that there are no document indicators at the beginning of the line.
-        if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
-            return false
-        }
-
-        if parser.mark.column == 0 &&
-            ((parser.buffer[parser.buffer_pos+0] == '-' &&
-                parser.buffer[parser.buffer_pos+1] == '-' &&
-                parser.buffer[parser.buffer_pos+2] == '-') ||
-                (parser.buffer[parser.buffer_pos+0] == '.' &&
-                    parser.buffer[parser.buffer_pos+1] == '.' &&
-                    parser.buffer[parser.buffer_pos+2] == '.')) &&
-            is_blankz(parser.buffer, parser.buffer_pos+3) {
-            yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
-                start_mark, "found unexpected document indicator")
-            return false
-        }
-
-        // Check for EOF.
-        if is_z(parser.buffer, parser.buffer_pos) {
-            yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
-                start_mark, "found unexpected end of stream")
-            return false
-        }
-
-        // Consume non-blank characters.
-        leading_blanks := false
-        for !is_blankz(parser.buffer, parser.buffer_pos) {
-            if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' {
-                // It is an escaped single quote.
-                s = append(s, '\'')
-                skip(parser)
-                skip(parser)
-
-            } else if single && parser.buffer[parser.buffer_pos] == '\'' {
-                // It is a right single quote.
-                break
-            } else if !single && parser.buffer[parser.buffer_pos] == '"' {
-                // It is a right double quote.
-                break
-
-            } else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) {
-                // It is an escaped line break.
-                if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
-                    return false
-                }
-                skip(parser)
-                skip_line(parser)
-                leading_blanks = true
-                break
-
-            } else if !single && parser.buffer[parser.buffer_pos] == '\\' {
-                // It is an escape sequence.
-                code_length := 0
-
-                // Check the escape character.
-                switch parser.buffer[parser.buffer_pos+1] {
-                case '0':
-                    s = append(s, 0)
-                case 'a':
-                    s = append(s, '\x07')
-                case 'b':
-                    s = append(s, '\x08')
-                case 't', '\t':
-                    s = append(s, '\x09')
-                case 'n':
-                    s = append(s, '\x0A')
-                case 'v':
-                    s = append(s, '\x0B')
-                case 'f':
-                    s = append(s, '\x0C')
-                case 'r':
-                    s = append(s, '\x0D')
-                case 'e':
-                    s = append(s, '\x1B')
-                case ' ':
-                    s = append(s, '\x20')
-                case '"':
-                    s = append(s, '"')
-                case '\'':
-                    s = append(s, '\'')
-                case '\\':
-                    s = append(s, '\\')
-                case 'N': // NEL (#x85)
-                    s = append(s, '\xC2')
-                    s = append(s, '\x85')
-                case '_': // #xA0
-                    s = append(s, '\xC2')
-                    s = append(s, '\xA0')
-                case 'L': // LS (#x2028)
-                    s = append(s, '\xE2')
-                    s = append(s, '\x80')
-                    s = append(s, '\xA8')
-                case 'P': // PS (#x2029)
-                    s = append(s, '\xE2')
-                    s = append(s, '\x80')
-                    s = append(s, '\xA9')
-                case 'x':
-                    code_length = 2
-                case 'u':
-                    code_length = 4
-                case 'U':
-                    code_length = 8
-                default:
-                    yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
-                        start_mark, "found unknown escape character")
-                    return false
-                }
-
-                skip(parser)
-                skip(parser)
-
-                // Consume an arbitrary escape code.
-                if code_length > 0 {
-                    var value int
-
-                    // Scan the character value.
-                    if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) {
-                        return false
-                    }
-                    for k := 0; k < code_length; k++ {
-                        if !is_hex(parser.buffer, parser.buffer_pos+k) {
-                            yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
-                                start_mark, "did not find expected hexadecimal number")
-                            return false
-                        }
-                        value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k)
-                    }
-
-                    // Check the value and write the character.
-                    if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF {
-                        yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
-                            start_mark, "found invalid Unicode character escape code")
-                        return false
-                    }
-                    if value <= 0x7F {
-                        s = append(s, byte(value))
-                    } else if value <= 0x7FF {
-                        s = append(s, byte(0xC0+(value>>6)))
-                        s = append(s, byte(0x80+(value&0x3F)))
-                    } else if value <= 0xFFFF {
-                        s = append(s, byte(0xE0+(value>>12)))
-                        s = append(s, byte(0x80+((value>>6)&0x3F)))
-                        s = append(s, byte(0x80+(value&0x3F)))
-                    } else {
-                        s = append(s, byte(0xF0+(value>>18)))
-                        s = append(s, byte(0x80+((value>>12)&0x3F)))
-                        s = append(s, byte(0x80+((value>>6)&0x3F)))
-                        s = append(s, byte(0x80+(value&0x3F)))
-                    }
-
-                    // Advance the pointer.
-                    for k := 0; k < code_length; k++ {
-                        skip(parser)
-                    }
-                }
-            } else {
-                // It is a non-escaped non-blank character.
-                s = read(parser, s)
-            }
-            if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
-                return false
-            }
-        }
-
-        // Check if we are at the end of the scalar.
-        if single {
-            if parser.buffer[parser.buffer_pos] == '\'' {
-                break
-            }
-        } else {
-            if parser.buffer[parser.buffer_pos] == '"' {
-                break
-            }
-        }
-
-        // Consume blank characters.
-        if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-            return false
-        }
-
-        for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
-            if is_blank(parser.buffer, parser.buffer_pos) {
-                // Consume a space or a tab character.
-                if !leading_blanks {
-                    whitespaces = read(parser, whitespaces)
-                } else {
-                    skip(parser)
-                }
-            } else {
-                if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
-                    return false
-                }
-
-                // Check if it is a first line break.
- if !leading_blanks { - whitespaces = whitespaces[:0] - leading_break = read_line(parser, leading_break) - leading_blanks = true - } else { - trailing_breaks = read_line(parser, trailing_breaks) - } - } - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Join the whitespaces or fold line breaks. - if leading_blanks { - // Do we need to fold line breaks? - if len(leading_break) > 0 && leading_break[0] == '\n' { - if len(trailing_breaks) == 0 { - s = append(s, ' ') - } else { - s = append(s, trailing_breaks...) - } - } else { - s = append(s, leading_break...) - s = append(s, trailing_breaks...) - } - trailing_breaks = trailing_breaks[:0] - leading_break = leading_break[:0] - } else { - s = append(s, whitespaces...) - whitespaces = whitespaces[:0] - } - } - - // Eat the right quote. - skip(parser) - end_mark := parser.mark - - // Create a token. - *token = yaml_token_t{ - typ: yaml_SCALAR_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: s, - style: yaml_SINGLE_QUOTED_SCALAR_STYLE, - } - if !single { - token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - return true -} - -// Scan a plain scalar. -func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool { - - var s, leading_break, trailing_breaks, whitespaces []byte - var leading_blanks bool - var indent = parser.indent + 1 - - start_mark := parser.mark - end_mark := parser.mark - - // Consume the content of the plain scalar. - for { - // Check for a document indicator. - if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { - return false - } - if parser.mark.column == 0 && - ((parser.buffer[parser.buffer_pos+0] == '-' && - parser.buffer[parser.buffer_pos+1] == '-' && - parser.buffer[parser.buffer_pos+2] == '-') || - (parser.buffer[parser.buffer_pos+0] == '.' && - parser.buffer[parser.buffer_pos+1] == '.' && - parser.buffer[parser.buffer_pos+2] == '.')) && - is_blankz(parser.buffer, parser.buffer_pos+3) { - break - } - - // Check for a comment. - if parser.buffer[parser.buffer_pos] == '#' { - break - } - - // Consume non-blank characters. - for !is_blankz(parser.buffer, parser.buffer_pos) { - - // Check for 'x:x' in the flow context. TODO: Fix the test "spec-08-13". - if parser.flow_level > 0 && - parser.buffer[parser.buffer_pos] == ':' && - !is_blankz(parser.buffer, parser.buffer_pos+1) { - yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", - start_mark, "found unexpected ':'") - return false - } - - // Check for indicators that may end a plain scalar. - if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) || - (parser.flow_level > 0 && - (parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == ':' || - parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' || - parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || - parser.buffer[parser.buffer_pos] == '}')) { - break - } - - // Check if we need to join whitespaces and breaks. - if leading_blanks || len(whitespaces) > 0 { - if leading_blanks { - // Do we need to fold line breaks? - if leading_break[0] == '\n' { - if len(trailing_breaks) == 0 { - s = append(s, ' ') - } else { - s = append(s, trailing_breaks...) - } - } else { - s = append(s, leading_break...) - s = append(s, trailing_breaks...) - } - trailing_breaks = trailing_breaks[:0] - leading_break = leading_break[:0] - leading_blanks = false - } else { - s = append(s, whitespaces...) 
-                    whitespaces = whitespaces[:0]
-                }
-            }
-
-            // Copy the character.
-            s = read(parser, s)
-
-            end_mark = parser.mark
-            if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
-                return false
-            }
-        }
-
-        // Is it the end?
-        if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) {
-            break
-        }
-
-        // Consume blank characters.
-        if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-            return false
-        }
-
-        for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
-            if is_blank(parser.buffer, parser.buffer_pos) {
-
-                // Check for a tab character that abuses indentation.
-                if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) {
-                    yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
-                        start_mark, "found a tab character that violates indentation")
-                    return false
-                }
-
-                // Consume a space or a tab character.
-                if !leading_blanks {
-                    whitespaces = read(parser, whitespaces)
-                } else {
-                    skip(parser)
-                }
-            } else {
-                if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
-                    return false
-                }
-
-                // Check if it is a first line break.
-                if !leading_blanks {
-                    whitespaces = whitespaces[:0]
-                    leading_break = read_line(parser, leading_break)
-                    leading_blanks = true
-                } else {
-                    trailing_breaks = read_line(parser, trailing_breaks)
-                }
-            }
-            if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-                return false
-            }
-        }
-
-        // Check the indentation level.
-        if parser.flow_level == 0 && parser.mark.column < indent {
-            break
-        }
-    }
-
-    // Create a token.
-    *token = yaml_token_t{
-        typ:        yaml_SCALAR_TOKEN,
-        start_mark: start_mark,
-        end_mark:   end_mark,
-        value:      s,
-        style:      yaml_PLAIN_SCALAR_STYLE,
-    }
-
-    // Note that we change the 'simple_key_allowed' flag.
- if leading_blanks { - parser.simple_key_allowed = true - } - return true -} diff --git a/vendor/gopkg.in/yaml.v1/sorter.go b/vendor/gopkg.in/yaml.v1/sorter.go deleted file mode 100644 index 5958822..0000000 --- a/vendor/gopkg.in/yaml.v1/sorter.go +++ /dev/null @@ -1,104 +0,0 @@ -package yaml - -import ( - "reflect" - "unicode" -) - -type keyList []reflect.Value - -func (l keyList) Len() int { return len(l) } -func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } -func (l keyList) Less(i, j int) bool { - a := l[i] - b := l[j] - ak := a.Kind() - bk := b.Kind() - for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() { - a = a.Elem() - ak = a.Kind() - } - for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() { - b = b.Elem() - bk = b.Kind() - } - af, aok := keyFloat(a) - bf, bok := keyFloat(b) - if aok && bok { - if af != bf { - return af < bf - } - if ak != bk { - return ak < bk - } - return numLess(a, b) - } - if ak != reflect.String || bk != reflect.String { - return ak < bk - } - ar, br := []rune(a.String()), []rune(b.String()) - for i := 0; i < len(ar) && i < len(br); i++ { - if ar[i] == br[i] { - continue - } - al := unicode.IsLetter(ar[i]) - bl := unicode.IsLetter(br[i]) - if al && bl { - return ar[i] < br[i] - } - if al || bl { - return bl - } - var ai, bi int - var an, bn int64 - for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { - an = an*10 + int64(ar[ai]-'0') - } - for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ { - bn = bn*10 + int64(br[bi]-'0') - } - if an != bn { - return an < bn - } - if ai != bi { - return ai < bi - } - return ar[i] < br[i] - } - return len(ar) < len(br) -} - -// keyFloat returns a float value for v if it is a number/bool -// and whether it is a number/bool or not. -func keyFloat(v reflect.Value) (f float64, ok bool) { - switch v.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return float64(v.Int()), true - case reflect.Float32, reflect.Float64: - return v.Float(), true - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return float64(v.Uint()), true - case reflect.Bool: - if v.Bool() { - return 1, true - } - return 0, true - } - return 0, false -} - -// numLess returns whether a < b. -// a and b must necessarily have the same kind. -func numLess(a, b reflect.Value) bool { - switch a.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return a.Int() < b.Int() - case reflect.Float32, reflect.Float64: - return a.Float() < b.Float() - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return a.Uint() < b.Uint() - case reflect.Bool: - return !a.Bool() && b.Bool() - } - panic("not a number") -} diff --git a/vendor/gopkg.in/yaml.v1/writerc.go b/vendor/gopkg.in/yaml.v1/writerc.go deleted file mode 100644 index 190362f..0000000 --- a/vendor/gopkg.in/yaml.v1/writerc.go +++ /dev/null @@ -1,89 +0,0 @@ -package yaml - -// Set the writer error and return false. -func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { - emitter.error = yaml_WRITER_ERROR - emitter.problem = problem - return false -} - -// Flush the output buffer. -func yaml_emitter_flush(emitter *yaml_emitter_t) bool { - if emitter.write_handler == nil { - panic("write handler not set") - } - - // Check if the buffer is empty. - if emitter.buffer_pos == 0 { - return true - } - - // If the output encoding is UTF-8, we don't need to recode the buffer. 
- if emitter.encoding == yaml_UTF8_ENCODING { - if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { - return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) - } - emitter.buffer_pos = 0 - return true - } - - // Recode the buffer into the raw buffer. - var low, high int - if emitter.encoding == yaml_UTF16LE_ENCODING { - low, high = 0, 1 - } else { - high, low = 1, 0 - } - - pos := 0 - for pos < emitter.buffer_pos { - // See the "reader.c" code for more details on UTF-8 encoding. Note - // that we assume that the buffer contains a valid UTF-8 sequence. - - // Read the next UTF-8 character. - octet := emitter.buffer[pos] - - var w int - var value rune - switch { - case octet&0x80 == 0x00: - w, value = 1, rune(octet&0x7F) - case octet&0xE0 == 0xC0: - w, value = 2, rune(octet&0x1F) - case octet&0xF0 == 0xE0: - w, value = 3, rune(octet&0x0F) - case octet&0xF8 == 0xF0: - w, value = 4, rune(octet&0x07) - } - for k := 1; k < w; k++ { - octet = emitter.buffer[pos+k] - value = (value << 6) + (rune(octet) & 0x3F) - } - pos += w - - // Write the character. - if value < 0x10000 { - var b [2]byte - b[high] = byte(value >> 8) - b[low] = byte(value & 0xFF) - emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1]) - } else { - // Write the character using a surrogate pair (check "reader.c"). - var b [4]byte - value -= 0x10000 - b[high] = byte(0xD8 + (value >> 18)) - b[low] = byte((value >> 10) & 0xFF) - b[high+2] = byte(0xDC + ((value >> 8) & 0xFF)) - b[low+2] = byte(value & 0xFF) - emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1], b[2], b[3]) - } - } - - // Write the raw buffer. - if err := emitter.write_handler(emitter, emitter.raw_buffer); err != nil { - return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) - } - emitter.buffer_pos = 0 - emitter.raw_buffer = emitter.raw_buffer[:0] - return true -} diff --git a/vendor/gopkg.in/yaml.v1/yaml.go b/vendor/gopkg.in/yaml.v1/yaml.go deleted file mode 100644 index f1c390e..0000000 --- a/vendor/gopkg.in/yaml.v1/yaml.go +++ /dev/null @@ -1,301 +0,0 @@ -// Package yaml implements YAML support for the Go language. -// -// Source code and other details for the project are available at GitHub: -// -// https://github.com/go-yaml/yaml -// -package yaml - -import ( - "errors" - "fmt" - "reflect" - "strings" - "sync" -) - -type yamlError string - -func fail(msg string) { - panic(yamlError(msg)) -} - -func handleErr(err *error) { - if r := recover(); r != nil { - if e, ok := r.(yamlError); ok { - *err = errors.New("YAML error: " + string(e)) - } else { - panic(r) - } - } -} - -// The Setter interface may be implemented by types to do their own custom -// unmarshalling of YAML values, rather than being implicitly assigned by -// the yaml package machinery. If setting the value works, the method should -// return true. If it returns false, the value is considered unsupported -// and is omitted from maps and slices. -type Setter interface { - SetYAML(tag string, value interface{}) bool -} - -// The Getter interface is implemented by types to do their own custom -// marshalling into a YAML tag and value. -type Getter interface { - GetYAML() (tag string, value interface{}) -} - -// Unmarshal decodes the first document found within the in byte slice -// and assigns decoded values into the out value. -// -// Maps and pointers (to a struct, string, int, etc) are accepted as out -// values. 
If an internal pointer within a struct is not initialized,
-// the yaml package will initialize it if necessary for unmarshalling
-// the provided data. The out parameter must not be nil.
-//
-// The type of the decoded values and the type of out will be considered,
-// and Unmarshal will do the best possible job to unmarshal values
-// appropriately. It is NOT considered an error, though, to skip values
-// because they are not available in the decoded YAML, or if they are not
-// compatible with the out value. To ensure something was properly
-// unmarshaled use a map or compare against the previous value for the
-// field (usually the zero value).
-//
-// Struct fields are only unmarshalled if they are exported (have an
-// upper case first letter), and are unmarshalled using the field name
-// lowercased as the default key. Custom keys may be defined via the
-// "yaml" name in the field tag: the content preceding the first comma
-// is used as the key, and the following comma-separated options are
-// used to tweak the marshalling process (see Marshal).
-// Conflicting names result in a runtime error.
-//
-// For example:
-//
-//     type T struct {
-//         F int `yaml:"a,omitempty"`
-//         B int
-//     }
-//     var t T
-//     yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
-//
-// See the documentation of Marshal for the format of tags and a list of
-// supported tag options.
-//
-func Unmarshal(in []byte, out interface{}) (err error) {
-    defer handleErr(&err)
-    d := newDecoder()
-    p := newParser(in)
-    defer p.destroy()
-    node := p.parse()
-    if node != nil {
-        v := reflect.ValueOf(out)
-        if v.Kind() == reflect.Ptr && !v.IsNil() {
-            v = v.Elem()
-        }
-        d.unmarshal(node, v)
-    }
-    return nil
-}
-
-// Marshal serializes the value provided into a YAML document. The structure
-// of the generated document will reflect the structure of the value itself.
-// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
-//
-// Struct fields are only marshalled if they are exported (have an upper case
-// first letter), and are marshalled using the field name lowercased as the
-// default key. Custom keys may be defined via the "yaml" name in the field
-// tag: the content preceding the first comma is used as the key, and the
-// following comma-separated options are used to tweak the marshalling process.
-// Conflicting names result in a runtime error.
-//
-// The field tag format accepted is:
-//
-//     `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
-//
-// The following flags are currently supported:
-//
-//     omitempty    Only include the field if it's not set to the zero
-//                  value for the type or to empty slices or maps.
-//                  Does not apply to zero valued structs.
-//
-//     flow         Marshal using a flow style (useful for structs,
-//                  sequences and maps).
-//
-//     inline       Inline the struct it's applied to, so its fields
-//                  are processed as if they were part of the outer
-//                  struct.
-//
-// In addition, if the key is "-", the field is ignored.
-//
-// For example:
-//
-//     type T struct {
-//         F int "a,omitempty"
-//         B int
-//     }
-//     yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
-//     yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
-//
-func Marshal(in interface{}) (out []byte, err error) {
-    defer handleErr(&err)
-    e := newEncoder()
-    defer e.destroy()
-    e.marshal("", reflect.ValueOf(in))
-    e.finish()
-    out = e.out
-    return
-}
-
-// --------------------------------------------------------------------------
-// Maintain a mapping of keys to structure field indexes
-
-// The code in this section was copied from mgo/bson.
- -// structInfo holds details for the serialization of fields of -// a given struct. -type structInfo struct { - FieldsMap map[string]fieldInfo - FieldsList []fieldInfo - - // InlineMap is the number of the field in the struct that - // contains an ,inline map, or -1 if there's none. - InlineMap int -} - -type fieldInfo struct { - Key string - Num int - OmitEmpty bool - Flow bool - - // Inline holds the field index if the field is part of an inlined struct. - Inline []int -} - -var structMap = make(map[reflect.Type]*structInfo) -var fieldMapMutex sync.RWMutex - -func getStructInfo(st reflect.Type) (*structInfo, error) { - fieldMapMutex.RLock() - sinfo, found := structMap[st] - fieldMapMutex.RUnlock() - if found { - return sinfo, nil - } - - n := st.NumField() - fieldsMap := make(map[string]fieldInfo) - fieldsList := make([]fieldInfo, 0, n) - inlineMap := -1 - for i := 0; i != n; i++ { - field := st.Field(i) - if field.PkgPath != "" { - continue // Private field - } - - info := fieldInfo{Num: i} - - tag := field.Tag.Get("yaml") - if tag == "" && strings.Index(string(field.Tag), ":") < 0 { - tag = string(field.Tag) - } - if tag == "-" { - continue - } - - inline := false - fields := strings.Split(tag, ",") - if len(fields) > 1 { - for _, flag := range fields[1:] { - switch flag { - case "omitempty": - info.OmitEmpty = true - case "flow": - info.Flow = true - case "inline": - inline = true - default: - return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)) - } - } - tag = fields[0] - } - - if inline { - switch field.Type.Kind() { - // TODO: Implement support for inline maps. - //case reflect.Map: - // if inlineMap >= 0 { - // return nil, errors.New("Multiple ,inline maps in struct " + st.String()) - // } - // if field.Type.Key() != reflect.TypeOf("") { - // return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String()) - // } - // inlineMap = info.Num - case reflect.Struct: - sinfo, err := getStructInfo(field.Type) - if err != nil { - return nil, err - } - for _, finfo := range sinfo.FieldsList { - if _, found := fieldsMap[finfo.Key]; found { - msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String() - return nil, errors.New(msg) - } - if finfo.Inline == nil { - finfo.Inline = []int{i, finfo.Num} - } else { - finfo.Inline = append([]int{i}, finfo.Inline...) 
- } - fieldsMap[finfo.Key] = finfo - fieldsList = append(fieldsList, finfo) - } - default: - //return nil, errors.New("Option ,inline needs a struct value or map field") - return nil, errors.New("Option ,inline needs a struct value field") - } - continue - } - - if tag != "" { - info.Key = tag - } else { - info.Key = strings.ToLower(field.Name) - } - - if _, found = fieldsMap[info.Key]; found { - msg := "Duplicated key '" + info.Key + "' in struct " + st.String() - return nil, errors.New(msg) - } - - fieldsList = append(fieldsList, info) - fieldsMap[info.Key] = info - } - - sinfo = &structInfo{fieldsMap, fieldsList, inlineMap} - - fieldMapMutex.Lock() - structMap[st] = sinfo - fieldMapMutex.Unlock() - return sinfo, nil -} - -func isZero(v reflect.Value) bool { - switch v.Kind() { - case reflect.String: - return len(v.String()) == 0 - case reflect.Interface, reflect.Ptr: - return v.IsNil() - case reflect.Slice: - return v.Len() == 0 - case reflect.Map: - return v.Len() == 0 - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Bool: - return !v.Bool() - } - return false -} diff --git a/vendor/gopkg.in/yaml.v1/yamlh.go b/vendor/gopkg.in/yaml.v1/yamlh.go deleted file mode 100644 index 4b020b1..0000000 --- a/vendor/gopkg.in/yaml.v1/yamlh.go +++ /dev/null @@ -1,716 +0,0 @@ -package yaml - -import ( - "io" -) - -// The version directive data. -type yaml_version_directive_t struct { - major int8 // The major version number. - minor int8 // The minor version number. -} - -// The tag directive data. -type yaml_tag_directive_t struct { - handle []byte // The tag handle. - prefix []byte // The tag prefix. -} - -type yaml_encoding_t int - -// The stream encoding. -const ( - // Let the parser choose the encoding. - yaml_ANY_ENCODING yaml_encoding_t = iota - - yaml_UTF8_ENCODING // The default UTF-8 encoding. - yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM. - yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM. -) - -type yaml_break_t int - -// Line break types. -const ( - // Let the parser choose the break type. - yaml_ANY_BREAK yaml_break_t = iota - - yaml_CR_BREAK // Use CR for line breaks (Mac style). - yaml_LN_BREAK // Use LN for line breaks (Unix style). - yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style). -) - -type yaml_error_type_t int - -// Many bad things could happen with the parser and emitter. -const ( - // No error is produced. - yaml_NO_ERROR yaml_error_type_t = iota - - yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory. - yaml_READER_ERROR // Cannot read or decode the input stream. - yaml_SCANNER_ERROR // Cannot scan the input stream. - yaml_PARSER_ERROR // Cannot parse the input stream. - yaml_COMPOSER_ERROR // Cannot compose a YAML document. - yaml_WRITER_ERROR // Cannot write to the output stream. - yaml_EMITTER_ERROR // Cannot emit a YAML stream. -) - -// The pointer position. -type yaml_mark_t struct { - index int // The position index. - line int // The position line. - column int // The position column. -} - -// Node Styles - -type yaml_style_t int8 - -type yaml_scalar_style_t yaml_style_t - -// Scalar styles. -const ( - // Let the emitter choose the style. - yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota - - yaml_PLAIN_SCALAR_STYLE // The plain scalar style. - yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style. 
- yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style. - yaml_LITERAL_SCALAR_STYLE // The literal scalar style. - yaml_FOLDED_SCALAR_STYLE // The folded scalar style. -) - -type yaml_sequence_style_t yaml_style_t - -// Sequence styles. -const ( - // Let the emitter choose the style. - yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota - - yaml_BLOCK_SEQUENCE_STYLE // The block sequence style. - yaml_FLOW_SEQUENCE_STYLE // The flow sequence style. -) - -type yaml_mapping_style_t yaml_style_t - -// Mapping styles. -const ( - // Let the emitter choose the style. - yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota - - yaml_BLOCK_MAPPING_STYLE // The block mapping style. - yaml_FLOW_MAPPING_STYLE // The flow mapping style. -) - -// Tokens - -type yaml_token_type_t int - -// Token types. -const ( - // An empty token. - yaml_NO_TOKEN yaml_token_type_t = iota - - yaml_STREAM_START_TOKEN // A STREAM-START token. - yaml_STREAM_END_TOKEN // A STREAM-END token. - - yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token. - yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token. - yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token. - yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token. - - yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token. - yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-SEQUENCE-END token. - yaml_BLOCK_END_TOKEN // A BLOCK-END token. - - yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token. - yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token. - yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token. - yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token. - - yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token. - yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token. - yaml_KEY_TOKEN // A KEY token. - yaml_VALUE_TOKEN // A VALUE token. - - yaml_ALIAS_TOKEN // An ALIAS token. - yaml_ANCHOR_TOKEN // An ANCHOR token. - yaml_TAG_TOKEN // A TAG token. - yaml_SCALAR_TOKEN // A SCALAR token. -) - -func (tt yaml_token_type_t) String() string { - switch tt { - case yaml_NO_TOKEN: - return "yaml_NO_TOKEN" - case yaml_STREAM_START_TOKEN: - return "yaml_STREAM_START_TOKEN" - case yaml_STREAM_END_TOKEN: - return "yaml_STREAM_END_TOKEN" - case yaml_VERSION_DIRECTIVE_TOKEN: - return "yaml_VERSION_DIRECTIVE_TOKEN" - case yaml_TAG_DIRECTIVE_TOKEN: - return "yaml_TAG_DIRECTIVE_TOKEN" - case yaml_DOCUMENT_START_TOKEN: - return "yaml_DOCUMENT_START_TOKEN" - case yaml_DOCUMENT_END_TOKEN: - return "yaml_DOCUMENT_END_TOKEN" - case yaml_BLOCK_SEQUENCE_START_TOKEN: - return "yaml_BLOCK_SEQUENCE_START_TOKEN" - case yaml_BLOCK_MAPPING_START_TOKEN: - return "yaml_BLOCK_MAPPING_START_TOKEN" - case yaml_BLOCK_END_TOKEN: - return "yaml_BLOCK_END_TOKEN" - case yaml_FLOW_SEQUENCE_START_TOKEN: - return "yaml_FLOW_SEQUENCE_START_TOKEN" - case yaml_FLOW_SEQUENCE_END_TOKEN: - return "yaml_FLOW_SEQUENCE_END_TOKEN" - case yaml_FLOW_MAPPING_START_TOKEN: - return "yaml_FLOW_MAPPING_START_TOKEN" - case yaml_FLOW_MAPPING_END_TOKEN: - return "yaml_FLOW_MAPPING_END_TOKEN" - case yaml_BLOCK_ENTRY_TOKEN: - return "yaml_BLOCK_ENTRY_TOKEN" - case yaml_FLOW_ENTRY_TOKEN: - return "yaml_FLOW_ENTRY_TOKEN" - case yaml_KEY_TOKEN: - return "yaml_KEY_TOKEN" - case yaml_VALUE_TOKEN: - return "yaml_VALUE_TOKEN" - case yaml_ALIAS_TOKEN: - return "yaml_ALIAS_TOKEN" - case yaml_ANCHOR_TOKEN: - return "yaml_ANCHOR_TOKEN" - case yaml_TAG_TOKEN: - return "yaml_TAG_TOKEN" - case yaml_SCALAR_TOKEN: - return "yaml_SCALAR_TOKEN" - } - return "" -} - -// The token structure. -type yaml_token_t struct { - // The token type. 
- typ yaml_token_type_t - - // The start/end of the token. - start_mark, end_mark yaml_mark_t - - // The stream encoding (for yaml_STREAM_START_TOKEN). - encoding yaml_encoding_t - - // The alias/anchor/scalar value or tag/tag directive handle - // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN). - value []byte - - // The tag suffix (for yaml_TAG_TOKEN). - suffix []byte - - // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN). - prefix []byte - - // The scalar style (for yaml_SCALAR_TOKEN). - style yaml_scalar_style_t - - // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN). - major, minor int8 -} - -// Events - -type yaml_event_type_t int8 - -// Event types. -const ( - // An empty event. - yaml_NO_EVENT yaml_event_type_t = iota - - yaml_STREAM_START_EVENT // A STREAM-START event. - yaml_STREAM_END_EVENT // A STREAM-END event. - yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event. - yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event. - yaml_ALIAS_EVENT // An ALIAS event. - yaml_SCALAR_EVENT // A SCALAR event. - yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event. - yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event. - yaml_MAPPING_START_EVENT // A MAPPING-START event. - yaml_MAPPING_END_EVENT // A MAPPING-END event. -) - -// The event structure. -type yaml_event_t struct { - - // The event type. - typ yaml_event_type_t - - // The start and end of the event. - start_mark, end_mark yaml_mark_t - - // The document encoding (for yaml_STREAM_START_EVENT). - encoding yaml_encoding_t - - // The version directive (for yaml_DOCUMENT_START_EVENT). - version_directive *yaml_version_directive_t - - // The list of tag directives (for yaml_DOCUMENT_START_EVENT). - tag_directives []yaml_tag_directive_t - - // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT). - anchor []byte - - // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). - tag []byte - - // The scalar value (for yaml_SCALAR_EVENT). - value []byte - - // Is the document start/end indicator implicit, or the tag optional? - // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT). - implicit bool - - // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT). - quoted_implicit bool - - // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). - style yaml_style_t -} - -func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) } -func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) } -func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) } - -// Nodes - -const ( - yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null. - yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false. - yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values. - yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values. - yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values. - yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values. - - yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences. 
- yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mappings.
-
- // Not in original libyaml.
- yaml_BINARY_TAG = "tag:yaml.org,2002:binary"
- yaml_MERGE_TAG = "tag:yaml.org,2002:merge"
-
- yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str.
- yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq.
- yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map.
-)
-
-type yaml_node_type_t int
-
-// Node types.
-const (
- // An empty node.
- yaml_NO_NODE yaml_node_type_t = iota
-
- yaml_SCALAR_NODE // A scalar node.
- yaml_SEQUENCE_NODE // A sequence node.
- yaml_MAPPING_NODE // A mapping node.
-)
-
-// An element of a sequence node.
-type yaml_node_item_t int
-
-// An element of a mapping node.
-type yaml_node_pair_t struct {
- key int // The key of the element.
- value int // The value of the element.
-}
-
-// The node structure.
-type yaml_node_t struct {
- typ yaml_node_type_t // The node type.
- tag []byte // The node tag.
-
- // The node data.
-
- // The scalar parameters (for yaml_SCALAR_NODE).
- scalar struct {
- value []byte // The scalar value.
- length int // The length of the scalar value.
- style yaml_scalar_style_t // The scalar style.
- }
-
- // The sequence parameters (for yaml_SEQUENCE_NODE).
- sequence struct {
- items_data []yaml_node_item_t // The stack of sequence items.
- style yaml_sequence_style_t // The sequence style.
- }
-
- // The mapping parameters (for yaml_MAPPING_NODE).
- mapping struct {
- pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value).
- pairs_start *yaml_node_pair_t // The beginning of the stack.
- pairs_end *yaml_node_pair_t // The end of the stack.
- pairs_top *yaml_node_pair_t // The top of the stack.
- style yaml_mapping_style_t // The mapping style.
- }
-
- start_mark yaml_mark_t // The beginning of the node.
- end_mark yaml_mark_t // The end of the node.
-
-}
-
-// The document structure.
-type yaml_document_t struct {
-
- // The document nodes.
- nodes []yaml_node_t
-
- // The version directive.
- version_directive *yaml_version_directive_t
-
- // The list of tag directives.
- tag_directives_data []yaml_tag_directive_t
- tag_directives_start int // The beginning of the tag directives list.
- tag_directives_end int // The end of the tag directives list.
-
- start_implicit int // Is the document start indicator implicit?
- end_implicit int // Is the document end indicator implicit?
-
- // The start/end of the document.
- start_mark, end_mark yaml_mark_t
-}
-
-// The prototype of a read handler.
-//
-// The read handler is called when the parser needs to read more bytes from the
-// source. The handler should write no more than size bytes to the buffer and
-// report the actual number of bytes written via the size_read variable.
-//
-// [in,out] data A pointer to application data specified by
-// yaml_parser_set_input().
-// [out] buffer The buffer into which the data from the source is written.
-// [in] size The size of the buffer.
-// [out] size_read The actual number of bytes read from the source.
-//
-// On success, the handler should return 1. If the handler fails,
-// it should return 0. On EOF, the handler should set size_read to 0
-// and return 1.
-type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
-
-// This structure holds information about a potential simple key.
-type yaml_simple_key_t struct {
- possible bool // Is a simple key possible?
- required bool // Is a simple key required?
- token_number int // The number of the token.
- mark yaml_mark_t // The position mark.
-}
-
-// The states of the parser.
-type yaml_parser_state_t int
-
-const (
- yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
-
- yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document.
- yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START.
- yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document.
- yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END.
- yaml_PARSE_BLOCK_NODE_STATE // Expect a block node.
- yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
- yaml_PARSE_FLOW_NODE_STATE // Expect a flow node.
- yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence.
- yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence.
- yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence.
- yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
- yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key.
- yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value.
- yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence.
- yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence.
- yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping.
- yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
- yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the end of an ordered mapping entry.
- yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
- yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
- yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
- yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping.
- yaml_PARSE_END_STATE // Expect nothing.
-)
-
-func (ps yaml_parser_state_t) String() string {
- switch ps {
- case yaml_PARSE_STREAM_START_STATE:
- return "yaml_PARSE_STREAM_START_STATE"
- case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
- return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
- case yaml_PARSE_DOCUMENT_START_STATE:
- return "yaml_PARSE_DOCUMENT_START_STATE"
- case yaml_PARSE_DOCUMENT_CONTENT_STATE:
- return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
- case yaml_PARSE_DOCUMENT_END_STATE:
- return "yaml_PARSE_DOCUMENT_END_STATE"
- case yaml_PARSE_BLOCK_NODE_STATE:
- return "yaml_PARSE_BLOCK_NODE_STATE"
- case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
- return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
- case yaml_PARSE_FLOW_NODE_STATE:
- return "yaml_PARSE_FLOW_NODE_STATE"
- case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
- return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
- case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
- return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
- case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
- return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
- case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
- return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
- case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
- return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
- case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
- return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
- case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
- return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
- case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
- return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
- case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
- return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
- case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
- return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
- case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
- return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
- case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
- return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
- case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
- return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
- case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
- return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
- case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
- return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE"
- case yaml_PARSE_END_STATE:
- return "yaml_PARSE_END_STATE"
- }
- return ""
-}
-
-// This structure holds alias data.
-type yaml_alias_data_t struct {
- anchor []byte // The anchor.
- index int // The node id.
- mark yaml_mark_t // The anchor mark.
-}
-
-// The parser structure.
-//
-// All members are internal. Manage the structure using the
-// yaml_parser_ family of functions.
-type yaml_parser_t struct {
-
- // Error handling
-
- error yaml_error_type_t // Error type.
-
- problem string // Error description.
-
- // The byte about which the problem occurred.
- problem_offset int
- problem_value int
- problem_mark yaml_mark_t
-
- // The error context.
- context string
- context_mark yaml_mark_t
-
- // Reader stuff
-
- read_handler yaml_read_handler_t // Read handler.
-
- input_file io.Reader // File input data.
- input []byte // String input data.
- input_pos int
-
- eof bool // EOF flag
-
- buffer []byte // The working buffer.
- buffer_pos int // The current position of the buffer.
-
- unread int // The number of unread characters in the buffer.
-
- raw_buffer []byte // The raw buffer.
- raw_buffer_pos int // The current position of the raw buffer.
-
- encoding yaml_encoding_t // The input encoding.
-
- offset int // The offset of the current position (in bytes).
- mark yaml_mark_t // The mark of the current position.
-
- // Scanner stuff
-
- stream_start_produced bool // Have we started to scan the input stream?
- stream_end_produced bool // Have we reached the end of the input stream?
-
- flow_level int // The number of unclosed '[' and '{' indicators.
-
- tokens []yaml_token_t // The tokens queue.
- tokens_head int // The head of the tokens queue.
- tokens_parsed int // The number of tokens fetched from the queue.
- token_available bool // Does the tokens queue contain a token ready for dequeueing?
-
- indent int // The current indentation level.
- indents []int // The indentation levels stack.
-
- simple_key_allowed bool // May a simple key occur at the current position?
- simple_keys []yaml_simple_key_t // The stack of simple keys.
-
- // Parser stuff
-
- state yaml_parser_state_t // The current parser state.
- states []yaml_parser_state_t // The parser states stack.
- marks []yaml_mark_t // The stack of marks.
- tag_directives []yaml_tag_directive_t // The list of TAG directives.
-
- // Dumper stuff
-
- aliases []yaml_alias_data_t // The alias data.
-
- document *yaml_document_t // The currently parsed document.
-}
-
-// Emitter Definitions
-
-// The prototype of a write handler.
-//
-// The write handler is called when the emitter needs to flush the accumulated
-// characters to the output. The handler should write size bytes of the
-// buffer to the output.
-//
-// [in,out] data A pointer to application data specified by
-// yaml_emitter_set_output().
-// [in] buffer The buffer with bytes to be written.
-// [in] size The size of the buffer.
-//
-// On success, the handler should return 1. If the handler fails,
-// it should return 0.
-//
-type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error
-
-type yaml_emitter_state_t int
-
-// The emitter states.
-const (
- // Expect STREAM-START.
- yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota
-
- yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END.
- yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END.
- yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document.
- yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END.
- yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence.
- yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence.
- yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
- yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
- yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping.
- yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
- yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence.
- yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence.
- yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
- yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect a key of a block mapping.
- yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping.
- yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping.
- yaml_EMIT_END_STATE // Expect nothing.
-)
-
-// The emitter structure.
-//
-// All members are internal. Manage the structure using the yaml_emitter_
-// family of functions.
-type yaml_emitter_t struct {
-
- // Error handling
-
- error yaml_error_type_t // Error type.
- problem string // Error description.
-
- // Writer stuff
-
- write_handler yaml_write_handler_t // Write handler.
-
- output_buffer *[]byte // String output data.
- output_file io.Writer // File output data.
-
- buffer []byte // The working buffer.
- buffer_pos int // The current position of the buffer.
-
- raw_buffer []byte // The raw buffer.
- raw_buffer_pos int // The current position of the raw buffer.
-
- encoding yaml_encoding_t // The stream encoding.
-
- // Emitter stuff
-
- canonical bool // Is the output in the canonical style?
- best_indent int // The number of indentation spaces.
- best_width int // The preferred width of the output lines.
- unicode bool // Allow unescaped non-ASCII characters?
- line_break yaml_break_t // The preferred line break.
-
- state yaml_emitter_state_t // The current emitter state.
- states []yaml_emitter_state_t // The stack of states.
-
- events []yaml_event_t // The event queue.
- events_head int // The head of the event queue.
-
- indents []int // The stack of indentation levels.
-
- tag_directives []yaml_tag_directive_t // The list of tag directives.
-
- indent int // The current indentation level.
-
- flow_level int // The current flow level.
-
- root_context bool // Is it the document root context?
- sequence_context bool // Is it a sequence context?
- mapping_context bool // Is it a mapping context?
- simple_key_context bool // Is it a simple mapping key context?
-
- line int // The current line.
- column int // The current column.
- whitespace bool // Was the last character a whitespace?
- indention bool // Was the last character an indentation character (' ', '-', '?', ':')?
- open_ended bool // Is an explicit document end required?
-
- // Anchor analysis.
- anchor_data struct {
- anchor []byte // The anchor value.
- alias bool // Is it an alias?
- }
-
- // Tag analysis.
- tag_data struct {
- handle []byte // The tag handle.
- suffix []byte // The tag suffix.
- }
-
- // Scalar analysis.
- scalar_data struct {
- value []byte // The scalar value.
- multiline bool // Does the scalar contain line breaks?
- flow_plain_allowed bool // Can the scalar be expressed in the flow plain style?
- block_plain_allowed bool // Can the scalar be expressed in the block plain style?
- single_quoted_allowed bool // Can the scalar be expressed in the single quoted style?
- block_allowed bool // Can the scalar be expressed in the literal or folded styles?
- style yaml_scalar_style_t // The output style.
- }
-
- // Dumper stuff
-
- opened bool // Was the stream already opened?
- closed bool // Was the stream already closed?
-
- // The information associated with the document nodes.
- anchors *struct {
- references int // The number of references.
- anchor int // The anchor id.
- serialized bool // Has the node been emitted?
- }
-
- last_anchor_id int // The last assigned anchor id.
-
- document *yaml_document_t // The currently emitted document.
-}
diff --git a/vendor/gopkg.in/yaml.v1/yamlprivateh.go b/vendor/gopkg.in/yaml.v1/yamlprivateh.go
deleted file mode 100644
index 8110ce3..0000000
--- a/vendor/gopkg.in/yaml.v1/yamlprivateh.go
+++ /dev/null
@@ -1,173 +0,0 @@
-package yaml
-
-const (
- // The size of the input raw buffer.
- input_raw_buffer_size = 512
-
- // The size of the input buffer.
- // It should be possible to decode the whole raw buffer.
- input_buffer_size = input_raw_buffer_size * 3
-
- // The size of the output buffer.
- output_buffer_size = 128 - - // The size of the output raw buffer. - // It should be possible to encode the whole output buffer. - output_raw_buffer_size = (output_buffer_size*2 + 2) - - // The size of other stacks and queues. - initial_stack_size = 16 - initial_queue_size = 16 - initial_string_size = 16 -) - -// Check if the character at the specified position is an alphabetical -// character, a digit, '_', or '-'. -func is_alpha(b []byte, i int) bool { - return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-' -} - -// Check if the character at the specified position is a digit. -func is_digit(b []byte, i int) bool { - return b[i] >= '0' && b[i] <= '9' -} - -// Get the value of a digit. -func as_digit(b []byte, i int) int { - return int(b[i]) - '0' -} - -// Check if the character at the specified position is a hex-digit. -func is_hex(b []byte, i int) bool { - return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f' -} - -// Get the value of a hex-digit. -func as_hex(b []byte, i int) int { - bi := b[i] - if bi >= 'A' && bi <= 'F' { - return int(bi) - 'A' + 10 - } - if bi >= 'a' && bi <= 'f' { - return int(bi) - 'a' + 10 - } - return int(bi) - '0' -} - -// Check if the character is ASCII. -func is_ascii(b []byte, i int) bool { - return b[i] <= 0x7F -} - -// Check if the character at the start of the buffer can be printed unescaped. -func is_printable(b []byte, i int) bool { - return ((b[i] == 0x0A) || // . == #x0A - (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E - (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF - (b[i] > 0xC2 && b[i] < 0xED) || - (b[i] == 0xED && b[i+1] < 0xA0) || - (b[i] == 0xEE) || - (b[i] == 0xEF && // #xE000 <= . <= #xFFFD - !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF - !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF)))) -} - -// Check if the character at the specified position is NUL. -func is_z(b []byte, i int) bool { - return b[i] == 0x00 -} - -// Check if the beginning of the buffer is a BOM. -func is_bom(b []byte, i int) bool { - return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF -} - -// Check if the character at the specified position is space. -func is_space(b []byte, i int) bool { - return b[i] == ' ' -} - -// Check if the character at the specified position is tab. -func is_tab(b []byte, i int) bool { - return b[i] == '\t' -} - -// Check if the character at the specified position is blank (space or tab). -func is_blank(b []byte, i int) bool { - //return is_space(b, i) || is_tab(b, i) - return b[i] == ' ' || b[i] == '\t' -} - -// Check if the character at the specified position is a line break. -func is_break(b []byte, i int) bool { - return (b[i] == '\r' || // CR (#xD) - b[i] == '\n' || // LF (#xA) - b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029) -} - -func is_crlf(b []byte, i int) bool { - return b[i] == '\r' && b[i+1] == '\n' -} - -// Check if the character is a line break or NUL. 
-func is_breakz(b []byte, i int) bool { - //return is_break(b, i) || is_z(b, i) - return ( // is_break: - b[i] == '\r' || // CR (#xD) - b[i] == '\n' || // LF (#xA) - b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) - // is_z: - b[i] == 0) -} - -// Check if the character is a line break, space, or NUL. -func is_spacez(b []byte, i int) bool { - //return is_space(b, i) || is_breakz(b, i) - return ( // is_space: - b[i] == ' ' || - // is_breakz: - b[i] == '\r' || // CR (#xD) - b[i] == '\n' || // LF (#xA) - b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) - b[i] == 0) -} - -// Check if the character is a line break, space, tab, or NUL. -func is_blankz(b []byte, i int) bool { - //return is_blank(b, i) || is_breakz(b, i) - return ( // is_blank: - b[i] == ' ' || b[i] == '\t' || - // is_breakz: - b[i] == '\r' || // CR (#xD) - b[i] == '\n' || // LF (#xA) - b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) - b[i] == 0) -} - -// Determine the width of the character. -func width(b byte) int { - // Don't replace these by a switch without first - // confirming that it is being inlined. - if b&0x80 == 0x00 { - return 1 - } - if b&0xE0 == 0xC0 { - return 2 - } - if b&0xF0 == 0xE0 { - return 3 - } - if b&0xF8 == 0xF0 { - return 4 - } - return 0 - -}
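For reference, here is a minimal standalone sketch (editorial, not part of the deleted vendored file) of how the byte-level helpers above cooperate: the leading byte of each UTF-8 sequence encodes its width, so the scanner can step through the buffer one encoded character at a time while classifying line breaks. The names utf8Width and isBreak are hypothetical; the vendored originals are unexported package internals and, unlike this sketch, rely on look-ahead padding in the working buffer instead of explicit bounds checks.

package main

import "fmt"

// utf8Width mirrors the deleted width() helper: the leading byte of a
// UTF-8 sequence encodes its total length in bytes (1-4); a continuation
// or invalid leading byte yields 0.
func utf8Width(b byte) int {
	switch {
	case b&0x80 == 0x00:
		return 1 // ASCII
	case b&0xE0 == 0xC0:
		return 2
	case b&0xF0 == 0xE0:
		return 3
	case b&0xF8 == 0xF0:
		return 4
	}
	return 0
}

// isBreak mirrors the deleted is_break() helper: CR and LF, plus the
// multi-byte breaks NEL (U+0085), LS (U+2028) and PS (U+2029). Bounds
// checks are added here for safety.
func isBreak(b []byte, i int) bool {
	if b[i] == '\r' || b[i] == '\n' {
		return true
	}
	if b[i] == 0xC2 && i+1 < len(b) && b[i+1] == 0x85 { // NEL
		return true
	}
	if b[i] == 0xE2 && i+2 < len(b) && b[i+1] == 0x80 &&
		(b[i+2] == 0xA8 || b[i+2] == 0xA9) { // LS, PS
		return true
	}
	return false
}

func main() {
	buf := []byte("a\nb\u2028c") // mixes a 1-byte and a 3-byte line break
	for i := 0; i < len(buf); {
		w := utf8Width(buf[i])
		fmt.Printf("offset %d: width=%d break=%v\n", i, w, isBreak(buf, i))
		if w == 0 {
			break // malformed input; bail out of the sketch
		}
		i += w
	}
}

Run as-is, this prints one line per character and consumes the 3-byte LS sequence in a single step, which is why the scanner tracks character widths separately from byte offsets.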