diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml new file mode 100644 index 0000000..edb3699 --- /dev/null +++ b/.github/workflows/build-and-test.yml @@ -0,0 +1,171 @@ +name: Build packages + +# Controls when the action will run. Triggers the workflow on push or pull request +# events but only for the master branch +on: + push: + branches: [ master, maint-1.4 ] + pull_request: + branches: [ master, maint-1.4 ] + release: + types: [ published ] + +# A workflow run is made up of one or more jobs that can run sequentially or in parallel +jobs: + make-rhel-pkg: + name: Make RHEL package + runs-on: ubuntu-latest + container: centos:latest + steps: + - name: Prepare + run: | + yum update -y + yum install -y make rpmdevtools yum-utils + + - name: Clone repo + uses: actions/checkout@v2 + + - name: Make package + run: | + yum-builddep --assumeyes packaging/rpm/*.spec + make -C packaging/rpm pkg-local "DESTDIR=${GITHUB_WORKSPACE}/pkgs/" + + - name: Publish package + uses: actions/upload-artifact@v1.0.0 + with: + name: rhel-pkg + path: pkgs + + make-alpine-pkg: + name: Make Alpine package + runs-on: ubuntu-latest + container: alpine:latest + steps: + - name: Prepare + run: | + apk update + apk upgrade + apk add make + apk add alpine-sdk + apk add go + adduser -G abuild -D jobber + + - name: Clone repo + uses: actions/checkout@v2 + + - name: Make package + run: | + chmod a+w . packaging/alpine + sudo -u jobber make -C packaging/alpine pkg-local "DESTDIR=${GITHUB_WORKSPACE}/pkgs/" + + - name: Publish package + uses: actions/upload-artifact@v1.0.0 + with: + name: alpine-pkg + path: pkgs + + make-debian-pkg: + name: Make Debian package + runs-on: ubuntu-latest + steps: + - name: Prepare + run: | + sudo apt-get update + sudo apt-get install -y dpkg-dev debhelper dh-systemd + + - name: Clone repo + uses: actions/checkout@v2 + + - name: Build package + run: make -C "${GITHUB_WORKSPACE}/packaging/debian" pkg-local "DESTDIR=${GITHUB_WORKSPACE}/pkgs/" + + - name: Publish package + uses: actions/upload-artifact@v1.0.0 + with: + name: debian-pkg + path: pkgs + + test-on-macos: + name: Test on macOS + runs-on: macos-latest + steps: + - name: Prepare + run: | + python --version + brew install socat + sudo pip install robotframework + sudo sysadminctl -addUser normuser -home /Users/normuser + sudo createhomedir -c + + - name: Clone repo + uses: actions/checkout@v2 + + - name: Install Jobber + run: | + make check build + sudo make install DESTDIR=/ + PLIST=packaging/darwin/launchd.plist + sudo chown root:admin "${PLIST}" + sudo launchctl load "${PLIST}" + sudo launchctl start info.nekonya.jobber + + - name: Test + run: | + if sudo robot --include test --pythonpath platform_tests/keywords platform_tests/suites; then + echo "::set-env name=PASSED_TESTS::true" + else + echo "::set-env name=PASSED_TESTS::false" + fi + mkdir test-report + mv *.html test-report/ + + - name: Publish test report + uses: actions/upload-artifact@v1.0.0 + with: + name: mac-test-report + path: test-report + + - name: SucceedOrFail + run: test "${PASSED_TESTS}" = true + + test-on-debian: + name: Test on Debian + needs: make-debian-pkg + runs-on: ubuntu-latest + steps: + - name: Prepare + run: | + sudo apt-get update + sudo apt-get install -y python-pip socat + sudo pip install robotframework + sudo useradd normuser -m + + - name: Download package + uses: actions/download-artifact@v1 + with: + name: debian-pkg + + - name: Install package + run: sudo dpkg -i "$(find ${GITHUB_WORKSPACE} -name *.deb)" + + - 
name: Clone repo + uses: actions/checkout@v2 + + - name: Test package + run: | + if sudo robot --include test --pythonpath platform_tests/keywords platform_tests/suites; then + echo "::set-env name=PASSED_TESTS::true" + else + echo "::set-env name=PASSED_TESTS::false" + fi + mkdir test-report + mv *.html test-report/ + + - name: Publish test report + uses: actions/upload-artifact@v1.0.0 + with: + name: debian-test-report + path: test-report + + - name: SucceedOrFail + run: test "${PASSED_TESTS}" = true \ No newline at end of file diff --git a/.gitignore b/.gitignore index b6c53fe..6f011e2 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,6 @@ +bin buildtools/gen +buildtools/gotools/tools-gopls-v0.3.4 *.rpm *.tar *.tgz diff --git a/Gopkg.lock b/Gopkg.lock deleted file mode 100644 index d158aad..0000000 --- a/Gopkg.lock +++ /dev/null @@ -1,88 +0,0 @@ -# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. - - -[[projects]] - name = "github.com/davecgh/go-spew" - packages = ["spew"] - revision = "346938d642f2ec3594ed81d874461961cd0faa76" - version = "v1.1.0" - -[[projects]] - name = "github.com/golang/protobuf" - packages = ["proto"] - revision = "925541529c1fa6821df4e44ce2723319eb2be768" - version = "v1.0.0" - -[[projects]] - name = "github.com/pmezard/go-difflib" - packages = ["difflib"] - revision = "792786c7400a136282c1664665ae0a8db921c6c2" - version = "v1.0.0" - -[[projects]] - name = "github.com/stretchr/testify" - packages = [ - "assert", - "require", - "suite" - ] - revision = "12b6f73e6084dad08a7c6e575284b177ecafbc71" - version = "v1.2.1" - -[[projects]] - branch = "master" - name = "golang.org/x/net" - packages = ["context"] - revision = "cbe0f9307d0156177f9dd5dc85da1a31abc5f2fb" - -[[projects]] - branch = "master" - name = "golang.org/x/tools" - packages = [ - "go/ast/astutil", - "go/buildutil", - "go/gccgoexportdata", - "go/internal/gccgoimporter", - "go/loader", - "go/types/typeutil", - "refactor/importgraph", - "refactor/rename", - "refactor/satisfy" - ] - revision = "5e776fee60db37e560cee3fb46db699d2f095386" - -[[projects]] - name = "google.golang.org/appengine" - packages = [ - ".", - "datastore", - "internal", - "internal/app_identity", - "internal/base", - "internal/datastore", - "internal/log", - "internal/memcache", - "internal/modules", - "internal/remote_api", - "internal/urlfetch", - "internal/user", - "log", - "memcache", - "urlfetch", - "user" - ] - revision = "150dc57a1b433e64154302bdc40b6bb8aefa313a" - version = "v1.0.0" - -[[projects]] - branch = "v2" - name = "gopkg.in/yaml.v2" - packages = ["."] - revision = "d670f9405373e636a5a2765eea47fac0c9bc91a4" - -[solve-meta] - analyzer-name = "dep" - analyzer-version = 1 - inputs-digest = "0b8d1dc43e356a174e143ef529683b4da87f44d26ac765172ed764ce23464288" - solver-name = "gps-cdcl" - solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml deleted file mode 100644 index 35071f3..0000000 --- a/Gopkg.toml +++ /dev/null @@ -1,50 +0,0 @@ -# Gopkg.toml example -# -# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md -# for detailed Gopkg.toml documentation. 
-# -# required = ["github.com/user/thing/cmd/thing"] -# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] -# -# [[constraint]] -# name = "github.com/user/project" -# version = "1.0.0" -# -# [[constraint]] -# name = "github.com/user/project2" -# branch = "dev" -# source = "github.com/myfork/project2" -# -# [[override]] -# name = "github.com/x/y" -# version = "2.4.0" -# -# [prune] -# non-go = false -# go-tests = true -# unused-packages = true - - -[[constraint]] - name = "github.com/stretchr/testify" - version = "1.2.1" - -[[constraint]] - branch = "master" - name = "golang.org/x/net" - -[[constraint]] - branch = "master" - name = "golang.org/x/tools" - -[[constraint]] - name = "google.golang.org/appengine" - version = "1.0.0" - -[[constraint]] - branch = "v2" - name = "gopkg.in/yaml.v2" - -[prune] - go-tests = true - unused-packages = true diff --git a/Makefile b/Makefile index 2105abd..740bdc4 100644 --- a/Makefile +++ b/Makefile @@ -9,16 +9,20 @@ srcdir = . INSTALL = install INSTALL_PROGRAM = ${INSTALL} -GO_WKSPC ?= ${abspath ../../../..} -TEST_TMPDIR = ${PWD} -SRC_TARBALL = jobber-$(shell cat ${srcdir}/version).tgz -SRC_TARBALL_DIR = jobber-$(shell cat ${srcdir}/version) +JOBBER_VERSION := $(shell cat ${srcdir}/version) +SRC_TARBALL = jobber-${JOBBER_VERSION}.tgz +SRC_TARBALL_DIR = jobber-${JOBBER_VERSION} -GO = GOPATH=${GO_WKSPC} go +OUTPUT_DIR = bin +GO = go GO_VERSION = 1.11 - -LDFLAGS = -ldflags "-X github.com/dshearer/jobber/common.jobberVersion=`cat version`" +# NOTE: '-mod=vendor' prevents go from downloading dependencies +GO_BUILD := ${GO} build -mod=vendor -ldflags "-X github.com/dshearer/jobber/common.jobberVersion=${JOBBER_VERSION}" +GO_VET = ${GO} vet -mod=vendor +GO_TEST = ${GO} test -mod=vendor +GO_GEN = ${GO_WITH_TOOLS} generate -mod=vendor +GO_CLEAN = ${GO} clean PACKAGES = \ github.com/dshearer/jobber/common \ @@ -31,19 +35,19 @@ PACKAGES = \ include mk/def-sources.mk .PHONY : default -default : all +default : build -include mk/buildtools.mk +include mk/buildtools.mk # defines 'GO_WITH_TOOLS' and 'GOYACC' -.PHONY : all -all : ${GO_WKSPC}/bin/jobber ${GO_WKSPC}/bin/jobbermaster \ - ${GO_WKSPC}/bin/jobberrunner +.PHONY : build +build : ${OUTPUT_DIR}/jobber ${OUTPUT_DIR}/jobbermaster \ + ${OUTPUT_DIR}/jobberrunner .PHONY : check check : ${TEST_SOURCES} jobfile/parse_time_spec.go @go version - ${GO} vet ${PACKAGES} - TMPDIR="${TEST_TMPDIR}" ${GO} test ${PACKAGES} + ${GO_VET} ${PACKAGES} + ${GO_TEST} ${PACKAGES} install : \ ${DESTDIR}${libexecdir}/jobbermaster \ @@ -51,17 +55,17 @@ install : \ ${DESTDIR}${bindir}/jobber \ ${DESTDIR}${sysconfdir}/jobber.conf -${DESTDIR}${libexecdir}/% : ${GO_WKSPC}/bin/% +${DESTDIR}${libexecdir}/% : ${OUTPUT_DIR}/% @echo INSTALL "$@" @mkdir -p "${dir $@}" @${INSTALL_PROGRAM} "$<" "$@" -${DESTDIR}${bindir}/% : ${GO_WKSPC}/bin/% +${DESTDIR}${bindir}/% : ${OUTPUT_DIR}/% @echo INSTALL "$@" @mkdir -p "${dir $@}" @${INSTALL_PROGRAM} "$<" "$@" -${DESTDIR}${sysconfdir}/jobber.conf : ${GO_WKSPC}/bin/jobbermaster +${DESTDIR}${sysconfdir}/jobber.conf : ${OUTPUT_DIR}/jobbermaster @echo INSTALL "$@" @mkdir -p "${dir $@}" @"$<" defprefs > "$@" @@ -82,18 +86,18 @@ dist : "${SRC_TARBALL_DIR}" rm -rf "${DESTDIR}dist-tmp" -${GO_WKSPC}/bin/% : ${MAIN_SOURCES} jobfile/parse_time_spec.go +${OUTPUT_DIR}/% : ${MAIN_SOURCES} jobfile/parse_time_spec.go @${srcdir}/buildtools/versionge "$$(go version | egrep -o '[[:digit:].]+' | head -n 1)" "${GO_VERSION}" @echo BUILD $* - @${GO} install ${LDFLAGS} "github.com/dshearer/jobber/$*" + 
@${GO_BUILD} -o "$@" "github.com/dshearer/jobber/$*" jobfile/parse_time_spec.go : ${GOYACC} ${JOBFILE_SOURCES} @echo GEN SRC - @${GO_WITH_TOOLS} generate github.com/dshearer/jobber/jobfile + @${GO_GEN} -mod=vendor github.com/dshearer/jobber/jobfile .PHONY : clean clean : clean-buildtools @echo CLEAN - @-${GO} clean -i ${PACKAGES} - @rm -f "${DESTDIR}${SRC_TARBALL}.tgz" jobfile/parse_time_spec.go \ - jobfile/y.output + @-${GO_CLEAN} -i ${PACKAGES} + @rm -rf "${DESTDIR}${SRC_TARBALL}.tgz" jobfile/parse_time_spec.go \ + jobfile/y.output "${OUTPUT_DIR}" diff --git a/buildtools/gotools/tools-release-branch.go1.8.tar.gz b/buildtools/gotools/tools-release-branch.go1.8.tar.gz deleted file mode 100644 index b5b0036..0000000 Binary files a/buildtools/gotools/tools-release-branch.go1.8.tar.gz and /dev/null differ diff --git a/buildtools/gotools/tools-v0.3.4.tar.gz b/buildtools/gotools/tools-v0.3.4.tar.gz new file mode 100644 index 0000000..bdf0a7d Binary files /dev/null and b/buildtools/gotools/tools-v0.3.4.tar.gz differ diff --git a/buildtools/sources.mk b/buildtools/sources.mk index 8170ffa..5bed971 100644 --- a/buildtools/sources.mk +++ b/buildtools/sources.mk @@ -1,5 +1,5 @@ BUILDTOOLS_SOURCES = \ - buildtools/gotools/tools-release-branch.go1.8.tar.gz \ + buildtools/gotools/tools-v0.3.4.tar.gz \ buildtools/sources.mk \ buildtools/srcsync \ buildtools/versionge \ No newline at end of file diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..a64e352 --- /dev/null +++ b/go.mod @@ -0,0 +1,10 @@ +module github.com/dshearer/jobber + +go 1.14 + +require ( + github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect + github.com/stretchr/testify v1.4.0 + gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect + gopkg.in/yaml.v2 v2.2.2 +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..0ff69d2 --- /dev/null +++ b/go.sum @@ -0,0 +1,17 @@ +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/jobfile/job_file.go b/jobfile/job_file.go index 25b0073..a0aca21 100644 --- 
a/jobfile/job_file.go +++ b/jobfile/job_file.go @@ -243,7 +243,7 @@ func parseV3Jobfile(f *os.File) (*JobFileV3Raw, error) { dataStr = gYamlStarter + "\n" + dataStr } var jobfileRaw JobFileV3Raw - if err := yaml.UnmarshalStrict([]byte(dataStr), &jobfileRaw); err != nil { + if err := yaml.Unmarshal([]byte(dataStr), &jobfileRaw); err != nil { return nil, err } return &jobfileRaw, nil diff --git a/mk/buildtools.mk b/mk/buildtools.mk index fd62a2f..e40744e 100644 --- a/mk/buildtools.mk +++ b/mk/buildtools.mk @@ -1,19 +1,15 @@ _BUILDTOOLS_GEN = ${CURDIR}/buildtools/gen -_BUILDTOOLS_WKSPC = ${_BUILDTOOLS_GEN}/gowkspc +_TOOLS_TAR = tools-v0.3.4.tar.gz +_TOOLS_DIR = tools-gopls-v0.3.4 GOYACC = ${_BUILDTOOLS_GEN}/bin/goyacc GO_WITH_TOOLS = PATH="${_BUILDTOOLS_GEN}/bin:$${PATH}" ${GO} -${GOYACC} : buildtools/gotools/tools-release-branch.go1.8.tar.gz +${GOYACC} : buildtools/gotools/${_TOOLS_TAR} @echo MAKE GOYACC - @mkdir -p "${_BUILDTOOLS_WKSPC}" - @cd buildtools/gotools/ && tar -xzf tools-release-branch.go1.8.tar.gz - @rsync -a buildtools/gotools/tools-release-branch.go1.8/ \ - "${_BUILDTOOLS_WKSPC}/src/" - @rm -rf buildtools/gotools/tools-release-branch.go1.8 - @cd "${_BUILDTOOLS_WKSPC}/src/golang.org/x/tools/cmd/goyacc" && \ - go build -o "${GOYACC}" + @cd buildtools/gotools/ && tar -xzf "${_TOOLS_TAR}" + @cd "buildtools/gotools/${_TOOLS_DIR}/cmd/goyacc" && ${GO_BUILD} -o "${GOYACC}" .PHONY : clean-buildtools clean-buildtools : - rm -rf "${_BUILDTOOLS_GEN}" \ No newline at end of file + @rm -rf "${_BUILDTOOLS_GEN}" "buildtools/gotools/${_TOOLS_DIR}" diff --git a/mk/def-sources.mk b/mk/def-sources.mk index c950e3f..08108e1 100644 --- a/mk/def-sources.mk +++ b/mk/def-sources.mk @@ -30,8 +30,9 @@ GO_SOURCES := \ OTHER_SOURCES := \ .circleci \ - Gopkg.lock \ - Gopkg.toml \ + .github \ + go.mod \ + go.sum \ LICENSE \ make-release-notes.py \ Makefile \ diff --git a/packaging/alpine_3.6/APKBUILD b/packaging/alpine/APKBUILD similarity index 50% rename from packaging/alpine_3.6/APKBUILD rename to packaging/alpine/APKBUILD index f1f86ee..e5aa6f4 100644 --- a/packaging/alpine_3.6/APKBUILD +++ b/packaging/alpine/APKBUILD @@ -14,34 +14,29 @@ source="${pkgname}.initd ${pkgname}.tgz" _builddir="${startdir}/build" prepare() { - # make Go workspace - mkdir -p "${_builddir}/src/github.com/dshearer" - ln -s "${srcdir}/${pkgname}" "${_builddir}/src/github.com/dshearer/${pkgname}" + # nothing to do + true } build() { - echo Nothing to do + make -C "${srcdir}/${pkgname}" build } check() { - export GOPATH="${_builddir}" - export GO_WKSPC="${_builddir}" - make -C "${_builddir}/src/github.com/dshearer/${pkgname}" check + make -C "${srcdir}/${pkgname}" check } package() { set -e - # build and install - export GOPATH="${_builddir}" - export GO_WKSPC="${_builddir}" - make -C "${_builddir}/src/github.com/dshearer/${pkgname}" \ - install "DESTDIR=${pkgdir}/" prefix=/usr + # install + make -C "${srcdir}/${pkgname}" install "DESTDIR=${pkgdir}/" prefix=/usr # install init script - local INITD=${pkgdir}/etc/init.d - mkdir -p "${INITD}" - cp "${startdir}/${pkgname}.initd" "${INITD}/jobber" + local INITD="${pkgdir}/etc/init.d" + mkdir -p "${INITD}" + cp "${startdir}/${pkgname}.initd" "${INITD}/jobber" } -md5sums="" +sha512sums="7dacd120effe454daded552623c99bb1b695c6ce47819b7f627772a1f6ca494b5ff83adcae2693e35ef600e92268ef5066a92fc7522043316864edde6f892550 jobber.initd +9831f6dc858fa085c317a496affeb63945de5a96a7656008ef493783bebca65ee3445bbad0dfe646378ae9b75622eef395b43a20be7fa435c5b16247c1324702 jobber.tgz" diff --git 
a/packaging/alpine_3.6/Makefile b/packaging/alpine/Makefile similarity index 96% rename from packaging/alpine_3.6/Makefile rename to packaging/alpine/Makefile index 13d4091..6f1ca34 100644 --- a/packaging/alpine_3.6/Makefile +++ b/packaging/alpine/Makefile @@ -32,7 +32,8 @@ pkg-local : ${WORK_DIR}/${SRC_TARBALL} ${PKGFILE_DEPS} env "pkgrel=${PKGREL}" "pkgver=${ALPINE_VERSION}" abuild -r # copy package out of VM - find "${HOME}/packages/packaging" -name "*.apk" -exec cp '{}' "${DESTDIR}" ';' + mkdir -p "${DESTDIR}/" + find "${HOME}/packages/packaging" -name "*.apk" -exec cp '{}' "${DESTDIR}/" ';' include ../tail.mk diff --git a/packaging/alpine_3.6/Vagrantfile b/packaging/alpine/Vagrantfile similarity index 100% rename from packaging/alpine_3.6/Vagrantfile rename to packaging/alpine/Vagrantfile diff --git a/packaging/alpine_3.6/jobber.initd b/packaging/alpine/jobber.initd similarity index 100% rename from packaging/alpine_3.6/jobber.initd rename to packaging/alpine/jobber.initd diff --git a/packaging/alpine_3.6/jobber.post-deinstall b/packaging/alpine/jobber.post-deinstall similarity index 100% rename from packaging/alpine_3.6/jobber.post-deinstall rename to packaging/alpine/jobber.post-deinstall diff --git a/packaging/alpine_3.6/jobber.post-install b/packaging/alpine/jobber.post-install similarity index 100% rename from packaging/alpine_3.6/jobber.post-install rename to packaging/alpine/jobber.post-install diff --git a/packaging/alpine_3.6/jobber.post-upgrade b/packaging/alpine/jobber.post-upgrade similarity index 100% rename from packaging/alpine_3.6/jobber.post-upgrade rename to packaging/alpine/jobber.post-upgrade diff --git a/packaging/alpine_3.6/jobber.pre-deinstall b/packaging/alpine/jobber.pre-deinstall similarity index 100% rename from packaging/alpine_3.6/jobber.pre-deinstall rename to packaging/alpine/jobber.pre-deinstall diff --git a/packaging/alpine_3.6/pkgrel b/packaging/alpine/pkgrel similarity index 100% rename from packaging/alpine_3.6/pkgrel rename to packaging/alpine/pkgrel diff --git a/packaging/alpine/sources.mk b/packaging/alpine/sources.mk new file mode 100644 index 0000000..f22f052 --- /dev/null +++ b/packaging/alpine/sources.mk @@ -0,0 +1,11 @@ +ALPINE_SOURCES = \ + packaging/alpine/APKBUILD \ + packaging/alpine/jobber.initd \ + packaging/alpine/jobber.post-deinstall \ + packaging/alpine/jobber.post-install \ + packaging/alpine/jobber.post-upgrade \ + packaging/alpine/jobber.pre-deinstall \ + packaging/alpine/Makefile \ + packaging/alpine/pkgrel \ + packaging/alpine/sources.mk \ + packaging/alpine/Vagrantfile \ No newline at end of file diff --git a/packaging/alpine_3.6/sources.mk b/packaging/alpine_3.6/sources.mk deleted file mode 100644 index 2a5b696..0000000 --- a/packaging/alpine_3.6/sources.mk +++ /dev/null @@ -1,11 +0,0 @@ -ALPINE_SOURCES = \ - packaging/alpine_3.6/APKBUILD \ - packaging/alpine_3.6/jobber.initd \ - packaging/alpine_3.6/jobber.post-deinstall \ - packaging/alpine_3.6/jobber.post-install \ - packaging/alpine_3.6/jobber.post-upgrade \ - packaging/alpine_3.6/jobber.pre-deinstall \ - packaging/alpine_3.6/Makefile \ - packaging/alpine_3.6/pkgrel \ - packaging/alpine_3.6/sources.mk \ - packaging/alpine_3.6/Vagrantfile \ No newline at end of file diff --git a/packaging/centos_6/Makefile b/packaging/centos_6/Makefile deleted file mode 100644 index 81780a0..0000000 --- a/packaging/centos_6/Makefile +++ /dev/null @@ -1,46 +0,0 @@ -include ../head.mk - -PKGREL := $(shell cat pkgrel) -PLATFORM = x86_64 - -# RPMs don't allow hyphens in version numbers 
-RPM_VERSION = ${shell echo ${VERSION} | sed -e 's/\-/_/'} - -# required by tail.mk: -PKGFILE = jobber-${RPM_VERSION}-${PKGREL}.el6.centos.${PLATFORM}.rpm -PKGFILE_DEPS = jobber.spec -PKGFILE_VM_PATH = jobber-${RPM_VERSION}-${PKGREL}.${PLATFORM}.rpm -PACKAGING_SUBDIR = centos_6 -INSTALL_PKG_CMD = sudo yum install -y ${PKGFILE} -UNINSTALL_PKG_CMD = sudo yum remove -y jobber -SRC_TARBALL = jobber-${RPM_VERSION}.tgz -SRC_TARBALL_DIR = jobber-${RPM_VERSION} - -.PHONY : pkg-local -pkg-local : jobber.spec ${WORK_DIR}/${SRC_TARBALL} - # make RPM tree - mkdir -p "${WORK_DIR}" \ - "${WORK_DIR}/BUILD" \ - "${WORK_DIR}/RPMS" \ - "${WORK_DIR}/RPMS/${PLATFORM}" \ - "${WORK_DIR}/SOURCES" \ - "${WORK_DIR}/SPECS" \ - "${WORK_DIR}/SRPMS" - cp "$<" "${WORK_DIR}/SPECS" - - # copy sources - cp "${WORK_DIR}/${SRC_TARBALL}" \ - se_policy/* \ - jobber_init "${WORK_DIR}/SOURCES/" - - # build RPMs - cd "${WORK_DIR}/SPECS" && rpmbuild -bb \ - --define "_topdir ${abspath ${WORK_DIR}}" \ - --define "_pkg_version ${RPM_VERSION}" \ - --define "_pkg_release ${PKGREL}" \ - --define "_enable_debug_packages 0" \ - "$<" - mkdir -p "${DESTDIR}" - find "${WORK_DIR}/RPMS" -name '*.rpm' | xargs cp -t "${DESTDIR}" - -include ../tail.mk \ No newline at end of file diff --git a/packaging/centos_6/Vagrantfile b/packaging/centos_6/Vagrantfile deleted file mode 100644 index 998ec10..0000000 --- a/packaging/centos_6/Vagrantfile +++ /dev/null @@ -1,18 +0,0 @@ -# -*- mode: ruby -*- -# vi: set ft=ruby : - -Vagrant.configure("2") do |config| - config.vm.box = "centos/6" - - config.vm.network :forwarded_port, guest: 22, host: 2222, id: "ssh" - - config.vm.provision "shell", inline: <<-SHELL - #yum update -y - yum install -y epel-release rpm-build lsof - yum install -y golang python-pip - #yum upgrade -y - pip install --upgrade pip - pip install robotframework - grep normuser /etc/passwd >/dev/null || useradd normuser -m - SHELL -end diff --git a/packaging/centos_6/jobber.spec b/packaging/centos_6/jobber.spec deleted file mode 100644 index 15dda5f..0000000 --- a/packaging/centos_6/jobber.spec +++ /dev/null @@ -1,157 +0,0 @@ -Name: jobber -Version: %{_pkg_version} -Release: %{_pkg_release} -Summary: A replacement for cron. - -Group: System Environment/Daemons -License: MIT -URL: http://dshearer.github.io/jobber/ -Source0: jobber-%{_pkg_version}.tgz -Source1: jobber_init -Source2: jobber.fc -Source3: jobber.if -Source4: jobber.te - -BuildRequires: golang >= 1.8, coreutils, rsync -Requires: daemonize, initscripts - -Prefix: /usr/local - -%define debug_package %{nil} - -%description -A replacement for cron, with sophisticated status-reporting and error-handling. - - -%package selinux -Summary: An SELinux policy for Jobber. -Group: System Environment/Daemons -BuildRequires: checkpolicy, selinux-policy-devel -%if "%{_selinux_policy_version}" != "" -Requires: selinux-policy >= %{_selinux_policy_version} -%endif -Requires: %{name} = %{version}-%{release} -Requires(post): /usr/sbin/semodule, /sbin/restorecon -Requires(postun): /usr/sbin/semodule, /sbin/restorecon -%description selinux -An SELinux policy for Jobber. 
- - -%files -%attr(0755,root,root) /usr/local/bin/jobber -%attr(0755,root,root) /usr/local/libexec/jobbermaster -%attr(0755,root,root) /usr/local/libexec/jobberrunner -%attr(0755,root,root) /etc/init.d/jobber -%config(noreplace) /etc/jobber.conf - -%files selinux -%defattr(-,root,root,0755) -%doc selinux/* -%{_datadir}/selinux/*/jobber.pp - - -%prep - -# move sources into BUILD -%setup -q -cp "%{_sourcedir}/jobber_init" "%{_builddir}/" - -# create Go workspace -GO_WKSPC="%{_builddir}/go_workspace" -GO_SRC_DIR="${GO_WKSPC}/src/github.com/dshearer" -mkdir -p "${GO_SRC_DIR}" -ln -fs "%{_builddir}/jobber-%{_pkg_version}" "${GO_SRC_DIR}/jobber" - -echo "GO_WKSPC=${GO_WKSPC}" > "%{_builddir}/vars" -echo "GO_SRC_DIR=${GO_SRC_DIR}" >> "%{_builddir}/vars" - -# SELinux stuff -mkdir -p selinux -cp -p %{SOURCE2} %{SOURCE3} %{SOURCE4} selinux/ - - -%build -source "%{_builddir}/vars" -export GO_WKSPC -export GOPATH="${GO_WKSPC}" -make %{?_smp_mflags} -C "${GO_SRC_DIR}/jobber" check - -# SELinux stuff -cd selinux -for selinuxvariant in %{selinux_variants} -do - make NAME=${selinuxvariant} -f /usr/share/selinux/devel/Makefile - mv jobber.pp jobber.pp.${selinuxvariant} - make NAME=${selinuxvariant} -f /usr/share/selinux/devel/Makefile clean -done -cd - - - -%install -source "%{_builddir}/vars" -export GO_WKSPC -export GOPATH="${GO_WKSPC}" -%make_install -C "${GO_SRC_DIR}/jobber" -mkdir -p "%{buildroot}/etc/init.d" -cp "%{_builddir}/jobber_init" "%{buildroot}/etc/init.d/jobber" - -# SELinux stuff -for selinuxvariant in %{selinux_variants} -do - install -d %{buildroot}%{_datadir}/selinux/${selinuxvariant} - install -p -m 644 selinux/jobber.pp.${selinuxvariant} \ - %{buildroot}%{_datadir}/selinux/${selinuxvariant}/jobber.pp -done - - -%post -if [ "$1" -eq 1 ]; then - /sbin/service jobber start - /sbin/chkconfig --add jobber - /sbin/chkconfig jobber on -else - /sbin/service jobber condrestart -fi - - -%preun -if [ "$1" -eq 0 ]; then - /sbin/service jobber stop 2>/dev/null ||: -fi - - -%postun -if [ "$1" -eq 0 ]; then - /sbin/chkconfig jobber off - /sbin/chkconfig --del jobber -fi - - -%post selinux -for selinuxvariant in %{selinux_variants} -do - /usr/sbin/semodule -s ${selinuxvariant} -i \ - %{_datadir}/selinux/${selinuxvariant}/jobber.pp &> /dev/null ||: -done -restorecon -Rv /usr/local /etc/init.d ||: -/sbin/service jobber condrestart - - -%postun selinux -RUNNING=false -if /sbin/service jobber status; then - RUNNING=true - /sbin/service jobber stop -fi -if [ $1 -eq 0 ] ; then - for selinuxvariant in %{selinux_variants} - do - /usr/sbin/semodule -s ${selinuxvariant} -r jobber &> /dev/null ||: - done -fi -if [ "${RUNNING}" = "true" ]; then - /sbin/service jobber start -fi - - -%changelog \ No newline at end of file diff --git a/packaging/centos_6/jobber_init b/packaging/centos_6/jobber_init deleted file mode 100644 index 86a5627..0000000 --- a/packaging/centos_6/jobber_init +++ /dev/null @@ -1,98 +0,0 @@ -### BEGIN INIT INFO -# Provides: jobber jobberd -# Required-Start: $local_fs $syslog -# Required-Stop: $local_fs $syslog -# Default-Start: 2345 -# Default-Stop: 90 -# Short-Description: jobber -# Description: An alternative to cron. -### END INIT INFO - -prog=jobbermaster -exec=/usr/local/libexec/${prog} -lockfile=/var/lock/subsys/jobber -pidfile=/var/run/jobber.pid - -# Source function library. -. /etc/rc.d/init.d/functions - -start() { - if [ $UID -ne 0 ] ; then - echo "User has insufficient privilege." 
- exit 4 - fi - [ -x $exec ] || exit 5 - echo -n $"Starting $prog: " - daemon daemonize -p $pidfile -l $lockfile $exec - local retval=$? - echo - [ $retval -eq 0 ] && touch $lockfile -} - -stop() { - if [ $UID -ne 0 ] ; then - echo "User has insufficient privilege." - exit 4 - fi - echo -n $"Stopping $prog: " - - rh_status_q - local stat=$? - case $stat in - 0) - # running - kill `cat $pidfile` && success && rm -f $lockfile $pidfile - ;; - 1) - # stopped but pid file exists - rm -f $pidfile $pidfile - ;; - 2) - # stopped but subsys locked - rm -f $lockfile $pidfile - ;; - esac - local retval=$? - - echo - return $retval -} - -restart() { - rh_status_q && stop - start -} - -rh_status() { - status -p $pidfile -b $exec $prog -} - -rh_status_q() { - rh_status >/dev/null 2>&1 -} - - -case "$1" in - start) - rh_status_q && exit 0 - $1 - ;; - stop) - rh_status_q || exit 0 - $1 - ;; - restart) - $1 - ;; - status) - rh_status - ;; - condrestart) - rh_status && restart - ;; - *) - echo $"Usage: $0 {start|stop|status|restart|condrestart}" - exit 2 -esac -exit $? - diff --git a/packaging/centos_6/se_policy/jobber.fc b/packaging/centos_6/se_policy/jobber.fc deleted file mode 100644 index c81ab54..0000000 --- a/packaging/centos_6/se_policy/jobber.fc +++ /dev/null @@ -1,6 +0,0 @@ -# jobberd executable will have: -# label: system_u:object_r:jobberd_exec_t -# MLS sensitivity: s0 -# MCS categories: - -/usr/local/sbin/jobberd -- gen_context(system_u:object_r:jobberd_exec_t,s0) diff --git a/packaging/centos_6/se_policy/jobber.if b/packaging/centos_6/se_policy/jobber.if deleted file mode 100644 index 3eb6a30..0000000 --- a/packaging/centos_6/se_policy/jobber.if +++ /dev/null @@ -1 +0,0 @@ -## diff --git a/packaging/centos_6/se_policy/jobber.te b/packaging/centos_6/se_policy/jobber.te deleted file mode 100644 index 0a1ded7..0000000 --- a/packaging/centos_6/se_policy/jobber.te +++ /dev/null @@ -1,26 +0,0 @@ - -policy_module(jobber,0.1.0) -require { - type crond_t; - type initrc_t; - type unconfined_t; - role unconfined_r; - attribute file_type; - attribute exec_type; -} - -######################################## -# -# Declarations -# - -type jobberd_exec_t, file_type, exec_type; - -######################################## -# -# jobber local policy -# -domain_auto_trans(initrc_t, jobberd_exec_t, crond_t) -domain_auto_trans(unconfined_t, jobberd_exec_t, crond_t) -role_transition unconfined_r jobberd_exec_t system_r; - diff --git a/packaging/centos_6/sources.mk b/packaging/centos_6/sources.mk deleted file mode 100644 index 4afe12d..0000000 --- a/packaging/centos_6/sources.mk +++ /dev/null @@ -1,8 +0,0 @@ -CENTOS_6_SOURCES = \ - packaging/centos_6/se_policy \ - packaging/centos_6/jobber_init \ - packaging/centos_6/jobber.spec \ - packaging/centos_6/Makefile \ - packaging/centos_6/pkgrel \ - packaging/centos_6/sources.mk \ - packaging/centos_6/Vagrantfile \ No newline at end of file diff --git a/packaging/centos_7/pkgrel b/packaging/centos_7/pkgrel deleted file mode 100644 index d00491f..0000000 --- a/packaging/centos_7/pkgrel +++ /dev/null @@ -1 +0,0 @@ -1 diff --git a/packaging/centos_7/sources.mk b/packaging/centos_7/sources.mk deleted file mode 100644 index 7f736f2..0000000 --- a/packaging/centos_7/sources.mk +++ /dev/null @@ -1,7 +0,0 @@ -CENTOS_7_SOURCES = \ - packaging/centos_7/jobber.service \ - packaging/centos_7/jobber.spec \ - packaging/centos_7/Makefile \ - packaging/centos_7/pkgrel \ - packaging/centos_7/sources.mk \ - packaging/centos_7/Vagrantfile \ No newline at end of file diff --git 
a/packaging/darwin/Makefile b/packaging/darwin/Makefile deleted file mode 100644 index 473b35c..0000000 --- a/packaging/darwin/Makefile +++ /dev/null @@ -1,16 +0,0 @@ -include ../head.mk - -# required by tail.mk: -PKGFILE = nop -PKGFILE_DEPS = nop -PKGFILE_VM_PATH = nop -PACKAGING_SUBDIR = darwin -SRC_TARBALL_DIR = jobber -INSTALL_PKG_CMD = nop -UNINSTALL_PKG_CMD = nop - -.PHONY : test-local -test-local : ${WORK_DIR}/${SRC_TARBALL} - ./make-and-install.sh ${WORK_DIR}/${SRC_TARBALL} $${PWD} - -include ../tail.mk diff --git a/packaging/darwin/make-and-install.sh b/packaging/darwin/make-and-install.sh deleted file mode 100755 index 042e08a..0000000 --- a/packaging/darwin/make-and-install.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/bash -ex - -if [ $# -ne 2 ]; then - echo "usage: make-pkg.sh SRC_TARBALL DARWIN_DIR" >&2 - exit 1 -fi - -SRC_TARBALL="$1" -DARWIN_DIR="$2" - -# extract source -TMPDIR="$(mktemp -d)" -WORKSPACE="${TMPDIR}/src/github.com/dshearer" -mkdir -p "${WORKSPACE}" -cd "${WORKSPACE}" -tar -xzf "${SRC_TARBALL}" - -# build & install -cd jobber -mkdir pkg -make check -sudo make install "DESTDIR=/" - -# run -sudo cp "${DARWIN_DIR}/launchd.plist" "${TMPDIR}/launchd.plist" -sudo launchctl unload "${TMPDIR}/launchd.plist" -sudo launchctl load "${TMPDIR}/launchd.plist" -sudo launchctl start info.nekonya.jobber - -# test -sudo robot --pythonpath "${WORKSPACE}/jobber/platform_tests/keywords" \ - "${WORKSPACE}/jobber/platform_tests/suites" | tee "${DARWIN_DIR}/testlog.txt" - -# clean up -sudo launchctl unload "${TMPDIR}/launchd.plist" -sudo rm -rf "${TMPDIR}" diff --git a/packaging/darwin/sources.mk b/packaging/darwin/sources.mk index 54a046e..ec85452 100644 --- a/packaging/darwin/sources.mk +++ b/packaging/darwin/sources.mk @@ -1,5 +1,3 @@ DARWIN_SOURCES = \ packaging/darwin/launchd.plist \ - packaging/darwin/make-and-install.sh \ - packaging/darwin/Makefile \ packaging/darwin/sources.mk diff --git a/packaging/debian_9/Makefile b/packaging/debian/Makefile similarity index 100% rename from packaging/debian_9/Makefile rename to packaging/debian/Makefile diff --git a/packaging/debian_9/Vagrantfile b/packaging/debian/Vagrantfile similarity index 100% rename from packaging/debian_9/Vagrantfile rename to packaging/debian/Vagrantfile diff --git a/packaging/debian_9/debian-pkg/changelog b/packaging/debian/debian-pkg/changelog similarity index 88% rename from packaging/debian_9/debian-pkg/changelog rename to packaging/debian/debian-pkg/changelog index f1c3d1f..bdc1073 100644 --- a/packaging/debian_9/debian-pkg/changelog +++ b/packaging/debian/debian-pkg/changelog @@ -1,4 +1,10 @@ -jobber (1.4.0-1) UNRELEASED; urgency=low +jobber (1.4.1-1) UNRELEASED; urgency=low + + * Release 1.4.1 + + -- C. 
Dylan Shearer Sun, 29 Mar 2020 19:30:00 -0800 + + jobber (1.4.0-1) UNRELEASED; urgency=low * Release 1.4.0 diff --git a/packaging/debian_9/debian-pkg/compat b/packaging/debian/debian-pkg/compat similarity index 100% rename from packaging/debian_9/debian-pkg/compat rename to packaging/debian/debian-pkg/compat diff --git a/packaging/debian_9/debian-pkg/control b/packaging/debian/debian-pkg/control similarity index 100% rename from packaging/debian_9/debian-pkg/control rename to packaging/debian/debian-pkg/control diff --git a/packaging/debian_9/debian-pkg/copyright b/packaging/debian/debian-pkg/copyright similarity index 100% rename from packaging/debian_9/debian-pkg/copyright rename to packaging/debian/debian-pkg/copyright diff --git a/packaging/debian_9/debian-pkg/docs b/packaging/debian/debian-pkg/docs similarity index 100% rename from packaging/debian_9/debian-pkg/docs rename to packaging/debian/debian-pkg/docs diff --git a/packaging/debian_9/debian-pkg/jobber.init b/packaging/debian/debian-pkg/jobber.init similarity index 100% rename from packaging/debian_9/debian-pkg/jobber.init rename to packaging/debian/debian-pkg/jobber.init diff --git a/packaging/debian_9/debian-pkg/jobber.service b/packaging/debian/debian-pkg/jobber.service similarity index 100% rename from packaging/debian_9/debian-pkg/jobber.service rename to packaging/debian/debian-pkg/jobber.service diff --git a/packaging/debian_9/debian-pkg/rules b/packaging/debian/debian-pkg/rules similarity index 73% rename from packaging/debian_9/debian-pkg/rules rename to packaging/debian/debian-pkg/rules index fe6dc7e..7208655 100644 --- a/packaging/debian_9/debian-pkg/rules +++ b/packaging/debian/debian-pkg/rules @@ -18,8 +18,6 @@ include /usr/share/dpkg/default.mk #export DEB_LDFLAGS_MAINT_APPEND = -Wl,--as-needed SRC_DIR = ${PWD} -GO_WKSPC = /tmp/workspace -export GOPATH=${GO_WKSPC} # main packaging script based on dh7 syntax %: @@ -27,18 +25,16 @@ export GOPATH=${GO_WKSPC} override_dh_auto_build: # make Go workspace - mkdir -p "${GO_WKSPC}/src/github.com/dshearer" - ln -s "${SRC_DIR}" "${GO_WKSPC}/src/github.com/dshearer/jobber" - dh_auto_build -- "GO_WKSPC=${GO_WKSPC}" + dh_auto_build override_dh_auto_test: - dh_auto_test -- "GO_WKSPC=${GO_WKSPC}" + dh_auto_test override_dh_auto_install: - dh_auto_install -- "GO_WKSPC=${GO_WKSPC}" prefix=/usr + dh_auto_install -- prefix=/usr override_dh_auto_clean: - rm -rf "${GO_WKSPC}" + dh_auto_clean override_dh_systemd_start: dh_systemd_start --restart-after-upgrade diff --git a/packaging/debian_9/debian-pkg/source/format b/packaging/debian/debian-pkg/source/format similarity index 100% rename from packaging/debian_9/debian-pkg/source/format rename to packaging/debian/debian-pkg/source/format diff --git a/packaging/debian/sources.mk b/packaging/debian/sources.mk new file mode 100644 index 0000000..a0db73d --- /dev/null +++ b/packaging/debian/sources.mk @@ -0,0 +1,5 @@ +DEBIAN_SOURCES = \ + packaging/debian/debian-pkg \ + packaging/debian/Makefile \ + packaging/debian/sources.mk \ + packaging/debian/Vagrantfile \ No newline at end of file diff --git a/packaging/debian_9/sources.mk b/packaging/debian_9/sources.mk deleted file mode 100644 index b2a9b04..0000000 --- a/packaging/debian_9/sources.mk +++ /dev/null @@ -1,5 +0,0 @@ -DEBIAN_9_SOURCES = \ - packaging/debian_9/debian-pkg \ - packaging/debian_9/Makefile \ - packaging/debian_9/sources.mk \ - packaging/debian_9/Vagrantfile \ No newline at end of file diff --git a/packaging/centos_7/Makefile b/packaging/rpm/Makefile similarity index 97% rename 
from packaging/centos_7/Makefile rename to packaging/rpm/Makefile index c829dda..cf3b2be 100644 --- a/packaging/centos_7/Makefile +++ b/packaging/rpm/Makefile @@ -35,6 +35,7 @@ pkg-local : jobber.spec ${WORK_DIR}/${SRC_TARBALL} "${WORK_DIR}/SOURCES/" # build RPMs + yum-builddep --assumeyes "$<" cd "${WORK_DIR}/SPECS" && rpmbuild -bb \ --define "_topdir ${abspath ${WORK_DIR}}" \ --define "_pkg_version ${RPM_VERSION}" \ diff --git a/packaging/centos_7/Vagrantfile b/packaging/rpm/Vagrantfile similarity index 100% rename from packaging/centos_7/Vagrantfile rename to packaging/rpm/Vagrantfile diff --git a/packaging/centos_7/jobber.service b/packaging/rpm/jobber.service similarity index 100% rename from packaging/centos_7/jobber.service rename to packaging/rpm/jobber.service diff --git a/packaging/centos_7/jobber.spec b/packaging/rpm/jobber.spec similarity index 64% rename from packaging/centos_7/jobber.spec rename to packaging/rpm/jobber.spec index 9b703f5..404533e 100644 --- a/packaging/centos_7/jobber.spec +++ b/packaging/rpm/jobber.spec @@ -10,7 +10,7 @@ Source0: jobber-%{_pkg_version}.tgz Source1: jobber.service %{?systemd_requires} -BuildRequires: coreutils, systemd, rsync +BuildRequires: coreutils, systemd, rsync, golang Prefix: /usr/local @@ -34,28 +34,13 @@ A replacement for cron, with sophisticated status-reporting and error-handling. %setup -q cp "%{_sourcedir}/jobber.service" "%{_builddir}/" -# create Go workspace -GO_WKSPC="%{_builddir}/go_workspace" -GO_SRC_DIR="${GO_WKSPC}/src/github.com/dshearer" -mkdir -p "${GO_SRC_DIR}" -cp -R "%{_builddir}/jobber-%{_pkg_version}" "${GO_SRC_DIR}/jobber" - -echo "GO_WKSPC=${GO_WKSPC}" > "%{_builddir}/vars" -echo "GO_SRC_DIR=${GO_SRC_DIR}" >> "%{_builddir}/vars" - %build -source "%{_builddir}/vars" -export GO_WKSPC -export GOPATH="${GO_WKSPC}" -make %{?_smp_mflags} -C "${GO_SRC_DIR}/jobber" check +make %{?_smp_mflags} check build %install -source "%{_builddir}/vars" -export GO_WKSPC -export GOPATH="${GO_WKSPC}" -%make_install -C "${GO_SRC_DIR}/jobber" +%make_install mkdir -p "%{buildroot}/%{_unitdir}" cp "%{_builddir}/jobber.service" "%{buildroot}/%{_unitdir}/" diff --git a/packaging/centos_6/pkgrel b/packaging/rpm/pkgrel similarity index 100% rename from packaging/centos_6/pkgrel rename to packaging/rpm/pkgrel diff --git a/packaging/rpm/sources.mk b/packaging/rpm/sources.mk new file mode 100644 index 0000000..2ba0d05 --- /dev/null +++ b/packaging/rpm/sources.mk @@ -0,0 +1,7 @@ +RPM_SOURCES = \ + packaging/rpm/jobber.service \ + packaging/rpm/jobber.spec \ + packaging/rpm/Makefile \ + packaging/rpm/pkgrel \ + packaging/rpm/sources.mk \ + packaging/rpm/Vagrantfile \ No newline at end of file diff --git a/packaging/sources.mk b/packaging/sources.mk index a1bf964..e1a9364 100644 --- a/packaging/sources.mk +++ b/packaging/sources.mk @@ -1,15 +1,13 @@ -include packaging/alpine_3.6/sources.mk -include packaging/centos_6/sources.mk -include packaging/centos_7/sources.mk -include packaging/debian_9/sources.mk +include packaging/alpine/sources.mk +include packaging/rpm/sources.mk +include packaging/debian/sources.mk include packaging/darwin/sources.mk PACKAGING_SOURCES := \ ${ALPINE_SOURCES} \ - ${CENTOS_6_SOURCES} \ - ${CENTOS_7_SOURCES} \ + ${RPM_SOURCES} \ ${DARWIN_SOURCES} \ - ${DEBIAN_9_SOURCES} \ + ${DEBIAN_SOURCES} \ packaging/head.mk \ packaging/Makefile \ packaging/sources.mk \ diff --git a/platform_tests/keywords/testlib.py b/platform_tests/keywords/testlib.py index 2cb9ca6..9f49ea9 100644 --- a/platform_tests/keywords/testlib.py +++ 
b/platform_tests/keywords/testlib.py @@ -30,6 +30,68 @@ def main(): main() ''' +class _ProcInfo(object): + '''Info about a process''' + def __init__(self, pid, username, uid, tty, program): + self.pid = pid + self.username = username + self.uid = uid + self.tty = tty + self.program = program + + def __eq__(self, other): + if self.pid != other.pid: + return False + if self.username != other.username: + return False + if self.uid != other.uid: + return False + if self.tty != other.tty: + return False + if self.program != other.program: + return False + return True + + def __ne__(self, other): + return not (self == other) + + def __lt__(self, other): + return (self.pid, self.username, self.uid, self.tty, self.program) < \ + (other.pid, other.username, other.uid, other.tty, other.program) + + def __repr__(self): + return "[{} {} {} {} {}]".format(self.pid, self.username, self.uid, \ + self.tty, self.program) + +def _get_proc_info(program_name): + ''' + :return: List of instances of _ProcInfo. + ''' + def split_by_whitespace(s, maxsplit): + s = ' '.join(s.split()) + return s.split(' ', maxsplit) + + args = ['ps', '-ax', '-o', 'pid,user,uid,tty,command'] + proc = sp.Popen(args, stdout=sp.PIPE, stderr=sp.PIPE) + infos = [] + skipped_first = False + for line in proc.stdout: + if not skipped_first: + skipped_first = True + continue + parts = split_by_whitespace(line.strip(), 4) + pid, username, uid, tty, program = parts + if program_name not in program: + continue + # Darwin uses '??' to mean 'no TTY'; Linux uses '?' + if tty == '?' or tty == '??': + tty = None + infos.append(_ProcInfo(pid, username, uid, tty, program)) + if proc.wait() != 0: + print(proc.stderr.read()) + return Exception("ps returned non-0") + return infos + def sp_check_output(args): proc = sp.Popen(args, stdout=sp.PIPE, stderr=sp.PIPE) out, err = proc.communicate() @@ -315,10 +377,13 @@ def install_jobfile(self, contents, for_root=False, reload=True, exp_num_jobs=1) pwnam = pwd.getpwnam(_NORMUSER) os.setegid(pwnam.pw_gid) os.seteuid(pwnam.pw_uid) - with open(self._normuser_jobfile_path, 'w') as f: - f.write(contents) - os.seteuid(0) - os.setegid(0) + try: + with open(self._normuser_jobfile_path, 'w') as f: + f.write(contents) + os.chmod(self._normuser_jobfile_path, 0600) + finally: + os.seteuid(0) + os.setegid(0) # reload it if reload: @@ -513,29 +578,13 @@ def directory_exists(self, path): def runner_proc_info(self): ''' - :return: A string containing lines of this format: - USER UID TTY - where TTY is '?' if there is no TTY for that process. + :return: List of instances of _ProcInfo. ''' - args = ['ps', '-ax', '-o', 'user,uid,tty,command'] - proc = sp.Popen(args, stdout=sp.PIPE, stderr=sp.PIPE) - output, _ = proc.communicate() - lines = [L.strip() for L in output.split('\n')[1:]] - records = [L.split() for L in lines \ - if len(L) > 0 and 'jobberrunner' in L] - new_records = [] - for r in records: - user, uid, tty = r[0], r[1], r[2] - if tty == '??': # Darwin - tty = '?' 
- new_rec = ' '.join([user, uid, tty]) - new_records.append(new_rec) - new_records.sort() - return '\n'.join(new_records) + return _get_proc_info("jobberrunner") def nbr_of_runner_procs_should_be_same(self, orig_proc_info): new_proc_info = self.runner_proc_info() - if orig_proc_info != new_proc_info: + if len(orig_proc_info) != len(new_proc_info): print("Original runner procs:\n{0}".format(orig_proc_info)) print("New runner procs:\n{0}".format(new_proc_info)) raise AssertionError("Number of runner procs has changed!") @@ -545,13 +594,8 @@ def runner_procs_should_not_have_tty(self): # (http://www.halfdog.net/Security/2012/TtyPushbackPrivilegeEscalation/) proc_info = self.runner_proc_info() print("proc_info: {}".format(proc_info)) - for line in proc_info.split('\n'): - try: - tty = line.split()[2] - except IndexError as _: - print("Error: " + line) - raise - if tty != '?': + for proc in proc_info: + if proc.tty is not None: print("Runner procs:\n{0}".format(proc_info)) raise AssertionError("A runner proc has a controlling tty") @@ -647,7 +691,7 @@ def jobberrunner_for_normuser_has_not_crashed(self): def jobberrunner_should_be_running_for_user(self, username): proc_info = self.runner_proc_info() - if username not in proc_info: + if not any(p.username == username for p in proc_info): print("Runner procs:\n{0}\n".format(proc_info)) self.print_debug_info() raise AssertionError("jobberrunner is not running for {0}".\ @@ -655,7 +699,7 @@ def jobberrunner_should_be_running_for_user(self, username): def jobberrunner_should_not_be_running_for_user(self, username): proc_info = self.runner_proc_info() - if username in proc_info: + if any(p.username == username for p in proc_info): print("Runner procs:\n{0}\n".format(proc_info)) self.print_debug_info() raise AssertionError("jobberrunner is running for {0}".\ @@ -688,15 +732,20 @@ def config_file_should_exist(self): raise e def jobber_procs_should_not_have_inet_sockets(self): - proc = sp.Popen(["lsof", "-n", "-i", "-P"], stdout=sp.PIPE) - output, _ = proc.communicate() - output = output.strip() - if len(output) == 0: - # no results - return - lines = output.split("\n")[1:] - jobber_lines = [line for line in lines if "jobber" in line] - if len(jobber_lines) > 0: - msg = "Jobber procs have inet sockets:\n{0}".\ - format("\n".join(jobber_lines)) - raise AssertionError(msg) + jobber_procs = _get_proc_info("jobber") + for jproc in jobber_procs: + cmd = [ + "lsof", + "-a", # AND the options (rather than ORing them) + "-n", # don't convert IP to domain + "-P", # don't convert port to service name + "-i", # show IP sockets + "-p", jproc.pid + ] + proc = sp.Popen(cmd, stdout=sp.PIPE, stderr=open(os.devnull)) + output, err = proc.communicate() + output = output.strip() + if len(output) > 0: + msg = "{} process has inet sockets:\n{}".\ + format(jproc.program, output) + raise AssertionError(msg) diff --git a/vendor/github.com/golang/protobuf/AUTHORS b/vendor/github.com/golang/protobuf/AUTHORS deleted file mode 100644 index 15167cd..0000000 --- a/vendor/github.com/golang/protobuf/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. 
diff --git a/vendor/github.com/golang/protobuf/CONTRIBUTORS b/vendor/github.com/golang/protobuf/CONTRIBUTORS deleted file mode 100644 index 1c4577e..0000000 --- a/vendor/github.com/golang/protobuf/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/github.com/golang/protobuf/LICENSE b/vendor/github.com/golang/protobuf/LICENSE deleted file mode 100644 index 1b1b192..0000000 --- a/vendor/github.com/golang/protobuf/LICENSE +++ /dev/null @@ -1,31 +0,0 @@ -Go support for Protocol Buffers - Google's data interchange format - -Copyright 2010 The Go Authors. All rights reserved. -https://github.com/golang/protobuf - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - diff --git a/vendor/github.com/golang/protobuf/proto/Makefile b/vendor/github.com/golang/protobuf/proto/Makefile deleted file mode 100644 index e2e0651..0000000 --- a/vendor/github.com/golang/protobuf/proto/Makefile +++ /dev/null @@ -1,43 +0,0 @@ -# Go support for Protocol Buffers - Google's data interchange format -# -# Copyright 2010 The Go Authors. All rights reserved. -# https://github.com/golang/protobuf -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. 
-# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -install: - go install - -test: install generate-test-pbs - go test - - -generate-test-pbs: - make install - make -C testdata - protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata,Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any:. proto3_proto/proto3.proto - make diff --git a/vendor/github.com/golang/protobuf/proto/clone.go b/vendor/github.com/golang/protobuf/proto/clone.go deleted file mode 100644 index e392575..0000000 --- a/vendor/github.com/golang/protobuf/proto/clone.go +++ /dev/null @@ -1,229 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Protocol buffer deep copy and merge. -// TODO: RawMessage. - -package proto - -import ( - "log" - "reflect" - "strings" -) - -// Clone returns a deep copy of a protocol buffer. -func Clone(pb Message) Message { - in := reflect.ValueOf(pb) - if in.IsNil() { - return pb - } - - out := reflect.New(in.Type().Elem()) - // out is empty so a merge is a deep copy. - mergeStruct(out.Elem(), in.Elem()) - return out.Interface().(Message) -} - -// Merge merges src into dst. 
-// Required and optional fields that are set in src will be set to that value in dst. -// Elements of repeated fields will be appended. -// Merge panics if src and dst are not the same type, or if dst is nil. -func Merge(dst, src Message) { - in := reflect.ValueOf(src) - out := reflect.ValueOf(dst) - if out.IsNil() { - panic("proto: nil destination") - } - if in.Type() != out.Type() { - // Explicit test prior to mergeStruct so that mistyped nils will fail - panic("proto: type mismatch") - } - if in.IsNil() { - // Merging nil into non-nil is a quiet no-op - return - } - mergeStruct(out.Elem(), in.Elem()) -} - -func mergeStruct(out, in reflect.Value) { - sprop := GetProperties(in.Type()) - for i := 0; i < in.NumField(); i++ { - f := in.Type().Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) - } - - if emIn, ok := extendable(in.Addr().Interface()); ok { - emOut, _ := extendable(out.Addr().Interface()) - mIn, muIn := emIn.extensionsRead() - if mIn != nil { - mOut := emOut.extensionsWrite() - muIn.Lock() - mergeExtension(mOut, mIn) - muIn.Unlock() - } - } - - uf := in.FieldByName("XXX_unrecognized") - if !uf.IsValid() { - return - } - uin := uf.Bytes() - if len(uin) > 0 { - out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...)) - } -} - -// mergeAny performs a merge between two values of the same type. -// viaPtr indicates whether the values were indirected through a pointer (implying proto2). -// prop is set if this is a struct field (it may be nil). -func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) { - if in.Type() == protoMessageType { - if !in.IsNil() { - if out.IsNil() { - out.Set(reflect.ValueOf(Clone(in.Interface().(Message)))) - } else { - Merge(out.Interface().(Message), in.Interface().(Message)) - } - } - return - } - switch in.Kind() { - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, - reflect.String, reflect.Uint32, reflect.Uint64: - if !viaPtr && isProto3Zero(in) { - return - } - out.Set(in) - case reflect.Interface: - // Probably a oneof field; copy non-nil values. - if in.IsNil() { - return - } - // Allocate destination if it is not set, or set to a different type. - // Otherwise we will merge as normal. - if out.IsNil() || out.Elem().Type() != in.Elem().Type() { - out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T) - } - mergeAny(out.Elem(), in.Elem(), false, nil) - case reflect.Map: - if in.Len() == 0 { - return - } - if out.IsNil() { - out.Set(reflect.MakeMap(in.Type())) - } - // For maps with value types of *T or []byte we need to deep copy each value. - elemKind := in.Type().Elem().Kind() - for _, key := range in.MapKeys() { - var val reflect.Value - switch elemKind { - case reflect.Ptr: - val = reflect.New(in.Type().Elem().Elem()) - mergeAny(val, in.MapIndex(key), false, nil) - case reflect.Slice: - val = in.MapIndex(key) - val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) - default: - val = in.MapIndex(key) - } - out.SetMapIndex(key, val) - } - case reflect.Ptr: - if in.IsNil() { - return - } - if out.IsNil() { - out.Set(reflect.New(in.Elem().Type())) - } - mergeAny(out.Elem(), in.Elem(), true, nil) - case reflect.Slice: - if in.IsNil() { - return - } - if in.Type().Elem().Kind() == reflect.Uint8 { - // []byte is a scalar bytes field, not a repeated field. 
- - // Edge case: if this is in a proto3 message, a zero length - // bytes field is considered the zero value, and should not - // be merged. - if prop != nil && prop.proto3 && in.Len() == 0 { - return - } - - // Make a deep copy. - // Append to []byte{} instead of []byte(nil) so that we never end up - // with a nil result. - out.SetBytes(append([]byte{}, in.Bytes()...)) - return - } - n := in.Len() - if out.IsNil() { - out.Set(reflect.MakeSlice(in.Type(), 0, n)) - } - switch in.Type().Elem().Kind() { - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, - reflect.String, reflect.Uint32, reflect.Uint64: - out.Set(reflect.AppendSlice(out, in)) - default: - for i := 0; i < n; i++ { - x := reflect.Indirect(reflect.New(in.Type().Elem())) - mergeAny(x, in.Index(i), false, nil) - out.Set(reflect.Append(out, x)) - } - } - case reflect.Struct: - mergeStruct(out, in) - default: - // unknown type, so not a protocol buffer - log.Printf("proto: don't know how to copy %v", in) - } -} - -func mergeExtension(out, in map[int32]Extension) { - for extNum, eIn := range in { - eOut := Extension{desc: eIn.desc} - if eIn.value != nil { - v := reflect.New(reflect.TypeOf(eIn.value)).Elem() - mergeAny(v, reflect.ValueOf(eIn.value), false, nil) - eOut.value = v.Interface() - } - if eIn.enc != nil { - eOut.enc = make([]byte, len(eIn.enc)) - copy(eOut.enc, eIn.enc) - } - - out[extNum] = eOut - } -} diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go deleted file mode 100644 index aa20729..0000000 --- a/vendor/github.com/golang/protobuf/proto/decode.go +++ /dev/null @@ -1,970 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for decoding protocol buffer data to construct in-memory representations. 
- */ - -import ( - "errors" - "fmt" - "io" - "os" - "reflect" -) - -// errOverflow is returned when an integer is too large to be represented. -var errOverflow = errors.New("proto: integer overflow") - -// ErrInternalBadWireType is returned by generated code when an incorrect -// wire type is encountered. It does not get returned to user code. -var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") - -// The fundamental decoders that interpret bytes on the wire. -// Those that take integer types all return uint64 and are -// therefore of type valueDecoder. - -// DecodeVarint reads a varint-encoded integer from the slice. -// It returns the integer and the number of bytes consumed, or -// zero if there is not enough. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func DecodeVarint(buf []byte) (x uint64, n int) { - for shift := uint(0); shift < 64; shift += 7 { - if n >= len(buf) { - return 0, 0 - } - b := uint64(buf[n]) - n++ - x |= (b & 0x7F) << shift - if (b & 0x80) == 0 { - return x, n - } - } - - // The number is too large to represent in a 64-bit value. - return 0, 0 -} - -func (p *Buffer) decodeVarintSlow() (x uint64, err error) { - i := p.index - l := len(p.buf) - - for shift := uint(0); shift < 64; shift += 7 { - if i >= l { - err = io.ErrUnexpectedEOF - return - } - b := p.buf[i] - i++ - x |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - p.index = i - return - } - } - - // The number is too large to represent in a 64-bit value. - err = errOverflow - return -} - -// DecodeVarint reads a varint-encoded integer from the Buffer. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func (p *Buffer) DecodeVarint() (x uint64, err error) { - i := p.index - buf := p.buf - - if i >= len(buf) { - return 0, io.ErrUnexpectedEOF - } else if buf[i] < 0x80 { - p.index++ - return uint64(buf[i]), nil - } else if len(buf)-i < 10 { - return p.decodeVarintSlow() - } - - var b uint64 - // we already checked the first byte - x = uint64(buf[i]) - 0x80 - i++ - - b = uint64(buf[i]) - i++ - x += b << 7 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 7 - - b = uint64(buf[i]) - i++ - x += b << 14 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 14 - - b = uint64(buf[i]) - i++ - x += b << 21 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 21 - - b = uint64(buf[i]) - i++ - x += b << 28 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 28 - - b = uint64(buf[i]) - i++ - x += b << 35 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 35 - - b = uint64(buf[i]) - i++ - x += b << 42 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 42 - - b = uint64(buf[i]) - i++ - x += b << 49 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 49 - - b = uint64(buf[i]) - i++ - x += b << 56 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 56 - - b = uint64(buf[i]) - i++ - x += b << 63 - if b&0x80 == 0 { - goto done - } - // x -= 0x80 << 63 // Always zero. - - return 0, errOverflow - -done: - p.index = i - return x, nil -} - -// DecodeFixed64 reads a 64-bit integer from the Buffer. -// This is the format for the -// fixed64, sfixed64, and double protocol buffer types. 
-func (p *Buffer) DecodeFixed64() (x uint64, err error) { - // x, err already 0 - i := p.index + 8 - if i < 0 || i > len(p.buf) { - err = io.ErrUnexpectedEOF - return - } - p.index = i - - x = uint64(p.buf[i-8]) - x |= uint64(p.buf[i-7]) << 8 - x |= uint64(p.buf[i-6]) << 16 - x |= uint64(p.buf[i-5]) << 24 - x |= uint64(p.buf[i-4]) << 32 - x |= uint64(p.buf[i-3]) << 40 - x |= uint64(p.buf[i-2]) << 48 - x |= uint64(p.buf[i-1]) << 56 - return -} - -// DecodeFixed32 reads a 32-bit integer from the Buffer. -// This is the format for the -// fixed32, sfixed32, and float protocol buffer types. -func (p *Buffer) DecodeFixed32() (x uint64, err error) { - // x, err already 0 - i := p.index + 4 - if i < 0 || i > len(p.buf) { - err = io.ErrUnexpectedEOF - return - } - p.index = i - - x = uint64(p.buf[i-4]) - x |= uint64(p.buf[i-3]) << 8 - x |= uint64(p.buf[i-2]) << 16 - x |= uint64(p.buf[i-1]) << 24 - return -} - -// DecodeZigzag64 reads a zigzag-encoded 64-bit integer -// from the Buffer. -// This is the format used for the sint64 protocol buffer type. -func (p *Buffer) DecodeZigzag64() (x uint64, err error) { - x, err = p.DecodeVarint() - if err != nil { - return - } - x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63) - return -} - -// DecodeZigzag32 reads a zigzag-encoded 32-bit integer -// from the Buffer. -// This is the format used for the sint32 protocol buffer type. -func (p *Buffer) DecodeZigzag32() (x uint64, err error) { - x, err = p.DecodeVarint() - if err != nil { - return - } - x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31)) - return -} - -// These are not ValueDecoders: they produce an array of bytes or a string. -// bytes, embedded messages - -// DecodeRawBytes reads a count-delimited byte buffer from the Buffer. -// This is the format used for the bytes protocol buffer -// type and for embedded messages. -func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { - n, err := p.DecodeVarint() - if err != nil { - return nil, err - } - - nb := int(n) - if nb < 0 { - return nil, fmt.Errorf("proto: bad byte length %d", nb) - } - end := p.index + nb - if end < p.index || end > len(p.buf) { - return nil, io.ErrUnexpectedEOF - } - - if !alloc { - // todo: check if can get more uses of alloc=false - buf = p.buf[p.index:end] - p.index += nb - return - } - - buf = make([]byte, nb) - copy(buf, p.buf[p.index:]) - p.index += nb - return -} - -// DecodeStringBytes reads an encoded string from the Buffer. -// This is the format used for the proto2 string type. -func (p *Buffer) DecodeStringBytes() (s string, err error) { - buf, err := p.DecodeRawBytes(false) - if err != nil { - return - } - return string(buf), nil -} - -// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. -// If the protocol buffer has extensions, and the field matches, add it as an extension. -// Otherwise, if the XXX_unrecognized field exists, append the skipped data there. -func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error { - oi := o.index - - err := o.skip(t, tag, wire) - if err != nil { - return err - } - - if !unrecField.IsValid() { - return nil - } - - ptr := structPointer_Bytes(base, unrecField) - - // Add the skipped field to struct field - obuf := o.buf - - o.buf = *ptr - o.EncodeVarint(uint64(tag<<3 | wire)) - *ptr = append(o.buf, obuf[oi:o.index]...) - - o.buf = obuf - - return nil -} - -// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. 
-func (o *Buffer) skip(t reflect.Type, tag, wire int) error { - - var u uint64 - var err error - - switch wire { - case WireVarint: - _, err = o.DecodeVarint() - case WireFixed64: - _, err = o.DecodeFixed64() - case WireBytes: - _, err = o.DecodeRawBytes(false) - case WireFixed32: - _, err = o.DecodeFixed32() - case WireStartGroup: - for { - u, err = o.DecodeVarint() - if err != nil { - break - } - fwire := int(u & 0x7) - if fwire == WireEndGroup { - break - } - ftag := int(u >> 3) - err = o.skip(t, ftag, fwire) - if err != nil { - break - } - } - default: - err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t) - } - return err -} - -// Unmarshaler is the interface representing objects that can -// unmarshal themselves. The method should reset the receiver before -// decoding starts. The argument points to data that may be -// overwritten, so implementations should not keep references to the -// buffer. -type Unmarshaler interface { - Unmarshal([]byte) error -} - -// Unmarshal parses the protocol buffer representation in buf and places the -// decoded result in pb. If the struct underlying pb does not match -// the data in buf, the results can be unpredictable. -// -// Unmarshal resets pb before starting to unmarshal, so any -// existing data in pb is always removed. Use UnmarshalMerge -// to preserve and append to existing data. -func Unmarshal(buf []byte, pb Message) error { - pb.Reset() - return UnmarshalMerge(buf, pb) -} - -// UnmarshalMerge parses the protocol buffer representation in buf and -// writes the decoded result to pb. If the struct underlying pb does not match -// the data in buf, the results can be unpredictable. -// -// UnmarshalMerge merges into existing data in pb. -// Most code should use Unmarshal instead. -func UnmarshalMerge(buf []byte, pb Message) error { - // If the object can unmarshal itself, let it. - if u, ok := pb.(Unmarshaler); ok { - return u.Unmarshal(buf) - } - return NewBuffer(buf).Unmarshal(pb) -} - -// DecodeMessage reads a count-delimited message from the Buffer. -func (p *Buffer) DecodeMessage(pb Message) error { - enc, err := p.DecodeRawBytes(false) - if err != nil { - return err - } - return NewBuffer(enc).Unmarshal(pb) -} - -// DecodeGroup reads a tag-delimited group from the Buffer. -func (p *Buffer) DecodeGroup(pb Message) error { - typ, base, err := getbase(pb) - if err != nil { - return err - } - return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base) -} - -// Unmarshal parses the protocol buffer representation in the -// Buffer and places the decoded result in pb. If the struct -// underlying pb does not match the data in the buffer, the results can be -// unpredictable. -// -// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal. -func (p *Buffer) Unmarshal(pb Message) error { - // If the object can unmarshal itself, let it. - if u, ok := pb.(Unmarshaler); ok { - err := u.Unmarshal(p.buf[p.index:]) - p.index = len(p.buf) - return err - } - - typ, base, err := getbase(pb) - if err != nil { - return err - } - - err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base) - - if collectStats { - stats.Decode++ - } - - return err -} - -// unmarshalType does the work of unmarshaling a structure. 
-func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error { - var state errorState - required, reqFields := prop.reqCount, uint64(0) - - var err error - for err == nil && o.index < len(o.buf) { - oi := o.index - var u uint64 - u, err = o.DecodeVarint() - if err != nil { - break - } - wire := int(u & 0x7) - if wire == WireEndGroup { - if is_group { - if required > 0 { - // Not enough information to determine the exact field. - // (See below.) - return &RequiredNotSetError{"{Unknown}"} - } - return nil // input is satisfied - } - return fmt.Errorf("proto: %s: wiretype end group for non-group", st) - } - tag := int(u >> 3) - if tag <= 0 { - return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire) - } - fieldnum, ok := prop.decoderTags.get(tag) - if !ok { - // Maybe it's an extension? - if prop.extendable { - if e, _ := extendable(structPointer_Interface(base, st)); isExtensionField(e, int32(tag)) { - if err = o.skip(st, tag, wire); err == nil { - extmap := e.extensionsWrite() - ext := extmap[int32(tag)] // may be missing - ext.enc = append(ext.enc, o.buf[oi:o.index]...) - extmap[int32(tag)] = ext - } - continue - } - } - // Maybe it's a oneof? - if prop.oneofUnmarshaler != nil { - m := structPointer_Interface(base, st).(Message) - // First return value indicates whether tag is a oneof field. - ok, err = prop.oneofUnmarshaler(m, tag, wire, o) - if err == ErrInternalBadWireType { - // Map the error to something more descriptive. - // Do the formatting here to save generated code space. - err = fmt.Errorf("bad wiretype for oneof field in %T", m) - } - if ok { - continue - } - } - err = o.skipAndSave(st, tag, wire, base, prop.unrecField) - continue - } - p := prop.Prop[fieldnum] - - if p.dec == nil { - fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name) - continue - } - dec := p.dec - if wire != WireStartGroup && wire != p.WireType { - if wire == WireBytes && p.packedDec != nil { - // a packable field - dec = p.packedDec - } else { - err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType) - continue - } - } - decErr := dec(o, p, base) - if decErr != nil && !state.shouldContinue(decErr, p) { - err = decErr - } - if err == nil && p.Required { - // Successfully decoded a required field. - if tag <= 64 { - // use bitmap for fields 1-64 to catch field reuse. - var mask uint64 = 1 << uint64(tag-1) - if reqFields&mask == 0 { - // new required field - reqFields |= mask - required-- - } - } else { - // This is imprecise. It can be fooled by a required field - // with a tag > 64 that is encoded twice; that's very rare. - // A fully correct implementation would require allocating - // a data structure, which we would like to avoid. - required-- - } - } - } - if err == nil { - if is_group { - return io.ErrUnexpectedEOF - } - if state.err != nil { - return state.err - } - if required > 0 { - // Not enough information to determine the exact field. If we use extra - // CPU, we could determine the field only if the missing required field - // has a tag <= 64 and we check reqFields. - return &RequiredNotSetError{"{Unknown}"} - } - } - return err -} - -// Individual type decoders -// For each, -// u is the decoded value, -// v is a pointer to the field (pointer) in the struct - -// Sizes of the pools to allocate inside the Buffer. -// The goal is modest amortization and allocation -// on at least 16-byte boundaries. 
-const ( - boolPoolSize = 16 - uint32PoolSize = 8 - uint64PoolSize = 4 -) - -// Decode a bool. -func (o *Buffer) dec_bool(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - if len(o.bools) == 0 { - o.bools = make([]bool, boolPoolSize) - } - o.bools[0] = u != 0 - *structPointer_Bool(base, p.field) = &o.bools[0] - o.bools = o.bools[1:] - return nil -} - -func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - *structPointer_BoolVal(base, p.field) = u != 0 - return nil -} - -// Decode an int32. -func (o *Buffer) dec_int32(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word32_Set(structPointer_Word32(base, p.field), o, uint32(u)) - return nil -} - -func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u)) - return nil -} - -// Decode an int64. -func (o *Buffer) dec_int64(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word64_Set(structPointer_Word64(base, p.field), o, u) - return nil -} - -func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word64Val_Set(structPointer_Word64Val(base, p.field), o, u) - return nil -} - -// Decode a string. -func (o *Buffer) dec_string(p *Properties, base structPointer) error { - s, err := o.DecodeStringBytes() - if err != nil { - return err - } - *structPointer_String(base, p.field) = &s - return nil -} - -func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error { - s, err := o.DecodeStringBytes() - if err != nil { - return err - } - *structPointer_StringVal(base, p.field) = s - return nil -} - -// Decode a slice of bytes ([]byte). -func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error { - b, err := o.DecodeRawBytes(true) - if err != nil { - return err - } - *structPointer_Bytes(base, p.field) = b - return nil -} - -// Decode a slice of bools ([]bool). -func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - v := structPointer_BoolSlice(base, p.field) - *v = append(*v, u != 0) - return nil -} - -// Decode a slice of bools ([]bool) in packed format. -func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error { - v := structPointer_BoolSlice(base, p.field) - - nn, err := o.DecodeVarint() - if err != nil { - return err - } - nb := int(nn) // number of bytes of encoded bools - fin := o.index + nb - if fin < o.index { - return errOverflow - } - - y := *v - for o.index < fin { - u, err := p.valDec(o) - if err != nil { - return err - } - y = append(y, u != 0) - } - - *v = y - return nil -} - -// Decode a slice of int32s ([]int32). -func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - structPointer_Word32Slice(base, p.field).Append(uint32(u)) - return nil -} - -// Decode a slice of int32s ([]int32) in packed format. 
-func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error { - v := structPointer_Word32Slice(base, p.field) - - nn, err := o.DecodeVarint() - if err != nil { - return err - } - nb := int(nn) // number of bytes of encoded int32s - - fin := o.index + nb - if fin < o.index { - return errOverflow - } - for o.index < fin { - u, err := p.valDec(o) - if err != nil { - return err - } - v.Append(uint32(u)) - } - return nil -} - -// Decode a slice of int64s ([]int64). -func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - - structPointer_Word64Slice(base, p.field).Append(u) - return nil -} - -// Decode a slice of int64s ([]int64) in packed format. -func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error { - v := structPointer_Word64Slice(base, p.field) - - nn, err := o.DecodeVarint() - if err != nil { - return err - } - nb := int(nn) // number of bytes of encoded int64s - - fin := o.index + nb - if fin < o.index { - return errOverflow - } - for o.index < fin { - u, err := p.valDec(o) - if err != nil { - return err - } - v.Append(u) - } - return nil -} - -// Decode a slice of strings ([]string). -func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error { - s, err := o.DecodeStringBytes() - if err != nil { - return err - } - v := structPointer_StringSlice(base, p.field) - *v = append(*v, s) - return nil -} - -// Decode a slice of slice of bytes ([][]byte). -func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error { - b, err := o.DecodeRawBytes(true) - if err != nil { - return err - } - v := structPointer_BytesSlice(base, p.field) - *v = append(*v, b) - return nil -} - -// Decode a map field. -func (o *Buffer) dec_new_map(p *Properties, base structPointer) error { - raw, err := o.DecodeRawBytes(false) - if err != nil { - return err - } - oi := o.index // index at the end of this map entry - o.index -= len(raw) // move buffer back to start of map entry - - mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V - if mptr.Elem().IsNil() { - mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem())) - } - v := mptr.Elem() // map[K]V - - // Prepare addressable doubly-indirect placeholders for the key and value types. - // See enc_new_map for why. - keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K - keybase := toStructPointer(keyptr.Addr()) // **K - - var valbase structPointer - var valptr reflect.Value - switch p.mtype.Elem().Kind() { - case reflect.Slice: - // []byte - var dummy []byte - valptr = reflect.ValueOf(&dummy) // *[]byte - valbase = toStructPointer(valptr) // *[]byte - case reflect.Ptr: - // message; valptr is **Msg; need to allocate the intermediate pointer - valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V - valptr.Set(reflect.New(valptr.Type().Elem())) - valbase = toStructPointer(valptr) - default: - // everything else - valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V - valbase = toStructPointer(valptr.Addr()) // **V - } - - // Decode. - // This parses a restricted wire format, namely the encoding of a message - // with two fields. See enc_new_map for the format. - for o.index < oi { - // tagcode for key and value properties are always a single byte - // because they have tags 1 and 2. 
- tagcode := o.buf[o.index] - o.index++ - switch tagcode { - case p.mkeyprop.tagcode[0]: - if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil { - return err - } - case p.mvalprop.tagcode[0]: - if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil { - return err - } - default: - // TODO: Should we silently skip this instead? - return fmt.Errorf("proto: bad map data tag %d", raw[0]) - } - } - keyelem, valelem := keyptr.Elem(), valptr.Elem() - if !keyelem.IsValid() { - keyelem = reflect.Zero(p.mtype.Key()) - } - if !valelem.IsValid() { - valelem = reflect.Zero(p.mtype.Elem()) - } - - v.SetMapIndex(keyelem, valelem) - return nil -} - -// Decode a group. -func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error { - bas := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(bas) { - // allocate new nested message - bas = toStructPointer(reflect.New(p.stype)) - structPointer_SetStructPointer(base, p.field, bas) - } - return o.unmarshalType(p.stype, p.sprop, true, bas) -} - -// Decode an embedded message. -func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) { - raw, e := o.DecodeRawBytes(false) - if e != nil { - return e - } - - bas := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(bas) { - // allocate new nested message - bas = toStructPointer(reflect.New(p.stype)) - structPointer_SetStructPointer(base, p.field, bas) - } - - // If the object can unmarshal itself, let it. - if p.isUnmarshaler { - iv := structPointer_Interface(bas, p.stype) - return iv.(Unmarshaler).Unmarshal(raw) - } - - obuf := o.buf - oi := o.index - o.buf = raw - o.index = 0 - - err = o.unmarshalType(p.stype, p.sprop, false, bas) - o.buf = obuf - o.index = oi - - return err -} - -// Decode a slice of embedded messages. -func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error { - return o.dec_slice_struct(p, false, base) -} - -// Decode a slice of embedded groups. -func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error { - return o.dec_slice_struct(p, true, base) -} - -// Decode a slice of structs ([]*struct). -func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error { - v := reflect.New(p.stype) - bas := toStructPointer(v) - structPointer_StructPointerSlice(base, p.field).Append(bas) - - if is_group { - err := o.unmarshalType(p.stype, p.sprop, is_group, bas) - return err - } - - raw, err := o.DecodeRawBytes(false) - if err != nil { - return err - } - - // If the object can unmarshal itself, let it. - if p.isUnmarshaler { - iv := v.Interface() - return iv.(Unmarshaler).Unmarshal(raw) - } - - obuf := o.buf - oi := o.index - o.buf = raw - o.index = 0 - - err = o.unmarshalType(p.stype, p.sprop, is_group, bas) - - o.buf = obuf - o.index = oi - - return err -} diff --git a/vendor/github.com/golang/protobuf/proto/discard.go b/vendor/github.com/golang/protobuf/proto/discard.go deleted file mode 100644 index bd0e3bb..0000000 --- a/vendor/github.com/golang/protobuf/proto/discard.go +++ /dev/null @@ -1,151 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2017 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "fmt" - "reflect" - "strings" -) - -// DiscardUnknown recursively discards all unknown fields from this message -// and all embedded messages. -// -// When unmarshaling a message with unrecognized fields, the tags and values -// of such fields are preserved in the Message. This allows a later call to -// marshal to be able to produce a message that continues to have those -// unrecognized fields. To avoid this, DiscardUnknown is used to -// explicitly clear the unknown fields after unmarshaling. -// -// For proto2 messages, the unknown fields of message extensions are only -// discarded from messages that have been accessed via GetExtension. -func DiscardUnknown(m Message) { - discardLegacy(m) -} - -func discardLegacy(m Message) { - v := reflect.ValueOf(m) - if v.Kind() != reflect.Ptr || v.IsNil() { - return - } - v = v.Elem() - if v.Kind() != reflect.Struct { - return - } - t := v.Type() - - for i := 0; i < v.NumField(); i++ { - f := t.Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - vf := v.Field(i) - tf := f.Type - - // Unwrap tf to get its most basic type. 
- var isPointer, isSlice bool - if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { - isSlice = true - tf = tf.Elem() - } - if tf.Kind() == reflect.Ptr { - isPointer = true - tf = tf.Elem() - } - if isPointer && isSlice && tf.Kind() != reflect.Struct { - panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name)) - } - - switch tf.Kind() { - case reflect.Struct: - switch { - case !isPointer: - panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name)) - case isSlice: // E.g., []*pb.T - for j := 0; j < vf.Len(); j++ { - discardLegacy(vf.Index(j).Interface().(Message)) - } - default: // E.g., *pb.T - discardLegacy(vf.Interface().(Message)) - } - case reflect.Map: - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name)) - default: // E.g., map[K]V - tv := vf.Type().Elem() - if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T) - for _, key := range vf.MapKeys() { - val := vf.MapIndex(key) - discardLegacy(val.Interface().(Message)) - } - } - } - case reflect.Interface: - // Must be oneof field. - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name)) - default: // E.g., test_proto.isCommunique_Union interface - if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" { - vf = vf.Elem() // E.g., *test_proto.Communique_Msg - if !vf.IsNil() { - vf = vf.Elem() // E.g., test_proto.Communique_Msg - vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value - if vf.Kind() == reflect.Ptr { - discardLegacy(vf.Interface().(Message)) - } - } - } - } - } - } - - if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() { - if vf.Type() != reflect.TypeOf([]byte{}) { - panic("expected XXX_unrecognized to be of type []byte") - } - vf.Set(reflect.ValueOf([]byte(nil))) - } - - // For proto2 messages, only discard unknown fields in message extensions - // that have been accessed via GetExtension. - if em, ok := extendable(m); ok { - // Ignore lock since discardLegacy is not concurrency safe. - emm, _ := em.extensionsRead() - for _, mx := range emm { - if m, ok := mx.value.(Message); ok { - discardLegacy(m) - } - } - } -} diff --git a/vendor/github.com/golang/protobuf/proto/encode.go b/vendor/github.com/golang/protobuf/proto/encode.go deleted file mode 100644 index 8b84d1b..0000000 --- a/vendor/github.com/golang/protobuf/proto/encode.go +++ /dev/null @@ -1,1362 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for encoding data into the wire format for protocol buffers. - */ - -import ( - "errors" - "fmt" - "reflect" - "sort" -) - -// RequiredNotSetError is the error returned if Marshal is called with -// a protocol buffer struct whose required fields have not -// all been initialized. It is also the error returned if Unmarshal is -// called with an encoded protocol buffer that does not include all the -// required fields. -// -// When printed, RequiredNotSetError reports the first unset required field in a -// message. If the field cannot be precisely determined, it is reported as -// "{Unknown}". -type RequiredNotSetError struct { - field string -} - -func (e *RequiredNotSetError) Error() string { - return fmt.Sprintf("proto: required field %q not set", e.field) -} - -var ( - // errRepeatedHasNil is the error returned if Marshal is called with - // a struct with a repeated field containing a nil element. - errRepeatedHasNil = errors.New("proto: repeated field has nil element") - - // errOneofHasNil is the error returned if Marshal is called with - // a struct with a oneof field containing a nil element. - errOneofHasNil = errors.New("proto: oneof field has nil value") - - // ErrNil is the error returned if Marshal is called with nil. - ErrNil = errors.New("proto: Marshal called with nil") - - // ErrTooLarge is the error returned if Marshal is called with a - // message that encodes to >2GB. - ErrTooLarge = errors.New("proto: message encodes to over 2 GB") -) - -// The fundamental encoders that put bytes on the wire. -// Those that take integer types all accept uint64 and are -// therefore of type valueEncoder. - -const maxVarintBytes = 10 // maximum length of a varint - -// maxMarshalSize is the largest allowed size of an encoded protobuf, -// since C++ and Java use signed int32s for the size. -const maxMarshalSize = 1<<31 - 1 - -// EncodeVarint returns the varint encoding of x. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -// Not used by the package itself, but helpful to clients -// wishing to use the same encoding. -func EncodeVarint(x uint64) []byte { - var buf [maxVarintBytes]byte - var n int - for n = 0; x > 127; n++ { - buf[n] = 0x80 | uint8(x&0x7F) - x >>= 7 - } - buf[n] = uint8(x) - n++ - return buf[0:n] -} - -// EncodeVarint writes a varint-encoded integer to the Buffer. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. 
-func (p *Buffer) EncodeVarint(x uint64) error { - for x >= 1<<7 { - p.buf = append(p.buf, uint8(x&0x7f|0x80)) - x >>= 7 - } - p.buf = append(p.buf, uint8(x)) - return nil -} - -// SizeVarint returns the varint encoding size of an integer. -func SizeVarint(x uint64) int { - return sizeVarint(x) -} - -func sizeVarint(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} - -// EncodeFixed64 writes a 64-bit integer to the Buffer. -// This is the format for the -// fixed64, sfixed64, and double protocol buffer types. -func (p *Buffer) EncodeFixed64(x uint64) error { - p.buf = append(p.buf, - uint8(x), - uint8(x>>8), - uint8(x>>16), - uint8(x>>24), - uint8(x>>32), - uint8(x>>40), - uint8(x>>48), - uint8(x>>56)) - return nil -} - -func sizeFixed64(x uint64) int { - return 8 -} - -// EncodeFixed32 writes a 32-bit integer to the Buffer. -// This is the format for the -// fixed32, sfixed32, and float protocol buffer types. -func (p *Buffer) EncodeFixed32(x uint64) error { - p.buf = append(p.buf, - uint8(x), - uint8(x>>8), - uint8(x>>16), - uint8(x>>24)) - return nil -} - -func sizeFixed32(x uint64) int { - return 4 -} - -// EncodeZigzag64 writes a zigzag-encoded 64-bit integer -// to the Buffer. -// This is the format used for the sint64 protocol buffer type. -func (p *Buffer) EncodeZigzag64(x uint64) error { - // use signed number to get arithmetic right shift. - return p.EncodeVarint((x << 1) ^ uint64((int64(x) >> 63))) -} - -func sizeZigzag64(x uint64) int { - return sizeVarint((x << 1) ^ uint64((int64(x) >> 63))) -} - -// EncodeZigzag32 writes a zigzag-encoded 32-bit integer -// to the Buffer. -// This is the format used for the sint32 protocol buffer type. -func (p *Buffer) EncodeZigzag32(x uint64) error { - // use signed number to get arithmetic right shift. - return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) -} - -func sizeZigzag32(x uint64) int { - return sizeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) -} - -// EncodeRawBytes writes a count-delimited byte buffer to the Buffer. -// This is the format used for the bytes protocol buffer -// type and for embedded messages. -func (p *Buffer) EncodeRawBytes(b []byte) error { - p.EncodeVarint(uint64(len(b))) - p.buf = append(p.buf, b...) - return nil -} - -func sizeRawBytes(b []byte) int { - return sizeVarint(uint64(len(b))) + - len(b) -} - -// EncodeStringBytes writes an encoded string to the Buffer. -// This is the format used for the proto2 string type. -func (p *Buffer) EncodeStringBytes(s string) error { - p.EncodeVarint(uint64(len(s))) - p.buf = append(p.buf, s...) - return nil -} - -func sizeStringBytes(s string) int { - return sizeVarint(uint64(len(s))) + - len(s) -} - -// Marshaler is the interface representing objects that can marshal themselves. -type Marshaler interface { - Marshal() ([]byte, error) -} - -// Marshal takes the protocol buffer -// and encodes it into the wire format, returning the data. -func Marshal(pb Message) ([]byte, error) { - // Can the object marshal itself? - if m, ok := pb.(Marshaler); ok { - return m.Marshal() - } - p := NewBuffer(nil) - err := p.Marshal(pb) - if p.buf == nil && err == nil { - // Return a non-nil slice on success. - return []byte{}, nil - } - return p.buf, err -} - -// EncodeMessage writes the protocol buffer to the Buffer, -// prefixed by a varint-encoded length. 
-func (p *Buffer) EncodeMessage(pb Message) error { - t, base, err := getbase(pb) - if structPointer_IsNil(base) { - return ErrNil - } - if err == nil { - var state errorState - err = p.enc_len_struct(GetProperties(t.Elem()), base, &state) - } - return err -} - -// Marshal takes the protocol buffer -// and encodes it into the wire format, writing the result to the -// Buffer. -func (p *Buffer) Marshal(pb Message) error { - // Can the object marshal itself? - if m, ok := pb.(Marshaler); ok { - data, err := m.Marshal() - p.buf = append(p.buf, data...) - return err - } - - t, base, err := getbase(pb) - if structPointer_IsNil(base) { - return ErrNil - } - if err == nil { - err = p.enc_struct(GetProperties(t.Elem()), base) - } - - if collectStats { - (stats).Encode++ // Parens are to work around a goimports bug. - } - - if len(p.buf) > maxMarshalSize { - return ErrTooLarge - } - return err -} - -// Size returns the encoded size of a protocol buffer. -func Size(pb Message) (n int) { - // Can the object marshal itself? If so, Size is slow. - // TODO: add Size to Marshaler, or add a Sizer interface. - if m, ok := pb.(Marshaler); ok { - b, _ := m.Marshal() - return len(b) - } - - t, base, err := getbase(pb) - if structPointer_IsNil(base) { - return 0 - } - if err == nil { - n = size_struct(GetProperties(t.Elem()), base) - } - - if collectStats { - (stats).Size++ // Parens are to work around a goimports bug. - } - - return -} - -// Individual type encoders. - -// Encode a bool. -func (o *Buffer) enc_bool(p *Properties, base structPointer) error { - v := *structPointer_Bool(base, p.field) - if v == nil { - return ErrNil - } - x := 0 - if *v { - x = 1 - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func (o *Buffer) enc_proto3_bool(p *Properties, base structPointer) error { - v := *structPointer_BoolVal(base, p.field) - if !v { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, 1) - return nil -} - -func size_bool(p *Properties, base structPointer) int { - v := *structPointer_Bool(base, p.field) - if v == nil { - return 0 - } - return len(p.tagcode) + 1 // each bool takes exactly one byte -} - -func size_proto3_bool(p *Properties, base structPointer) int { - v := *structPointer_BoolVal(base, p.field) - if !v && !p.oneof { - return 0 - } - return len(p.tagcode) + 1 // each bool takes exactly one byte -} - -// Encode an int32. -func (o *Buffer) enc_int32(p *Properties, base structPointer) error { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return ErrNil - } - x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func (o *Buffer) enc_proto3_int32(p *Properties, base structPointer) error { - v := structPointer_Word32Val(base, p.field) - x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range - if x == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) 
- p.valEnc(o, uint64(x)) - return nil -} - -func size_int32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return 0 - } - x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -func size_proto3_int32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32Val(base, p.field) - x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range - if x == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -// Encode a uint32. -// Exactly the same as int32, except for no sign extension. -func (o *Buffer) enc_uint32(p *Properties, base structPointer) error { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return ErrNil - } - x := word32_Get(v) - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func (o *Buffer) enc_proto3_uint32(p *Properties, base structPointer) error { - v := structPointer_Word32Val(base, p.field) - x := word32Val_Get(v) - if x == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func size_uint32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return 0 - } - x := word32_Get(v) - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -func size_proto3_uint32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32Val(base, p.field) - x := word32Val_Get(v) - if x == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -// Encode an int64. -func (o *Buffer) enc_int64(p *Properties, base structPointer) error { - v := structPointer_Word64(base, p.field) - if word64_IsNil(v) { - return ErrNil - } - x := word64_Get(v) - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, x) - return nil -} - -func (o *Buffer) enc_proto3_int64(p *Properties, base structPointer) error { - v := structPointer_Word64Val(base, p.field) - x := word64Val_Get(v) - if x == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, x) - return nil -} - -func size_int64(p *Properties, base structPointer) (n int) { - v := structPointer_Word64(base, p.field) - if word64_IsNil(v) { - return 0 - } - x := word64_Get(v) - n += len(p.tagcode) - n += p.valSize(x) - return -} - -func size_proto3_int64(p *Properties, base structPointer) (n int) { - v := structPointer_Word64Val(base, p.field) - x := word64Val_Get(v) - if x == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += p.valSize(x) - return -} - -// Encode a string. -func (o *Buffer) enc_string(p *Properties, base structPointer) error { - v := *structPointer_String(base, p.field) - if v == nil { - return ErrNil - } - x := *v - o.buf = append(o.buf, p.tagcode...) - o.EncodeStringBytes(x) - return nil -} - -func (o *Buffer) enc_proto3_string(p *Properties, base structPointer) error { - v := *structPointer_StringVal(base, p.field) - if v == "" { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) 
- o.EncodeStringBytes(v) - return nil -} - -func size_string(p *Properties, base structPointer) (n int) { - v := *structPointer_String(base, p.field) - if v == nil { - return 0 - } - x := *v - n += len(p.tagcode) - n += sizeStringBytes(x) - return -} - -func size_proto3_string(p *Properties, base structPointer) (n int) { - v := *structPointer_StringVal(base, p.field) - if v == "" && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += sizeStringBytes(v) - return -} - -// All protocol buffer fields are nillable, but be careful. -func isNil(v reflect.Value) bool { - switch v.Kind() { - case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: - return v.IsNil() - } - return false -} - -// Encode a message struct. -func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error { - var state errorState - structp := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return ErrNil - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, err := m.Marshal() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - return state.err - } - - o.buf = append(o.buf, p.tagcode...) - return o.enc_len_struct(p.sprop, structp, &state) -} - -func size_struct_message(p *Properties, base structPointer) int { - structp := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return 0 - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, _ := m.Marshal() - n0 := len(p.tagcode) - n1 := sizeRawBytes(data) - return n0 + n1 - } - - n0 := len(p.tagcode) - n1 := size_struct(p.sprop, structp) - n2 := sizeVarint(uint64(n1)) // size of encoded length - return n0 + n1 + n2 -} - -// Encode a group struct. -func (o *Buffer) enc_struct_group(p *Properties, base structPointer) error { - var state errorState - b := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(b) { - return ErrNil - } - - o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) - err := o.enc_struct(p.sprop, b) - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) - return state.err -} - -func size_struct_group(p *Properties, base structPointer) (n int) { - b := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(b) { - return 0 - } - - n += sizeVarint(uint64((p.Tag << 3) | WireStartGroup)) - n += size_struct(p.sprop, b) - n += sizeVarint(uint64((p.Tag << 3) | WireEndGroup)) - return -} - -// Encode a slice of bools ([]bool). -func (o *Buffer) enc_slice_bool(p *Properties, base structPointer) error { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return ErrNil - } - for _, x := range s { - o.buf = append(o.buf, p.tagcode...) - v := uint64(0) - if x { - v = 1 - } - p.valEnc(o, v) - } - return nil -} - -func size_slice_bool(p *Properties, base structPointer) int { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return 0 - } - return l * (len(p.tagcode) + 1) // each bool takes exactly one byte -} - -// Encode a slice of bools ([]bool) in packed format. -func (o *Buffer) enc_slice_packed_bool(p *Properties, base structPointer) error { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) 
- o.EncodeVarint(uint64(l)) // each bool takes exactly one byte - for _, x := range s { - v := uint64(0) - if x { - v = 1 - } - p.valEnc(o, v) - } - return nil -} - -func size_slice_packed_bool(p *Properties, base structPointer) (n int) { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return 0 - } - n += len(p.tagcode) - n += sizeVarint(uint64(l)) - n += l // each bool takes exactly one byte - return -} - -// Encode a slice of bytes ([]byte). -func (o *Buffer) enc_slice_byte(p *Properties, base structPointer) error { - s := *structPointer_Bytes(base, p.field) - if s == nil { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(s) - return nil -} - -func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error { - s := *structPointer_Bytes(base, p.field) - if len(s) == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(s) - return nil -} - -func size_slice_byte(p *Properties, base structPointer) (n int) { - s := *structPointer_Bytes(base, p.field) - if s == nil && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += sizeRawBytes(s) - return -} - -func size_proto3_slice_byte(p *Properties, base structPointer) (n int) { - s := *structPointer_Bytes(base, p.field) - if len(s) == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += sizeRawBytes(s) - return -} - -// Encode a slice of int32s ([]int32). -func (o *Buffer) enc_slice_int32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - p.valEnc(o, uint64(x)) - } - return nil -} - -func size_slice_int32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - for i := 0; i < l; i++ { - n += len(p.tagcode) - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - n += p.valSize(uint64(x)) - } - return -} - -// Encode a slice of int32s ([]int32) in packed format. -func (o *Buffer) enc_slice_packed_int32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - // TODO: Reuse a Buffer. - buf := NewBuffer(nil) - for i := 0; i < l; i++ { - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - p.valEnc(buf, uint64(x)) - } - - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(len(buf.buf))) - o.buf = append(o.buf, buf.buf...) - return nil -} - -func size_slice_packed_int32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - var bufSize int - for i := 0; i < l; i++ { - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - bufSize += p.valSize(uint64(x)) - } - - n += len(p.tagcode) - n += sizeVarint(uint64(bufSize)) - n += bufSize - return -} - -// Encode a slice of uint32s ([]uint32). -// Exactly the same as int32, except for no sign extension. -func (o *Buffer) enc_slice_uint32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) 
- x := s.Index(i) - p.valEnc(o, uint64(x)) - } - return nil -} - -func size_slice_uint32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - for i := 0; i < l; i++ { - n += len(p.tagcode) - x := s.Index(i) - n += p.valSize(uint64(x)) - } - return -} - -// Encode a slice of uint32s ([]uint32) in packed format. -// Exactly the same as int32, except for no sign extension. -func (o *Buffer) enc_slice_packed_uint32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - // TODO: Reuse a Buffer. - buf := NewBuffer(nil) - for i := 0; i < l; i++ { - p.valEnc(buf, uint64(s.Index(i))) - } - - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(len(buf.buf))) - o.buf = append(o.buf, buf.buf...) - return nil -} - -func size_slice_packed_uint32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - var bufSize int - for i := 0; i < l; i++ { - bufSize += p.valSize(uint64(s.Index(i))) - } - - n += len(p.tagcode) - n += sizeVarint(uint64(bufSize)) - n += bufSize - return -} - -// Encode a slice of int64s ([]int64). -func (o *Buffer) enc_slice_int64(p *Properties, base structPointer) error { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, s.Index(i)) - } - return nil -} - -func size_slice_int64(p *Properties, base structPointer) (n int) { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - for i := 0; i < l; i++ { - n += len(p.tagcode) - n += p.valSize(s.Index(i)) - } - return -} - -// Encode a slice of int64s ([]int64) in packed format. -func (o *Buffer) enc_slice_packed_int64(p *Properties, base structPointer) error { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - // TODO: Reuse a Buffer. - buf := NewBuffer(nil) - for i := 0; i < l; i++ { - p.valEnc(buf, s.Index(i)) - } - - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(len(buf.buf))) - o.buf = append(o.buf, buf.buf...) - return nil -} - -func size_slice_packed_int64(p *Properties, base structPointer) (n int) { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - var bufSize int - for i := 0; i < l; i++ { - bufSize += p.valSize(s.Index(i)) - } - - n += len(p.tagcode) - n += sizeVarint(uint64(bufSize)) - n += bufSize - return -} - -// Encode a slice of slice of bytes ([][]byte). -func (o *Buffer) enc_slice_slice_byte(p *Properties, base structPointer) error { - ss := *structPointer_BytesSlice(base, p.field) - l := len(ss) - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(ss[i]) - } - return nil -} - -func size_slice_slice_byte(p *Properties, base structPointer) (n int) { - ss := *structPointer_BytesSlice(base, p.field) - l := len(ss) - if l == 0 { - return 0 - } - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - n += sizeRawBytes(ss[i]) - } - return -} - -// Encode a slice of strings ([]string). -func (o *Buffer) enc_slice_string(p *Properties, base structPointer) error { - ss := *structPointer_StringSlice(base, p.field) - l := len(ss) - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) 
- o.EncodeStringBytes(ss[i]) - } - return nil -} - -func size_slice_string(p *Properties, base structPointer) (n int) { - ss := *structPointer_StringSlice(base, p.field) - l := len(ss) - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - n += sizeStringBytes(ss[i]) - } - return -} - -// Encode a slice of message structs ([]*struct). -func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) error { - var state errorState - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - - for i := 0; i < l; i++ { - structp := s.Index(i) - if structPointer_IsNil(structp) { - return errRepeatedHasNil - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, err := m.Marshal() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - continue - } - - o.buf = append(o.buf, p.tagcode...) - err := o.enc_len_struct(p.sprop, structp, &state) - if err != nil && !state.shouldContinue(err, nil) { - if err == ErrNil { - return errRepeatedHasNil - } - return err - } - } - return state.err -} - -func size_slice_struct_message(p *Properties, base structPointer) (n int) { - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - structp := s.Index(i) - if structPointer_IsNil(structp) { - return // return the size up to this point - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, _ := m.Marshal() - n += sizeRawBytes(data) - continue - } - - n0 := size_struct(p.sprop, structp) - n1 := sizeVarint(uint64(n0)) // size of encoded length - n += n0 + n1 - } - return -} - -// Encode a slice of group structs ([]*struct). -func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error { - var state errorState - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - - for i := 0; i < l; i++ { - b := s.Index(i) - if structPointer_IsNil(b) { - return errRepeatedHasNil - } - - o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) - - err := o.enc_struct(p.sprop, b) - - if err != nil && !state.shouldContinue(err, nil) { - if err == ErrNil { - return errRepeatedHasNil - } - return err - } - - o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) - } - return state.err -} - -func size_slice_struct_group(p *Properties, base structPointer) (n int) { - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - - n += l * sizeVarint(uint64((p.Tag<<3)|WireStartGroup)) - n += l * sizeVarint(uint64((p.Tag<<3)|WireEndGroup)) - for i := 0; i < l; i++ { - b := s.Index(i) - if structPointer_IsNil(b) { - return // return size up to this point - } - - n += size_struct(p.sprop, b) - } - return -} - -// Encode an extension map. 
-func (o *Buffer) enc_map(p *Properties, base structPointer) error { - exts := structPointer_ExtMap(base, p.field) - if err := encodeExtensionsMap(*exts); err != nil { - return err - } - - return o.enc_map_body(*exts) -} - -func (o *Buffer) enc_exts(p *Properties, base structPointer) error { - exts := structPointer_Extensions(base, p.field) - - v, mu := exts.extensionsRead() - if v == nil { - return nil - } - - mu.Lock() - defer mu.Unlock() - if err := encodeExtensionsMap(v); err != nil { - return err - } - - return o.enc_map_body(v) -} - -func (o *Buffer) enc_map_body(v map[int32]Extension) error { - // Fast-path for common cases: zero or one extensions. - if len(v) <= 1 { - for _, e := range v { - o.buf = append(o.buf, e.enc...) - } - return nil - } - - // Sort keys to provide a deterministic encoding. - keys := make([]int, 0, len(v)) - for k := range v { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - for _, k := range keys { - o.buf = append(o.buf, v[int32(k)].enc...) - } - return nil -} - -func size_map(p *Properties, base structPointer) int { - v := structPointer_ExtMap(base, p.field) - return extensionsMapSize(*v) -} - -func size_exts(p *Properties, base structPointer) int { - v := structPointer_Extensions(base, p.field) - return extensionsSize(v) -} - -// Encode a map field. -func (o *Buffer) enc_new_map(p *Properties, base structPointer) error { - var state errorState // XXX: or do we need to plumb this through? - - /* - A map defined as - map map_field = N; - is encoded in the same way as - message MapFieldEntry { - key_type key = 1; - value_type value = 2; - } - repeated MapFieldEntry map_field = N; - */ - - v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V - if v.Len() == 0 { - return nil - } - - keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) - - enc := func() error { - if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil { - return err - } - if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil && err != ErrNil { - return err - } - return nil - } - - // Don't sort map keys. It is not required by the spec, and C++ doesn't do it. - for _, key := range v.MapKeys() { - val := v.MapIndex(key) - - keycopy.Set(key) - valcopy.Set(val) - - o.buf = append(o.buf, p.tagcode...) - if err := o.enc_len_thing(enc, &state); err != nil { - return err - } - } - return nil -} - -func size_new_map(p *Properties, base structPointer) int { - v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V - - keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) - - n := 0 - for _, key := range v.MapKeys() { - val := v.MapIndex(key) - keycopy.Set(key) - valcopy.Set(val) - - // Tag codes for key and val are the responsibility of the sub-sizer. - keysize := p.mkeyprop.size(p.mkeyprop, keybase) - valsize := p.mvalprop.size(p.mvalprop, valbase) - entry := keysize + valsize - // Add on tag code and length of map entry itself. - n += len(p.tagcode) + sizeVarint(uint64(entry)) + entry - } - return n -} - -// mapEncodeScratch returns a new reflect.Value matching the map's value type, -// and a structPointer suitable for passing to an encoder or sizer. -func mapEncodeScratch(mapType reflect.Type) (keycopy, valcopy reflect.Value, keybase, valbase structPointer) { - // Prepare addressable doubly-indirect placeholders for the key and value types. - // This is needed because the element-type encoders expect **T, but the map iteration produces T. 
- - keycopy = reflect.New(mapType.Key()).Elem() // addressable K - keyptr := reflect.New(reflect.PtrTo(keycopy.Type())).Elem() // addressable *K - keyptr.Set(keycopy.Addr()) // - keybase = toStructPointer(keyptr.Addr()) // **K - - // Value types are more varied and require special handling. - switch mapType.Elem().Kind() { - case reflect.Slice: - // []byte - var dummy []byte - valcopy = reflect.ValueOf(&dummy).Elem() // addressable []byte - valbase = toStructPointer(valcopy.Addr()) - case reflect.Ptr: - // message; the generated field type is map[K]*Msg (so V is *Msg), - // so we only need one level of indirection. - valcopy = reflect.New(mapType.Elem()).Elem() // addressable V - valbase = toStructPointer(valcopy.Addr()) - default: - // everything else - valcopy = reflect.New(mapType.Elem()).Elem() // addressable V - valptr := reflect.New(reflect.PtrTo(valcopy.Type())).Elem() // addressable *V - valptr.Set(valcopy.Addr()) // - valbase = toStructPointer(valptr.Addr()) // **V - } - return -} - -// Encode a struct. -func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error { - var state errorState - // Encode fields in tag order so that decoders may use optimizations - // that depend on the ordering. - // https://developers.google.com/protocol-buffers/docs/encoding#order - for _, i := range prop.order { - p := prop.Prop[i] - if p.enc != nil { - err := p.enc(o, p, base) - if err != nil { - if err == ErrNil { - if p.Required && state.err == nil { - state.err = &RequiredNotSetError{p.Name} - } - } else if err == errRepeatedHasNil { - // Give more context to nil values in repeated fields. - return errors.New("repeated field " + p.OrigName + " has nil element") - } else if !state.shouldContinue(err, p) { - return err - } - } - if len(o.buf) > maxMarshalSize { - return ErrTooLarge - } - } - } - - // Do oneof fields. - if prop.oneofMarshaler != nil { - m := structPointer_Interface(base, prop.stype).(Message) - if err := prop.oneofMarshaler(m, o); err == ErrNil { - return errOneofHasNil - } else if err != nil { - return err - } - } - - // Add unrecognized fields at the end. - if prop.unrecField.IsValid() { - v := *structPointer_Bytes(base, prop.unrecField) - if len(o.buf)+len(v) > maxMarshalSize { - return ErrTooLarge - } - if len(v) > 0 { - o.buf = append(o.buf, v...) - } - } - - return state.err -} - -func size_struct(prop *StructProperties, base structPointer) (n int) { - for _, i := range prop.order { - p := prop.Prop[i] - if p.size != nil { - n += p.size(p, base) - } - } - - // Add unrecognized fields at the end. - if prop.unrecField.IsValid() { - v := *structPointer_Bytes(base, prop.unrecField) - n += len(v) - } - - // Factor in any oneof fields. - if prop.oneofSizer != nil { - m := structPointer_Interface(base, prop.stype).(Message) - n += prop.oneofSizer(m) - } - - return -} - -var zeroes [20]byte // longer than any conceivable sizeVarint - -// Encode a struct, preceded by its encoded length (as a varint). -func (o *Buffer) enc_len_struct(prop *StructProperties, base structPointer, state *errorState) error { - return o.enc_len_thing(func() error { return o.enc_struct(prop, base) }, state) -} - -// Encode something, preceded by its encoded length (as a varint). 
-func (o *Buffer) enc_len_thing(enc func() error, state *errorState) error { - iLen := len(o.buf) - o.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length - iMsg := len(o.buf) - err := enc() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - lMsg := len(o.buf) - iMsg - lLen := sizeVarint(uint64(lMsg)) - switch x := lLen - (iMsg - iLen); { - case x > 0: // actual length is x bytes larger than the space we reserved - // Move msg x bytes right. - o.buf = append(o.buf, zeroes[:x]...) - copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) - case x < 0: // actual length is x bytes smaller than the space we reserved - // Move msg x bytes left. - copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) - o.buf = o.buf[:len(o.buf)+x] // x is negative - } - // Encode the length in the reserved space. - o.buf = o.buf[:iLen] - o.EncodeVarint(uint64(lMsg)) - o.buf = o.buf[:len(o.buf)+lMsg] - return state.err -} - -// errorState maintains the first error that occurs and updates that error -// with additional context. -type errorState struct { - err error -} - -// shouldContinue reports whether encoding should continue upon encountering the -// given error. If the error is RequiredNotSetError, shouldContinue returns true -// and, if this is the first appearance of that error, remembers it for future -// reporting. -// -// If prop is not nil, it may update any error with additional context about the -// field with the error. -func (s *errorState) shouldContinue(err error, prop *Properties) bool { - // Ignore unset required fields. - reqNotSet, ok := err.(*RequiredNotSetError) - if !ok { - return false - } - if s.err == nil { - if prop != nil { - err = &RequiredNotSetError{prop.Name + "." + reqNotSet.field} - } - s.err = err - } - return true -} diff --git a/vendor/github.com/golang/protobuf/proto/equal.go b/vendor/github.com/golang/protobuf/proto/equal.go deleted file mode 100644 index 2ed1cf5..0000000 --- a/vendor/github.com/golang/protobuf/proto/equal.go +++ /dev/null @@ -1,300 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Protocol buffer comparison. - -package proto - -import ( - "bytes" - "log" - "reflect" - "strings" -) - -/* -Equal returns true iff protocol buffers a and b are equal. -The arguments must both be pointers to protocol buffer structs. - -Equality is defined in this way: - - Two messages are equal iff they are the same type, - corresponding fields are equal, unknown field sets - are equal, and extensions sets are equal. - - Two set scalar fields are equal iff their values are equal. - If the fields are of a floating-point type, remember that - NaN != x for all x, including NaN. If the message is defined - in a proto3 .proto file, fields are not "set"; specifically, - zero length proto3 "bytes" fields are equal (nil == {}). - - Two repeated fields are equal iff their lengths are the same, - and their corresponding elements are equal. Note a "bytes" field, - although represented by []byte, is not a repeated field and the - rule for the scalar fields described above applies. - - Two unset fields are equal. - - Two unknown field sets are equal if their current - encoded state is equal. - - Two extension sets are equal iff they have corresponding - elements that are pairwise equal. - - Two map fields are equal iff their lengths are the same, - and they contain the same set of elements. Zero-length map - fields are equal. - - Every other combination of things are not equal. - -The return value is undefined if a and b are not protocol buffers. -*/ -func Equal(a, b Message) bool { - if a == nil || b == nil { - return a == b - } - v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b) - if v1.Type() != v2.Type() { - return false - } - if v1.Kind() == reflect.Ptr { - if v1.IsNil() { - return v2.IsNil() - } - if v2.IsNil() { - return false - } - v1, v2 = v1.Elem(), v2.Elem() - } - if v1.Kind() != reflect.Struct { - return false - } - return equalStruct(v1, v2) -} - -// v1 and v2 are known to have the same type. 
-func equalStruct(v1, v2 reflect.Value) bool { - sprop := GetProperties(v1.Type()) - for i := 0; i < v1.NumField(); i++ { - f := v1.Type().Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - f1, f2 := v1.Field(i), v2.Field(i) - if f.Type.Kind() == reflect.Ptr { - if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 { - // both unset - continue - } else if n1 != n2 { - // set/unset mismatch - return false - } - b1, ok := f1.Interface().(raw) - if ok { - b2 := f2.Interface().(raw) - // RawMessage - if !bytes.Equal(b1.Bytes(), b2.Bytes()) { - return false - } - continue - } - f1, f2 = f1.Elem(), f2.Elem() - } - if !equalAny(f1, f2, sprop.Prop[i]) { - return false - } - } - - if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() { - em2 := v2.FieldByName("XXX_InternalExtensions") - if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) { - return false - } - } - - if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() { - em2 := v2.FieldByName("XXX_extensions") - if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { - return false - } - } - - uf := v1.FieldByName("XXX_unrecognized") - if !uf.IsValid() { - return true - } - - u1 := uf.Bytes() - u2 := v2.FieldByName("XXX_unrecognized").Bytes() - if !bytes.Equal(u1, u2) { - return false - } - - return true -} - -// v1 and v2 are known to have the same type. -// prop may be nil. -func equalAny(v1, v2 reflect.Value, prop *Properties) bool { - if v1.Type() == protoMessageType { - m1, _ := v1.Interface().(Message) - m2, _ := v2.Interface().(Message) - return Equal(m1, m2) - } - switch v1.Kind() { - case reflect.Bool: - return v1.Bool() == v2.Bool() - case reflect.Float32, reflect.Float64: - return v1.Float() == v2.Float() - case reflect.Int32, reflect.Int64: - return v1.Int() == v2.Int() - case reflect.Interface: - // Probably a oneof field; compare the inner values. - n1, n2 := v1.IsNil(), v2.IsNil() - if n1 || n2 { - return n1 == n2 - } - e1, e2 := v1.Elem(), v2.Elem() - if e1.Type() != e2.Type() { - return false - } - return equalAny(e1, e2, nil) - case reflect.Map: - if v1.Len() != v2.Len() { - return false - } - for _, key := range v1.MapKeys() { - val2 := v2.MapIndex(key) - if !val2.IsValid() { - // This key was not found in the second map. - return false - } - if !equalAny(v1.MapIndex(key), val2, nil) { - return false - } - } - return true - case reflect.Ptr: - // Maps may have nil values in them, so check for nil. - if v1.IsNil() && v2.IsNil() { - return true - } - if v1.IsNil() != v2.IsNil() { - return false - } - return equalAny(v1.Elem(), v2.Elem(), prop) - case reflect.Slice: - if v1.Type().Elem().Kind() == reflect.Uint8 { - // short circuit: []byte - - // Edge case: if this is in a proto3 message, a zero length - // bytes field is considered the zero value. 
- if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 { - return true - } - if v1.IsNil() != v2.IsNil() { - return false - } - return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte)) - } - - if v1.Len() != v2.Len() { - return false - } - for i := 0; i < v1.Len(); i++ { - if !equalAny(v1.Index(i), v2.Index(i), prop) { - return false - } - } - return true - case reflect.String: - return v1.Interface().(string) == v2.Interface().(string) - case reflect.Struct: - return equalStruct(v1, v2) - case reflect.Uint32, reflect.Uint64: - return v1.Uint() == v2.Uint() - } - - // unknown type, so not a protocol buffer - log.Printf("proto: don't know how to compare %v", v1) - return false -} - -// base is the struct type that the extensions are based on. -// x1 and x2 are InternalExtensions. -func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool { - em1, _ := x1.extensionsRead() - em2, _ := x2.extensionsRead() - return equalExtMap(base, em1, em2) -} - -func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { - if len(em1) != len(em2) { - return false - } - - for extNum, e1 := range em1 { - e2, ok := em2[extNum] - if !ok { - return false - } - - m1, m2 := e1.value, e2.value - - if m1 != nil && m2 != nil { - // Both are unencoded. - if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { - return false - } - continue - } - - // At least one is encoded. To do a semantically correct comparison - // we need to unmarshal them first. - var desc *ExtensionDesc - if m := extensionMaps[base]; m != nil { - desc = m[extNum] - } - if desc == nil { - log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) - continue - } - var err error - if m1 == nil { - m1, err = decodeExtension(e1.enc, desc) - } - if m2 == nil && err == nil { - m2, err = decodeExtension(e2.enc, desc) - } - if err != nil { - // The encoded form is invalid. - log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err) - return false - } - if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { - return false - } - } - - return true -} diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go deleted file mode 100644 index eaad218..0000000 --- a/vendor/github.com/golang/protobuf/proto/extensions.go +++ /dev/null @@ -1,587 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Types and routines for supporting protocol buffer extensions. - */ - -import ( - "errors" - "fmt" - "reflect" - "strconv" - "sync" -) - -// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message. -var ErrMissingExtension = errors.New("proto: missing extension") - -// ExtensionRange represents a range of message extensions for a protocol buffer. -// Used in code generated by the protocol compiler. -type ExtensionRange struct { - Start, End int32 // both inclusive -} - -// extendableProto is an interface implemented by any protocol buffer generated by the current -// proto compiler that may be extended. -type extendableProto interface { - Message - ExtensionRangeArray() []ExtensionRange - extensionsWrite() map[int32]Extension - extensionsRead() (map[int32]Extension, sync.Locker) -} - -// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous -// version of the proto compiler that may be extended. -type extendableProtoV1 interface { - Message - ExtensionRangeArray() []ExtensionRange - ExtensionMap() map[int32]Extension -} - -// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto. -type extensionAdapter struct { - extendableProtoV1 -} - -func (e extensionAdapter) extensionsWrite() map[int32]Extension { - return e.ExtensionMap() -} - -func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { - return e.ExtensionMap(), notLocker{} -} - -// notLocker is a sync.Locker whose Lock and Unlock methods are nops. -type notLocker struct{} - -func (n notLocker) Lock() {} -func (n notLocker) Unlock() {} - -// extendable returns the extendableProto interface for the given generated proto message. -// If the proto message has the old extension format, it returns a wrapper that implements -// the extendableProto interface. -func extendable(p interface{}) (extendableProto, bool) { - if ep, ok := p.(extendableProto); ok { - return ep, ok - } - if ep, ok := p.(extendableProtoV1); ok { - return extensionAdapter{ep}, ok - } - return nil, false -} - -// XXX_InternalExtensions is an internal representation of proto extensions. -// -// Each generated message struct type embeds an anonymous XXX_InternalExtensions field, -// thus gaining the unexported 'extensions' method, which can be called only from the proto package. -// -// The methods of XXX_InternalExtensions are not concurrency safe in general, -// but calls to logically read-only methods such as has and get may be executed concurrently. -type XXX_InternalExtensions struct { - // The struct must be indirect so that if a user inadvertently copies a - // generated message and its embedded XXX_InternalExtensions, they - // avoid the mayhem of a copied mutex. - // - // The mutex serializes all logically read-only operations to p.extensionMap. - // It is up to the client to ensure that write operations to p.extensionMap are - // mutually exclusive with other accesses. 
- p *struct { - mu sync.Mutex - extensionMap map[int32]Extension - } -} - -// extensionsWrite returns the extension map, creating it on first use. -func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension { - if e.p == nil { - e.p = new(struct { - mu sync.Mutex - extensionMap map[int32]Extension - }) - e.p.extensionMap = make(map[int32]Extension) - } - return e.p.extensionMap -} - -// extensionsRead returns the extensions map for read-only use. It may be nil. -// The caller must hold the returned mutex's lock when accessing Elements within the map. -func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) { - if e.p == nil { - return nil, nil - } - return e.p.extensionMap, &e.p.mu -} - -var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem() -var extendableProtoV1Type = reflect.TypeOf((*extendableProtoV1)(nil)).Elem() - -// ExtensionDesc represents an extension specification. -// Used in generated code from the protocol compiler. -type ExtensionDesc struct { - ExtendedType Message // nil pointer to the type that is being extended - ExtensionType interface{} // nil pointer to the extension type - Field int32 // field number - Name string // fully-qualified name of extension, for text formatting - Tag string // protobuf tag style - Filename string // name of the file in which the extension is defined -} - -func (ed *ExtensionDesc) repeated() bool { - t := reflect.TypeOf(ed.ExtensionType) - return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 -} - -// Extension represents an extension in a message. -type Extension struct { - // When an extension is stored in a message using SetExtension - // only desc and value are set. When the message is marshaled - // enc will be set to the encoded form of the message. - // - // When a message is unmarshaled and contains extensions, each - // extension will have only enc set. When such an extension is - // accessed using GetExtension (or GetExtensions) desc and value - // will be set. - desc *ExtensionDesc - value interface{} - enc []byte -} - -// SetRawExtension is for testing only. -func SetRawExtension(base Message, id int32, b []byte) { - epb, ok := extendable(base) - if !ok { - return - } - extmap := epb.extensionsWrite() - extmap[id] = Extension{enc: b} -} - -// isExtensionField returns true iff the given field number is in an extension range. -func isExtensionField(pb extendableProto, field int32) bool { - for _, er := range pb.ExtensionRangeArray() { - if er.Start <= field && field <= er.End { - return true - } - } - return false -} - -// checkExtensionTypes checks that the given extension is valid for pb. -func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { - var pbi interface{} = pb - // Check the extended type. - if ea, ok := pbi.(extensionAdapter); ok { - pbi = ea.extendableProtoV1 - } - if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b { - return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String()) - } - // Check the range. - if !isExtensionField(pb, extension.Field) { - return errors.New("proto: bad extension number; not in declared ranges") - } - return nil -} - -// extPropKey is sufficient to uniquely identify an extension. 
-type extPropKey struct { - base reflect.Type - field int32 -} - -var extProp = struct { - sync.RWMutex - m map[extPropKey]*Properties -}{ - m: make(map[extPropKey]*Properties), -} - -func extensionProperties(ed *ExtensionDesc) *Properties { - key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field} - - extProp.RLock() - if prop, ok := extProp.m[key]; ok { - extProp.RUnlock() - return prop - } - extProp.RUnlock() - - extProp.Lock() - defer extProp.Unlock() - // Check again. - if prop, ok := extProp.m[key]; ok { - return prop - } - - prop := new(Properties) - prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil) - extProp.m[key] = prop - return prop -} - -// encode encodes any unmarshaled (unencoded) extensions in e. -func encodeExtensions(e *XXX_InternalExtensions) error { - m, mu := e.extensionsRead() - if m == nil { - return nil // fast path - } - mu.Lock() - defer mu.Unlock() - return encodeExtensionsMap(m) -} - -// encode encodes any unmarshaled (unencoded) extensions in e. -func encodeExtensionsMap(m map[int32]Extension) error { - for k, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - et := reflect.TypeOf(e.desc.ExtensionType) - props := extensionProperties(e.desc) - - p := NewBuffer(nil) - // If e.value has type T, the encoder expects a *struct{ X T }. - // Pass a *T with a zero field and hope it all works out. - x := reflect.New(et) - x.Elem().Set(reflect.ValueOf(e.value)) - if err := props.enc(p, props, toStructPointer(x)); err != nil { - return err - } - e.enc = p.buf - m[k] = e - } - return nil -} - -func extensionsSize(e *XXX_InternalExtensions) (n int) { - m, mu := e.extensionsRead() - if m == nil { - return 0 - } - mu.Lock() - defer mu.Unlock() - return extensionsMapSize(m) -} - -func extensionsMapSize(m map[int32]Extension) (n int) { - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - n += len(e.enc) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - et := reflect.TypeOf(e.desc.ExtensionType) - props := extensionProperties(e.desc) - - // If e.value has type T, the encoder expects a *struct{ X T }. - // Pass a *T with a zero field and hope it all works out. - x := reflect.New(et) - x.Elem().Set(reflect.ValueOf(e.value)) - n += props.size(props, toStructPointer(x)) - } - return -} - -// HasExtension returns whether the given extension is present in pb. -func HasExtension(pb Message, extension *ExtensionDesc) bool { - // TODO: Check types, field numbers, etc.? - epb, ok := extendable(pb) - if !ok { - return false - } - extmap, mu := epb.extensionsRead() - if extmap == nil { - return false - } - mu.Lock() - _, ok = extmap[extension.Field] - mu.Unlock() - return ok -} - -// ClearExtension removes the given extension from pb. -func ClearExtension(pb Message, extension *ExtensionDesc) { - epb, ok := extendable(pb) - if !ok { - return - } - // TODO: Check types, field numbers, etc.? - extmap := epb.extensionsWrite() - delete(extmap, extension.Field) -} - -// GetExtension parses and returns the given extension of pb. -// If the extension is not present and has no default value it returns ErrMissingExtension. 
-func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { - epb, ok := extendable(pb) - if !ok { - return nil, errors.New("proto: not an extendable proto") - } - - if err := checkExtensionTypes(epb, extension); err != nil { - return nil, err - } - - emap, mu := epb.extensionsRead() - if emap == nil { - return defaultExtensionValue(extension) - } - mu.Lock() - defer mu.Unlock() - e, ok := emap[extension.Field] - if !ok { - // defaultExtensionValue returns the default value or - // ErrMissingExtension if there is no default. - return defaultExtensionValue(extension) - } - - if e.value != nil { - // Already decoded. Check the descriptor, though. - if e.desc != extension { - // This shouldn't happen. If it does, it means that - // GetExtension was called twice with two different - // descriptors with the same field number. - return nil, errors.New("proto: descriptor conflict") - } - return e.value, nil - } - - v, err := decodeExtension(e.enc, extension) - if err != nil { - return nil, err - } - - // Remember the decoded version and drop the encoded version. - // That way it is safe to mutate what we return. - e.value = v - e.desc = extension - e.enc = nil - emap[extension.Field] = e - return e.value, nil -} - -// defaultExtensionValue returns the default value for extension. -// If no default for an extension is defined ErrMissingExtension is returned. -func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { - t := reflect.TypeOf(extension.ExtensionType) - props := extensionProperties(extension) - - sf, _, err := fieldDefault(t, props) - if err != nil { - return nil, err - } - - if sf == nil || sf.value == nil { - // There is no default value. - return nil, ErrMissingExtension - } - - if t.Kind() != reflect.Ptr { - // We do not need to return a Ptr, we can directly return sf.value. - return sf.value, nil - } - - // We need to return an interface{} that is a pointer to sf.value. - value := reflect.New(t).Elem() - value.Set(reflect.New(value.Type().Elem())) - if sf.kind == reflect.Int32 { - // We may have an int32 or an enum, but the underlying data is int32. - // Since we can't set an int32 into a non int32 reflect.value directly - // set it as a int32. - value.Elem().SetInt(int64(sf.value.(int32))) - } else { - value.Elem().Set(reflect.ValueOf(sf.value)) - } - return value.Interface(), nil -} - -// decodeExtension decodes an extension encoded in b. -func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { - o := NewBuffer(b) - - t := reflect.TypeOf(extension.ExtensionType) - - props := extensionProperties(extension) - - // t is a pointer to a struct, pointer to basic type or a slice. - // Allocate a "field" to store the pointer/slice itself; the - // pointer/slice will be stored here. We pass - // the address of this field to props.dec. - // This passes a zero field and a *t and lets props.dec - // interpret it as a *struct{ x t }. - value := reflect.New(t).Elem() - - for { - // Discard wire type and field number varint. It isn't needed. - if _, err := o.DecodeVarint(); err != nil { - return nil, err - } - - if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil { - return nil, err - } - - if o.index >= len(o.buf) { - break - } - } - return value.Interface(), nil -} - -// GetExtensions returns a slice of the extensions present in pb that are also listed in es. -// The returned slice has the same length as es; missing extensions will appear as nil elements. 
-func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { - epb, ok := extendable(pb) - if !ok { - return nil, errors.New("proto: not an extendable proto") - } - extensions = make([]interface{}, len(es)) - for i, e := range es { - extensions[i], err = GetExtension(epb, e) - if err == ErrMissingExtension { - err = nil - } - if err != nil { - return - } - } - return -} - -// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order. -// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing -// just the Field field, which defines the extension's field number. -func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { - epb, ok := extendable(pb) - if !ok { - return nil, fmt.Errorf("proto: %T is not an extendable proto.Message", pb) - } - registeredExtensions := RegisteredExtensions(pb) - - emap, mu := epb.extensionsRead() - if emap == nil { - return nil, nil - } - mu.Lock() - defer mu.Unlock() - extensions := make([]*ExtensionDesc, 0, len(emap)) - for extid, e := range emap { - desc := e.desc - if desc == nil { - desc = registeredExtensions[extid] - if desc == nil { - desc = &ExtensionDesc{Field: extid} - } - } - - extensions = append(extensions, desc) - } - return extensions, nil -} - -// SetExtension sets the specified extension of pb to the specified value. -func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { - epb, ok := extendable(pb) - if !ok { - return errors.New("proto: not an extendable proto") - } - if err := checkExtensionTypes(epb, extension); err != nil { - return err - } - typ := reflect.TypeOf(extension.ExtensionType) - if typ != reflect.TypeOf(value) { - return errors.New("proto: bad extension value type") - } - // nil extension values need to be caught early, because the - // encoder can't distinguish an ErrNil due to a nil extension - // from an ErrNil due to a missing field. Extensions are - // always optional, so the encoder would just swallow the error - // and drop all the extensions from the encoded message. - if reflect.ValueOf(value).IsNil() { - return fmt.Errorf("proto: SetExtension called with nil value of type %T", value) - } - - extmap := epb.extensionsWrite() - extmap[extension.Field] = Extension{desc: extension, value: value} - return nil -} - -// ClearAllExtensions clears all extensions from pb. -func ClearAllExtensions(pb Message) { - epb, ok := extendable(pb) - if !ok { - return - } - m := epb.extensionsWrite() - for k := range m { - delete(m, k) - } -} - -// A global registry of extensions. -// The generated code will register the generated descriptors by calling RegisterExtension. - -var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc) - -// RegisterExtension is called from the generated code. -func RegisterExtension(desc *ExtensionDesc) { - st := reflect.TypeOf(desc.ExtendedType).Elem() - m := extensionMaps[st] - if m == nil { - m = make(map[int32]*ExtensionDesc) - extensionMaps[st] = m - } - if _, ok := m[desc.Field]; ok { - panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field))) - } - m[desc.Field] = desc -} - -// RegisteredExtensions returns a map of the registered extensions of a -// protocol buffer struct, indexed by the extension number. -// The argument pb should be a nil pointer to the struct type. 
-func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { - return extensionMaps[reflect.TypeOf(pb).Elem()] -} diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go deleted file mode 100644 index 1c22550..0000000 --- a/vendor/github.com/golang/protobuf/proto/lib.go +++ /dev/null @@ -1,897 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -/* -Package proto converts data structures to and from the wire format of -protocol buffers. It works in concert with the Go source code generated -for .proto files by the protocol compiler. - -A summary of the properties of the protocol buffer interface -for a protocol buffer variable v: - - - Names are turned from camel_case to CamelCase for export. - - There are no methods on v to set fields; just treat - them as structure fields. - - There are getters that return a field's value if set, - and return the field's default value if unset. - The getters work even if the receiver is a nil message. - - The zero value for a struct is its correct initialization state. - All desired fields must be set before marshaling. - - A Reset() method will restore a protobuf struct to its zero state. - - Non-repeated fields are pointers to the values; nil means unset. - That is, optional or required field int32 f becomes F *int32. - - Repeated fields are slices. - - Helper functions are available to aid the setting of fields. - msg.Foo = proto.String("hello") // set field - - Constants are defined to hold the default values of all fields that - have them. They have the form Default_StructName_FieldName. - Because the getter methods handle defaulted values, - direct use of these constants should be rare. - - Enums are given type names and maps from names to values. - Enum values are prefixed by the enclosing message's name, or by the - enum's type name if it is a top-level enum. 
Enum types have a String - method, and a Enum method to assist in message construction. - - Nested messages, groups and enums have type names prefixed with the name of - the surrounding message type. - - Extensions are given descriptor names that start with E_, - followed by an underscore-delimited list of the nested messages - that contain it (if any) followed by the CamelCased name of the - extension field itself. HasExtension, ClearExtension, GetExtension - and SetExtension are functions for manipulating extensions. - - Oneof field sets are given a single field in their message, - with distinguished wrapper types for each possible field value. - - Marshal and Unmarshal are functions to encode and decode the wire format. - -When the .proto file specifies `syntax="proto3"`, there are some differences: - - - Non-repeated fields of non-message type are values instead of pointers. - - Enum types do not get an Enum method. - -The simplest way to describe this is to see an example. -Given file test.proto, containing - - package example; - - enum FOO { X = 17; } - - message Test { - required string label = 1; - optional int32 type = 2 [default=77]; - repeated int64 reps = 3; - optional group OptionalGroup = 4 { - required string RequiredField = 5; - } - oneof union { - int32 number = 6; - string name = 7; - } - } - -The resulting file, test.pb.go, is: - - package example - - import proto "github.com/golang/protobuf/proto" - import math "math" - - type FOO int32 - const ( - FOO_X FOO = 17 - ) - var FOO_name = map[int32]string{ - 17: "X", - } - var FOO_value = map[string]int32{ - "X": 17, - } - - func (x FOO) Enum() *FOO { - p := new(FOO) - *p = x - return p - } - func (x FOO) String() string { - return proto.EnumName(FOO_name, int32(x)) - } - func (x *FOO) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FOO_value, data) - if err != nil { - return err - } - *x = FOO(value) - return nil - } - - type Test struct { - Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` - Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` - Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` - Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` - // Types that are valid to be assigned to Union: - // *Test_Number - // *Test_Name - Union isTest_Union `protobuf_oneof:"union"` - XXX_unrecognized []byte `json:"-"` - } - func (m *Test) Reset() { *m = Test{} } - func (m *Test) String() string { return proto.CompactTextString(m) } - func (*Test) ProtoMessage() {} - - type isTest_Union interface { - isTest_Union() - } - - type Test_Number struct { - Number int32 `protobuf:"varint,6,opt,name=number"` - } - type Test_Name struct { - Name string `protobuf:"bytes,7,opt,name=name"` - } - - func (*Test_Number) isTest_Union() {} - func (*Test_Name) isTest_Union() {} - - func (m *Test) GetUnion() isTest_Union { - if m != nil { - return m.Union - } - return nil - } - const Default_Test_Type int32 = 77 - - func (m *Test) GetLabel() string { - if m != nil && m.Label != nil { - return *m.Label - } - return "" - } - - func (m *Test) GetType() int32 { - if m != nil && m.Type != nil { - return *m.Type - } - return Default_Test_Type - } - - func (m *Test) GetOptionalgroup() *Test_OptionalGroup { - if m != nil { - return m.Optionalgroup - } - return nil - } - - type Test_OptionalGroup struct { - RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` - } - func (m 
*Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } - func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } - - func (m *Test_OptionalGroup) GetRequiredField() string { - if m != nil && m.RequiredField != nil { - return *m.RequiredField - } - return "" - } - - func (m *Test) GetNumber() int32 { - if x, ok := m.GetUnion().(*Test_Number); ok { - return x.Number - } - return 0 - } - - func (m *Test) GetName() string { - if x, ok := m.GetUnion().(*Test_Name); ok { - return x.Name - } - return "" - } - - func init() { - proto.RegisterEnum("example.FOO", FOO_name, FOO_value) - } - -To create and play with a Test object: - - package main - - import ( - "log" - - "github.com/golang/protobuf/proto" - pb "./example.pb" - ) - - func main() { - test := &pb.Test{ - Label: proto.String("hello"), - Type: proto.Int32(17), - Reps: []int64{1, 2, 3}, - Optionalgroup: &pb.Test_OptionalGroup{ - RequiredField: proto.String("good bye"), - }, - Union: &pb.Test_Name{"fred"}, - } - data, err := proto.Marshal(test) - if err != nil { - log.Fatal("marshaling error: ", err) - } - newTest := &pb.Test{} - err = proto.Unmarshal(data, newTest) - if err != nil { - log.Fatal("unmarshaling error: ", err) - } - // Now test and newTest contain the same data. - if test.GetLabel() != newTest.GetLabel() { - log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) - } - // Use a type switch to determine which oneof was set. - switch u := test.Union.(type) { - case *pb.Test_Number: // u.Number contains the number. - case *pb.Test_Name: // u.Name contains the string. - } - // etc. - } -*/ -package proto - -import ( - "encoding/json" - "fmt" - "log" - "reflect" - "sort" - "strconv" - "sync" -) - -// Message is implemented by generated protocol buffer messages. -type Message interface { - Reset() - String() string - ProtoMessage() -} - -// Stats records allocation details about the protocol buffer encoders -// and decoders. Useful for tuning the library itself. -type Stats struct { - Emalloc uint64 // mallocs in encode - Dmalloc uint64 // mallocs in decode - Encode uint64 // number of encodes - Decode uint64 // number of decodes - Chit uint64 // number of cache hits - Cmiss uint64 // number of cache misses - Size uint64 // number of sizes -} - -// Set to true to enable stats collection. -const collectStats = false - -var stats Stats - -// GetStats returns a copy of the global Stats structure. -func GetStats() Stats { return stats } - -// A Buffer is a buffer manager for marshaling and unmarshaling -// protocol buffers. It may be reused between invocations to -// reduce memory usage. It is not necessary to use a Buffer; -// the global functions Marshal and Unmarshal create a -// temporary Buffer and are fine for most applications. -type Buffer struct { - buf []byte // encode/decode byte stream - index int // read point - - // pools of basic types to amortize allocation. - bools []bool - uint32s []uint32 - uint64s []uint64 - - // extra pools, only used with pointer_reflect.go - int32s []int32 - int64s []int64 - float32s []float32 - float64s []float64 -} - -// NewBuffer allocates a new Buffer and initializes its internal data to -// the contents of the argument slice. -func NewBuffer(e []byte) *Buffer { - return &Buffer{buf: e} -} - -// Reset resets the Buffer, ready for marshaling a new protocol buffer. 
-func (p *Buffer) Reset() { - p.buf = p.buf[0:0] // for reading/writing - p.index = 0 // for reading -} - -// SetBuf replaces the internal buffer with the slice, -// ready for unmarshaling the contents of the slice. -func (p *Buffer) SetBuf(s []byte) { - p.buf = s - p.index = 0 -} - -// Bytes returns the contents of the Buffer. -func (p *Buffer) Bytes() []byte { return p.buf } - -/* - * Helper routines for simplifying the creation of optional fields of basic type. - */ - -// Bool is a helper routine that allocates a new bool value -// to store v and returns a pointer to it. -func Bool(v bool) *bool { - return &v -} - -// Int32 is a helper routine that allocates a new int32 value -// to store v and returns a pointer to it. -func Int32(v int32) *int32 { - return &v -} - -// Int is a helper routine that allocates a new int32 value -// to store v and returns a pointer to it, but unlike Int32 -// its argument value is an int. -func Int(v int) *int32 { - p := new(int32) - *p = int32(v) - return p -} - -// Int64 is a helper routine that allocates a new int64 value -// to store v and returns a pointer to it. -func Int64(v int64) *int64 { - return &v -} - -// Float32 is a helper routine that allocates a new float32 value -// to store v and returns a pointer to it. -func Float32(v float32) *float32 { - return &v -} - -// Float64 is a helper routine that allocates a new float64 value -// to store v and returns a pointer to it. -func Float64(v float64) *float64 { - return &v -} - -// Uint32 is a helper routine that allocates a new uint32 value -// to store v and returns a pointer to it. -func Uint32(v uint32) *uint32 { - return &v -} - -// Uint64 is a helper routine that allocates a new uint64 value -// to store v and returns a pointer to it. -func Uint64(v uint64) *uint64 { - return &v -} - -// String is a helper routine that allocates a new string value -// to store v and returns a pointer to it. -func String(v string) *string { - return &v -} - -// EnumName is a helper function to simplify printing protocol buffer enums -// by name. Given an enum map and a value, it returns a useful string. -func EnumName(m map[int32]string, v int32) string { - s, ok := m[v] - if ok { - return s - } - return strconv.Itoa(int(v)) -} - -// UnmarshalJSONEnum is a helper function to simplify recovering enum int values -// from their JSON-encoded representation. Given a map from the enum's symbolic -// names to its int values, and a byte buffer containing the JSON-encoded -// value, it returns an int32 that can be cast to the enum type by the caller. -// -// The function can deal with both JSON representations, numeric and symbolic. -func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { - if data[0] == '"' { - // New style: enums are strings. - var repr string - if err := json.Unmarshal(data, &repr); err != nil { - return -1, err - } - val, ok := m[repr] - if !ok { - return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) - } - return val, nil - } - // Old style: enums are ints. - var val int32 - if err := json.Unmarshal(data, &val); err != nil { - return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) - } - return val, nil -} - -// DebugPrint dumps the encoded data in b in a debugging format with a header -// including the string s. Used in testing but made available for general debugging. 
-func (p *Buffer) DebugPrint(s string, b []byte) { - var u uint64 - - obuf := p.buf - index := p.index - p.buf = b - p.index = 0 - depth := 0 - - fmt.Printf("\n--- %s ---\n", s) - -out: - for { - for i := 0; i < depth; i++ { - fmt.Print(" ") - } - - index := p.index - if index == len(p.buf) { - break - } - - op, err := p.DecodeVarint() - if err != nil { - fmt.Printf("%3d: fetching op err %v\n", index, err) - break out - } - tag := op >> 3 - wire := op & 7 - - switch wire { - default: - fmt.Printf("%3d: t=%3d unknown wire=%d\n", - index, tag, wire) - break out - - case WireBytes: - var r []byte - - r, err = p.DecodeRawBytes(false) - if err != nil { - break out - } - fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r)) - if len(r) <= 6 { - for i := 0; i < len(r); i++ { - fmt.Printf(" %.2x", r[i]) - } - } else { - for i := 0; i < 3; i++ { - fmt.Printf(" %.2x", r[i]) - } - fmt.Printf(" ..") - for i := len(r) - 3; i < len(r); i++ { - fmt.Printf(" %.2x", r[i]) - } - } - fmt.Printf("\n") - - case WireFixed32: - u, err = p.DecodeFixed32() - if err != nil { - fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u) - - case WireFixed64: - u, err = p.DecodeFixed64() - if err != nil { - fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u) - - case WireVarint: - u, err = p.DecodeVarint() - if err != nil { - fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u) - - case WireStartGroup: - fmt.Printf("%3d: t=%3d start\n", index, tag) - depth++ - - case WireEndGroup: - depth-- - fmt.Printf("%3d: t=%3d end\n", index, tag) - } - } - - if depth != 0 { - fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth) - } - fmt.Printf("\n") - - p.buf = obuf - p.index = index -} - -// SetDefaults sets unset protocol buffer fields to their default values. -// It only modifies fields that are both unset and have defined defaults. -// It recursively sets default values in any non-nil sub-messages. -func SetDefaults(pb Message) { - setDefaults(reflect.ValueOf(pb), true, false) -} - -// v is a pointer to a struct. -func setDefaults(v reflect.Value, recur, zeros bool) { - v = v.Elem() - - defaultMu.RLock() - dm, ok := defaults[v.Type()] - defaultMu.RUnlock() - if !ok { - dm = buildDefaultMessage(v.Type()) - defaultMu.Lock() - defaults[v.Type()] = dm - defaultMu.Unlock() - } - - for _, sf := range dm.scalars { - f := v.Field(sf.index) - if !f.IsNil() { - // field already set - continue - } - dv := sf.value - if dv == nil && !zeros { - // no explicit default, and don't want to set zeros - continue - } - fptr := f.Addr().Interface() // **T - // TODO: Consider batching the allocations we do here. 
- switch sf.kind { - case reflect.Bool: - b := new(bool) - if dv != nil { - *b = dv.(bool) - } - *(fptr.(**bool)) = b - case reflect.Float32: - f := new(float32) - if dv != nil { - *f = dv.(float32) - } - *(fptr.(**float32)) = f - case reflect.Float64: - f := new(float64) - if dv != nil { - *f = dv.(float64) - } - *(fptr.(**float64)) = f - case reflect.Int32: - // might be an enum - if ft := f.Type(); ft != int32PtrType { - // enum - f.Set(reflect.New(ft.Elem())) - if dv != nil { - f.Elem().SetInt(int64(dv.(int32))) - } - } else { - // int32 field - i := new(int32) - if dv != nil { - *i = dv.(int32) - } - *(fptr.(**int32)) = i - } - case reflect.Int64: - i := new(int64) - if dv != nil { - *i = dv.(int64) - } - *(fptr.(**int64)) = i - case reflect.String: - s := new(string) - if dv != nil { - *s = dv.(string) - } - *(fptr.(**string)) = s - case reflect.Uint8: - // exceptional case: []byte - var b []byte - if dv != nil { - db := dv.([]byte) - b = make([]byte, len(db)) - copy(b, db) - } else { - b = []byte{} - } - *(fptr.(*[]byte)) = b - case reflect.Uint32: - u := new(uint32) - if dv != nil { - *u = dv.(uint32) - } - *(fptr.(**uint32)) = u - case reflect.Uint64: - u := new(uint64) - if dv != nil { - *u = dv.(uint64) - } - *(fptr.(**uint64)) = u - default: - log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind) - } - } - - for _, ni := range dm.nested { - f := v.Field(ni) - // f is *T or []*T or map[T]*T - switch f.Kind() { - case reflect.Ptr: - if f.IsNil() { - continue - } - setDefaults(f, recur, zeros) - - case reflect.Slice: - for i := 0; i < f.Len(); i++ { - e := f.Index(i) - if e.IsNil() { - continue - } - setDefaults(e, recur, zeros) - } - - case reflect.Map: - for _, k := range f.MapKeys() { - e := f.MapIndex(k) - if e.IsNil() { - continue - } - setDefaults(e, recur, zeros) - } - } - } -} - -var ( - // defaults maps a protocol buffer struct type to a slice of the fields, - // with its scalar fields set to their proto-declared non-zero default values. - defaultMu sync.RWMutex - defaults = make(map[reflect.Type]defaultMessage) - - int32PtrType = reflect.TypeOf((*int32)(nil)) -) - -// defaultMessage represents information about the default values of a message. -type defaultMessage struct { - scalars []scalarField - nested []int // struct field index of nested messages -} - -type scalarField struct { - index int // struct field index - kind reflect.Kind // element type (the T in *T or []T) - value interface{} // the proto-declared default value, or nil -} - -// t is a struct type. -func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { - sprop := GetProperties(t) - for _, prop := range sprop.Prop { - fi, ok := sprop.decoderTags.get(prop.Tag) - if !ok { - // XXX_unrecognized - continue - } - ft := t.Field(fi).Type - - sf, nested, err := fieldDefault(ft, prop) - switch { - case err != nil: - log.Print(err) - case nested: - dm.nested = append(dm.nested, fi) - case sf != nil: - sf.index = fi - dm.scalars = append(dm.scalars, *sf) - } - } - - return dm -} - -// fieldDefault returns the scalarField for field type ft. -// sf will be nil if the field can not have a default. -// nestedMessage will be true if this is a nested message. -// Note that sf.index is not set on return. 
-func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) { - var canHaveDefault bool - switch ft.Kind() { - case reflect.Ptr: - if ft.Elem().Kind() == reflect.Struct { - nestedMessage = true - } else { - canHaveDefault = true // proto2 scalar field - } - - case reflect.Slice: - switch ft.Elem().Kind() { - case reflect.Ptr: - nestedMessage = true // repeated message - case reflect.Uint8: - canHaveDefault = true // bytes field - } - - case reflect.Map: - if ft.Elem().Kind() == reflect.Ptr { - nestedMessage = true // map with message values - } - } - - if !canHaveDefault { - if nestedMessage { - return nil, true, nil - } - return nil, false, nil - } - - // We now know that ft is a pointer or slice. - sf = &scalarField{kind: ft.Elem().Kind()} - - // scalar fields without defaults - if !prop.HasDefault { - return sf, false, nil - } - - // a scalar field: either *T or []byte - switch ft.Elem().Kind() { - case reflect.Bool: - x, err := strconv.ParseBool(prop.Default) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err) - } - sf.value = x - case reflect.Float32: - x, err := strconv.ParseFloat(prop.Default, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err) - } - sf.value = float32(x) - case reflect.Float64: - x, err := strconv.ParseFloat(prop.Default, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err) - } - sf.value = x - case reflect.Int32: - x, err := strconv.ParseInt(prop.Default, 10, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err) - } - sf.value = int32(x) - case reflect.Int64: - x, err := strconv.ParseInt(prop.Default, 10, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err) - } - sf.value = x - case reflect.String: - sf.value = prop.Default - case reflect.Uint8: - // []byte (not *uint8) - sf.value = []byte(prop.Default) - case reflect.Uint32: - x, err := strconv.ParseUint(prop.Default, 10, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err) - } - sf.value = uint32(x) - case reflect.Uint64: - x, err := strconv.ParseUint(prop.Default, 10, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err) - } - sf.value = x - default: - return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind()) - } - - return sf, false, nil -} - -// Map fields may have key types of non-float scalars, strings and enums. -// The easiest way to sort them in some deterministic order is to use fmt. -// If this turns out to be inefficient we can always consider other options, -// such as doing a Schwartzian transform. - -func mapKeys(vs []reflect.Value) sort.Interface { - s := mapKeySorter{ - vs: vs, - // default Less function: textual comparison - less: func(a, b reflect.Value) bool { - return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface()) - }, - } - - // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps; - // numeric keys are sorted numerically. 
- if len(vs) == 0 { - return s - } - switch vs[0].Kind() { - case reflect.Int32, reflect.Int64: - s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } - case reflect.Uint32, reflect.Uint64: - s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } - } - - return s -} - -type mapKeySorter struct { - vs []reflect.Value - less func(a, b reflect.Value) bool -} - -func (s mapKeySorter) Len() int { return len(s.vs) } -func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] } -func (s mapKeySorter) Less(i, j int) bool { - return s.less(s.vs[i], s.vs[j]) -} - -// isProto3Zero reports whether v is a zero proto3 value. -func isProto3Zero(v reflect.Value) bool { - switch v.Kind() { - case reflect.Bool: - return !v.Bool() - case reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint32, reflect.Uint64: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.String: - return v.String() == "" - } - return false -} - -// ProtoPackageIsVersion2 is referenced from generated protocol buffer files -// to assert that that code is compatible with this version of the proto package. -const ProtoPackageIsVersion2 = true - -// ProtoPackageIsVersion1 is referenced from generated protocol buffer files -// to assert that that code is compatible with this version of the proto package. -const ProtoPackageIsVersion1 = true diff --git a/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/golang/protobuf/proto/message_set.go deleted file mode 100644 index fd982de..0000000 --- a/vendor/github.com/golang/protobuf/proto/message_set.go +++ /dev/null @@ -1,311 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Support for message sets. 
- */ - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "reflect" - "sort" -) - -// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. -// A message type ID is required for storing a protocol buffer in a message set. -var errNoMessageTypeID = errors.New("proto does not have a message type ID") - -// The first two types (_MessageSet_Item and messageSet) -// model what the protocol compiler produces for the following protocol message: -// message MessageSet { -// repeated group Item = 1 { -// required int32 type_id = 2; -// required string message = 3; -// }; -// } -// That is the MessageSet wire format. We can't use a proto to generate these -// because that would introduce a circular dependency between it and this package. - -type _MessageSet_Item struct { - TypeId *int32 `protobuf:"varint,2,req,name=type_id"` - Message []byte `protobuf:"bytes,3,req,name=message"` -} - -type messageSet struct { - Item []*_MessageSet_Item `protobuf:"group,1,rep"` - XXX_unrecognized []byte - // TODO: caching? -} - -// Make sure messageSet is a Message. -var _ Message = (*messageSet)(nil) - -// messageTypeIder is an interface satisfied by a protocol buffer type -// that may be stored in a MessageSet. -type messageTypeIder interface { - MessageTypeId() int32 -} - -func (ms *messageSet) find(pb Message) *_MessageSet_Item { - mti, ok := pb.(messageTypeIder) - if !ok { - return nil - } - id := mti.MessageTypeId() - for _, item := range ms.Item { - if *item.TypeId == id { - return item - } - } - return nil -} - -func (ms *messageSet) Has(pb Message) bool { - if ms.find(pb) != nil { - return true - } - return false -} - -func (ms *messageSet) Unmarshal(pb Message) error { - if item := ms.find(pb); item != nil { - return Unmarshal(item.Message, pb) - } - if _, ok := pb.(messageTypeIder); !ok { - return errNoMessageTypeID - } - return nil // TODO: return error instead? -} - -func (ms *messageSet) Marshal(pb Message) error { - msg, err := Marshal(pb) - if err != nil { - return err - } - if item := ms.find(pb); item != nil { - // reuse existing item - item.Message = msg - return nil - } - - mti, ok := pb.(messageTypeIder) - if !ok { - return errNoMessageTypeID - } - - mtid := mti.MessageTypeId() - ms.Item = append(ms.Item, &_MessageSet_Item{ - TypeId: &mtid, - Message: msg, - }) - return nil -} - -func (ms *messageSet) Reset() { *ms = messageSet{} } -func (ms *messageSet) String() string { return CompactTextString(ms) } -func (*messageSet) ProtoMessage() {} - -// Support for the message_set_wire_format message option. - -func skipVarint(buf []byte) []byte { - i := 0 - for ; buf[i]&0x80 != 0; i++ { - } - return buf[i+1:] -} - -// MarshalMessageSet encodes the extension map represented by m in the message set wire format. -// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option. -func MarshalMessageSet(exts interface{}) ([]byte, error) { - var m map[int32]Extension - switch exts := exts.(type) { - case *XXX_InternalExtensions: - if err := encodeExtensions(exts); err != nil { - return nil, err - } - m, _ = exts.extensionsRead() - case map[int32]Extension: - if err := encodeExtensionsMap(exts); err != nil { - return nil, err - } - m = exts - default: - return nil, errors.New("proto: not an extension map") - } - - // Sort extension IDs to provide a deterministic encoding. - // See also enc_map in encode.go. 
- ids := make([]int, 0, len(m)) - for id := range m { - ids = append(ids, int(id)) - } - sort.Ints(ids) - - ms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))} - for _, id := range ids { - e := m[int32(id)] - // Remove the wire type and field number varint, as well as the length varint. - msg := skipVarint(skipVarint(e.enc)) - - ms.Item = append(ms.Item, &_MessageSet_Item{ - TypeId: Int32(int32(id)), - Message: msg, - }) - } - return Marshal(ms) -} - -// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. -// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option. -func UnmarshalMessageSet(buf []byte, exts interface{}) error { - var m map[int32]Extension - switch exts := exts.(type) { - case *XXX_InternalExtensions: - m = exts.extensionsWrite() - case map[int32]Extension: - m = exts - default: - return errors.New("proto: not an extension map") - } - - ms := new(messageSet) - if err := Unmarshal(buf, ms); err != nil { - return err - } - for _, item := range ms.Item { - id := *item.TypeId - msg := item.Message - - // Restore wire type and field number varint, plus length varint. - // Be careful to preserve duplicate items. - b := EncodeVarint(uint64(id)<<3 | WireBytes) - if ext, ok := m[id]; ok { - // Existing data; rip off the tag and length varint - // so we join the new data correctly. - // We can assume that ext.enc is set because we are unmarshaling. - o := ext.enc[len(b):] // skip wire type and field number - _, n := DecodeVarint(o) // calculate length of length varint - o = o[n:] // skip length varint - msg = append(o, msg...) // join old data and new data - } - b = append(b, EncodeVarint(uint64(len(msg)))...) - b = append(b, msg...) - - m[id] = Extension{enc: b} - } - return nil -} - -// MarshalMessageSetJSON encodes the extension map represented by m in JSON format. -// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option. -func MarshalMessageSetJSON(exts interface{}) ([]byte, error) { - var m map[int32]Extension - switch exts := exts.(type) { - case *XXX_InternalExtensions: - m, _ = exts.extensionsRead() - case map[int32]Extension: - m = exts - default: - return nil, errors.New("proto: not an extension map") - } - var b bytes.Buffer - b.WriteByte('{') - - // Process the map in key order for deterministic output. - ids := make([]int32, 0, len(m)) - for id := range m { - ids = append(ids, id) - } - sort.Sort(int32Slice(ids)) // int32Slice defined in text.go - - for i, id := range ids { - ext := m[id] - if i > 0 { - b.WriteByte(',') - } - - msd, ok := messageSetMap[id] - if !ok { - // Unknown type; we can't render it, so skip it. - continue - } - fmt.Fprintf(&b, `"[%s]":`, msd.name) - - x := ext.value - if x == nil { - x = reflect.New(msd.t.Elem()).Interface() - if err := Unmarshal(ext.enc, x.(Message)); err != nil { - return nil, err - } - } - d, err := json.Marshal(x) - if err != nil { - return nil, err - } - b.Write(d) - } - b.WriteByte('}') - return b.Bytes(), nil -} - -// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format. -// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option. -func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error { - // Common-case fast path. - if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) { - return nil - } - - // This is fairly tricky, and it's not clear that it is needed. 
- return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented") -} - -// A global registry of types that can be used in a MessageSet. - -var messageSetMap = make(map[int32]messageSetDesc) - -type messageSetDesc struct { - t reflect.Type // pointer to struct - name string -} - -// RegisterMessageSetType is called from the generated code. -func RegisterMessageSetType(m Message, fieldNum int32, name string) { - messageSetMap[fieldNum] = messageSetDesc{ - t: reflect.TypeOf(m), - name: name, - } -} diff --git a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go deleted file mode 100644 index fb512e2..0000000 --- a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go +++ /dev/null @@ -1,484 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build appengine js - -// This file contains an implementation of proto field accesses using package reflect. -// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can -// be used on App Engine. - -package proto - -import ( - "math" - "reflect" -) - -// A structPointer is a pointer to a struct. -type structPointer struct { - v reflect.Value -} - -// toStructPointer returns a structPointer equivalent to the given reflect value. -// The reflect value must itself be a pointer to a struct. -func toStructPointer(v reflect.Value) structPointer { - return structPointer{v} -} - -// IsNil reports whether p is nil. -func structPointer_IsNil(p structPointer) bool { - return p.v.IsNil() -} - -// Interface returns the struct pointer as an interface value. -func structPointer_Interface(p structPointer, _ reflect.Type) interface{} { - return p.v.Interface() -} - -// A field identifies a field in a struct, accessible from a structPointer. 
-// In this implementation, a field is identified by the sequence of field indices -// passed to reflect's FieldByIndex. -type field []int - -// toField returns a field equivalent to the given reflect field. -func toField(f *reflect.StructField) field { - return f.Index -} - -// invalidField is an invalid field identifier. -var invalidField = field(nil) - -// IsValid reports whether the field identifier is valid. -func (f field) IsValid() bool { return f != nil } - -// field returns the given field in the struct as a reflect value. -func structPointer_field(p structPointer, f field) reflect.Value { - // Special case: an extension map entry with a value of type T - // passes a *T to the struct-handling code with a zero field, - // expecting that it will be treated as equivalent to *struct{ X T }, - // which has the same memory layout. We have to handle that case - // specially, because reflect will panic if we call FieldByIndex on a - // non-struct. - if f == nil { - return p.v.Elem() - } - - return p.v.Elem().FieldByIndex(f) -} - -// ifield returns the given field in the struct as an interface value. -func structPointer_ifield(p structPointer, f field) interface{} { - return structPointer_field(p, f).Addr().Interface() -} - -// Bytes returns the address of a []byte field in the struct. -func structPointer_Bytes(p structPointer, f field) *[]byte { - return structPointer_ifield(p, f).(*[]byte) -} - -// BytesSlice returns the address of a [][]byte field in the struct. -func structPointer_BytesSlice(p structPointer, f field) *[][]byte { - return structPointer_ifield(p, f).(*[][]byte) -} - -// Bool returns the address of a *bool field in the struct. -func structPointer_Bool(p structPointer, f field) **bool { - return structPointer_ifield(p, f).(**bool) -} - -// BoolVal returns the address of a bool field in the struct. -func structPointer_BoolVal(p structPointer, f field) *bool { - return structPointer_ifield(p, f).(*bool) -} - -// BoolSlice returns the address of a []bool field in the struct. -func structPointer_BoolSlice(p structPointer, f field) *[]bool { - return structPointer_ifield(p, f).(*[]bool) -} - -// String returns the address of a *string field in the struct. -func structPointer_String(p structPointer, f field) **string { - return structPointer_ifield(p, f).(**string) -} - -// StringVal returns the address of a string field in the struct. -func structPointer_StringVal(p structPointer, f field) *string { - return structPointer_ifield(p, f).(*string) -} - -// StringSlice returns the address of a []string field in the struct. -func structPointer_StringSlice(p structPointer, f field) *[]string { - return structPointer_ifield(p, f).(*[]string) -} - -// Extensions returns the address of an extension map field in the struct. -func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions { - return structPointer_ifield(p, f).(*XXX_InternalExtensions) -} - -// ExtMap returns the address of an extension map field in the struct. -func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { - return structPointer_ifield(p, f).(*map[int32]Extension) -} - -// NewAt returns the reflect.Value for a pointer to a field in the struct. -func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { - return structPointer_field(p, f).Addr() -} - -// SetStructPointer writes a *struct field in the struct. 
-func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { - structPointer_field(p, f).Set(q.v) -} - -// GetStructPointer reads a *struct field in the struct. -func structPointer_GetStructPointer(p structPointer, f field) structPointer { - return structPointer{structPointer_field(p, f)} -} - -// StructPointerSlice the address of a []*struct field in the struct. -func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice { - return structPointerSlice{structPointer_field(p, f)} -} - -// A structPointerSlice represents the address of a slice of pointers to structs -// (themselves messages or groups). That is, v.Type() is *[]*struct{...}. -type structPointerSlice struct { - v reflect.Value -} - -func (p structPointerSlice) Len() int { return p.v.Len() } -func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} } -func (p structPointerSlice) Append(q structPointer) { - p.v.Set(reflect.Append(p.v, q.v)) -} - -var ( - int32Type = reflect.TypeOf(int32(0)) - uint32Type = reflect.TypeOf(uint32(0)) - float32Type = reflect.TypeOf(float32(0)) - int64Type = reflect.TypeOf(int64(0)) - uint64Type = reflect.TypeOf(uint64(0)) - float64Type = reflect.TypeOf(float64(0)) -) - -// A word32 represents a field of type *int32, *uint32, *float32, or *enum. -// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable. -type word32 struct { - v reflect.Value -} - -// IsNil reports whether p is nil. -func word32_IsNil(p word32) bool { - return p.v.IsNil() -} - -// Set sets p to point at a newly allocated word with bits set to x. -func word32_Set(p word32, o *Buffer, x uint32) { - t := p.v.Type().Elem() - switch t { - case int32Type: - if len(o.int32s) == 0 { - o.int32s = make([]int32, uint32PoolSize) - } - o.int32s[0] = int32(x) - p.v.Set(reflect.ValueOf(&o.int32s[0])) - o.int32s = o.int32s[1:] - return - case uint32Type: - if len(o.uint32s) == 0 { - o.uint32s = make([]uint32, uint32PoolSize) - } - o.uint32s[0] = x - p.v.Set(reflect.ValueOf(&o.uint32s[0])) - o.uint32s = o.uint32s[1:] - return - case float32Type: - if len(o.float32s) == 0 { - o.float32s = make([]float32, uint32PoolSize) - } - o.float32s[0] = math.Float32frombits(x) - p.v.Set(reflect.ValueOf(&o.float32s[0])) - o.float32s = o.float32s[1:] - return - } - - // must be enum - p.v.Set(reflect.New(t)) - p.v.Elem().SetInt(int64(int32(x))) -} - -// Get gets the bits pointed at by p, as a uint32. -func word32_Get(p word32) uint32 { - elem := p.v.Elem() - switch elem.Kind() { - case reflect.Int32: - return uint32(elem.Int()) - case reflect.Uint32: - return uint32(elem.Uint()) - case reflect.Float32: - return math.Float32bits(float32(elem.Float())) - } - panic("unreachable") -} - -// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct. -func structPointer_Word32(p structPointer, f field) word32 { - return word32{structPointer_field(p, f)} -} - -// A word32Val represents a field of type int32, uint32, float32, or enum. -// That is, v.Type() is int32, uint32, float32, or enum and v is assignable. -type word32Val struct { - v reflect.Value -} - -// Set sets *p to x. -func word32Val_Set(p word32Val, x uint32) { - switch p.v.Type() { - case int32Type: - p.v.SetInt(int64(x)) - return - case uint32Type: - p.v.SetUint(uint64(x)) - return - case float32Type: - p.v.SetFloat(float64(math.Float32frombits(x))) - return - } - - // must be enum - p.v.SetInt(int64(int32(x))) -} - -// Get gets the bits pointed at by p, as a uint32. 
-func word32Val_Get(p word32Val) uint32 { - elem := p.v - switch elem.Kind() { - case reflect.Int32: - return uint32(elem.Int()) - case reflect.Uint32: - return uint32(elem.Uint()) - case reflect.Float32: - return math.Float32bits(float32(elem.Float())) - } - panic("unreachable") -} - -// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct. -func structPointer_Word32Val(p structPointer, f field) word32Val { - return word32Val{structPointer_field(p, f)} -} - -// A word32Slice is a slice of 32-bit values. -// That is, v.Type() is []int32, []uint32, []float32, or []enum. -type word32Slice struct { - v reflect.Value -} - -func (p word32Slice) Append(x uint32) { - n, m := p.v.Len(), p.v.Cap() - if n < m { - p.v.SetLen(n + 1) - } else { - t := p.v.Type().Elem() - p.v.Set(reflect.Append(p.v, reflect.Zero(t))) - } - elem := p.v.Index(n) - switch elem.Kind() { - case reflect.Int32: - elem.SetInt(int64(int32(x))) - case reflect.Uint32: - elem.SetUint(uint64(x)) - case reflect.Float32: - elem.SetFloat(float64(math.Float32frombits(x))) - } -} - -func (p word32Slice) Len() int { - return p.v.Len() -} - -func (p word32Slice) Index(i int) uint32 { - elem := p.v.Index(i) - switch elem.Kind() { - case reflect.Int32: - return uint32(elem.Int()) - case reflect.Uint32: - return uint32(elem.Uint()) - case reflect.Float32: - return math.Float32bits(float32(elem.Float())) - } - panic("unreachable") -} - -// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct. -func structPointer_Word32Slice(p structPointer, f field) word32Slice { - return word32Slice{structPointer_field(p, f)} -} - -// word64 is like word32 but for 64-bit values. -type word64 struct { - v reflect.Value -} - -func word64_Set(p word64, o *Buffer, x uint64) { - t := p.v.Type().Elem() - switch t { - case int64Type: - if len(o.int64s) == 0 { - o.int64s = make([]int64, uint64PoolSize) - } - o.int64s[0] = int64(x) - p.v.Set(reflect.ValueOf(&o.int64s[0])) - o.int64s = o.int64s[1:] - return - case uint64Type: - if len(o.uint64s) == 0 { - o.uint64s = make([]uint64, uint64PoolSize) - } - o.uint64s[0] = x - p.v.Set(reflect.ValueOf(&o.uint64s[0])) - o.uint64s = o.uint64s[1:] - return - case float64Type: - if len(o.float64s) == 0 { - o.float64s = make([]float64, uint64PoolSize) - } - o.float64s[0] = math.Float64frombits(x) - p.v.Set(reflect.ValueOf(&o.float64s[0])) - o.float64s = o.float64s[1:] - return - } - panic("unreachable") -} - -func word64_IsNil(p word64) bool { - return p.v.IsNil() -} - -func word64_Get(p word64) uint64 { - elem := p.v.Elem() - switch elem.Kind() { - case reflect.Int64: - return uint64(elem.Int()) - case reflect.Uint64: - return elem.Uint() - case reflect.Float64: - return math.Float64bits(elem.Float()) - } - panic("unreachable") -} - -func structPointer_Word64(p structPointer, f field) word64 { - return word64{structPointer_field(p, f)} -} - -// word64Val is like word32Val but for 64-bit values. 
-type word64Val struct { - v reflect.Value -} - -func word64Val_Set(p word64Val, o *Buffer, x uint64) { - switch p.v.Type() { - case int64Type: - p.v.SetInt(int64(x)) - return - case uint64Type: - p.v.SetUint(x) - return - case float64Type: - p.v.SetFloat(math.Float64frombits(x)) - return - } - panic("unreachable") -} - -func word64Val_Get(p word64Val) uint64 { - elem := p.v - switch elem.Kind() { - case reflect.Int64: - return uint64(elem.Int()) - case reflect.Uint64: - return elem.Uint() - case reflect.Float64: - return math.Float64bits(elem.Float()) - } - panic("unreachable") -} - -func structPointer_Word64Val(p structPointer, f field) word64Val { - return word64Val{structPointer_field(p, f)} -} - -type word64Slice struct { - v reflect.Value -} - -func (p word64Slice) Append(x uint64) { - n, m := p.v.Len(), p.v.Cap() - if n < m { - p.v.SetLen(n + 1) - } else { - t := p.v.Type().Elem() - p.v.Set(reflect.Append(p.v, reflect.Zero(t))) - } - elem := p.v.Index(n) - switch elem.Kind() { - case reflect.Int64: - elem.SetInt(int64(int64(x))) - case reflect.Uint64: - elem.SetUint(uint64(x)) - case reflect.Float64: - elem.SetFloat(float64(math.Float64frombits(x))) - } -} - -func (p word64Slice) Len() int { - return p.v.Len() -} - -func (p word64Slice) Index(i int) uint64 { - elem := p.v.Index(i) - switch elem.Kind() { - case reflect.Int64: - return uint64(elem.Int()) - case reflect.Uint64: - return uint64(elem.Uint()) - case reflect.Float64: - return math.Float64bits(float64(elem.Float())) - } - panic("unreachable") -} - -func structPointer_Word64Slice(p structPointer, f field) word64Slice { - return word64Slice{structPointer_field(p, f)} -} diff --git a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go deleted file mode 100644 index 6b5567d..0000000 --- a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go +++ /dev/null @@ -1,270 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build !appengine,!js - -// This file contains the implementation of the proto field accesses using package unsafe. - -package proto - -import ( - "reflect" - "unsafe" -) - -// NOTE: These type_Foo functions would more idiomatically be methods, -// but Go does not allow methods on pointer types, and we must preserve -// some pointer type for the garbage collector. We use these -// funcs with clunky names as our poor approximation to methods. -// -// An alternative would be -// type structPointer struct { p unsafe.Pointer } -// but that does not registerize as well. - -// A structPointer is a pointer to a struct. -type structPointer unsafe.Pointer - -// toStructPointer returns a structPointer equivalent to the given reflect value. -func toStructPointer(v reflect.Value) structPointer { - return structPointer(unsafe.Pointer(v.Pointer())) -} - -// IsNil reports whether p is nil. -func structPointer_IsNil(p structPointer) bool { - return p == nil -} - -// Interface returns the struct pointer, assumed to have element type t, -// as an interface value. -func structPointer_Interface(p structPointer, t reflect.Type) interface{} { - return reflect.NewAt(t, unsafe.Pointer(p)).Interface() -} - -// A field identifies a field in a struct, accessible from a structPointer. -// In this implementation, a field is identified by its byte offset from the start of the struct. -type field uintptr - -// toField returns a field equivalent to the given reflect field. -func toField(f *reflect.StructField) field { - return field(f.Offset) -} - -// invalidField is an invalid field identifier. -const invalidField = ^field(0) - -// IsValid reports whether the field identifier is valid. -func (f field) IsValid() bool { - return f != ^field(0) -} - -// Bytes returns the address of a []byte field in the struct. -func structPointer_Bytes(p structPointer, f field) *[]byte { - return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// BytesSlice returns the address of a [][]byte field in the struct. -func structPointer_BytesSlice(p structPointer, f field) *[][]byte { - return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// Bool returns the address of a *bool field in the struct. -func structPointer_Bool(p structPointer, f field) **bool { - return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// BoolVal returns the address of a bool field in the struct. -func structPointer_BoolVal(p structPointer, f field) *bool { - return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// BoolSlice returns the address of a []bool field in the struct. -func structPointer_BoolSlice(p structPointer, f field) *[]bool { - return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// String returns the address of a *string field in the struct. -func structPointer_String(p structPointer, f field) **string { - return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// StringVal returns the address of a string field in the struct. 
-func structPointer_StringVal(p structPointer, f field) *string { - return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// StringSlice returns the address of a []string field in the struct. -func structPointer_StringSlice(p structPointer, f field) *[]string { - return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// ExtMap returns the address of an extension map field in the struct. -func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions { - return (*XXX_InternalExtensions)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { - return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// NewAt returns the reflect.Value for a pointer to a field in the struct. -func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { - return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f))) -} - -// SetStructPointer writes a *struct field in the struct. -func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { - *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q -} - -// GetStructPointer reads a *struct field in the struct. -func structPointer_GetStructPointer(p structPointer, f field) structPointer { - return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// StructPointerSlice the address of a []*struct field in the struct. -func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice { - return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups). -type structPointerSlice []structPointer - -func (v *structPointerSlice) Len() int { return len(*v) } -func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] } -func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) } - -// A word32 is the address of a "pointer to 32-bit value" field. -type word32 **uint32 - -// IsNil reports whether *v is nil. -func word32_IsNil(p word32) bool { - return *p == nil -} - -// Set sets *v to point at a newly allocated word set to x. -func word32_Set(p word32, o *Buffer, x uint32) { - if len(o.uint32s) == 0 { - o.uint32s = make([]uint32, uint32PoolSize) - } - o.uint32s[0] = x - *p = &o.uint32s[0] - o.uint32s = o.uint32s[1:] -} - -// Get gets the value pointed at by *v. -func word32_Get(p word32) uint32 { - return **p -} - -// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct. -func structPointer_Word32(p structPointer, f field) word32 { - return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) -} - -// A word32Val is the address of a 32-bit value field. -type word32Val *uint32 - -// Set sets *p to x. -func word32Val_Set(p word32Val, x uint32) { - *p = x -} - -// Get gets the value pointed at by p. -func word32Val_Get(p word32Val) uint32 { - return *p -} - -// Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct. -func structPointer_Word32Val(p structPointer, f field) word32Val { - return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) -} - -// A word32Slice is a slice of 32-bit values. 
-type word32Slice []uint32 - -func (v *word32Slice) Append(x uint32) { *v = append(*v, x) } -func (v *word32Slice) Len() int { return len(*v) } -func (v *word32Slice) Index(i int) uint32 { return (*v)[i] } - -// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct. -func structPointer_Word32Slice(p structPointer, f field) *word32Slice { - return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// word64 is like word32 but for 64-bit values. -type word64 **uint64 - -func word64_Set(p word64, o *Buffer, x uint64) { - if len(o.uint64s) == 0 { - o.uint64s = make([]uint64, uint64PoolSize) - } - o.uint64s[0] = x - *p = &o.uint64s[0] - o.uint64s = o.uint64s[1:] -} - -func word64_IsNil(p word64) bool { - return *p == nil -} - -func word64_Get(p word64) uint64 { - return **p -} - -func structPointer_Word64(p structPointer, f field) word64 { - return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) -} - -// word64Val is like word32Val but for 64-bit values. -type word64Val *uint64 - -func word64Val_Set(p word64Val, o *Buffer, x uint64) { - *p = x -} - -func word64Val_Get(p word64Val) uint64 { - return *p -} - -func structPointer_Word64Val(p structPointer, f field) word64Val { - return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) -} - -// word64Slice is like word32Slice but for 64-bit values. -type word64Slice []uint64 - -func (v *word64Slice) Append(x uint64) { *v = append(*v, x) } -func (v *word64Slice) Len() int { return len(*v) } -func (v *word64Slice) Index(i int) uint64 { return (*v)[i] } - -func structPointer_Word64Slice(p structPointer, f field) *word64Slice { - return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go deleted file mode 100644 index ec2289c..0000000 --- a/vendor/github.com/golang/protobuf/proto/properties.go +++ /dev/null @@ -1,872 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for encoding data into the wire format for protocol buffers. - */ - -import ( - "fmt" - "log" - "os" - "reflect" - "sort" - "strconv" - "strings" - "sync" -) - -const debug bool = false - -// Constants that identify the encoding of a value on the wire. -const ( - WireVarint = 0 - WireFixed64 = 1 - WireBytes = 2 - WireStartGroup = 3 - WireEndGroup = 4 - WireFixed32 = 5 -) - -const startSize = 10 // initial slice/string sizes - -// Encoders are defined in encode.go -// An encoder outputs the full representation of a field, including its -// tag and encoder type. -type encoder func(p *Buffer, prop *Properties, base structPointer) error - -// A valueEncoder encodes a single integer in a particular encoding. -type valueEncoder func(o *Buffer, x uint64) error - -// Sizers are defined in encode.go -// A sizer returns the encoded size of a field, including its tag and encoder -// type. -type sizer func(prop *Properties, base structPointer) int - -// A valueSizer returns the encoded size of a single integer in a particular -// encoding. -type valueSizer func(x uint64) int - -// Decoders are defined in decode.go -// A decoder creates a value from its wire representation. -// Unrecognized subelements are saved in unrec. -type decoder func(p *Buffer, prop *Properties, base structPointer) error - -// A valueDecoder decodes a single integer in a particular encoding. -type valueDecoder func(o *Buffer) (x uint64, err error) - -// A oneofMarshaler does the marshaling for all oneof fields in a message. -type oneofMarshaler func(Message, *Buffer) error - -// A oneofUnmarshaler does the unmarshaling for a oneof field in a message. -type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error) - -// A oneofSizer does the sizing for all oneof fields in a message. -type oneofSizer func(Message) int - -// tagMap is an optimization over map[int]int for typical protocol buffer -// use-cases. Encoded protocol buffers are often in tag order with small tag -// numbers. -type tagMap struct { - fastTags []int - slowTags map[int]int -} - -// tagMapFastLimit is the upper bound on the tag number that will be stored in -// the tagMap slice rather than its map. -const tagMapFastLimit = 1024 - -func (p *tagMap) get(t int) (int, bool) { - if t > 0 && t < tagMapFastLimit { - if t >= len(p.fastTags) { - return 0, false - } - fi := p.fastTags[t] - return fi, fi >= 0 - } - fi, ok := p.slowTags[t] - return fi, ok -} - -func (p *tagMap) put(t int, fi int) { - if t > 0 && t < tagMapFastLimit { - for len(p.fastTags) < t+1 { - p.fastTags = append(p.fastTags, -1) - } - p.fastTags[t] = fi - return - } - if p.slowTags == nil { - p.slowTags = make(map[int]int) - } - p.slowTags[t] = fi -} - -// StructProperties represents properties for all the fields of a struct. -// decoderTags and decoderOrigNames should only be used by the decoder. 
-type StructProperties struct { - Prop []*Properties // properties for each field - reqCount int // required count - decoderTags tagMap // map from proto tag to struct field number - decoderOrigNames map[string]int // map from original name to struct field number - order []int // list of struct field numbers in tag order - unrecField field // field id of the XXX_unrecognized []byte field - extendable bool // is this an extendable proto - - oneofMarshaler oneofMarshaler - oneofUnmarshaler oneofUnmarshaler - oneofSizer oneofSizer - stype reflect.Type - - // OneofTypes contains information about the oneof fields in this message. - // It is keyed by the original name of a field. - OneofTypes map[string]*OneofProperties -} - -// OneofProperties represents information about a specific field in a oneof. -type OneofProperties struct { - Type reflect.Type // pointer to generated struct type for this oneof field - Field int // struct field number of the containing oneof in the message - Prop *Properties -} - -// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec. -// See encode.go, (*Buffer).enc_struct. - -func (sp *StructProperties) Len() int { return len(sp.order) } -func (sp *StructProperties) Less(i, j int) bool { - return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag -} -func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] } - -// Properties represents the protocol-specific behavior of a single struct field. -type Properties struct { - Name string // name of the field, for error messages - OrigName string // original name before protocol compiler (always set) - JSONName string // name to use for JSON; determined by protoc - Wire string - WireType int - Tag int - Required bool - Optional bool - Repeated bool - Packed bool // relevant for repeated primitives only - Enum string // set for enum types only - proto3 bool // whether this is known to be a proto3 field; set for []byte only - oneof bool // whether this is a oneof field - - Default string // default value - HasDefault bool // whether an explicit default was provided - def_uint64 uint64 - - enc encoder - valEnc valueEncoder // set for bool and numeric types only - field field - tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType) - tagbuf [8]byte - stype reflect.Type // set for struct types only - sprop *StructProperties // set for struct types only - isMarshaler bool - isUnmarshaler bool - - mtype reflect.Type // set for map types only - mkeyprop *Properties // set for map types only - mvalprop *Properties // set for map types only - - size sizer - valSize valueSizer // set for bool and numeric types only - - dec decoder - valDec valueDecoder // set for bool and numeric types only - - // If this is a packable field, this will be the decoder for the packed version of the field. - packedDec decoder -} - -// String formats the properties in the protobuf struct field tag style. 
-func (p *Properties) String() string { - s := p.Wire - s += "," - s += strconv.Itoa(p.Tag) - if p.Required { - s += ",req" - } - if p.Optional { - s += ",opt" - } - if p.Repeated { - s += ",rep" - } - if p.Packed { - s += ",packed" - } - s += ",name=" + p.OrigName - if p.JSONName != p.OrigName { - s += ",json=" + p.JSONName - } - if p.proto3 { - s += ",proto3" - } - if p.oneof { - s += ",oneof" - } - if len(p.Enum) > 0 { - s += ",enum=" + p.Enum - } - if p.HasDefault { - s += ",def=" + p.Default - } - return s -} - -// Parse populates p by parsing a string in the protobuf struct field tag style. -func (p *Properties) Parse(s string) { - // "bytes,49,opt,name=foo,def=hello!" - fields := strings.Split(s, ",") // breaks def=, but handled below. - if len(fields) < 2 { - fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s) - return - } - - p.Wire = fields[0] - switch p.Wire { - case "varint": - p.WireType = WireVarint - p.valEnc = (*Buffer).EncodeVarint - p.valDec = (*Buffer).DecodeVarint - p.valSize = sizeVarint - case "fixed32": - p.WireType = WireFixed32 - p.valEnc = (*Buffer).EncodeFixed32 - p.valDec = (*Buffer).DecodeFixed32 - p.valSize = sizeFixed32 - case "fixed64": - p.WireType = WireFixed64 - p.valEnc = (*Buffer).EncodeFixed64 - p.valDec = (*Buffer).DecodeFixed64 - p.valSize = sizeFixed64 - case "zigzag32": - p.WireType = WireVarint - p.valEnc = (*Buffer).EncodeZigzag32 - p.valDec = (*Buffer).DecodeZigzag32 - p.valSize = sizeZigzag32 - case "zigzag64": - p.WireType = WireVarint - p.valEnc = (*Buffer).EncodeZigzag64 - p.valDec = (*Buffer).DecodeZigzag64 - p.valSize = sizeZigzag64 - case "bytes", "group": - p.WireType = WireBytes - // no numeric converter for non-numeric types - default: - fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s) - return - } - - var err error - p.Tag, err = strconv.Atoi(fields[1]) - if err != nil { - return - } - - for i := 2; i < len(fields); i++ { - f := fields[i] - switch { - case f == "req": - p.Required = true - case f == "opt": - p.Optional = true - case f == "rep": - p.Repeated = true - case f == "packed": - p.Packed = true - case strings.HasPrefix(f, "name="): - p.OrigName = f[5:] - case strings.HasPrefix(f, "json="): - p.JSONName = f[5:] - case strings.HasPrefix(f, "enum="): - p.Enum = f[5:] - case f == "proto3": - p.proto3 = true - case f == "oneof": - p.oneof = true - case strings.HasPrefix(f, "def="): - p.HasDefault = true - p.Default = f[4:] // rest of string - if i+1 < len(fields) { - // Commas aren't escaped, and def is always last. - p.Default += "," + strings.Join(fields[i+1:], ",") - break - } - } - } -} - -func logNoSliceEnc(t1, t2 reflect.Type) { - fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2) -} - -var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() - -// Initialize the fields for encoding and decoding. 
-func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { - p.enc = nil - p.dec = nil - p.size = nil - - switch t1 := typ; t1.Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1) - - // proto3 scalar types - - case reflect.Bool: - p.enc = (*Buffer).enc_proto3_bool - p.dec = (*Buffer).dec_proto3_bool - p.size = size_proto3_bool - case reflect.Int32: - p.enc = (*Buffer).enc_proto3_int32 - p.dec = (*Buffer).dec_proto3_int32 - p.size = size_proto3_int32 - case reflect.Uint32: - p.enc = (*Buffer).enc_proto3_uint32 - p.dec = (*Buffer).dec_proto3_int32 // can reuse - p.size = size_proto3_uint32 - case reflect.Int64, reflect.Uint64: - p.enc = (*Buffer).enc_proto3_int64 - p.dec = (*Buffer).dec_proto3_int64 - p.size = size_proto3_int64 - case reflect.Float32: - p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits - p.dec = (*Buffer).dec_proto3_int32 - p.size = size_proto3_uint32 - case reflect.Float64: - p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits - p.dec = (*Buffer).dec_proto3_int64 - p.size = size_proto3_int64 - case reflect.String: - p.enc = (*Buffer).enc_proto3_string - p.dec = (*Buffer).dec_proto3_string - p.size = size_proto3_string - - case reflect.Ptr: - switch t2 := t1.Elem(); t2.Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2) - break - case reflect.Bool: - p.enc = (*Buffer).enc_bool - p.dec = (*Buffer).dec_bool - p.size = size_bool - case reflect.Int32: - p.enc = (*Buffer).enc_int32 - p.dec = (*Buffer).dec_int32 - p.size = size_int32 - case reflect.Uint32: - p.enc = (*Buffer).enc_uint32 - p.dec = (*Buffer).dec_int32 // can reuse - p.size = size_uint32 - case reflect.Int64, reflect.Uint64: - p.enc = (*Buffer).enc_int64 - p.dec = (*Buffer).dec_int64 - p.size = size_int64 - case reflect.Float32: - p.enc = (*Buffer).enc_uint32 // can just treat them as bits - p.dec = (*Buffer).dec_int32 - p.size = size_uint32 - case reflect.Float64: - p.enc = (*Buffer).enc_int64 // can just treat them as bits - p.dec = (*Buffer).dec_int64 - p.size = size_int64 - case reflect.String: - p.enc = (*Buffer).enc_string - p.dec = (*Buffer).dec_string - p.size = size_string - case reflect.Struct: - p.stype = t1.Elem() - p.isMarshaler = isMarshaler(t1) - p.isUnmarshaler = isUnmarshaler(t1) - if p.Wire == "bytes" { - p.enc = (*Buffer).enc_struct_message - p.dec = (*Buffer).dec_struct_message - p.size = size_struct_message - } else { - p.enc = (*Buffer).enc_struct_group - p.dec = (*Buffer).dec_struct_group - p.size = size_struct_group - } - } - - case reflect.Slice: - switch t2 := t1.Elem(); t2.Kind() { - default: - logNoSliceEnc(t1, t2) - break - case reflect.Bool: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_bool - p.size = size_slice_packed_bool - } else { - p.enc = (*Buffer).enc_slice_bool - p.size = size_slice_bool - } - p.dec = (*Buffer).dec_slice_bool - p.packedDec = (*Buffer).dec_slice_packed_bool - case reflect.Int32: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_int32 - p.size = size_slice_packed_int32 - } else { - p.enc = (*Buffer).enc_slice_int32 - p.size = size_slice_int32 - } - p.dec = (*Buffer).dec_slice_int32 - p.packedDec = (*Buffer).dec_slice_packed_int32 - case reflect.Uint32: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_uint32 - p.size = size_slice_packed_uint32 - } else { - p.enc = (*Buffer).enc_slice_uint32 - p.size = size_slice_uint32 - } - p.dec = (*Buffer).dec_slice_int32 - p.packedDec = (*Buffer).dec_slice_packed_int32 - case 
reflect.Int64, reflect.Uint64: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_int64 - p.size = size_slice_packed_int64 - } else { - p.enc = (*Buffer).enc_slice_int64 - p.size = size_slice_int64 - } - p.dec = (*Buffer).dec_slice_int64 - p.packedDec = (*Buffer).dec_slice_packed_int64 - case reflect.Uint8: - p.dec = (*Buffer).dec_slice_byte - if p.proto3 { - p.enc = (*Buffer).enc_proto3_slice_byte - p.size = size_proto3_slice_byte - } else { - p.enc = (*Buffer).enc_slice_byte - p.size = size_slice_byte - } - case reflect.Float32, reflect.Float64: - switch t2.Bits() { - case 32: - // can just treat them as bits - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_uint32 - p.size = size_slice_packed_uint32 - } else { - p.enc = (*Buffer).enc_slice_uint32 - p.size = size_slice_uint32 - } - p.dec = (*Buffer).dec_slice_int32 - p.packedDec = (*Buffer).dec_slice_packed_int32 - case 64: - // can just treat them as bits - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_int64 - p.size = size_slice_packed_int64 - } else { - p.enc = (*Buffer).enc_slice_int64 - p.size = size_slice_int64 - } - p.dec = (*Buffer).dec_slice_int64 - p.packedDec = (*Buffer).dec_slice_packed_int64 - default: - logNoSliceEnc(t1, t2) - break - } - case reflect.String: - p.enc = (*Buffer).enc_slice_string - p.dec = (*Buffer).dec_slice_string - p.size = size_slice_string - case reflect.Ptr: - switch t3 := t2.Elem(); t3.Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3) - break - case reflect.Struct: - p.stype = t2.Elem() - p.isMarshaler = isMarshaler(t2) - p.isUnmarshaler = isUnmarshaler(t2) - if p.Wire == "bytes" { - p.enc = (*Buffer).enc_slice_struct_message - p.dec = (*Buffer).dec_slice_struct_message - p.size = size_slice_struct_message - } else { - p.enc = (*Buffer).enc_slice_struct_group - p.dec = (*Buffer).dec_slice_struct_group - p.size = size_slice_struct_group - } - } - case reflect.Slice: - switch t2.Elem().Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem()) - break - case reflect.Uint8: - p.enc = (*Buffer).enc_slice_slice_byte - p.dec = (*Buffer).dec_slice_slice_byte - p.size = size_slice_slice_byte - } - } - - case reflect.Map: - p.enc = (*Buffer).enc_new_map - p.dec = (*Buffer).dec_new_map - p.size = size_new_map - - p.mtype = t1 - p.mkeyprop = &Properties{} - p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) - p.mvalprop = &Properties{} - vtype := p.mtype.Elem() - if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { - // The value type is not a message (*T) or bytes ([]byte), - // so we need encoders for the pointer to this type. - vtype = reflect.PtrTo(vtype) - } - p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) - } - - // precalculate tag code - wire := p.WireType - if p.Packed { - wire = WireBytes - } - x := uint32(p.Tag)<<3 | uint32(wire) - i := 0 - for i = 0; x > 127; i++ { - p.tagbuf[i] = 0x80 | uint8(x&0x7F) - x >>= 7 - } - p.tagbuf[i] = uint8(x) - p.tagcode = p.tagbuf[0 : i+1] - - if p.stype != nil { - if lockGetProp { - p.sprop = GetProperties(p.stype) - } else { - p.sprop = getPropertiesLocked(p.stype) - } - } -} - -var ( - marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() - unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem() -) - -// isMarshaler reports whether type t implements Marshaler. 
-func isMarshaler(t reflect.Type) bool { - // We're checking for (likely) pointer-receiver methods - // so if t is not a pointer, something is very wrong. - // The calls above only invoke isMarshaler on pointer types. - if t.Kind() != reflect.Ptr { - panic("proto: misuse of isMarshaler") - } - return t.Implements(marshalerType) -} - -// isUnmarshaler reports whether type t implements Unmarshaler. -func isUnmarshaler(t reflect.Type) bool { - // We're checking for (likely) pointer-receiver methods - // so if t is not a pointer, something is very wrong. - // The calls above only invoke isUnmarshaler on pointer types. - if t.Kind() != reflect.Ptr { - panic("proto: misuse of isUnmarshaler") - } - return t.Implements(unmarshalerType) -} - -// Init populates the properties from a protocol buffer struct tag. -func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { - p.init(typ, name, tag, f, true) -} - -func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) { - // "bytes,49,opt,def=hello!" - p.Name = name - p.OrigName = name - if f != nil { - p.field = toField(f) - } - if tag == "" { - return - } - p.Parse(tag) - p.setEncAndDec(typ, f, lockGetProp) -} - -var ( - propertiesMu sync.RWMutex - propertiesMap = make(map[reflect.Type]*StructProperties) -) - -// GetProperties returns the list of properties for the type represented by t. -// t must represent a generated struct type of a protocol message. -func GetProperties(t reflect.Type) *StructProperties { - if t.Kind() != reflect.Struct { - panic("proto: type must have kind struct") - } - - // Most calls to GetProperties in a long-running program will be - // retrieving details for types we have seen before. - propertiesMu.RLock() - sprop, ok := propertiesMap[t] - propertiesMu.RUnlock() - if ok { - if collectStats { - stats.Chit++ - } - return sprop - } - - propertiesMu.Lock() - sprop = getPropertiesLocked(t) - propertiesMu.Unlock() - return sprop -} - -// getPropertiesLocked requires that propertiesMu is held. -func getPropertiesLocked(t reflect.Type) *StructProperties { - if prop, ok := propertiesMap[t]; ok { - if collectStats { - stats.Chit++ - } - return prop - } - if collectStats { - stats.Cmiss++ - } - - prop := new(StructProperties) - // in case of recursive protos, fill this in now. - propertiesMap[t] = prop - - // build properties - prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) || - reflect.PtrTo(t).Implements(extendableProtoV1Type) - prop.unrecField = invalidField - prop.Prop = make([]*Properties, t.NumField()) - prop.order = make([]int, t.NumField()) - - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - p := new(Properties) - name := f.Name - p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) - - if f.Name == "XXX_InternalExtensions" { // special case - p.enc = (*Buffer).enc_exts - p.dec = nil // not needed - p.size = size_exts - } else if f.Name == "XXX_extensions" { // special case - p.enc = (*Buffer).enc_map - p.dec = nil // not needed - p.size = size_map - } else if f.Name == "XXX_unrecognized" { // special case - prop.unrecField = toField(&f) - } - oneof := f.Tag.Get("protobuf_oneof") // special case - if oneof != "" { - // Oneof fields don't use the traditional protobuf tag. 
- p.OrigName = oneof - } - prop.Prop[i] = p - prop.order[i] = i - if debug { - print(i, " ", f.Name, " ", t.String(), " ") - if p.Tag > 0 { - print(p.String()) - } - print("\n") - } - if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && oneof == "" { - fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]") - } - } - - // Re-order prop.order. - sort.Sort(prop) - - type oneofMessage interface { - XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) - } - if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok { - var oots []interface{} - prop.oneofMarshaler, prop.oneofUnmarshaler, prop.oneofSizer, oots = om.XXX_OneofFuncs() - prop.stype = t - - // Interpret oneof metadata. - prop.OneofTypes = make(map[string]*OneofProperties) - for _, oot := range oots { - oop := &OneofProperties{ - Type: reflect.ValueOf(oot).Type(), // *T - Prop: new(Properties), - } - sft := oop.Type.Elem().Field(0) - oop.Prop.Name = sft.Name - oop.Prop.Parse(sft.Tag.Get("protobuf")) - // There will be exactly one interface field that - // this new value is assignable to. - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - if f.Type.Kind() != reflect.Interface { - continue - } - if !oop.Type.AssignableTo(f.Type) { - continue - } - oop.Field = i - break - } - prop.OneofTypes[oop.Prop.OrigName] = oop - } - } - - // build required counts - // build tags - reqCount := 0 - prop.decoderOrigNames = make(map[string]int) - for i, p := range prop.Prop { - if strings.HasPrefix(p.Name, "XXX_") { - // Internal fields should not appear in tags/origNames maps. - // They are handled specially when encoding and decoding. - continue - } - if p.Required { - reqCount++ - } - prop.decoderTags.put(p.Tag, i) - prop.decoderOrigNames[p.OrigName] = i - } - prop.reqCount = reqCount - - return prop -} - -// Return the Properties object for the x[0]'th field of the structure. -func propByIndex(t reflect.Type, x []int) *Properties { - if len(x) != 1 { - fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t) - return nil - } - prop := GetProperties(t) - return prop.Prop[x[0]] -} - -// Get the address and type of a pointer to a struct from an interface. -func getbase(pb Message) (t reflect.Type, b structPointer, err error) { - if pb == nil { - err = ErrNil - return - } - // get the reflect type of the pointer to the struct. - t = reflect.TypeOf(pb) - // get the address of the struct. - value := reflect.ValueOf(pb) - b = toStructPointer(value) - return -} - -// A global registry of enum types. -// The generated code will register the generated maps by calling RegisterEnum. - -var enumValueMaps = make(map[string]map[string]int32) - -// RegisterEnum is called from the generated code to install the enum descriptor -// maps into the global table to aid parsing text format protocol buffers. -func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) { - if _, ok := enumValueMaps[typeName]; ok { - panic("proto: duplicate enum registered: " + typeName) - } - enumValueMaps[typeName] = valueMap -} - -// EnumValueMap returns the mapping from names to integers of the -// enum type enumType, or a nil if not found. -func EnumValueMap(enumType string) map[string]int32 { - return enumValueMaps[enumType] -} - -// A registry of all linked message types. -// The string is a fully-qualified proto name ("pkg.Message"). 
-var ( - protoTypes = make(map[string]reflect.Type) - revProtoTypes = make(map[reflect.Type]string) -) - -// RegisterType is called from generated code and maps from the fully qualified -// proto name to the type (pointer to struct) of the protocol buffer. -func RegisterType(x Message, name string) { - if _, ok := protoTypes[name]; ok { - // TODO: Some day, make this a panic. - log.Printf("proto: duplicate proto type registered: %s", name) - return - } - t := reflect.TypeOf(x) - protoTypes[name] = t - revProtoTypes[t] = name -} - -// MessageName returns the fully-qualified proto name for the given message type. -func MessageName(x Message) string { - type xname interface { - XXX_MessageName() string - } - if m, ok := x.(xname); ok { - return m.XXX_MessageName() - } - return revProtoTypes[reflect.TypeOf(x)] -} - -// MessageType returns the message type (pointer to struct) for a named message. -func MessageType(name string) reflect.Type { return protoTypes[name] } - -// A registry of all linked proto files. -var ( - protoFiles = make(map[string][]byte) // file name => fileDescriptor -) - -// RegisterFile is called from generated code and maps from the -// full file name of a .proto file to its compressed FileDescriptorProto. -func RegisterFile(filename string, fileDescriptor []byte) { - protoFiles[filename] = fileDescriptor -} - -// FileDescriptor returns the compressed FileDescriptorProto for a .proto file. -func FileDescriptor(filename string) []byte { return protoFiles[filename] } diff --git a/vendor/github.com/golang/protobuf/proto/text.go b/vendor/github.com/golang/protobuf/proto/text.go deleted file mode 100644 index 965876b..0000000 --- a/vendor/github.com/golang/protobuf/proto/text.go +++ /dev/null @@ -1,854 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// Functions for writing the text protocol buffer format. 
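Since the package comment above only names the feature, here is a minimal, illustrative sketch of how the public entry points implemented by the file removed here are typically used: MarshalText and CompactTextString (defined later in text.go) and UnmarshalText (defined in text_parser.go, also removed below). The generated message type pb.Person and its import path are hypothetical placeholders for illustration only; they are not part of this repository or of the golang/protobuf package.

	package main

	import (
		"bytes"
		"fmt"

		"github.com/golang/protobuf/proto"
		pb "example.com/hypothetical/personpb" // hypothetical generated package
	)

	func main() {
		// Build a message using the proto helper constructors for scalar pointers.
		msg := &pb.Person{Name: proto.String("Ada"), Id: proto.Int32(7)}

		// Multi-line text form, produced by the writers in text.go.
		var buf bytes.Buffer
		if err := proto.MarshalText(&buf, msg); err != nil {
			panic(err)
		}
		fmt.Print(buf.String())

		// One-line form, then parsed back via the parser in text_parser.go.
		compact := proto.CompactTextString(msg)
		var decoded pb.Person
		if err := proto.UnmarshalText(compact, &decoded); err != nil {
			panic(err)
		}
		fmt.Println(decoded.GetName())
	}

This is only a usage sketch under the stated assumptions; the signatures it relies on (MarshalText, CompactTextString, UnmarshalText) appear verbatim in the removed files shown in this patch.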
- -import ( - "bufio" - "bytes" - "encoding" - "errors" - "fmt" - "io" - "log" - "math" - "reflect" - "sort" - "strings" -) - -var ( - newline = []byte("\n") - spaces = []byte(" ") - gtNewline = []byte(">\n") - endBraceNewline = []byte("}\n") - backslashN = []byte{'\\', 'n'} - backslashR = []byte{'\\', 'r'} - backslashT = []byte{'\\', 't'} - backslashDQ = []byte{'\\', '"'} - backslashBS = []byte{'\\', '\\'} - posInf = []byte("inf") - negInf = []byte("-inf") - nan = []byte("nan") -) - -type writer interface { - io.Writer - WriteByte(byte) error -} - -// textWriter is an io.Writer that tracks its indentation level. -type textWriter struct { - ind int - complete bool // if the current position is a complete line - compact bool // whether to write out as a one-liner - w writer -} - -func (w *textWriter) WriteString(s string) (n int, err error) { - if !strings.Contains(s, "\n") { - if !w.compact && w.complete { - w.writeIndent() - } - w.complete = false - return io.WriteString(w.w, s) - } - // WriteString is typically called without newlines, so this - // codepath and its copy are rare. We copy to avoid - // duplicating all of Write's logic here. - return w.Write([]byte(s)) -} - -func (w *textWriter) Write(p []byte) (n int, err error) { - newlines := bytes.Count(p, newline) - if newlines == 0 { - if !w.compact && w.complete { - w.writeIndent() - } - n, err = w.w.Write(p) - w.complete = false - return n, err - } - - frags := bytes.SplitN(p, newline, newlines+1) - if w.compact { - for i, frag := range frags { - if i > 0 { - if err := w.w.WriteByte(' '); err != nil { - return n, err - } - n++ - } - nn, err := w.w.Write(frag) - n += nn - if err != nil { - return n, err - } - } - return n, nil - } - - for i, frag := range frags { - if w.complete { - w.writeIndent() - } - nn, err := w.w.Write(frag) - n += nn - if err != nil { - return n, err - } - if i+1 < len(frags) { - if err := w.w.WriteByte('\n'); err != nil { - return n, err - } - n++ - } - } - w.complete = len(frags[len(frags)-1]) == 0 - return n, nil -} - -func (w *textWriter) WriteByte(c byte) error { - if w.compact && c == '\n' { - c = ' ' - } - if !w.compact && w.complete { - w.writeIndent() - } - err := w.w.WriteByte(c) - w.complete = c == '\n' - return err -} - -func (w *textWriter) indent() { w.ind++ } - -func (w *textWriter) unindent() { - if w.ind == 0 { - log.Print("proto: textWriter unindented too far") - return - } - w.ind-- -} - -func writeName(w *textWriter, props *Properties) error { - if _, err := w.WriteString(props.OrigName); err != nil { - return err - } - if props.Wire != "group" { - return w.WriteByte(':') - } - return nil -} - -// raw is the interface satisfied by RawMessage. -type raw interface { - Bytes() []byte -} - -func requiresQuotes(u string) bool { - // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. - for _, ch := range u { - switch { - case ch == '.' || ch == '/' || ch == '_': - continue - case '0' <= ch && ch <= '9': - continue - case 'A' <= ch && ch <= 'Z': - continue - case 'a' <= ch && ch <= 'z': - continue - default: - return true - } - } - return false -} - -// isAny reports whether sv is a google.protobuf.Any message -func isAny(sv reflect.Value) bool { - type wkt interface { - XXX_WellKnownType() string - } - t, ok := sv.Addr().Interface().(wkt) - return ok && t.XXX_WellKnownType() == "Any" -} - -// writeProto3Any writes an expanded google.protobuf.Any message. -// -// It returns (false, nil) if sv value can't be unmarshaled (e.g. 
because -// required messages are not linked in). -// -// It returns (true, error) when sv was written in expanded format or an error -// was encountered. -func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) { - turl := sv.FieldByName("TypeUrl") - val := sv.FieldByName("Value") - if !turl.IsValid() || !val.IsValid() { - return true, errors.New("proto: invalid google.protobuf.Any message") - } - - b, ok := val.Interface().([]byte) - if !ok { - return true, errors.New("proto: invalid google.protobuf.Any message") - } - - parts := strings.Split(turl.String(), "/") - mt := MessageType(parts[len(parts)-1]) - if mt == nil { - return false, nil - } - m := reflect.New(mt.Elem()) - if err := Unmarshal(b, m.Interface().(Message)); err != nil { - return false, nil - } - w.Write([]byte("[")) - u := turl.String() - if requiresQuotes(u) { - writeString(w, u) - } else { - w.Write([]byte(u)) - } - if w.compact { - w.Write([]byte("]:<")) - } else { - w.Write([]byte("]: <\n")) - w.ind++ - } - if err := tm.writeStruct(w, m.Elem()); err != nil { - return true, err - } - if w.compact { - w.Write([]byte("> ")) - } else { - w.ind-- - w.Write([]byte(">\n")) - } - return true, nil -} - -func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { - if tm.ExpandAny && isAny(sv) { - if canExpand, err := tm.writeProto3Any(w, sv); canExpand { - return err - } - } - st := sv.Type() - sprops := GetProperties(st) - for i := 0; i < sv.NumField(); i++ { - fv := sv.Field(i) - props := sprops.Prop[i] - name := st.Field(i).Name - - if strings.HasPrefix(name, "XXX_") { - // There are two XXX_ fields: - // XXX_unrecognized []byte - // XXX_extensions map[int32]proto.Extension - // The first is handled here; - // the second is handled at the bottom of this function. - if name == "XXX_unrecognized" && !fv.IsNil() { - if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil { - return err - } - } - continue - } - if fv.Kind() == reflect.Ptr && fv.IsNil() { - // Field not filled in. This could be an optional field or - // a required field that wasn't filled in. Either way, there - // isn't anything we can show for it. - continue - } - if fv.Kind() == reflect.Slice && fv.IsNil() { - // Repeated field that is empty, or a bytes field that is unused. - continue - } - - if props.Repeated && fv.Kind() == reflect.Slice { - // Repeated field. - for j := 0; j < fv.Len(); j++ { - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - v := fv.Index(j) - if v.Kind() == reflect.Ptr && v.IsNil() { - // A nil message in a repeated field is not valid, - // but we can handle that more gracefully than panicking. - if _, err := w.Write([]byte("\n")); err != nil { - return err - } - continue - } - if err := tm.writeAny(w, v, props); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - continue - } - if fv.Kind() == reflect.Map { - // Map fields are rendered as a repeated struct with key/value fields. 
- keys := fv.MapKeys() - sort.Sort(mapKeys(keys)) - for _, key := range keys { - val := fv.MapIndex(key) - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - // open struct - if err := w.WriteByte('<'); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - // key - if _, err := w.WriteString("key:"); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, key, props.mkeyprop); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - // nil values aren't legal, but we can avoid panicking because of them. - if val.Kind() != reflect.Ptr || !val.IsNil() { - // value - if _, err := w.WriteString("value:"); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, val, props.mvalprop); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - // close struct - w.unindent() - if err := w.WriteByte('>'); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - continue - } - if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 { - // empty bytes field - continue - } - if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { - // proto3 non-repeated scalar field; skip if zero value - if isProto3Zero(fv) { - continue - } - } - - if fv.Kind() == reflect.Interface { - // Check if it is a oneof. - if st.Field(i).Tag.Get("protobuf_oneof") != "" { - // fv is nil, or holds a pointer to generated struct. - // That generated struct has exactly one field, - // which has a protobuf struct tag. - if fv.IsNil() { - continue - } - inner := fv.Elem().Elem() // interface -> *T -> T - tag := inner.Type().Field(0).Tag.Get("protobuf") - props = new(Properties) // Overwrite the outer props var, but not its pointee. - props.Parse(tag) - // Write the value in the oneof, not the oneof itself. - fv = inner.Field(0) - - // Special case to cope with malformed messages gracefully: - // If the value in the oneof is a nil pointer, don't panic - // in writeAny. - if fv.Kind() == reflect.Ptr && fv.IsNil() { - // Use errors.New so writeAny won't render quotes. - msg := errors.New("/* nil */") - fv = reflect.ValueOf(&msg).Elem() - } - } - } - - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if b, ok := fv.Interface().(raw); ok { - if err := writeRaw(w, b.Bytes()); err != nil { - return err - } - continue - } - - // Enums have a String method, so writeAny will work fine. - if err := tm.writeAny(w, fv, props); err != nil { - return err - } - - if err := w.WriteByte('\n'); err != nil { - return err - } - } - - // Extensions (the XXX_extensions field). - pv := sv.Addr() - if _, ok := extendable(pv.Interface()); ok { - if err := tm.writeExtensions(w, pv); err != nil { - return err - } - } - - return nil -} - -// writeRaw writes an uninterpreted raw message. 
-func writeRaw(w *textWriter, b []byte) error { - if err := w.WriteByte('<'); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - if err := writeUnknownStruct(w, b); err != nil { - return err - } - w.unindent() - if err := w.WriteByte('>'); err != nil { - return err - } - return nil -} - -// writeAny writes an arbitrary field. -func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { - v = reflect.Indirect(v) - - // Floats have special cases. - if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { - x := v.Float() - var b []byte - switch { - case math.IsInf(x, 1): - b = posInf - case math.IsInf(x, -1): - b = negInf - case math.IsNaN(x): - b = nan - } - if b != nil { - _, err := w.Write(b) - return err - } - // Other values are handled below. - } - - // We don't attempt to serialise every possible value type; only those - // that can occur in protocol buffers. - switch v.Kind() { - case reflect.Slice: - // Should only be a []byte; repeated fields are handled in writeStruct. - if err := writeString(w, string(v.Bytes())); err != nil { - return err - } - case reflect.String: - if err := writeString(w, v.String()); err != nil { - return err - } - case reflect.Struct: - // Required/optional group/message. - var bra, ket byte = '<', '>' - if props != nil && props.Wire == "group" { - bra, ket = '{', '}' - } - if err := w.WriteByte(bra); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - if etm, ok := v.Interface().(encoding.TextMarshaler); ok { - text, err := etm.MarshalText() - if err != nil { - return err - } - if _, err = w.Write(text); err != nil { - return err - } - } else if err := tm.writeStruct(w, v); err != nil { - return err - } - w.unindent() - if err := w.WriteByte(ket); err != nil { - return err - } - default: - _, err := fmt.Fprint(w, v.Interface()) - return err - } - return nil -} - -// equivalent to C's isprint. -func isprint(c byte) bool { - return c >= 0x20 && c < 0x7f -} - -// writeString writes a string in the protocol buffer text format. -// It is similar to strconv.Quote except we don't use Go escape sequences, -// we treat the string as a byte sequence, and we use octal escapes. -// These differences are to maintain interoperability with the other -// languages' implementations of the text format. -func writeString(w *textWriter, s string) error { - // use WriteByte here to get any needed indent - if err := w.WriteByte('"'); err != nil { - return err - } - // Loop over the bytes, not the runes. - for i := 0; i < len(s); i++ { - var err error - // Divergence from C++: we don't escape apostrophes. - // There's no need to escape them, and the C++ parser - // copes with a naked apostrophe. 
- switch c := s[i]; c { - case '\n': - _, err = w.w.Write(backslashN) - case '\r': - _, err = w.w.Write(backslashR) - case '\t': - _, err = w.w.Write(backslashT) - case '"': - _, err = w.w.Write(backslashDQ) - case '\\': - _, err = w.w.Write(backslashBS) - default: - if isprint(c) { - err = w.w.WriteByte(c) - } else { - _, err = fmt.Fprintf(w.w, "\\%03o", c) - } - } - if err != nil { - return err - } - } - return w.WriteByte('"') -} - -func writeUnknownStruct(w *textWriter, data []byte) (err error) { - if !w.compact { - if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { - return err - } - } - b := NewBuffer(data) - for b.index < len(b.buf) { - x, err := b.DecodeVarint() - if err != nil { - _, err := fmt.Fprintf(w, "/* %v */\n", err) - return err - } - wire, tag := x&7, x>>3 - if wire == WireEndGroup { - w.unindent() - if _, err := w.Write(endBraceNewline); err != nil { - return err - } - continue - } - if _, err := fmt.Fprint(w, tag); err != nil { - return err - } - if wire != WireStartGroup { - if err := w.WriteByte(':'); err != nil { - return err - } - } - if !w.compact || wire == WireStartGroup { - if err := w.WriteByte(' '); err != nil { - return err - } - } - switch wire { - case WireBytes: - buf, e := b.DecodeRawBytes(false) - if e == nil { - _, err = fmt.Fprintf(w, "%q", buf) - } else { - _, err = fmt.Fprintf(w, "/* %v */", e) - } - case WireFixed32: - x, err = b.DecodeFixed32() - err = writeUnknownInt(w, x, err) - case WireFixed64: - x, err = b.DecodeFixed64() - err = writeUnknownInt(w, x, err) - case WireStartGroup: - err = w.WriteByte('{') - w.indent() - case WireVarint: - x, err = b.DecodeVarint() - err = writeUnknownInt(w, x, err) - default: - _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire) - } - if err != nil { - return err - } - if err = w.WriteByte('\n'); err != nil { - return err - } - } - return nil -} - -func writeUnknownInt(w *textWriter, x uint64, err error) error { - if err == nil { - _, err = fmt.Fprint(w, x) - } else { - _, err = fmt.Fprintf(w, "/* %v */", err) - } - return err -} - -type int32Slice []int32 - -func (s int32Slice) Len() int { return len(s) } -func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } -func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -// writeExtensions writes all the extensions in pv. -// pv is assumed to be a pointer to a protocol message struct that is extendable. -func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error { - emap := extensionMaps[pv.Type().Elem()] - ep, _ := extendable(pv.Interface()) - - // Order the extensions by ID. - // This isn't strictly necessary, but it will give us - // canonical output, which will also make testing easier. - m, mu := ep.extensionsRead() - if m == nil { - return nil - } - mu.Lock() - ids := make([]int32, 0, len(m)) - for id := range m { - ids = append(ids, id) - } - sort.Sort(int32Slice(ids)) - mu.Unlock() - - for _, extNum := range ids { - ext := m[extNum] - var desc *ExtensionDesc - if emap != nil { - desc = emap[extNum] - } - if desc == nil { - // Unknown extension. - if err := writeUnknownStruct(w, ext.enc); err != nil { - return err - } - continue - } - - pb, err := GetExtension(ep, desc) - if err != nil { - return fmt.Errorf("failed getting extension: %v", err) - } - - // Repeated extensions will appear as a slice. 
- if !desc.repeated() { - if err := tm.writeExtension(w, desc.Name, pb); err != nil { - return err - } - } else { - v := reflect.ValueOf(pb) - for i := 0; i < v.Len(); i++ { - if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { - return err - } - } - } - } - return nil -} - -func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error { - if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - return nil -} - -func (w *textWriter) writeIndent() { - if !w.complete { - return - } - remain := w.ind * 2 - for remain > 0 { - n := remain - if n > len(spaces) { - n = len(spaces) - } - w.w.Write(spaces[:n]) - remain -= n - } - w.complete = false -} - -// TextMarshaler is a configurable text format marshaler. -type TextMarshaler struct { - Compact bool // use compact text format (one line). - ExpandAny bool // expand google.protobuf.Any messages of known types -} - -// Marshal writes a given protocol buffer in text format. -// The only errors returned are from w. -func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error { - val := reflect.ValueOf(pb) - if pb == nil || val.IsNil() { - w.Write([]byte("")) - return nil - } - var bw *bufio.Writer - ww, ok := w.(writer) - if !ok { - bw = bufio.NewWriter(w) - ww = bw - } - aw := &textWriter{ - w: ww, - complete: true, - compact: tm.Compact, - } - - if etm, ok := pb.(encoding.TextMarshaler); ok { - text, err := etm.MarshalText() - if err != nil { - return err - } - if _, err = aw.Write(text); err != nil { - return err - } - if bw != nil { - return bw.Flush() - } - return nil - } - // Dereference the received pointer so we don't have outer < and >. - v := reflect.Indirect(val) - if err := tm.writeStruct(aw, v); err != nil { - return err - } - if bw != nil { - return bw.Flush() - } - return nil -} - -// Text is the same as Marshal, but returns the string directly. -func (tm *TextMarshaler) Text(pb Message) string { - var buf bytes.Buffer - tm.Marshal(&buf, pb) - return buf.String() -} - -var ( - defaultTextMarshaler = TextMarshaler{} - compactTextMarshaler = TextMarshaler{Compact: true} -) - -// TODO: consider removing some of the Marshal functions below. - -// MarshalText writes a given protocol buffer in text format. -// The only errors returned are from w. -func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) } - -// MarshalTextString is the same as MarshalText, but returns the string directly. -func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) } - -// CompactText writes a given protocol buffer in compact text format (one line). -func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) } - -// CompactTextString is the same as CompactText, but returns the string directly. -func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) } diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go deleted file mode 100644 index 5e14513..0000000 --- a/vendor/github.com/golang/protobuf/proto/text_parser.go +++ /dev/null @@ -1,895 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// Functions for parsing the Text protocol buffer format. -// TODO: message sets. - -import ( - "encoding" - "errors" - "fmt" - "reflect" - "strconv" - "strings" - "unicode/utf8" -) - -// Error string emitted when deserializing Any and fields are already set -const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set" - -type ParseError struct { - Message string - Line int // 1-based line number - Offset int // 0-based byte offset from start of input -} - -func (p *ParseError) Error() string { - if p.Line == 1 { - // show offset only for first line - return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message) - } - return fmt.Sprintf("line %d: %v", p.Line, p.Message) -} - -type token struct { - value string - err *ParseError - line int // line number - offset int // byte number from start of input, not start of line - unquoted string // the unquoted version of value, if it was a quoted string -} - -func (t *token) String() string { - if t.err == nil { - return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset) - } - return fmt.Sprintf("parse error: %v", t.err) -} - -type textParser struct { - s string // remaining input - done bool // whether the parsing is finished (success or error) - backed bool // whether back() was called - offset, line int - cur token -} - -func newTextParser(s string) *textParser { - p := new(textParser) - p.s = s - p.line = 1 - p.cur.line = 1 - return p -} - -func (p *textParser) errorf(format string, a ...interface{}) *ParseError { - pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} - p.cur.err = pe - p.done = true - return pe -} - -// Numbers and identifiers are matched by [-+._A-Za-z0-9] -func isIdentOrNumberChar(c byte) bool { - switch { - case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': - return true - case '0' <= c && c <= '9': - return true - } - switch c { - case '-', '+', '.', '_': - return true - } - return false -} - -func isWhitespace(c byte) bool { - 
switch c { - case ' ', '\t', '\n', '\r': - return true - } - return false -} - -func isQuote(c byte) bool { - switch c { - case '"', '\'': - return true - } - return false -} - -func (p *textParser) skipWhitespace() { - i := 0 - for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { - if p.s[i] == '#' { - // comment; skip to end of line or input - for i < len(p.s) && p.s[i] != '\n' { - i++ - } - if i == len(p.s) { - break - } - } - if p.s[i] == '\n' { - p.line++ - } - i++ - } - p.offset += i - p.s = p.s[i:len(p.s)] - if len(p.s) == 0 { - p.done = true - } -} - -func (p *textParser) advance() { - // Skip whitespace - p.skipWhitespace() - if p.done { - return - } - - // Start of non-whitespace - p.cur.err = nil - p.cur.offset, p.cur.line = p.offset, p.line - p.cur.unquoted = "" - switch p.s[0] { - case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': - // Single symbol - p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] - case '"', '\'': - // Quoted string - i := 1 - for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { - if p.s[i] == '\\' && i+1 < len(p.s) { - // skip escaped char - i++ - } - i++ - } - if i >= len(p.s) || p.s[i] != p.s[0] { - p.errorf("unmatched quote") - return - } - unq, err := unquoteC(p.s[1:i], rune(p.s[0])) - if err != nil { - p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) - return - } - p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] - p.cur.unquoted = unq - default: - i := 0 - for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { - i++ - } - if i == 0 { - p.errorf("unexpected byte %#x", p.s[0]) - return - } - p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] - } - p.offset += len(p.cur.value) -} - -var ( - errBadUTF8 = errors.New("proto: bad UTF-8") - errBadHex = errors.New("proto: bad hexadecimal") -) - -func unquoteC(s string, quote rune) (string, error) { - // This is based on C++'s tokenizer.cc. - // Despite its name, this is *not* parsing C syntax. - // For instance, "\0" is an invalid quoted string. - - // Avoid allocation in trivial cases. - simple := true - for _, r := range s { - if r == '\\' || r == quote { - simple = false - break - } - } - if simple { - return s, nil - } - - buf := make([]byte, 0, 3*len(s)/2) - for len(s) > 0 { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", errBadUTF8 - } - s = s[n:] - if r != '\\' { - if r < utf8.RuneSelf { - buf = append(buf, byte(r)) - } else { - buf = append(buf, string(r)...) - } - continue - } - - ch, tail, err := unescape(s) - if err != nil { - return "", err - } - buf = append(buf, ch...) 
- s = tail - } - return string(buf), nil -} - -func unescape(s string) (ch string, tail string, err error) { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", "", errBadUTF8 - } - s = s[n:] - switch r { - case 'a': - return "\a", s, nil - case 'b': - return "\b", s, nil - case 'f': - return "\f", s, nil - case 'n': - return "\n", s, nil - case 'r': - return "\r", s, nil - case 't': - return "\t", s, nil - case 'v': - return "\v", s, nil - case '?': - return "?", s, nil // trigraph workaround - case '\'', '"', '\\': - return string(r), s, nil - case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X': - if len(s) < 2 { - return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) - } - base := 8 - ss := s[:2] - s = s[2:] - if r == 'x' || r == 'X' { - base = 16 - } else { - ss = string(r) + ss - } - i, err := strconv.ParseUint(ss, base, 8) - if err != nil { - return "", "", err - } - return string([]byte{byte(i)}), s, nil - case 'u', 'U': - n := 4 - if r == 'U' { - n = 8 - } - if len(s) < n { - return "", "", fmt.Errorf(`\%c requires %d digits`, r, n) - } - - bs := make([]byte, n/2) - for i := 0; i < n; i += 2 { - a, ok1 := unhex(s[i]) - b, ok2 := unhex(s[i+1]) - if !ok1 || !ok2 { - return "", "", errBadHex - } - bs[i/2] = a<<4 | b - } - s = s[n:] - return string(bs), s, nil - } - return "", "", fmt.Errorf(`unknown escape \%c`, r) -} - -// Adapted from src/pkg/strconv/quote.go. -func unhex(b byte) (v byte, ok bool) { - switch { - case '0' <= b && b <= '9': - return b - '0', true - case 'a' <= b && b <= 'f': - return b - 'a' + 10, true - case 'A' <= b && b <= 'F': - return b - 'A' + 10, true - } - return 0, false -} - -// Back off the parser by one token. Can only be done between calls to next(). -// It makes the next advance() a no-op. -func (p *textParser) back() { p.backed = true } - -// Advances the parser and returns the new current token. -func (p *textParser) next() *token { - if p.backed || p.done { - p.backed = false - return &p.cur - } - p.advance() - if p.done { - p.cur.value = "" - } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { - // Look for multiple quoted strings separated by whitespace, - // and concatenate them. - cat := p.cur - for { - p.skipWhitespace() - if p.done || !isQuote(p.s[0]) { - break - } - p.advance() - if p.cur.err != nil { - return &p.cur - } - cat.value += " " + p.cur.value - cat.unquoted += p.cur.unquoted - } - p.done = false // parser may have seen EOF, but we want to return cat - p.cur = cat - } - return &p.cur -} - -func (p *textParser) consumeToken(s string) error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != s { - p.back() - return p.errorf("expected %q, found %q", s, tok.value) - } - return nil -} - -// Return a RequiredNotSetError indicating which required field was not set. -func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError { - st := sv.Type() - sprops := GetProperties(st) - for i := 0; i < st.NumField(); i++ { - if !isNil(sv.Field(i)) { - continue - } - - props := sprops.Prop[i] - if props.Required { - return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)} - } - } - return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen -} - -// Returns the index in the struct for the named field, as well as the parsed tag properties. 
-func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) { - i, ok := sprops.decoderOrigNames[name] - if ok { - return i, sprops.Prop[i], true - } - return -1, nil, false -} - -// Consume a ':' from the input stream (if the next token is a colon), -// returning an error if a colon is needed but not present. -func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != ":" { - // Colon is optional when the field is a group or message. - needColon := true - switch props.Wire { - case "group": - needColon = false - case "bytes": - // A "bytes" field is either a message, a string, or a repeated field; - // those three become *T, *string and []T respectively, so we can check for - // this field being a pointer to a non-string. - if typ.Kind() == reflect.Ptr { - // *T or *string - if typ.Elem().Kind() == reflect.String { - break - } - } else if typ.Kind() == reflect.Slice { - // []T or []*T - if typ.Elem().Kind() != reflect.Ptr { - break - } - } else if typ.Kind() == reflect.String { - // The proto3 exception is for a string field, - // which requires a colon. - break - } - needColon = false - } - if needColon { - return p.errorf("expected ':', found %q", tok.value) - } - p.back() - } - return nil -} - -func (p *textParser) readStruct(sv reflect.Value, terminator string) error { - st := sv.Type() - sprops := GetProperties(st) - reqCount := sprops.reqCount - var reqFieldErr error - fieldSet := make(map[string]bool) - // A struct is a sequence of "name: value", terminated by one of - // '>' or '}', or the end of the input. A name may also be - // "[extension]" or "[type/url]". - // - // The whole struct can also be an expanded Any message, like: - // [type/url] < ... struct contents ... > - for { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == terminator { - break - } - if tok.value == "[" { - // Looks like an extension or an Any. - // - // TODO: Check whether we need to handle - // namespace rooted names (e.g. ".something.Foo"). - extName, err := p.consumeExtName() - if err != nil { - return err - } - - if s := strings.LastIndex(extName, "/"); s >= 0 { - // If it contains a slash, it's an Any type URL. - messageName := extName[s+1:] - mt := MessageType(messageName) - if mt == nil { - return p.errorf("unrecognized message %q in google.protobuf.Any", messageName) - } - tok = p.next() - if tok.err != nil { - return tok.err - } - // consume an optional colon - if tok.value == ":" { - tok = p.next() - if tok.err != nil { - return tok.err - } - } - var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - v := reflect.New(mt.Elem()) - if pe := p.readStruct(v.Elem(), terminator); pe != nil { - return pe - } - b, err := Marshal(v.Interface().(Message)) - if err != nil { - return p.errorf("failed to marshal message of type %q: %v", messageName, err) - } - if fieldSet["type_url"] { - return p.errorf(anyRepeatedlyUnpacked, "type_url") - } - if fieldSet["value"] { - return p.errorf(anyRepeatedlyUnpacked, "value") - } - sv.FieldByName("TypeUrl").SetString(extName) - sv.FieldByName("Value").SetBytes(b) - fieldSet["type_url"] = true - fieldSet["value"] = true - continue - } - - var desc *ExtensionDesc - // This could be faster, but it's functional. - // TODO: Do something smarter than a linear scan. 
- for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { - if d.Name == extName { - desc = d - break - } - } - if desc == nil { - return p.errorf("unrecognized extension %q", extName) - } - - props := &Properties{} - props.Parse(desc.Tag) - - typ := reflect.TypeOf(desc.ExtensionType) - if err := p.checkForColon(props, typ); err != nil { - return err - } - - rep := desc.repeated() - - // Read the extension structure, and set it in - // the value we're constructing. - var ext reflect.Value - if !rep { - ext = reflect.New(typ).Elem() - } else { - ext = reflect.New(typ.Elem()).Elem() - } - if err := p.readAny(ext, props); err != nil { - if _, ok := err.(*RequiredNotSetError); !ok { - return err - } - reqFieldErr = err - } - ep := sv.Addr().Interface().(Message) - if !rep { - SetExtension(ep, desc, ext.Interface()) - } else { - old, err := GetExtension(ep, desc) - var sl reflect.Value - if err == nil { - sl = reflect.ValueOf(old) // existing slice - } else { - sl = reflect.MakeSlice(typ, 0, 1) - } - sl = reflect.Append(sl, ext) - SetExtension(ep, desc, sl.Interface()) - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - continue - } - - // This is a normal, non-extension field. - name := tok.value - var dst reflect.Value - fi, props, ok := structFieldByName(sprops, name) - if ok { - dst = sv.Field(fi) - } else if oop, ok := sprops.OneofTypes[name]; ok { - // It is a oneof. - props = oop.Prop - nv := reflect.New(oop.Type.Elem()) - dst = nv.Elem().Field(0) - field := sv.Field(oop.Field) - if !field.IsNil() { - return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name) - } - field.Set(nv) - } - if !dst.IsValid() { - return p.errorf("unknown field name %q in %v", name, st) - } - - if dst.Kind() == reflect.Map { - // Consume any colon. - if err := p.checkForColon(props, dst.Type()); err != nil { - return err - } - - // Construct the map if it doesn't already exist. - if dst.IsNil() { - dst.Set(reflect.MakeMap(dst.Type())) - } - key := reflect.New(dst.Type().Key()).Elem() - val := reflect.New(dst.Type().Elem()).Elem() - - // The map entry should be this sequence of tokens: - // < key : KEY value : VALUE > - // However, implementations may omit key or value, and technically - // we should support them in any order. See b/28924776 for a time - // this went wrong. - - tok := p.next() - var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - for { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == terminator { - break - } - switch tok.value { - case "key": - if err := p.consumeToken(":"); err != nil { - return err - } - if err := p.readAny(key, props.mkeyprop); err != nil { - return err - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - case "value": - if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil { - return err - } - if err := p.readAny(val, props.mvalprop); err != nil { - return err - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - default: - p.back() - return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value) - } - } - - dst.SetMapIndex(key, val) - continue - } - - // Check that it's not already set if it's not a repeated field. 
- if !props.Repeated && fieldSet[name] { - return p.errorf("non-repeated field %q was repeated", name) - } - - if err := p.checkForColon(props, dst.Type()); err != nil { - return err - } - - // Parse into the field. - fieldSet[name] = true - if err := p.readAny(dst, props); err != nil { - if _, ok := err.(*RequiredNotSetError); !ok { - return err - } - reqFieldErr = err - } - if props.Required { - reqCount-- - } - - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - - } - - if reqCount > 0 { - return p.missingRequiredFieldError(sv) - } - return reqFieldErr -} - -// consumeExtName consumes extension name or expanded Any type URL and the -// following ']'. It returns the name or URL consumed. -func (p *textParser) consumeExtName() (string, error) { - tok := p.next() - if tok.err != nil { - return "", tok.err - } - - // If extension name or type url is quoted, it's a single token. - if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { - name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) - if err != nil { - return "", err - } - return name, p.consumeToken("]") - } - - // Consume everything up to "]" - var parts []string - for tok.value != "]" { - parts = append(parts, tok.value) - tok = p.next() - if tok.err != nil { - return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) - } - } - return strings.Join(parts, ""), nil -} - -// consumeOptionalSeparator consumes an optional semicolon or comma. -// It is used in readStruct to provide backward compatibility. -func (p *textParser) consumeOptionalSeparator() error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != ";" && tok.value != "," { - p.back() - } - return nil -} - -func (p *textParser) readAny(v reflect.Value, props *Properties) error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == "" { - return p.errorf("unexpected EOF") - } - - switch fv := v; fv.Kind() { - case reflect.Slice: - at := v.Type() - if at.Elem().Kind() == reflect.Uint8 { - // Special case for []byte - if tok.value[0] != '"' && tok.value[0] != '\'' { - // Deliberately written out here, as the error after - // this switch statement would write "invalid []byte: ...", - // which is not as user-friendly. - return p.errorf("invalid string: %v", tok.value) - } - bytes := []byte(tok.unquoted) - fv.Set(reflect.ValueOf(bytes)) - return nil - } - // Repeated field. - if tok.value == "[" { - // Repeated field with list notation, like [1,2,3]. - for { - fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) - err := p.readAny(fv.Index(fv.Len()-1), props) - if err != nil { - return err - } - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == "]" { - break - } - if tok.value != "," { - return p.errorf("Expected ']' or ',' found %q", tok.value) - } - } - return nil - } - // One value of the repeated field. - p.back() - fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) - return p.readAny(fv.Index(fv.Len()-1), props) - case reflect.Bool: - // true/1/t/True or false/f/0/False. - switch tok.value { - case "true", "1", "t", "True": - fv.SetBool(true) - return nil - case "false", "0", "f", "False": - fv.SetBool(false) - return nil - } - case reflect.Float32, reflect.Float64: - v := tok.value - // Ignore 'f' for compatibility with output generated by C++, but don't - // remove 'f' when the value is "-inf" or "inf". 
- if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" { - v = v[:len(v)-1] - } - if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil { - fv.SetFloat(f) - return nil - } - case reflect.Int32: - if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { - fv.SetInt(x) - return nil - } - - if len(props.Enum) == 0 { - break - } - m, ok := enumValueMaps[props.Enum] - if !ok { - break - } - x, ok := m[tok.value] - if !ok { - break - } - fv.SetInt(int64(x)) - return nil - case reflect.Int64: - if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { - fv.SetInt(x) - return nil - } - - case reflect.Ptr: - // A basic field (indirected through pointer), or a repeated message/group - p.back() - fv.Set(reflect.New(fv.Type().Elem())) - return p.readAny(fv.Elem(), props) - case reflect.String: - if tok.value[0] == '"' || tok.value[0] == '\'' { - fv.SetString(tok.unquoted) - return nil - } - case reflect.Struct: - var terminator string - switch tok.value { - case "{": - terminator = "}" - case "<": - terminator = ">" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - // TODO: Handle nested messages which implement encoding.TextUnmarshaler. - return p.readStruct(fv, terminator) - case reflect.Uint32: - if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { - fv.SetUint(x) - return nil - } - case reflect.Uint64: - if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { - fv.SetUint(x) - return nil - } - } - return p.errorf("invalid %v: %v", v.Type(), tok.value) -} - -// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb -// before starting to unmarshal, so any existing data in pb is always removed. -// If a required field is not set and no other error occurs, -// UnmarshalText returns *RequiredNotSetError. -func UnmarshalText(s string, pb Message) error { - if um, ok := pb.(encoding.TextUnmarshaler); ok { - err := um.UnmarshalText([]byte(s)) - return err - } - pb.Reset() - v := reflect.ValueOf(pb) - if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil { - return pe - } - return nil -} diff --git a/vendor/github.com/stretchr/testify/LICENSE b/vendor/github.com/stretchr/testify/LICENSE index 473b670..f38ec59 100644 --- a/vendor/github.com/stretchr/testify/LICENSE +++ b/vendor/github.com/stretchr/testify/LICENSE @@ -1,22 +1,21 @@ -Copyright (c) 2012 - 2013 Mat Ryer and Tyler Bunnell +MIT License -Please consider promoting this project if you find it useful. 
+Copyright (c) 2012-2018 Mat Ryer and Tyler Bunnell -Permission is hereby granted, free of charge, to any person -obtaining a copy of this software and associated documentation -files (the "Software"), to deal in the Software without restriction, -including without limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of the Software, -and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: -The above copyright notice and this permission notice shall be included -in all copies or substantial portions of the Software. +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, -DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT -OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE -OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go index ae06a54..e0364e9 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_format.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go @@ -13,6 +13,9 @@ import ( // Conditionf uses a Comparison to assert a complex condition. func Conditionf(t TestingT, comp Comparison, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } return Condition(t, comp, append([]interface{}{msg}, args...)...) } @@ -23,11 +26,17 @@ func Conditionf(t TestingT, comp Comparison, msg string, args ...interface{}) bo // assert.Containsf(t, ["Hello", "World"], "World", "error message %s", "formatted") // assert.Containsf(t, {"Hello": "World"}, "Hello", "error message %s", "formatted") func Containsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } return Contains(t, s, contains, append([]interface{}{msg}, args...)...) } // DirExistsf checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists. 
func DirExistsf(t TestingT, path string, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } return DirExists(t, path, append([]interface{}{msg}, args...)...) } @@ -37,6 +46,9 @@ func DirExistsf(t TestingT, path string, msg string, args ...interface{}) bool { // // assert.ElementsMatchf(t, [1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted") func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } return ElementsMatch(t, listA, listB, append([]interface{}{msg}, args...)...) } @@ -45,6 +57,9 @@ func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string // // assert.Emptyf(t, obj, "error message %s", "formatted") func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } return Empty(t, object, append([]interface{}{msg}, args...)...) } @@ -56,6 +71,9 @@ func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) boo // referenced values (as opposed to the memory addresses). Function equality // cannot be determined and will always fail. func Equalf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } return Equal(t, expected, actual, append([]interface{}{msg}, args...)...) } @@ -65,6 +83,9 @@ func Equalf(t TestingT, expected interface{}, actual interface{}, msg string, ar // actualObj, err := SomeFunction() // assert.EqualErrorf(t, err, expectedErrorString, "error message %s", "formatted") func EqualErrorf(t TestingT, theError error, errString string, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } return EqualError(t, theError, errString, append([]interface{}{msg}, args...)...) } @@ -73,6 +94,9 @@ func EqualErrorf(t TestingT, theError error, errString string, msg string, args // // assert.EqualValuesf(t, uint32(123, "error message %s", "formatted"), int32(123)) func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } return EqualValues(t, expected, actual, append([]interface{}{msg}, args...)...) } @@ -83,23 +107,46 @@ func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg stri // assert.Equal(t, expectedErrorf, err) // } func Errorf(t TestingT, err error, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } return Error(t, err, append([]interface{}{msg}, args...)...) } +// Eventuallyf asserts that given condition will be met in waitFor time, +// periodically checking target function each tick. +// +// assert.Eventuallyf(t, func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") +func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Eventually(t, condition, waitFor, tick, append([]interface{}{msg}, args...)...) +} + // Exactlyf asserts that two objects are equal in value and type. 
// // assert.Exactlyf(t, int32(123, "error message %s", "formatted"), int64(123)) func Exactlyf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } return Exactly(t, expected, actual, append([]interface{}{msg}, args...)...) } // Failf reports a failure through func Failf(t TestingT, failureMessage string, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } return Fail(t, failureMessage, append([]interface{}{msg}, args...)...) } // FailNowf fails test func FailNowf(t TestingT, failureMessage string, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } return FailNow(t, failureMessage, append([]interface{}{msg}, args...)...) } @@ -107,31 +154,68 @@ func FailNowf(t TestingT, failureMessage string, msg string, args ...interface{} // // assert.Falsef(t, myBool, "error message %s", "formatted") func Falsef(t TestingT, value bool, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } return False(t, value, append([]interface{}{msg}, args...)...) } // FileExistsf checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file. func FileExistsf(t TestingT, path string, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } return FileExists(t, path, append([]interface{}{msg}, args...)...) } +// Greaterf asserts that the first element is greater than the second +// +// assert.Greaterf(t, 2, 1, "error message %s", "formatted") +// assert.Greaterf(t, float64(2, "error message %s", "formatted"), float64(1)) +// assert.Greaterf(t, "b", "a", "error message %s", "formatted") +func Greaterf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Greater(t, e1, e2, append([]interface{}{msg}, args...)...) +} + +// GreaterOrEqualf asserts that the first element is greater than or equal to the second +// +// assert.GreaterOrEqualf(t, 2, 1, "error message %s", "formatted") +// assert.GreaterOrEqualf(t, 2, 2, "error message %s", "formatted") +// assert.GreaterOrEqualf(t, "b", "a", "error message %s", "formatted") +// assert.GreaterOrEqualf(t, "b", "b", "error message %s", "formatted") +func GreaterOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return GreaterOrEqual(t, e1, e2, append([]interface{}{msg}, args...)...) +} + // HTTPBodyContainsf asserts that a specified handler returns a // body that contains a string. // -// assert.HTTPBodyContainsf(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") +// assert.HTTPBodyContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") // // Returns whether the assertion was successful (true) or not (false). func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } return HTTPBodyContains(t, handler, method, url, values, str, append([]interface{}{msg}, args...)...) } // HTTPBodyNotContainsf asserts that a specified handler returns a // body that does not contain a string. 
// -// assert.HTTPBodyNotContainsf(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") +// assert.HTTPBodyNotContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") // // Returns whether the assertion was successful (true) or not (false). func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } return HTTPBodyNotContains(t, handler, method, url, values, str, append([]interface{}{msg}, args...)...) } @@ -141,6 +225,9 @@ func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, u // // Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } return HTTPError(t, handler, method, url, values, append([]interface{}{msg}, args...)...) } @@ -150,6 +237,9 @@ func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, // // Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } return HTTPRedirect(t, handler, method, url, values, append([]interface{}{msg}, args...)...) } @@ -159,6 +249,9 @@ func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url stri // // Returns whether the assertion was successful (true) or not (false). func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } return HTTPSuccess(t, handler, method, url, values, append([]interface{}{msg}, args...)...) } @@ -166,6 +259,9 @@ func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url strin // // assert.Implementsf(t, (*MyInterface, "error message %s", "formatted")(nil), new(MyObject)) func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } return Implements(t, interfaceObject, object, append([]interface{}{msg}, args...)...) } @@ -173,31 +269,49 @@ func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, ms // // assert.InDeltaf(t, math.Pi, (22 / 7.0, "error message %s", "formatted"), 0.01) func InDeltaf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } return InDelta(t, expected, actual, delta, append([]interface{}{msg}, args...)...) } // InDeltaMapValuesf is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys. func InDeltaMapValuesf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } return InDeltaMapValues(t, expected, actual, delta, append([]interface{}{msg}, args...)...) } // InDeltaSlicef is the same as InDelta, except it compares two slices. 
func InDeltaSlicef(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } return InDeltaSlice(t, expected, actual, delta, append([]interface{}{msg}, args...)...) } // InEpsilonf asserts that expected and actual have a relative error less than epsilon func InEpsilonf(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } return InEpsilon(t, expected, actual, epsilon, append([]interface{}{msg}, args...)...) } // InEpsilonSlicef is the same as InEpsilon, except it compares each value from two slices. func InEpsilonSlicef(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } return InEpsilonSlice(t, expected, actual, epsilon, append([]interface{}{msg}, args...)...) } // IsTypef asserts that the specified objects are of the same type. func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } return IsType(t, expectedType, object, append([]interface{}{msg}, args...)...) } @@ -205,21 +319,63 @@ func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg strin // // assert.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") func JSONEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } return JSONEq(t, expected, actual, append([]interface{}{msg}, args...)...) } +// YAMLEqf asserts that two YAML strings are equivalent. +func YAMLEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return YAMLEq(t, expected, actual, append([]interface{}{msg}, args...)...) +} + // Lenf asserts that the specified object has specific length. // Lenf also fails if the object has a type that len() not accept. // // assert.Lenf(t, mySlice, 3, "error message %s", "formatted") func Lenf(t TestingT, object interface{}, length int, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } return Len(t, object, length, append([]interface{}{msg}, args...)...) } +// Lessf asserts that the first element is less than the second +// +// assert.Lessf(t, 1, 2, "error message %s", "formatted") +// assert.Lessf(t, float64(1, "error message %s", "formatted"), float64(2)) +// assert.Lessf(t, "a", "b", "error message %s", "formatted") +func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Less(t, e1, e2, append([]interface{}{msg}, args...)...) +} + +// LessOrEqualf asserts that the first element is less than or equal to the second +// +// assert.LessOrEqualf(t, 1, 2, "error message %s", "formatted") +// assert.LessOrEqualf(t, 2, 2, "error message %s", "formatted") +// assert.LessOrEqualf(t, "a", "b", "error message %s", "formatted") +// assert.LessOrEqualf(t, "b", "b", "error message %s", "formatted") +func LessOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return LessOrEqual(t, e1, e2, append([]interface{}{msg}, args...)...) 
+} + // Nilf asserts that the specified object is nil. // // assert.Nilf(t, err, "error message %s", "formatted") func Nilf(t TestingT, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } return Nil(t, object, append([]interface{}{msg}, args...)...) } @@ -230,6 +386,9 @@ func Nilf(t TestingT, object interface{}, msg string, args ...interface{}) bool // assert.Equal(t, expectedObj, actualObj) // } func NoErrorf(t TestingT, err error, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } return NoError(t, err, append([]interface{}{msg}, args...)...) } @@ -240,6 +399,9 @@ func NoErrorf(t TestingT, err error, msg string, args ...interface{}) bool { // assert.NotContainsf(t, ["Hello", "World"], "Earth", "error message %s", "formatted") // assert.NotContainsf(t, {"Hello": "World"}, "Earth", "error message %s", "formatted") func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } return NotContains(t, s, contains, append([]interface{}{msg}, args...)...) } @@ -250,6 +412,9 @@ func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, a // assert.Equal(t, "two", obj[1]) // } func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } return NotEmpty(t, object, append([]interface{}{msg}, args...)...) } @@ -260,6 +425,9 @@ func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{}) // Pointer variable equality is determined based on the equality of the // referenced values (as opposed to the memory addresses). func NotEqualf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } return NotEqual(t, expected, actual, append([]interface{}{msg}, args...)...) } @@ -267,6 +435,9 @@ func NotEqualf(t TestingT, expected interface{}, actual interface{}, msg string, // // assert.NotNilf(t, err, "error message %s", "formatted") func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } return NotNil(t, object, append([]interface{}{msg}, args...)...) } @@ -274,6 +445,9 @@ func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) bo // // assert.NotPanicsf(t, func(){ RemainCalm() }, "error message %s", "formatted") func NotPanicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } return NotPanics(t, f, append([]interface{}{msg}, args...)...) } @@ -282,6 +456,9 @@ func NotPanicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bo // assert.NotRegexpf(t, regexp.MustCompile("starts", "error message %s", "formatted"), "it's starting") // assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted") func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } return NotRegexp(t, rx, str, append([]interface{}{msg}, args...)...) } @@ -290,11 +467,17 @@ func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args .. 
// // assert.NotSubsetf(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } return NotSubset(t, list, subset, append([]interface{}{msg}, args...)...) } // NotZerof asserts that i is not the zero value for its type. func NotZerof(t TestingT, i interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } return NotZero(t, i, append([]interface{}{msg}, args...)...) } @@ -302,6 +485,9 @@ func NotZerof(t TestingT, i interface{}, msg string, args ...interface{}) bool { // // assert.Panicsf(t, func(){ GoCrazy() }, "error message %s", "formatted") func Panicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } return Panics(t, f, append([]interface{}{msg}, args...)...) } @@ -310,6 +496,9 @@ func Panicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool // // assert.PanicsWithValuef(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") func PanicsWithValuef(t TestingT, expected interface{}, f PanicTestFunc, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } return PanicsWithValue(t, expected, f, append([]interface{}{msg}, args...)...) } @@ -318,14 +507,33 @@ func PanicsWithValuef(t TestingT, expected interface{}, f PanicTestFunc, msg str // assert.Regexpf(t, regexp.MustCompile("start", "error message %s", "formatted"), "it's starting") // assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted") func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } return Regexp(t, rx, str, append([]interface{}{msg}, args...)...) } +// Samef asserts that two pointers reference the same object. +// +// assert.Samef(t, ptr1, ptr2, "error message %s", "formatted") +// +// Both arguments must be pointer variables. Pointer variable sameness is +// determined based on the equality of both type and value. +func Samef(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Same(t, expected, actual, append([]interface{}{msg}, args...)...) +} + // Subsetf asserts that the specified list(array, slice...) contains all // elements given in the specified subset(array, slice...). // // assert.Subsetf(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } return Subset(t, list, subset, append([]interface{}{msg}, args...)...) } @@ -333,6 +541,9 @@ func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args // // assert.Truef(t, myBool, "error message %s", "formatted") func Truef(t TestingT, value bool, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } return True(t, value, append([]interface{}{msg}, args...)...) 
} @@ -340,10 +551,16 @@ func Truef(t TestingT, value bool, msg string, args ...interface{}) bool { // // assert.WithinDurationf(t, time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } return WithinDuration(t, expected, actual, delta, append([]interface{}{msg}, args...)...) } // Zerof asserts that i is the zero value for its type. func Zerof(t TestingT, i interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } return Zero(t, i, append([]interface{}{msg}, args...)...) } diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl b/vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl index c5cc66f..d2bb0b8 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl +++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl @@ -1,4 +1,5 @@ {{.CommentFormat}} func {{.DocInfo.Name}}f(t TestingT, {{.ParamsFormat}}) bool { + if h, ok := t.(tHelper); ok { h.Helper() } return {{.DocInfo.Name}}(t, {{.ForwardedParamsFormat}}) } diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go index ffa5428..2683040 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go @@ -13,11 +13,17 @@ import ( // Condition uses a Comparison to assert a complex condition. func (a *Assertions) Condition(comp Comparison, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return Condition(a.t, comp, msgAndArgs...) } // Conditionf uses a Comparison to assert a complex condition. func (a *Assertions) Conditionf(comp Comparison, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return Conditionf(a.t, comp, msg, args...) } @@ -28,6 +34,9 @@ func (a *Assertions) Conditionf(comp Comparison, msg string, args ...interface{} // a.Contains(["Hello", "World"], "World") // a.Contains({"Hello": "World"}, "Hello") func (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return Contains(a.t, s, contains, msgAndArgs...) } @@ -38,16 +47,25 @@ func (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs .. // a.Containsf(["Hello", "World"], "World", "error message %s", "formatted") // a.Containsf({"Hello": "World"}, "Hello", "error message %s", "formatted") func (a *Assertions) Containsf(s interface{}, contains interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return Containsf(a.t, s, contains, msg, args...) } // DirExists checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists. func (a *Assertions) DirExists(path string, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return DirExists(a.t, path, msgAndArgs...) } // DirExistsf checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists. 
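The methods in assertion_forward.go delegate to the package-level assertions with the t stored inside the Assertions value, applying the same Helper() guard, so a test can build one value with assert.New and drop the explicit t argument. A brief usage sketch (the test name and inputs are made up):

package mypkg_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestWithForwardedAssertions(t *testing.T) {
	a := assert.New(t) // a remembers t; each call below forwards to the package-level function

	a.Contains([]string{"Hello", "World"}, "World")
	a.Empty("")      // nil, "", and zero-length containers all count as empty
	a.DirExists(".") // fails if the path is a file or cannot be checked
}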
func (a *Assertions) DirExistsf(path string, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return DirExistsf(a.t, path, msg, args...) } @@ -57,6 +75,9 @@ func (a *Assertions) DirExistsf(path string, msg string, args ...interface{}) bo // // a.ElementsMatch([1, 3, 2, 3], [1, 3, 3, 2]) func (a *Assertions) ElementsMatch(listA interface{}, listB interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return ElementsMatch(a.t, listA, listB, msgAndArgs...) } @@ -66,6 +87,9 @@ func (a *Assertions) ElementsMatch(listA interface{}, listB interface{}, msgAndA // // a.ElementsMatchf([1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted") func (a *Assertions) ElementsMatchf(listA interface{}, listB interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return ElementsMatchf(a.t, listA, listB, msg, args...) } @@ -74,6 +98,9 @@ func (a *Assertions) ElementsMatchf(listA interface{}, listB interface{}, msg st // // a.Empty(obj) func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return Empty(a.t, object, msgAndArgs...) } @@ -82,6 +109,9 @@ func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool { // // a.Emptyf(obj, "error message %s", "formatted") func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return Emptyf(a.t, object, msg, args...) } @@ -93,6 +123,9 @@ func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) // referenced values (as opposed to the memory addresses). Function equality // cannot be determined and will always fail. func (a *Assertions) Equal(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return Equal(a.t, expected, actual, msgAndArgs...) } @@ -102,6 +135,9 @@ func (a *Assertions) Equal(expected interface{}, actual interface{}, msgAndArgs // actualObj, err := SomeFunction() // a.EqualError(err, expectedErrorString) func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return EqualError(a.t, theError, errString, msgAndArgs...) } @@ -111,6 +147,9 @@ func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ... // actualObj, err := SomeFunction() // a.EqualErrorf(err, expectedErrorString, "error message %s", "formatted") func (a *Assertions) EqualErrorf(theError error, errString string, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return EqualErrorf(a.t, theError, errString, msg, args...) } @@ -119,6 +158,9 @@ func (a *Assertions) EqualErrorf(theError error, errString string, msg string, a // // a.EqualValues(uint32(123), int32(123)) func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return EqualValues(a.t, expected, actual, msgAndArgs...) 
} @@ -127,6 +169,9 @@ func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAn // // a.EqualValuesf(uint32(123, "error message %s", "formatted"), int32(123)) func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return EqualValuesf(a.t, expected, actual, msg, args...) } @@ -138,6 +183,9 @@ func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg // referenced values (as opposed to the memory addresses). Function equality // cannot be determined and will always fail. func (a *Assertions) Equalf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return Equalf(a.t, expected, actual, msg, args...) } @@ -148,6 +196,9 @@ func (a *Assertions) Equalf(expected interface{}, actual interface{}, msg string // assert.Equal(t, expectedError, err) // } func (a *Assertions) Error(err error, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return Error(a.t, err, msgAndArgs...) } @@ -158,13 +209,41 @@ func (a *Assertions) Error(err error, msgAndArgs ...interface{}) bool { // assert.Equal(t, expectedErrorf, err) // } func (a *Assertions) Errorf(err error, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return Errorf(a.t, err, msg, args...) } +// Eventually asserts that given condition will be met in waitFor time, +// periodically checking target function each tick. +// +// a.Eventually(func() bool { return true; }, time.Second, 10*time.Millisecond) +func (a *Assertions) Eventually(condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Eventually(a.t, condition, waitFor, tick, msgAndArgs...) +} + +// Eventuallyf asserts that given condition will be met in waitFor time, +// periodically checking target function each tick. +// +// a.Eventuallyf(func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") +func (a *Assertions) Eventuallyf(condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Eventuallyf(a.t, condition, waitFor, tick, msg, args...) +} + // Exactly asserts that two objects are equal in value and type. // // a.Exactly(int32(123), int64(123)) func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return Exactly(a.t, expected, actual, msgAndArgs...) } @@ -172,26 +251,41 @@ func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArg // // a.Exactlyf(int32(123, "error message %s", "formatted"), int64(123)) func (a *Assertions) Exactlyf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return Exactlyf(a.t, expected, actual, msg, args...) } // Fail reports a failure through func (a *Assertions) Fail(failureMessage string, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return Fail(a.t, failureMessage, msgAndArgs...) 
} // FailNow fails test func (a *Assertions) FailNow(failureMessage string, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return FailNow(a.t, failureMessage, msgAndArgs...) } // FailNowf fails test func (a *Assertions) FailNowf(failureMessage string, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return FailNowf(a.t, failureMessage, msg, args...) } // Failf reports a failure through func (a *Assertions) Failf(failureMessage string, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return Failf(a.t, failureMessage, msg, args...) } @@ -199,6 +293,9 @@ func (a *Assertions) Failf(failureMessage string, msg string, args ...interface{ // // a.False(myBool) func (a *Assertions) False(value bool, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return False(a.t, value, msgAndArgs...) } @@ -206,56 +303,127 @@ func (a *Assertions) False(value bool, msgAndArgs ...interface{}) bool { // // a.Falsef(myBool, "error message %s", "formatted") func (a *Assertions) Falsef(value bool, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return Falsef(a.t, value, msg, args...) } // FileExists checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file. func (a *Assertions) FileExists(path string, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return FileExists(a.t, path, msgAndArgs...) } // FileExistsf checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file. func (a *Assertions) FileExistsf(path string, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return FileExistsf(a.t, path, msg, args...) } +// Greater asserts that the first element is greater than the second +// +// a.Greater(2, 1) +// a.Greater(float64(2), float64(1)) +// a.Greater("b", "a") +func (a *Assertions) Greater(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Greater(a.t, e1, e2, msgAndArgs...) +} + +// GreaterOrEqual asserts that the first element is greater than or equal to the second +// +// a.GreaterOrEqual(2, 1) +// a.GreaterOrEqual(2, 2) +// a.GreaterOrEqual("b", "a") +// a.GreaterOrEqual("b", "b") +func (a *Assertions) GreaterOrEqual(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return GreaterOrEqual(a.t, e1, e2, msgAndArgs...) +} + +// GreaterOrEqualf asserts that the first element is greater than or equal to the second +// +// a.GreaterOrEqualf(2, 1, "error message %s", "formatted") +// a.GreaterOrEqualf(2, 2, "error message %s", "formatted") +// a.GreaterOrEqualf("b", "a", "error message %s", "formatted") +// a.GreaterOrEqualf("b", "b", "error message %s", "formatted") +func (a *Assertions) GreaterOrEqualf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return GreaterOrEqualf(a.t, e1, e2, msg, args...) 
+} + +// Greaterf asserts that the first element is greater than the second +// +// a.Greaterf(2, 1, "error message %s", "formatted") +// a.Greaterf(float64(2, "error message %s", "formatted"), float64(1)) +// a.Greaterf("b", "a", "error message %s", "formatted") +func (a *Assertions) Greaterf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Greaterf(a.t, e1, e2, msg, args...) +} + // HTTPBodyContains asserts that a specified handler returns a // body that contains a string. // -// a.HTTPBodyContains(myHandler, "www.google.com", nil, "I'm Feeling Lucky") +// a.HTTPBodyContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") // // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return HTTPBodyContains(a.t, handler, method, url, values, str, msgAndArgs...) } // HTTPBodyContainsf asserts that a specified handler returns a // body that contains a string. // -// a.HTTPBodyContainsf(myHandler, "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") +// a.HTTPBodyContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") // // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) HTTPBodyContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return HTTPBodyContainsf(a.t, handler, method, url, values, str, msg, args...) } // HTTPBodyNotContains asserts that a specified handler returns a // body that does not contain a string. // -// a.HTTPBodyNotContains(myHandler, "www.google.com", nil, "I'm Feeling Lucky") +// a.HTTPBodyNotContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") // // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return HTTPBodyNotContains(a.t, handler, method, url, values, str, msgAndArgs...) } // HTTPBodyNotContainsf asserts that a specified handler returns a // body that does not contain a string. // -// a.HTTPBodyNotContainsf(myHandler, "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") +// a.HTTPBodyNotContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") // // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) HTTPBodyNotContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return HTTPBodyNotContainsf(a.t, handler, method, url, values, str, msg, args...) } @@ -265,6 +433,9 @@ func (a *Assertions) HTTPBodyNotContainsf(handler http.HandlerFunc, method strin // // Returns whether the assertion was successful (true) or not (false). 
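The HTTP helpers take the request method as an explicit argument, which is what the corrected doc examples above now show; testify exercises the handler in-process and records the response, so no server is started. A sketch with a made-up handler and query values:

package mypkg_test

import (
	"fmt"
	"net/http"
	"net/url"
	"testing"

	"github.com/stretchr/testify/assert"
)

// helloHandler exists only for this example.
func helloHandler(w http.ResponseWriter, r *http.Request) {
	fmt.Fprintf(w, "hello %s", r.URL.Query().Get("name"))
}

func TestHelloBody(t *testing.T) {
	values := url.Values{"name": []string{"world"}}

	assert.HTTPSuccess(t, helloHandler, "GET", "/hello", values)
	assert.HTTPBodyContains(t, helloHandler, "GET", "/hello", values, "hello world")
	assert.HTTPBodyNotContains(t, helloHandler, "GET", "/hello", values, "goodbye")
}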
func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return HTTPError(a.t, handler, method, url, values, msgAndArgs...) } @@ -274,6 +445,9 @@ func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url stri // // Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). func (a *Assertions) HTTPErrorf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return HTTPErrorf(a.t, handler, method, url, values, msg, args...) } @@ -283,6 +457,9 @@ func (a *Assertions) HTTPErrorf(handler http.HandlerFunc, method string, url str // // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return HTTPRedirect(a.t, handler, method, url, values, msgAndArgs...) } @@ -292,6 +469,9 @@ func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url s // // Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). func (a *Assertions) HTTPRedirectf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return HTTPRedirectf(a.t, handler, method, url, values, msg, args...) } @@ -301,6 +481,9 @@ func (a *Assertions) HTTPRedirectf(handler http.HandlerFunc, method string, url // // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return HTTPSuccess(a.t, handler, method, url, values, msgAndArgs...) } @@ -310,6 +493,9 @@ func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url st // // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) HTTPSuccessf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return HTTPSuccessf(a.t, handler, method, url, values, msg, args...) } @@ -317,6 +503,9 @@ func (a *Assertions) HTTPSuccessf(handler http.HandlerFunc, method string, url s // // a.Implements((*MyInterface)(nil), new(MyObject)) func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return Implements(a.t, interfaceObject, object, msgAndArgs...) } @@ -324,6 +513,9 @@ func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, // // a.Implementsf((*MyInterface, "error message %s", "formatted")(nil), new(MyObject)) func (a *Assertions) Implementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return Implementsf(a.t, interfaceObject, object, msg, args...) 
} @@ -331,26 +523,41 @@ func (a *Assertions) Implementsf(interfaceObject interface{}, object interface{} // // a.InDelta(math.Pi, (22 / 7.0), 0.01) func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return InDelta(a.t, expected, actual, delta, msgAndArgs...) } // InDeltaMapValues is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys. func (a *Assertions) InDeltaMapValues(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return InDeltaMapValues(a.t, expected, actual, delta, msgAndArgs...) } // InDeltaMapValuesf is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys. func (a *Assertions) InDeltaMapValuesf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return InDeltaMapValuesf(a.t, expected, actual, delta, msg, args...) } // InDeltaSlice is the same as InDelta, except it compares two slices. func (a *Assertions) InDeltaSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return InDeltaSlice(a.t, expected, actual, delta, msgAndArgs...) } // InDeltaSlicef is the same as InDelta, except it compares two slices. func (a *Assertions) InDeltaSlicef(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return InDeltaSlicef(a.t, expected, actual, delta, msg, args...) } @@ -358,36 +565,57 @@ func (a *Assertions) InDeltaSlicef(expected interface{}, actual interface{}, del // // a.InDeltaf(math.Pi, (22 / 7.0, "error message %s", "formatted"), 0.01) func (a *Assertions) InDeltaf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return InDeltaf(a.t, expected, actual, delta, msg, args...) } // InEpsilon asserts that expected and actual have a relative error less than epsilon func (a *Assertions) InEpsilon(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return InEpsilon(a.t, expected, actual, epsilon, msgAndArgs...) } // InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices. func (a *Assertions) InEpsilonSlice(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return InEpsilonSlice(a.t, expected, actual, epsilon, msgAndArgs...) } // InEpsilonSlicef is the same as InEpsilon, except it compares each value from two slices. func (a *Assertions) InEpsilonSlicef(expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return InEpsilonSlicef(a.t, expected, actual, epsilon, msg, args...) 
} // InEpsilonf asserts that expected and actual have a relative error less than epsilon func (a *Assertions) InEpsilonf(expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return InEpsilonf(a.t, expected, actual, epsilon, msg, args...) } // IsType asserts that the specified objects are of the same type. func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return IsType(a.t, expectedType, object, msgAndArgs...) } // IsTypef asserts that the specified objects are of the same type. func (a *Assertions) IsTypef(expectedType interface{}, object interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return IsTypef(a.t, expectedType, object, msg, args...) } @@ -395,6 +623,9 @@ func (a *Assertions) IsTypef(expectedType interface{}, object interface{}, msg s // // a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return JSONEq(a.t, expected, actual, msgAndArgs...) } @@ -402,14 +633,36 @@ func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interf // // a.JSONEqf(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") func (a *Assertions) JSONEqf(expected string, actual string, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return JSONEqf(a.t, expected, actual, msg, args...) } +// YAMLEq asserts that two YAML strings are equivalent. +func (a *Assertions) YAMLEq(expected string, actual string, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return YAMLEq(a.t, expected, actual, msgAndArgs...) +} + +// YAMLEqf asserts that two YAML strings are equivalent. +func (a *Assertions) YAMLEqf(expected string, actual string, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return YAMLEqf(a.t, expected, actual, msg, args...) +} + // Len asserts that the specified object has specific length. // Len also fails if the object has a type that len() not accept. // // a.Len(mySlice, 3) func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return Len(a.t, object, length, msgAndArgs...) } @@ -418,13 +671,69 @@ func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface // // a.Lenf(mySlice, 3, "error message %s", "formatted") func (a *Assertions) Lenf(object interface{}, length int, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return Lenf(a.t, object, length, msg, args...) } +// Less asserts that the first element is less than the second +// +// a.Less(1, 2) +// a.Less(float64(1), float64(2)) +// a.Less("a", "b") +func (a *Assertions) Less(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Less(a.t, e1, e2, msgAndArgs...) 
+} + +// LessOrEqual asserts that the first element is less than or equal to the second +// +// a.LessOrEqual(1, 2) +// a.LessOrEqual(2, 2) +// a.LessOrEqual("a", "b") +// a.LessOrEqual("b", "b") +func (a *Assertions) LessOrEqual(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return LessOrEqual(a.t, e1, e2, msgAndArgs...) +} + +// LessOrEqualf asserts that the first element is less than or equal to the second +// +// a.LessOrEqualf(1, 2, "error message %s", "formatted") +// a.LessOrEqualf(2, 2, "error message %s", "formatted") +// a.LessOrEqualf("a", "b", "error message %s", "formatted") +// a.LessOrEqualf("b", "b", "error message %s", "formatted") +func (a *Assertions) LessOrEqualf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return LessOrEqualf(a.t, e1, e2, msg, args...) +} + +// Lessf asserts that the first element is less than the second +// +// a.Lessf(1, 2, "error message %s", "formatted") +// a.Lessf(float64(1, "error message %s", "formatted"), float64(2)) +// a.Lessf("a", "b", "error message %s", "formatted") +func (a *Assertions) Lessf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Lessf(a.t, e1, e2, msg, args...) +} + // Nil asserts that the specified object is nil. // // a.Nil(err) func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return Nil(a.t, object, msgAndArgs...) } @@ -432,6 +741,9 @@ func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) bool { // // a.Nilf(err, "error message %s", "formatted") func (a *Assertions) Nilf(object interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return Nilf(a.t, object, msg, args...) } @@ -442,6 +754,9 @@ func (a *Assertions) Nilf(object interface{}, msg string, args ...interface{}) b // assert.Equal(t, expectedObj, actualObj) // } func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return NoError(a.t, err, msgAndArgs...) } @@ -452,6 +767,9 @@ func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) bool { // assert.Equal(t, expectedObj, actualObj) // } func (a *Assertions) NoErrorf(err error, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return NoErrorf(a.t, err, msg, args...) } @@ -462,6 +780,9 @@ func (a *Assertions) NoErrorf(err error, msg string, args ...interface{}) bool { // a.NotContains(["Hello", "World"], "Earth") // a.NotContains({"Hello": "World"}, "Earth") func (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return NotContains(a.t, s, contains, msgAndArgs...) } @@ -472,6 +793,9 @@ func (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs // a.NotContainsf(["Hello", "World"], "Earth", "error message %s", "formatted") // a.NotContainsf({"Hello": "World"}, "Earth", "error message %s", "formatted") func (a *Assertions) NotContainsf(s interface{}, contains interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return NotContainsf(a.t, s, contains, msg, args...) 
} @@ -482,6 +806,9 @@ func (a *Assertions) NotContainsf(s interface{}, contains interface{}, msg strin // assert.Equal(t, "two", obj[1]) // } func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return NotEmpty(a.t, object, msgAndArgs...) } @@ -492,6 +819,9 @@ func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) boo // assert.Equal(t, "two", obj[1]) // } func (a *Assertions) NotEmptyf(object interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return NotEmptyf(a.t, object, msg, args...) } @@ -502,6 +832,9 @@ func (a *Assertions) NotEmptyf(object interface{}, msg string, args ...interface // Pointer variable equality is determined based on the equality of the // referenced values (as opposed to the memory addresses). func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return NotEqual(a.t, expected, actual, msgAndArgs...) } @@ -512,6 +845,9 @@ func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndAr // Pointer variable equality is determined based on the equality of the // referenced values (as opposed to the memory addresses). func (a *Assertions) NotEqualf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return NotEqualf(a.t, expected, actual, msg, args...) } @@ -519,6 +855,9 @@ func (a *Assertions) NotEqualf(expected interface{}, actual interface{}, msg str // // a.NotNil(err) func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return NotNil(a.t, object, msgAndArgs...) } @@ -526,6 +865,9 @@ func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) bool // // a.NotNilf(err, "error message %s", "formatted") func (a *Assertions) NotNilf(object interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return NotNilf(a.t, object, msg, args...) } @@ -533,6 +875,9 @@ func (a *Assertions) NotNilf(object interface{}, msg string, args ...interface{} // // a.NotPanics(func(){ RemainCalm() }) func (a *Assertions) NotPanics(f PanicTestFunc, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return NotPanics(a.t, f, msgAndArgs...) } @@ -540,6 +885,9 @@ func (a *Assertions) NotPanics(f PanicTestFunc, msgAndArgs ...interface{}) bool // // a.NotPanicsf(func(){ RemainCalm() }, "error message %s", "formatted") func (a *Assertions) NotPanicsf(f PanicTestFunc, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return NotPanicsf(a.t, f, msg, args...) } @@ -548,6 +896,9 @@ func (a *Assertions) NotPanicsf(f PanicTestFunc, msg string, args ...interface{} // a.NotRegexp(regexp.MustCompile("starts"), "it's starting") // a.NotRegexp("^start", "it's not starting") func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return NotRegexp(a.t, rx, str, msgAndArgs...) 
} @@ -556,6 +907,9 @@ func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...in // a.NotRegexpf(regexp.MustCompile("starts", "error message %s", "formatted"), "it's starting") // a.NotRegexpf("^start", "it's not starting", "error message %s", "formatted") func (a *Assertions) NotRegexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return NotRegexpf(a.t, rx, str, msg, args...) } @@ -564,6 +918,9 @@ func (a *Assertions) NotRegexpf(rx interface{}, str interface{}, msg string, arg // // a.NotSubset([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return NotSubset(a.t, list, subset, msgAndArgs...) } @@ -572,16 +929,25 @@ func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs // // a.NotSubsetf([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return NotSubsetf(a.t, list, subset, msg, args...) } // NotZero asserts that i is not the zero value for its type. func (a *Assertions) NotZero(i interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return NotZero(a.t, i, msgAndArgs...) } // NotZerof asserts that i is not the zero value for its type. func (a *Assertions) NotZerof(i interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return NotZerof(a.t, i, msg, args...) } @@ -589,6 +955,9 @@ func (a *Assertions) NotZerof(i interface{}, msg string, args ...interface{}) bo // // a.Panics(func(){ GoCrazy() }) func (a *Assertions) Panics(f PanicTestFunc, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return Panics(a.t, f, msgAndArgs...) } @@ -597,6 +966,9 @@ func (a *Assertions) Panics(f PanicTestFunc, msgAndArgs ...interface{}) bool { // // a.PanicsWithValue("crazy error", func(){ GoCrazy() }) func (a *Assertions) PanicsWithValue(expected interface{}, f PanicTestFunc, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return PanicsWithValue(a.t, expected, f, msgAndArgs...) } @@ -605,6 +977,9 @@ func (a *Assertions) PanicsWithValue(expected interface{}, f PanicTestFunc, msgA // // a.PanicsWithValuef("crazy error", func(){ GoCrazy() }, "error message %s", "formatted") func (a *Assertions) PanicsWithValuef(expected interface{}, f PanicTestFunc, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return PanicsWithValuef(a.t, expected, f, msg, args...) } @@ -612,6 +987,9 @@ func (a *Assertions) PanicsWithValuef(expected interface{}, f PanicTestFunc, msg // // a.Panicsf(func(){ GoCrazy() }, "error message %s", "formatted") func (a *Assertions) Panicsf(f PanicTestFunc, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return Panicsf(a.t, f, msg, args...) 
} @@ -620,6 +998,9 @@ func (a *Assertions) Panicsf(f PanicTestFunc, msg string, args ...interface{}) b // a.Regexp(regexp.MustCompile("start"), "it's starting") // a.Regexp("start...$", "it's not starting") func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return Regexp(a.t, rx, str, msgAndArgs...) } @@ -628,14 +1009,46 @@ func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...inter // a.Regexpf(regexp.MustCompile("start", "error message %s", "formatted"), "it's starting") // a.Regexpf("start...$", "it's not starting", "error message %s", "formatted") func (a *Assertions) Regexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return Regexpf(a.t, rx, str, msg, args...) } +// Same asserts that two pointers reference the same object. +// +// a.Same(ptr1, ptr2) +// +// Both arguments must be pointer variables. Pointer variable sameness is +// determined based on the equality of both type and value. +func (a *Assertions) Same(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Same(a.t, expected, actual, msgAndArgs...) +} + +// Samef asserts that two pointers reference the same object. +// +// a.Samef(ptr1, ptr2, "error message %s", "formatted") +// +// Both arguments must be pointer variables. Pointer variable sameness is +// determined based on the equality of both type and value. +func (a *Assertions) Samef(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Samef(a.t, expected, actual, msg, args...) +} + // Subset asserts that the specified list(array, slice...) contains all // elements given in the specified subset(array, slice...). // // a.Subset([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return Subset(a.t, list, subset, msgAndArgs...) } @@ -644,6 +1057,9 @@ func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ... // // a.Subsetf([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return Subsetf(a.t, list, subset, msg, args...) } @@ -651,6 +1067,9 @@ func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, a // // a.True(myBool) func (a *Assertions) True(value bool, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return True(a.t, value, msgAndArgs...) } @@ -658,6 +1077,9 @@ func (a *Assertions) True(value bool, msgAndArgs ...interface{}) bool { // // a.Truef(myBool, "error message %s", "formatted") func (a *Assertions) Truef(value bool, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return Truef(a.t, value, msg, args...) 
} @@ -665,6 +1087,9 @@ func (a *Assertions) Truef(value bool, msg string, args ...interface{}) bool { // // a.WithinDuration(time.Now(), time.Now(), 10*time.Second) func (a *Assertions) WithinDuration(expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return WithinDuration(a.t, expected, actual, delta, msgAndArgs...) } @@ -672,15 +1097,24 @@ func (a *Assertions) WithinDuration(expected time.Time, actual time.Time, delta // // a.WithinDurationf(time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return WithinDurationf(a.t, expected, actual, delta, msg, args...) } // Zero asserts that i is the zero value for its type. func (a *Assertions) Zero(i interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return Zero(a.t, i, msgAndArgs...) } // Zerof asserts that i is the zero value for its type. func (a *Assertions) Zerof(i interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } return Zerof(a.t, i, msg, args...) } diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl b/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl index 99f9acf..188bb9e 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl +++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl @@ -1,4 +1,5 @@ {{.CommentWithoutT "a"}} func (a *Assertions) {{.DocInfo.Name}}({{.Params}}) bool { + if h, ok := a.t.(tHelper); ok { h.Helper() } return {{.DocInfo.Name}}(a.t, {{.ForwardedParams}}) } diff --git a/vendor/github.com/stretchr/testify/assert/assertion_order.go b/vendor/github.com/stretchr/testify/assert/assertion_order.go new file mode 100644 index 0000000..15a486c --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/assertion_order.go @@ -0,0 +1,309 @@ +package assert + +import ( + "fmt" + "reflect" +) + +func compare(obj1, obj2 interface{}, kind reflect.Kind) (int, bool) { + switch kind { + case reflect.Int: + { + intobj1 := obj1.(int) + intobj2 := obj2.(int) + if intobj1 > intobj2 { + return -1, true + } + if intobj1 == intobj2 { + return 0, true + } + if intobj1 < intobj2 { + return 1, true + } + } + case reflect.Int8: + { + int8obj1 := obj1.(int8) + int8obj2 := obj2.(int8) + if int8obj1 > int8obj2 { + return -1, true + } + if int8obj1 == int8obj2 { + return 0, true + } + if int8obj1 < int8obj2 { + return 1, true + } + } + case reflect.Int16: + { + int16obj1 := obj1.(int16) + int16obj2 := obj2.(int16) + if int16obj1 > int16obj2 { + return -1, true + } + if int16obj1 == int16obj2 { + return 0, true + } + if int16obj1 < int16obj2 { + return 1, true + } + } + case reflect.Int32: + { + int32obj1 := obj1.(int32) + int32obj2 := obj2.(int32) + if int32obj1 > int32obj2 { + return -1, true + } + if int32obj1 == int32obj2 { + return 0, true + } + if int32obj1 < int32obj2 { + return 1, true + } + } + case reflect.Int64: + { + int64obj1 := obj1.(int64) + int64obj2 := obj2.(int64) + if int64obj1 > int64obj2 { + return -1, true + } + if int64obj1 == int64obj2 { + return 0, true + } + if int64obj1 < int64obj2 { + return 1, true + } + } + case reflect.Uint: + { + uintobj1 := obj1.(uint) + uintobj2 := obj2.(uint) + if uintobj1 > uintobj2 { + return -1, true + } + if 
uintobj1 == uintobj2 { + return 0, true + } + if uintobj1 < uintobj2 { + return 1, true + } + } + case reflect.Uint8: + { + uint8obj1 := obj1.(uint8) + uint8obj2 := obj2.(uint8) + if uint8obj1 > uint8obj2 { + return -1, true + } + if uint8obj1 == uint8obj2 { + return 0, true + } + if uint8obj1 < uint8obj2 { + return 1, true + } + } + case reflect.Uint16: + { + uint16obj1 := obj1.(uint16) + uint16obj2 := obj2.(uint16) + if uint16obj1 > uint16obj2 { + return -1, true + } + if uint16obj1 == uint16obj2 { + return 0, true + } + if uint16obj1 < uint16obj2 { + return 1, true + } + } + case reflect.Uint32: + { + uint32obj1 := obj1.(uint32) + uint32obj2 := obj2.(uint32) + if uint32obj1 > uint32obj2 { + return -1, true + } + if uint32obj1 == uint32obj2 { + return 0, true + } + if uint32obj1 < uint32obj2 { + return 1, true + } + } + case reflect.Uint64: + { + uint64obj1 := obj1.(uint64) + uint64obj2 := obj2.(uint64) + if uint64obj1 > uint64obj2 { + return -1, true + } + if uint64obj1 == uint64obj2 { + return 0, true + } + if uint64obj1 < uint64obj2 { + return 1, true + } + } + case reflect.Float32: + { + float32obj1 := obj1.(float32) + float32obj2 := obj2.(float32) + if float32obj1 > float32obj2 { + return -1, true + } + if float32obj1 == float32obj2 { + return 0, true + } + if float32obj1 < float32obj2 { + return 1, true + } + } + case reflect.Float64: + { + float64obj1 := obj1.(float64) + float64obj2 := obj2.(float64) + if float64obj1 > float64obj2 { + return -1, true + } + if float64obj1 == float64obj2 { + return 0, true + } + if float64obj1 < float64obj2 { + return 1, true + } + } + case reflect.String: + { + stringobj1 := obj1.(string) + stringobj2 := obj2.(string) + if stringobj1 > stringobj2 { + return -1, true + } + if stringobj1 == stringobj2 { + return 0, true + } + if stringobj1 < stringobj2 { + return 1, true + } + } + } + + return 0, false +} + +// Greater asserts that the first element is greater than the second +// +// assert.Greater(t, 2, 1) +// assert.Greater(t, float64(2), float64(1)) +// assert.Greater(t, "b", "a") +func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + e1Kind := reflect.ValueOf(e1).Kind() + e2Kind := reflect.ValueOf(e2).Kind() + if e1Kind != e2Kind { + return Fail(t, "Elements should be the same type", msgAndArgs...) + } + + res, isComparable := compare(e1, e2, e1Kind) + if !isComparable { + return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...) + } + + if res != -1 { + return Fail(t, fmt.Sprintf("\"%v\" is not greater than \"%v\"", e1, e2), msgAndArgs...) + } + + return true +} + +// GreaterOrEqual asserts that the first element is greater than or equal to the second +// +// assert.GreaterOrEqual(t, 2, 1) +// assert.GreaterOrEqual(t, 2, 2) +// assert.GreaterOrEqual(t, "b", "a") +// assert.GreaterOrEqual(t, "b", "b") +func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + e1Kind := reflect.ValueOf(e1).Kind() + e2Kind := reflect.ValueOf(e2).Kind() + if e1Kind != e2Kind { + return Fail(t, "Elements should be the same type", msgAndArgs...) + } + + res, isComparable := compare(e1, e2, e1Kind) + if !isComparable { + return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...) + } + + if res != -1 && res != 0 { + return Fail(t, fmt.Sprintf("\"%v\" is not greater than or equal to \"%v\"", e1, e2), msgAndArgs...) 
+ } + + return true +} + +// Less asserts that the first element is less than the second +// +// assert.Less(t, 1, 2) +// assert.Less(t, float64(1), float64(2)) +// assert.Less(t, "a", "b") +func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + e1Kind := reflect.ValueOf(e1).Kind() + e2Kind := reflect.ValueOf(e2).Kind() + if e1Kind != e2Kind { + return Fail(t, "Elements should be the same type", msgAndArgs...) + } + + res, isComparable := compare(e1, e2, e1Kind) + if !isComparable { + return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...) + } + + if res != 1 { + return Fail(t, fmt.Sprintf("\"%v\" is not less than \"%v\"", e1, e2), msgAndArgs...) + } + + return true +} + +// LessOrEqual asserts that the first element is less than or equal to the second +// +// assert.LessOrEqual(t, 1, 2) +// assert.LessOrEqual(t, 2, 2) +// assert.LessOrEqual(t, "a", "b") +// assert.LessOrEqual(t, "b", "b") +func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + e1Kind := reflect.ValueOf(e1).Kind() + e2Kind := reflect.ValueOf(e2).Kind() + if e1Kind != e2Kind { + return Fail(t, "Elements should be the same type", msgAndArgs...) + } + + res, isComparable := compare(e1, e2, e1Kind) + if !isComparable { + return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...) + } + + if res != 1 && res != 0 { + return Fail(t, fmt.Sprintf("\"%v\" is not less than or equal to \"%v\"", e1, e2), msgAndArgs...) + } + + return true +} diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go index 47bda77..044da8b 100644 --- a/vendor/github.com/stretchr/testify/assert/assertions.go +++ b/vendor/github.com/stretchr/testify/assert/assertions.go @@ -18,6 +18,7 @@ import ( "github.com/davecgh/go-spew/spew" "github.com/pmezard/go-difflib/difflib" + yaml "gopkg.in/yaml.v2" ) //go:generate go run ../_codegen/main.go -output-package=assert -template=assertion_format.go.tmpl @@ -27,6 +28,22 @@ type TestingT interface { Errorf(format string, args ...interface{}) } +// ComparisonAssertionFunc is a common function prototype when comparing two values. Can be useful +// for table driven tests. +type ComparisonAssertionFunc func(TestingT, interface{}, interface{}, ...interface{}) bool + +// ValueAssertionFunc is a common function prototype when validating a single value. Can be useful +// for table driven tests. +type ValueAssertionFunc func(TestingT, interface{}, ...interface{}) bool + +// BoolAssertionFunc is a common function prototype when validating a bool value. Can be useful +// for table driven tests. +type BoolAssertionFunc func(TestingT, bool, ...interface{}) bool + +// ErrorAssertionFunc is a common function prototype when validating an error value. Can be useful +// for table driven tests. +type ErrorAssertionFunc func(TestingT, error, ...interface{}) bool + // Comparison a custom function that returns true on success and false on failure type Comparison func() (success bool) @@ -38,21 +55,23 @@ type Comparison func() (success bool) // // This function does no assertion of any kind. 
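Greater, GreaterOrEqual, Less, and LessOrEqual defined above all funnel into compare, which only handles operands of the same reflect.Kind, so mixing, say, an int with an int64 fails with "Elements should be the same type" rather than being coerced. A small sketch of the new ordering assertions:

package mypkg_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestOrdering(t *testing.T) {
	assert.Greater(t, 2, 1)
	assert.GreaterOrEqual(t, 2.0, 2.0)
	assert.Less(t, "a", "b") // strings compare lexicographically
	assert.LessOrEqual(t, int64(1), int64(1))

	// Mixed kinds are rejected, not converted:
	// assert.Greater(t, int64(2), 1) would fail with "Elements should be the same type".
}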
func ObjectsAreEqual(expected, actual interface{}) bool { - if expected == nil || actual == nil { return expected == actual } - if exp, ok := expected.([]byte); ok { - act, ok := actual.([]byte) - if !ok { - return false - } else if exp == nil || act == nil { - return exp == nil && act == nil - } - return bytes.Equal(exp, act) + + exp, ok := expected.([]byte) + if !ok { + return reflect.DeepEqual(expected, actual) } - return reflect.DeepEqual(expected, actual) + act, ok := actual.([]byte) + if !ok { + return false + } + if exp == nil || act == nil { + return exp == nil && act == nil + } + return bytes.Equal(exp, act) } // ObjectsAreEqualValues gets whether two objects are equal, or if their @@ -156,27 +175,16 @@ func isTest(name, prefix string) bool { return !unicode.IsLower(rune) } -// getWhitespaceString returns a string that is long enough to overwrite the default -// output from the go testing framework. -func getWhitespaceString() string { - - _, file, line, ok := runtime.Caller(1) - if !ok { - return "" - } - parts := strings.Split(file, "/") - file = parts[len(parts)-1] - - return strings.Repeat(" ", len(fmt.Sprintf("%s:%d: ", file, line))) - -} - func messageFromMsgAndArgs(msgAndArgs ...interface{}) string { if len(msgAndArgs) == 0 || msgAndArgs == nil { return "" } if len(msgAndArgs) == 1 { - return msgAndArgs[0].(string) + msg := msgAndArgs[0] + if msgAsStr, ok := msg.(string); ok { + return msgAsStr + } + return fmt.Sprintf("%+v", msg) } if len(msgAndArgs) > 1 { return fmt.Sprintf(msgAndArgs[0].(string), msgAndArgs[1:]...) @@ -195,7 +203,7 @@ func indentMessageLines(message string, longestLabelLen int) string { // no need to align first line because it starts at the correct location (after the label) if i != 0 { // append alignLen+1 spaces to align with "{{longestLabel}}:" before adding tab - outBuf.WriteString("\n\r\t" + strings.Repeat(" ", longestLabelLen+1) + "\t") + outBuf.WriteString("\n\t" + strings.Repeat(" ", longestLabelLen+1) + "\t") } outBuf.WriteString(scanner.Text()) } @@ -209,6 +217,9 @@ type failNower interface { // FailNow fails test func FailNow(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } Fail(t, failureMessage, msgAndArgs...) // We cannot extend TestingT with FailNow() and @@ -227,8 +238,11 @@ func FailNow(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool // Fail reports a failure through func Fail(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } content := []labeledContent{ - {"Error Trace", strings.Join(CallerInfo(), "\n\r\t\t\t")}, + {"Error Trace", strings.Join(CallerInfo(), "\n\t\t\t")}, {"Error", failureMessage}, } @@ -244,7 +258,7 @@ func Fail(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool { content = append(content, labeledContent{"Messages", message}) } - t.Errorf("%s", "\r"+getWhitespaceString()+labeledOutput(content...)) + t.Errorf("\n%s", ""+labeledOutput(content...)) return false } @@ -256,7 +270,7 @@ type labeledContent struct { // labeledOutput returns a string consisting of the provided labeledContent. Each labeled output is appended in the following manner: // -// \r\t{{label}}:{{align_spaces}}\t{{content}}\n +// \t{{label}}:{{align_spaces}}\t{{content}}\n // // The initial carriage return is required to undo/erase any padding added by testing.T.Errorf. The "\t{{label}}:" is for the label. 
// If a label is shorter than the longest label provided, padding spaces are added to make all the labels match in length. Once this @@ -272,7 +286,7 @@ func labeledOutput(content ...labeledContent) string { } var output string for _, v := range content { - output += "\r\t" + v.label + ":" + strings.Repeat(" ", longestLabel-len(v.label)) + "\t" + indentMessageLines(v.content, longestLabel) + "\n" + output += "\t" + v.label + ":" + strings.Repeat(" ", longestLabel-len(v.label)) + "\t" + indentMessageLines(v.content, longestLabel) + "\n" } return output } @@ -281,6 +295,9 @@ func labeledOutput(content ...labeledContent) string { // // assert.Implements(t, (*MyInterface)(nil), new(MyObject)) func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } interfaceType := reflect.TypeOf(interfaceObject).Elem() if object == nil { @@ -295,6 +312,9 @@ func Implements(t TestingT, interfaceObject interface{}, object interface{}, msg // IsType asserts that the specified objects are of the same type. func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } if !ObjectsAreEqual(reflect.TypeOf(object), reflect.TypeOf(expectedType)) { return Fail(t, fmt.Sprintf("Object expected to be of type %v, but was %v", reflect.TypeOf(expectedType), reflect.TypeOf(object)), msgAndArgs...) @@ -311,6 +331,9 @@ func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs // referenced values (as opposed to the memory addresses). Function equality // cannot be determined and will always fail. func Equal(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } if err := validateEqualArgs(expected, actual); err != nil { return Fail(t, fmt.Sprintf("Invalid operation: %#v == %#v (%s)", expected, actual, err), msgAndArgs...) @@ -328,6 +351,37 @@ func Equal(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) } +// Same asserts that two pointers reference the same object. +// +// assert.Same(t, ptr1, ptr2) +// +// Both arguments must be pointer variables. Pointer variable sameness is +// determined based on the equality of both type and value. +func Same(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + expectedPtr, actualPtr := reflect.ValueOf(expected), reflect.ValueOf(actual) + if expectedPtr.Kind() != reflect.Ptr || actualPtr.Kind() != reflect.Ptr { + return Fail(t, "Invalid operation: both arguments must be pointers", msgAndArgs...) + } + + expectedType, actualType := reflect.TypeOf(expected), reflect.TypeOf(actual) + if expectedType != actualType { + return Fail(t, fmt.Sprintf("Pointer expected to be of type %v, but was %v", + expectedType, actualType), msgAndArgs...) + } + + if expected != actual { + return Fail(t, fmt.Sprintf("Not same: \n"+ + "expected: %p %#v\n"+ + "actual : %p %#v", expected, expected, actual, actual), msgAndArgs...) + } + + return true +} + // formatUnequalValues takes two values of arbitrary types and returns string // representations appropriate to be presented to the user. 
// @@ -349,6 +403,9 @@ func formatUnequalValues(expected, actual interface{}) (e string, a string) { // // assert.EqualValues(t, uint32(123), int32(123)) func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } if !ObjectsAreEqualValues(expected, actual) { diff := diff(expected, actual) @@ -366,12 +423,15 @@ func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interfa // // assert.Exactly(t, int32(123), int64(123)) func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } aType := reflect.TypeOf(expected) bType := reflect.TypeOf(actual) if aType != bType { - return Fail(t, fmt.Sprintf("Types expected to match exactly\n\r\t%v != %v", aType, bType), msgAndArgs...) + return Fail(t, fmt.Sprintf("Types expected to match exactly\n\t%v != %v", aType, bType), msgAndArgs...) } return Equal(t, expected, actual, msgAndArgs...) @@ -382,12 +442,26 @@ func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{} // // assert.NotNil(t, err) func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } if !isNil(object) { return true } return Fail(t, "Expected value not to be nil.", msgAndArgs...) } +// containsKind checks if a specified kind in the slice of kinds. +func containsKind(kinds []reflect.Kind, kind reflect.Kind) bool { + for i := 0; i < len(kinds); i++ { + if kind == kinds[i] { + return true + } + } + + return false +} + // isNil checks if a specified object is nil or not, without Failing. func isNil(object interface{}) bool { if object == nil { @@ -396,7 +470,14 @@ func isNil(object interface{}) bool { value := reflect.ValueOf(object) kind := value.Kind() - if kind >= reflect.Chan && kind <= reflect.Slice && value.IsNil() { + isNilableKind := containsKind( + []reflect.Kind{ + reflect.Chan, reflect.Func, + reflect.Interface, reflect.Map, + reflect.Ptr, reflect.Slice}, + kind) + + if isNilableKind && value.IsNil() { return true } @@ -407,6 +488,9 @@ func isNil(object interface{}) bool { // // assert.Nil(t, err) func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } if isNil(object) { return true } @@ -427,14 +511,14 @@ func isEmpty(object interface{}) bool { // collection types are empty when they have no element case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice: return objValue.Len() == 0 - // pointers are empty if nil or if the value they point to is empty + // pointers are empty if nil or if the value they point to is empty case reflect.Ptr: if objValue.IsNil() { return true } deref := objValue.Elem().Interface() return isEmpty(deref) - // for all other types, compare against the zero value + // for all other types, compare against the zero value default: zero := reflect.Zero(objValue.Type()) return reflect.DeepEqual(object, zero.Interface()) @@ -446,6 +530,9 @@ func isEmpty(object interface{}) bool { // // assert.Empty(t, obj) func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } pass := isEmpty(object) if !pass { @@ -463,6 +550,9 @@ func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { // assert.Equal(t, "two", obj[1]) // } func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } 
pass := !isEmpty(object) if !pass { @@ -490,6 +580,9 @@ func getLen(x interface{}) (ok bool, length int) { // // assert.Len(t, mySlice, 3) func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } ok, l := getLen(object) if !ok { return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", object), msgAndArgs...) @@ -505,6 +598,14 @@ func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) // // assert.True(t, myBool) func True(t TestingT, value bool, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if h, ok := t.(interface { + Helper() + }); ok { + h.Helper() + } if value != true { return Fail(t, "Should be true", msgAndArgs...) @@ -518,6 +619,9 @@ func True(t TestingT, value bool, msgAndArgs ...interface{}) bool { // // assert.False(t, myBool) func False(t TestingT, value bool, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } if value != false { return Fail(t, "Should be false", msgAndArgs...) @@ -534,6 +638,9 @@ func False(t TestingT, value bool, msgAndArgs ...interface{}) bool { // Pointer variable equality is determined based on the equality of the // referenced values (as opposed to the memory addresses). func NotEqual(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } if err := validateEqualArgs(expected, actual); err != nil { return Fail(t, fmt.Sprintf("Invalid operation: %#v != %#v (%s)", expected, actual, err), msgAndArgs...) @@ -554,7 +661,7 @@ func NotEqual(t TestingT, expected, actual interface{}, msgAndArgs ...interface{ func includeElement(list interface{}, element interface{}) (ok, found bool) { listValue := reflect.ValueOf(list) - elementValue := reflect.ValueOf(element) + listKind := reflect.TypeOf(list).Kind() defer func() { if e := recover(); e != nil { ok = false @@ -562,11 +669,12 @@ func includeElement(list interface{}, element interface{}) (ok, found bool) { } }() - if reflect.TypeOf(list).Kind() == reflect.String { + if listKind == reflect.String { + elementValue := reflect.ValueOf(element) return true, strings.Contains(listValue.String(), elementValue.String()) } - if reflect.TypeOf(list).Kind() == reflect.Map { + if listKind == reflect.Map { mapKeys := listValue.MapKeys() for i := 0; i < len(mapKeys); i++ { if ObjectsAreEqual(mapKeys[i].Interface(), element) { @@ -592,6 +700,9 @@ func includeElement(list interface{}, element interface{}) (ok, found bool) { // assert.Contains(t, ["Hello", "World"], "World") // assert.Contains(t, {"Hello": "World"}, "Hello") func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } ok, found := includeElement(s, contains) if !ok { @@ -612,6 +723,9 @@ func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bo // assert.NotContains(t, ["Hello", "World"], "Earth") // assert.NotContains(t, {"Hello": "World"}, "Earth") func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } ok, found := includeElement(s, contains) if !ok { @@ -630,6 +744,9 @@ func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) // // assert.Subset(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { + if h, ok := 
t.(tHelper); ok { + h.Helper() + } if subset == nil { return true // we consider nil to be equal to the nil set } @@ -671,6 +788,9 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok // // assert.NotSubset(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { + if h, ok := t.(tHelper); ok { + h.Helper() + } if subset == nil { return Fail(t, fmt.Sprintf("nil is the empty set which is a subset of every set"), msgAndArgs...) } @@ -713,6 +833,9 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) // // assert.ElementsMatch(t, [1, 3, 2, 3], [1, 3, 3, 2]) func ElementsMatch(t TestingT, listA, listB interface{}, msgAndArgs ...interface{}) (ok bool) { + if h, ok := t.(tHelper); ok { + h.Helper() + } if isEmpty(listA) && isEmpty(listB) { return true } @@ -763,6 +886,9 @@ func ElementsMatch(t TestingT, listA, listB interface{}, msgAndArgs ...interface // Condition uses a Comparison to assert a complex condition. func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } result := comp() if !result { Fail(t, "Condition failed!", msgAndArgs...) @@ -800,9 +926,12 @@ func didPanic(f PanicTestFunc) (bool, interface{}) { // // assert.Panics(t, func(){ GoCrazy() }) func Panics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } if funcDidPanic, panicValue := didPanic(f); !funcDidPanic { - return Fail(t, fmt.Sprintf("func %#v should panic\n\r\tPanic value:\t%v", f, panicValue), msgAndArgs...) + return Fail(t, fmt.Sprintf("func %#v should panic\n\tPanic value:\t%#v", f, panicValue), msgAndArgs...) } return true @@ -813,13 +942,16 @@ func Panics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { // // assert.PanicsWithValue(t, "crazy error", func(){ GoCrazy() }) func PanicsWithValue(t TestingT, expected interface{}, f PanicTestFunc, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } funcDidPanic, panicValue := didPanic(f) if !funcDidPanic { - return Fail(t, fmt.Sprintf("func %#v should panic\n\r\tPanic value:\t%v", f, panicValue), msgAndArgs...) + return Fail(t, fmt.Sprintf("func %#v should panic\n\tPanic value:\t%#v", f, panicValue), msgAndArgs...) } if panicValue != expected { - return Fail(t, fmt.Sprintf("func %#v should panic with value:\t%v\n\r\tPanic value:\t%v", f, expected, panicValue), msgAndArgs...) + return Fail(t, fmt.Sprintf("func %#v should panic with value:\t%#v\n\tPanic value:\t%#v", f, expected, panicValue), msgAndArgs...) } return true @@ -829,9 +961,12 @@ func PanicsWithValue(t TestingT, expected interface{}, f PanicTestFunc, msgAndAr // // assert.NotPanics(t, func(){ RemainCalm() }) func NotPanics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } if funcDidPanic, panicValue := didPanic(f); funcDidPanic { - return Fail(t, fmt.Sprintf("func %#v should not panic\n\r\tPanic value:\t%v", f, panicValue), msgAndArgs...) + return Fail(t, fmt.Sprintf("func %#v should not panic\n\tPanic value:\t%v", f, panicValue), msgAndArgs...) 
} return true @@ -841,6 +976,9 @@ func NotPanics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { // // assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second) func WithinDuration(t TestingT, expected, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } dt := expected.Sub(actual) if dt < -delta || dt > delta { @@ -890,6 +1028,9 @@ func toFloat(x interface{}) (float64, bool) { // // assert.InDelta(t, math.Pi, (22 / 7.0), 0.01) func InDelta(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } af, aok := toFloat(expected) bf, bok := toFloat(actual) @@ -916,6 +1057,9 @@ func InDelta(t TestingT, expected, actual interface{}, delta float64, msgAndArgs // InDeltaSlice is the same as InDelta, except it compares two slices. func InDeltaSlice(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } if expected == nil || actual == nil || reflect.TypeOf(actual).Kind() != reflect.Slice || reflect.TypeOf(expected).Kind() != reflect.Slice { @@ -937,6 +1081,9 @@ func InDeltaSlice(t TestingT, expected, actual interface{}, delta float64, msgAn // InDeltaMapValues is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys. func InDeltaMapValues(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } if expected == nil || actual == nil || reflect.TypeOf(actual).Kind() != reflect.Map || reflect.TypeOf(expected).Kind() != reflect.Map { @@ -994,6 +1141,9 @@ func calcRelativeError(expected, actual interface{}) (float64, error) { // InEpsilon asserts that expected and actual have a relative error less than epsilon func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } actualEpsilon, err := calcRelativeError(expected, actual) if err != nil { return Fail(t, err.Error(), msgAndArgs...) @@ -1008,6 +1158,9 @@ func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAnd // InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices. func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } if expected == nil || actual == nil || reflect.TypeOf(actual).Kind() != reflect.Slice || reflect.TypeOf(expected).Kind() != reflect.Slice { @@ -1038,6 +1191,9 @@ func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, m // assert.Equal(t, expectedObj, actualObj) // } func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } if err != nil { return Fail(t, fmt.Sprintf("Received unexpected error:\n%+v", err), msgAndArgs...) } @@ -1052,6 +1208,9 @@ func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool { // assert.Equal(t, expectedError, err) // } func Error(t TestingT, err error, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } if err == nil { return Fail(t, "An error is expected but got nil.", msgAndArgs...) 
@@ -1066,6 +1225,9 @@ func Error(t TestingT, err error, msgAndArgs ...interface{}) bool { // actualObj, err := SomeFunction() // assert.EqualError(t, err, expectedErrorString) func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } if !Error(t, theError, msgAndArgs...) { return false } @@ -1099,6 +1261,9 @@ func matchRegexp(rx interface{}, str interface{}) bool { // assert.Regexp(t, regexp.MustCompile("start"), "it's starting") // assert.Regexp(t, "start...$", "it's not starting") func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } match := matchRegexp(rx, str) @@ -1114,6 +1279,9 @@ func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface // assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting") // assert.NotRegexp(t, "^start", "it's not starting") func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } match := matchRegexp(rx, str) if match { @@ -1126,6 +1294,9 @@ func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interf // Zero asserts that i is the zero value for its type. func Zero(t TestingT, i interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } if i != nil && !reflect.DeepEqual(i, reflect.Zero(reflect.TypeOf(i)).Interface()) { return Fail(t, fmt.Sprintf("Should be zero, but was %v", i), msgAndArgs...) } @@ -1134,6 +1305,9 @@ func Zero(t TestingT, i interface{}, msgAndArgs ...interface{}) bool { // NotZero asserts that i is not the zero value for its type. func NotZero(t TestingT, i interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } if i == nil || reflect.DeepEqual(i, reflect.Zero(reflect.TypeOf(i)).Interface()) { return Fail(t, fmt.Sprintf("Should not be zero, but was %v", i), msgAndArgs...) } @@ -1142,6 +1316,9 @@ func NotZero(t TestingT, i interface{}, msgAndArgs ...interface{}) bool { // FileExists checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file. func FileExists(t TestingT, path string, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } info, err := os.Lstat(path) if err != nil { if os.IsNotExist(err) { @@ -1157,6 +1334,9 @@ func FileExists(t TestingT, path string, msgAndArgs ...interface{}) bool { // DirExists checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists. 
func DirExists(t TestingT, path string, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } info, err := os.Lstat(path) if err != nil { if os.IsNotExist(err) { @@ -1174,6 +1354,9 @@ func DirExists(t TestingT, path string, msgAndArgs ...interface{}) bool { // // assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } var expectedJSONAsInterface, actualJSONAsInterface interface{} if err := json.Unmarshal([]byte(expected), &expectedJSONAsInterface); err != nil { @@ -1187,6 +1370,24 @@ func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{ return Equal(t, expectedJSONAsInterface, actualJSONAsInterface, msgAndArgs...) } +// YAMLEq asserts that two YAML strings are equivalent. +func YAMLEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + var expectedYAMLAsInterface, actualYAMLAsInterface interface{} + + if err := yaml.Unmarshal([]byte(expected), &expectedYAMLAsInterface); err != nil { + return Fail(t, fmt.Sprintf("Expected value ('%s') is not valid yaml.\nYAML parsing error: '%s'", expected, err.Error()), msgAndArgs...) + } + + if err := yaml.Unmarshal([]byte(actual), &actualYAMLAsInterface); err != nil { + return Fail(t, fmt.Sprintf("Input ('%s') needs to be valid yaml.\nYAML error: '%s'", actual, err.Error()), msgAndArgs...) + } + + return Equal(t, expectedYAMLAsInterface, actualYAMLAsInterface, msgAndArgs...) +} + func typeAndKind(v interface{}) (reflect.Type, reflect.Kind) { t := reflect.TypeOf(v) k := t.Kind() @@ -1199,7 +1400,7 @@ func typeAndKind(v interface{}) (reflect.Type, reflect.Kind) { } // diff returns a diff of both values as long as both are of the same type and -// are a struct, map, slice or array. Otherwise it returns an empty string. +// are a struct, map, slice, array or string. Otherwise it returns an empty string. func diff(expected interface{}, actual interface{}) string { if expected == nil || actual == nil { return "" @@ -1212,12 +1413,18 @@ func diff(expected interface{}, actual interface{}) string { return "" } - if ek != reflect.Struct && ek != reflect.Map && ek != reflect.Slice && ek != reflect.Array { + if ek != reflect.Struct && ek != reflect.Map && ek != reflect.Slice && ek != reflect.Array && ek != reflect.String { return "" } - e := spewConfig.Sdump(expected) - a := spewConfig.Sdump(actual) + var e, a string + if et != reflect.TypeOf("") { + e = spewConfig.Sdump(expected) + a = spewConfig.Sdump(actual) + } else { + e = reflect.ValueOf(expected).String() + a = reflect.ValueOf(actual).String() + } diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{ A: difflib.SplitLines(e), @@ -1254,3 +1461,38 @@ var spewConfig = spew.ConfigState{ DisableCapacities: true, SortKeys: true, } + +type tHelper interface { + Helper() +} + +// Eventually asserts that given condition will be met in waitFor time, +// periodically checking target function each tick. 
+// +// assert.Eventually(t, func() bool { return true; }, time.Second, 10*time.Millisecond) +func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + timer := time.NewTimer(waitFor) + ticker := time.NewTicker(tick) + checkPassed := make(chan bool) + defer timer.Stop() + defer ticker.Stop() + defer close(checkPassed) + for { + select { + case <-timer.C: + return Fail(t, "Condition never satisfied", msgAndArgs...) + case result := <-checkPassed: + if result { + return true + } + case <-ticker.C: + go func() { + checkPassed <- condition() + }() + } + } +} diff --git a/vendor/github.com/stretchr/testify/assert/http_assertions.go b/vendor/github.com/stretchr/testify/assert/http_assertions.go index 3101e78..df46fa7 100644 --- a/vendor/github.com/stretchr/testify/assert/http_assertions.go +++ b/vendor/github.com/stretchr/testify/assert/http_assertions.go @@ -12,10 +12,11 @@ import ( // an error if building a new request fails. func httpCode(handler http.HandlerFunc, method, url string, values url.Values) (int, error) { w := httptest.NewRecorder() - req, err := http.NewRequest(method, url+"?"+values.Encode(), nil) + req, err := http.NewRequest(method, url, nil) if err != nil { return -1, err } + req.URL.RawQuery = values.Encode() handler(w, req) return w.Code, nil } @@ -26,6 +27,9 @@ func httpCode(handler http.HandlerFunc, method, url string, values url.Values) ( // // Returns whether the assertion was successful (true) or not (false). func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } code, err := httpCode(handler, method, url, values) if err != nil { Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) @@ -46,6 +50,9 @@ func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, value // // Returns whether the assertion was successful (true) or not (false). func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } code, err := httpCode(handler, method, url, values) if err != nil { Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) @@ -66,6 +73,9 @@ func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, valu // // Returns whether the assertion was successful (true) or not (false). func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } code, err := httpCode(handler, method, url, values) if err != nil { Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) @@ -95,10 +105,13 @@ func HTTPBody(handler http.HandlerFunc, method, url string, values url.Values) s // HTTPBodyContains asserts that a specified handler returns a // body that contains a string. // -// assert.HTTPBodyContains(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky") +// assert.HTTPBodyContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") // // Returns whether the assertion was successful (true) or not (false). 
func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } body := HTTPBody(handler, method, url, values) contains := strings.Contains(body, fmt.Sprint(str)) @@ -112,10 +125,13 @@ func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, // HTTPBodyNotContains asserts that a specified handler returns a // body that does not contain a string. // -// assert.HTTPBodyNotContains(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky") +// assert.HTTPBodyNotContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") // // Returns whether the assertion was successful (true) or not (false). func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } body := HTTPBody(handler, method, url, values) contains := strings.Contains(body, fmt.Sprint(str)) diff --git a/vendor/github.com/stretchr/testify/require/require.go b/vendor/github.com/stretchr/testify/require/require.go index ac3c308..c5903f5 100644 --- a/vendor/github.com/stretchr/testify/require/require.go +++ b/vendor/github.com/stretchr/testify/require/require.go @@ -14,16 +14,24 @@ import ( // Condition uses a Comparison to assert a complex condition. func Condition(t TestingT, comp assert.Comparison, msgAndArgs ...interface{}) { - if !assert.Condition(t, comp, msgAndArgs...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() } + if assert.Condition(t, comp, msgAndArgs...) { + return + } + t.FailNow() } // Conditionf uses a Comparison to assert a complex condition. func Conditionf(t TestingT, comp assert.Comparison, msg string, args ...interface{}) { - if !assert.Conditionf(t, comp, msg, args...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.Conditionf(t, comp, msg, args...) { + return } + t.FailNow() } // Contains asserts that the specified string, list(array, slice...) or map contains the @@ -33,9 +41,13 @@ func Conditionf(t TestingT, comp assert.Comparison, msg string, args ...interfac // assert.Contains(t, ["Hello", "World"], "World") // assert.Contains(t, {"Hello": "World"}, "Hello") func Contains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...interface{}) { - if !assert.Contains(t, s, contains, msgAndArgs...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.Contains(t, s, contains, msgAndArgs...) { + return } + t.FailNow() } // Containsf asserts that the specified string, list(array, slice...) or map contains the @@ -45,23 +57,35 @@ func Contains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...int // assert.Containsf(t, ["Hello", "World"], "World", "error message %s", "formatted") // assert.Containsf(t, {"Hello": "World"}, "Hello", "error message %s", "formatted") func Containsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) { - if !assert.Containsf(t, s, contains, msg, args...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() } + if assert.Containsf(t, s, contains, msg, args...) { + return + } + t.FailNow() } // DirExists checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists. 
func DirExists(t TestingT, path string, msgAndArgs ...interface{}) { - if !assert.DirExists(t, path, msgAndArgs...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.DirExists(t, path, msgAndArgs...) { + return } + t.FailNow() } // DirExistsf checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists. func DirExistsf(t TestingT, path string, msg string, args ...interface{}) { - if !assert.DirExistsf(t, path, msg, args...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() } + if assert.DirExistsf(t, path, msg, args...) { + return + } + t.FailNow() } // ElementsMatch asserts that the specified listA(array, slice...) is equal to specified @@ -70,9 +94,13 @@ func DirExistsf(t TestingT, path string, msg string, args ...interface{}) { // // assert.ElementsMatch(t, [1, 3, 2, 3], [1, 3, 3, 2]) func ElementsMatch(t TestingT, listA interface{}, listB interface{}, msgAndArgs ...interface{}) { - if !assert.ElementsMatch(t, listA, listB, msgAndArgs...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.ElementsMatch(t, listA, listB, msgAndArgs...) { + return } + t.FailNow() } // ElementsMatchf asserts that the specified listA(array, slice...) is equal to specified @@ -81,9 +109,13 @@ func ElementsMatch(t TestingT, listA interface{}, listB interface{}, msgAndArgs // // assert.ElementsMatchf(t, [1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted") func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string, args ...interface{}) { - if !assert.ElementsMatchf(t, listA, listB, msg, args...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.ElementsMatchf(t, listA, listB, msg, args...) { + return } + t.FailNow() } // Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either @@ -91,9 +123,13 @@ func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string // // assert.Empty(t, obj) func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) { - if !assert.Empty(t, object, msgAndArgs...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() } + if assert.Empty(t, object, msgAndArgs...) { + return + } + t.FailNow() } // Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either @@ -101,9 +137,13 @@ func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) { // // assert.Emptyf(t, obj, "error message %s", "formatted") func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) { - if !assert.Emptyf(t, object, msg, args...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.Emptyf(t, object, msg, args...) { + return } + t.FailNow() } // Equal asserts that two objects are equal. @@ -114,9 +154,13 @@ func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) { // referenced values (as opposed to the memory addresses). Function equality // cannot be determined and will always fail. func Equal(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { - if !assert.Equal(t, expected, actual, msgAndArgs...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() } + if assert.Equal(t, expected, actual, msgAndArgs...) { + return + } + t.FailNow() } // EqualError asserts that a function returned an error (i.e. 
not `nil`) @@ -125,9 +169,13 @@ func Equal(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...i // actualObj, err := SomeFunction() // assert.EqualError(t, err, expectedErrorString) func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) { - if !assert.EqualError(t, theError, errString, msgAndArgs...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.EqualError(t, theError, errString, msgAndArgs...) { + return } + t.FailNow() } // EqualErrorf asserts that a function returned an error (i.e. not `nil`) @@ -136,9 +184,13 @@ func EqualError(t TestingT, theError error, errString string, msgAndArgs ...inte // actualObj, err := SomeFunction() // assert.EqualErrorf(t, err, expectedErrorString, "error message %s", "formatted") func EqualErrorf(t TestingT, theError error, errString string, msg string, args ...interface{}) { - if !assert.EqualErrorf(t, theError, errString, msg, args...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.EqualErrorf(t, theError, errString, msg, args...) { + return } + t.FailNow() } // EqualValues asserts that two objects are equal or convertable to the same types @@ -146,9 +198,13 @@ func EqualErrorf(t TestingT, theError error, errString string, msg string, args // // assert.EqualValues(t, uint32(123), int32(123)) func EqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { - if !assert.EqualValues(t, expected, actual, msgAndArgs...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() } + if assert.EqualValues(t, expected, actual, msgAndArgs...) { + return + } + t.FailNow() } // EqualValuesf asserts that two objects are equal or convertable to the same types @@ -156,9 +212,13 @@ func EqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArg // // assert.EqualValuesf(t, uint32(123, "error message %s", "formatted"), int32(123)) func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { - if !assert.EqualValuesf(t, expected, actual, msg, args...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.EqualValuesf(t, expected, actual, msg, args...) { + return } + t.FailNow() } // Equalf asserts that two objects are equal. @@ -169,9 +229,13 @@ func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg stri // referenced values (as opposed to the memory addresses). Function equality // cannot be determined and will always fail. func Equalf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { - if !assert.Equalf(t, expected, actual, msg, args...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() } + if assert.Equalf(t, expected, actual, msg, args...) { + return + } + t.FailNow() } // Error asserts that a function returned an error (i.e. not `nil`). @@ -181,9 +245,13 @@ func Equalf(t TestingT, expected interface{}, actual interface{}, msg string, ar // assert.Equal(t, expectedError, err) // } func Error(t TestingT, err error, msgAndArgs ...interface{}) { - if !assert.Error(t, err, msgAndArgs...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.Error(t, err, msgAndArgs...) { + return } + t.FailNow() } // Errorf asserts that a function returned an error (i.e. not `nil`). 
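A minimal usage sketch for the ordering helpers vendored in above (hypothetical test file and test name; the assertions mirror the doc-comment examples in this patch). The require variants report through the assert package and then abort the test via t.FailNow(), while the tHelper check keeps the reported failure location in the caller's file:

package example_test // hypothetical file, for illustration only

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestOrderingHelpers(t *testing.T) {
	// Both arguments must have the same reflect.Kind.
	require.Greater(t, 2, 1)
	require.GreaterOrEqual(t, 2, 2)
	require.Less(t, "a", "b")
	require.LessOrEqual(t, "b", "b")

	// Mixing kinds fails with "Elements should be the same type",
	// and the require form then calls t.FailNow():
	// require.Greater(t, int64(2), 1)
}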
@@ -193,135 +261,285 @@ func Error(t TestingT, err error, msgAndArgs ...interface{}) { // assert.Equal(t, expectedErrorf, err) // } func Errorf(t TestingT, err error, msg string, args ...interface{}) { - if !assert.Errorf(t, err, msg, args...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.Errorf(t, err, msg, args...) { + return + } + t.FailNow() +} + +// Eventually asserts that given condition will be met in waitFor time, +// periodically checking target function each tick. +// +// assert.Eventually(t, func() bool { return true; }, time.Second, 10*time.Millisecond) +func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) { + if assert.Eventually(t, condition, waitFor, tick, msgAndArgs...) { + return + } + if h, ok := t.(tHelper); ok { + h.Helper() + } + t.FailNow() +} + +// Eventuallyf asserts that given condition will be met in waitFor time, +// periodically checking target function each tick. +// +// assert.Eventuallyf(t, func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") +func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) { + if assert.Eventuallyf(t, condition, waitFor, tick, msg, args...) { + return + } + if h, ok := t.(tHelper); ok { + h.Helper() } + t.FailNow() } // Exactly asserts that two objects are equal in value and type. // // assert.Exactly(t, int32(123), int64(123)) func Exactly(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { - if !assert.Exactly(t, expected, actual, msgAndArgs...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() } + if assert.Exactly(t, expected, actual, msgAndArgs...) { + return + } + t.FailNow() } // Exactlyf asserts that two objects are equal in value and type. // // assert.Exactlyf(t, int32(123, "error message %s", "formatted"), int64(123)) func Exactlyf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { - if !assert.Exactlyf(t, expected, actual, msg, args...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.Exactlyf(t, expected, actual, msg, args...) { + return } + t.FailNow() } // Fail reports a failure through func Fail(t TestingT, failureMessage string, msgAndArgs ...interface{}) { - if !assert.Fail(t, failureMessage, msgAndArgs...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() } + if assert.Fail(t, failureMessage, msgAndArgs...) { + return + } + t.FailNow() } // FailNow fails test func FailNow(t TestingT, failureMessage string, msgAndArgs ...interface{}) { - if !assert.FailNow(t, failureMessage, msgAndArgs...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.FailNow(t, failureMessage, msgAndArgs...) { + return } + t.FailNow() } // FailNowf fails test func FailNowf(t TestingT, failureMessage string, msg string, args ...interface{}) { - if !assert.FailNowf(t, failureMessage, msg, args...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.FailNowf(t, failureMessage, msg, args...) { + return } + t.FailNow() } // Failf reports a failure through func Failf(t TestingT, failureMessage string, msg string, args ...interface{}) { - if !assert.Failf(t, failureMessage, msg, args...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() } + if assert.Failf(t, failureMessage, msg, args...) 
{ + return + } + t.FailNow() } // False asserts that the specified value is false. // // assert.False(t, myBool) func False(t TestingT, value bool, msgAndArgs ...interface{}) { - if !assert.False(t, value, msgAndArgs...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.False(t, value, msgAndArgs...) { + return } + t.FailNow() } // Falsef asserts that the specified value is false. // // assert.Falsef(t, myBool, "error message %s", "formatted") func Falsef(t TestingT, value bool, msg string, args ...interface{}) { - if !assert.Falsef(t, value, msg, args...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() } + if assert.Falsef(t, value, msg, args...) { + return + } + t.FailNow() } // FileExists checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file. func FileExists(t TestingT, path string, msgAndArgs ...interface{}) { - if !assert.FileExists(t, path, msgAndArgs...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.FileExists(t, path, msgAndArgs...) { + return } + t.FailNow() } // FileExistsf checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file. func FileExistsf(t TestingT, path string, msg string, args ...interface{}) { - if !assert.FileExistsf(t, path, msg, args...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.FileExistsf(t, path, msg, args...) { + return + } + t.FailNow() +} + +// Greater asserts that the first element is greater than the second +// +// assert.Greater(t, 2, 1) +// assert.Greater(t, float64(2), float64(1)) +// assert.Greater(t, "b", "a") +func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.Greater(t, e1, e2, msgAndArgs...) { + return + } + t.FailNow() +} + +// GreaterOrEqual asserts that the first element is greater than or equal to the second +// +// assert.GreaterOrEqual(t, 2, 1) +// assert.GreaterOrEqual(t, 2, 2) +// assert.GreaterOrEqual(t, "b", "a") +// assert.GreaterOrEqual(t, "b", "b") +func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.GreaterOrEqual(t, e1, e2, msgAndArgs...) { + return + } + t.FailNow() +} + +// GreaterOrEqualf asserts that the first element is greater than or equal to the second +// +// assert.GreaterOrEqualf(t, 2, 1, "error message %s", "formatted") +// assert.GreaterOrEqualf(t, 2, 2, "error message %s", "formatted") +// assert.GreaterOrEqualf(t, "b", "a", "error message %s", "formatted") +// assert.GreaterOrEqualf(t, "b", "b", "error message %s", "formatted") +func GreaterOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.GreaterOrEqualf(t, e1, e2, msg, args...) { + return + } + t.FailNow() +} + +// Greaterf asserts that the first element is greater than the second +// +// assert.Greaterf(t, 2, 1, "error message %s", "formatted") +// assert.Greaterf(t, float64(2, "error message %s", "formatted"), float64(1)) +// assert.Greaterf(t, "b", "a", "error message %s", "formatted") +func Greaterf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.Greaterf(t, e1, e2, msg, args...) 
{ + return } + t.FailNow() } // HTTPBodyContains asserts that a specified handler returns a // body that contains a string. // -// assert.HTTPBodyContains(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky") +// assert.HTTPBodyContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") // // Returns whether the assertion was successful (true) or not (false). func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) { - if !assert.HTTPBodyContains(t, handler, method, url, values, str, msgAndArgs...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() } + if assert.HTTPBodyContains(t, handler, method, url, values, str, msgAndArgs...) { + return + } + t.FailNow() } // HTTPBodyContainsf asserts that a specified handler returns a // body that contains a string. // -// assert.HTTPBodyContainsf(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") +// assert.HTTPBodyContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") // // Returns whether the assertion was successful (true) or not (false). func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) { - if !assert.HTTPBodyContainsf(t, handler, method, url, values, str, msg, args...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.HTTPBodyContainsf(t, handler, method, url, values, str, msg, args...) { + return } + t.FailNow() } // HTTPBodyNotContains asserts that a specified handler returns a // body that does not contain a string. // -// assert.HTTPBodyNotContains(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky") +// assert.HTTPBodyNotContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") // // Returns whether the assertion was successful (true) or not (false). func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) { - if !assert.HTTPBodyNotContains(t, handler, method, url, values, str, msgAndArgs...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() } + if assert.HTTPBodyNotContains(t, handler, method, url, values, str, msgAndArgs...) { + return + } + t.FailNow() } // HTTPBodyNotContainsf asserts that a specified handler returns a // body that does not contain a string. // -// assert.HTTPBodyNotContainsf(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") +// assert.HTTPBodyNotContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") // // Returns whether the assertion was successful (true) or not (false). func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) { - if !assert.HTTPBodyNotContainsf(t, handler, method, url, values, str, msg, args...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.HTTPBodyNotContainsf(t, handler, method, url, values, str, msg, args...) { + return } + t.FailNow() } // HTTPError asserts that a specified handler returns an error status code. @@ -330,9 +548,13 @@ func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, u // // Returns whether the assertion was successful (true) or not (false). 
func HTTPError(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) { - if !assert.HTTPError(t, handler, method, url, values, msgAndArgs...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.HTTPError(t, handler, method, url, values, msgAndArgs...) { + return } + t.FailNow() } // HTTPErrorf asserts that a specified handler returns an error status code. @@ -341,9 +563,13 @@ func HTTPError(t TestingT, handler http.HandlerFunc, method string, url string, // // Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { - if !assert.HTTPErrorf(t, handler, method, url, values, msg, args...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() } + if assert.HTTPErrorf(t, handler, method, url, values, msg, args...) { + return + } + t.FailNow() } // HTTPRedirect asserts that a specified handler returns a redirect status code. @@ -352,9 +578,13 @@ func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, // // Returns whether the assertion was successful (true) or not (false). func HTTPRedirect(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) { - if !assert.HTTPRedirect(t, handler, method, url, values, msgAndArgs...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.HTTPRedirect(t, handler, method, url, values, msgAndArgs...) { + return } + t.FailNow() } // HTTPRedirectf asserts that a specified handler returns a redirect status code. @@ -363,9 +593,13 @@ func HTTPRedirect(t TestingT, handler http.HandlerFunc, method string, url strin // // Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { - if !assert.HTTPRedirectf(t, handler, method, url, values, msg, args...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() } + if assert.HTTPRedirectf(t, handler, method, url, values, msg, args...) { + return + } + t.FailNow() } // HTTPSuccess asserts that a specified handler returns a success status code. @@ -374,9 +608,13 @@ func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url stri // // Returns whether the assertion was successful (true) or not (false). func HTTPSuccess(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) { - if !assert.HTTPSuccess(t, handler, method, url, values, msgAndArgs...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.HTTPSuccess(t, handler, method, url, values, msgAndArgs...) { + return } + t.FailNow() } // HTTPSuccessf asserts that a specified handler returns a success status code. @@ -385,133 +623,223 @@ func HTTPSuccess(t TestingT, handler http.HandlerFunc, method string, url string // // Returns whether the assertion was successful (true) or not (false). func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { - if !assert.HTTPSuccessf(t, handler, method, url, values, msg, args...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.HTTPSuccessf(t, handler, method, url, values, msg, args...) 
{ + return } + t.FailNow() } // Implements asserts that an object is implemented by the specified interface. // // assert.Implements(t, (*MyInterface)(nil), new(MyObject)) func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) { - if !assert.Implements(t, interfaceObject, object, msgAndArgs...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() } + if assert.Implements(t, interfaceObject, object, msgAndArgs...) { + return + } + t.FailNow() } // Implementsf asserts that an object is implemented by the specified interface. // // assert.Implementsf(t, (*MyInterface, "error message %s", "formatted")(nil), new(MyObject)) func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) { - if !assert.Implementsf(t, interfaceObject, object, msg, args...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.Implementsf(t, interfaceObject, object, msg, args...) { + return } + t.FailNow() } // InDelta asserts that the two numerals are within delta of each other. // // assert.InDelta(t, math.Pi, (22 / 7.0), 0.01) func InDelta(t TestingT, expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) { - if !assert.InDelta(t, expected, actual, delta, msgAndArgs...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() } + if assert.InDelta(t, expected, actual, delta, msgAndArgs...) { + return + } + t.FailNow() } // InDeltaMapValues is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys. func InDeltaMapValues(t TestingT, expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) { - if !assert.InDeltaMapValues(t, expected, actual, delta, msgAndArgs...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.InDeltaMapValues(t, expected, actual, delta, msgAndArgs...) { + return } + t.FailNow() } // InDeltaMapValuesf is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys. func InDeltaMapValuesf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) { - if !assert.InDeltaMapValuesf(t, expected, actual, delta, msg, args...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.InDeltaMapValuesf(t, expected, actual, delta, msg, args...) { + return } + t.FailNow() } // InDeltaSlice is the same as InDelta, except it compares two slices. func InDeltaSlice(t TestingT, expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) { - if !assert.InDeltaSlice(t, expected, actual, delta, msgAndArgs...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() } + if assert.InDeltaSlice(t, expected, actual, delta, msgAndArgs...) { + return + } + t.FailNow() } // InDeltaSlicef is the same as InDelta, except it compares two slices. func InDeltaSlicef(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) { - if !assert.InDeltaSlicef(t, expected, actual, delta, msg, args...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.InDeltaSlicef(t, expected, actual, delta, msg, args...) { + return } + t.FailNow() } // InDeltaf asserts that the two numerals are within delta of each other. 
// // assert.InDeltaf(t, math.Pi, (22 / 7.0, "error message %s", "formatted"), 0.01) func InDeltaf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) { - if !assert.InDeltaf(t, expected, actual, delta, msg, args...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() } + if assert.InDeltaf(t, expected, actual, delta, msg, args...) { + return + } + t.FailNow() } // InEpsilon asserts that expected and actual have a relative error less than epsilon func InEpsilon(t TestingT, expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) { - if !assert.InEpsilon(t, expected, actual, epsilon, msgAndArgs...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.InEpsilon(t, expected, actual, epsilon, msgAndArgs...) { + return } + t.FailNow() } // InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices. func InEpsilonSlice(t TestingT, expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) { - if !assert.InEpsilonSlice(t, expected, actual, epsilon, msgAndArgs...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.InEpsilonSlice(t, expected, actual, epsilon, msgAndArgs...) { + return } + t.FailNow() } // InEpsilonSlicef is the same as InEpsilon, except it compares each value from two slices. func InEpsilonSlicef(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) { - if !assert.InEpsilonSlicef(t, expected, actual, epsilon, msg, args...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() } + if assert.InEpsilonSlicef(t, expected, actual, epsilon, msg, args...) { + return + } + t.FailNow() } // InEpsilonf asserts that expected and actual have a relative error less than epsilon func InEpsilonf(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) { - if !assert.InEpsilonf(t, expected, actual, epsilon, msg, args...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.InEpsilonf(t, expected, actual, epsilon, msg, args...) { + return } + t.FailNow() } // IsType asserts that the specified objects are of the same type. func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) { - if !assert.IsType(t, expectedType, object, msgAndArgs...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() } + if assert.IsType(t, expectedType, object, msgAndArgs...) { + return + } + t.FailNow() } // IsTypef asserts that the specified objects are of the same type. func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) { - if !assert.IsTypef(t, expectedType, object, msg, args...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.IsTypef(t, expectedType, object, msg, args...) { + return } + t.FailNow() } // JSONEq asserts that two JSON strings are equivalent. // // assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) { - if !assert.JSONEq(t, expected, actual, msgAndArgs...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.JSONEq(t, expected, actual, msgAndArgs...) { + return } + t.FailNow() } // JSONEqf asserts that two JSON strings are equivalent. 
// // assert.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") func JSONEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) { - if !assert.JSONEqf(t, expected, actual, msg, args...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.JSONEqf(t, expected, actual, msg, args...) { + return + } + t.FailNow() +} + +// YAMLEq asserts that two YAML strings are equivalent. +func YAMLEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.YAMLEq(t, expected, actual, msgAndArgs...) { + return + } + t.FailNow() +} + +// YAMLEqf asserts that two YAML strings are equivalent. +func YAMLEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.YAMLEqf(t, expected, actual, msg, args...) { + return } + t.FailNow() } // Len asserts that the specified object has specific length. @@ -519,9 +847,13 @@ func JSONEqf(t TestingT, expected string, actual string, msg string, args ...int // // assert.Len(t, mySlice, 3) func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) { - if !assert.Len(t, object, length, msgAndArgs...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.Len(t, object, length, msgAndArgs...) { + return } + t.FailNow() } // Lenf asserts that the specified object has specific length. @@ -529,27 +861,101 @@ func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) // // assert.Lenf(t, mySlice, 3, "error message %s", "formatted") func Lenf(t TestingT, object interface{}, length int, msg string, args ...interface{}) { - if !assert.Lenf(t, object, length, msg, args...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.Lenf(t, object, length, msg, args...) { + return + } + t.FailNow() +} + +// Less asserts that the first element is less than the second +// +// assert.Less(t, 1, 2) +// assert.Less(t, float64(1), float64(2)) +// assert.Less(t, "a", "b") +func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.Less(t, e1, e2, msgAndArgs...) { + return + } + t.FailNow() +} + +// LessOrEqual asserts that the first element is less than or equal to the second +// +// assert.LessOrEqual(t, 1, 2) +// assert.LessOrEqual(t, 2, 2) +// assert.LessOrEqual(t, "a", "b") +// assert.LessOrEqual(t, "b", "b") +func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.LessOrEqual(t, e1, e2, msgAndArgs...) { + return + } + t.FailNow() +} + +// LessOrEqualf asserts that the first element is less than or equal to the second +// +// assert.LessOrEqualf(t, 1, 2, "error message %s", "formatted") +// assert.LessOrEqualf(t, 2, 2, "error message %s", "formatted") +// assert.LessOrEqualf(t, "a", "b", "error message %s", "formatted") +// assert.LessOrEqualf(t, "b", "b", "error message %s", "formatted") +func LessOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.LessOrEqualf(t, e1, e2, msg, args...) 
{ + return + } + t.FailNow() +} + +// Lessf asserts that the first element is less than the second +// +// assert.Lessf(t, 1, 2, "error message %s", "formatted") +// assert.Lessf(t, float64(1, "error message %s", "formatted"), float64(2)) +// assert.Lessf(t, "a", "b", "error message %s", "formatted") +func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() } + if assert.Lessf(t, e1, e2, msg, args...) { + return + } + t.FailNow() } // Nil asserts that the specified object is nil. // // assert.Nil(t, err) func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) { - if !assert.Nil(t, object, msgAndArgs...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.Nil(t, object, msgAndArgs...) { + return } + t.FailNow() } // Nilf asserts that the specified object is nil. // // assert.Nilf(t, err, "error message %s", "formatted") func Nilf(t TestingT, object interface{}, msg string, args ...interface{}) { - if !assert.Nilf(t, object, msg, args...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() } + if assert.Nilf(t, object, msg, args...) { + return + } + t.FailNow() } // NoError asserts that a function returned no error (i.e. `nil`). @@ -559,9 +965,13 @@ func Nilf(t TestingT, object interface{}, msg string, args ...interface{}) { // assert.Equal(t, expectedObj, actualObj) // } func NoError(t TestingT, err error, msgAndArgs ...interface{}) { - if !assert.NoError(t, err, msgAndArgs...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.NoError(t, err, msgAndArgs...) { + return } + t.FailNow() } // NoErrorf asserts that a function returned no error (i.e. `nil`). @@ -571,9 +981,13 @@ func NoError(t TestingT, err error, msgAndArgs ...interface{}) { // assert.Equal(t, expectedObj, actualObj) // } func NoErrorf(t TestingT, err error, msg string, args ...interface{}) { - if !assert.NoErrorf(t, err, msg, args...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.NoErrorf(t, err, msg, args...) { + return } + t.FailNow() } // NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the @@ -583,9 +997,13 @@ func NoErrorf(t TestingT, err error, msg string, args ...interface{}) { // assert.NotContains(t, ["Hello", "World"], "Earth") // assert.NotContains(t, {"Hello": "World"}, "Earth") func NotContains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...interface{}) { - if !assert.NotContains(t, s, contains, msgAndArgs...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() } + if assert.NotContains(t, s, contains, msgAndArgs...) { + return + } + t.FailNow() } // NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the @@ -595,9 +1013,13 @@ func NotContains(t TestingT, s interface{}, contains interface{}, msgAndArgs ... // assert.NotContainsf(t, ["Hello", "World"], "Earth", "error message %s", "formatted") // assert.NotContainsf(t, {"Hello": "World"}, "Earth", "error message %s", "formatted") func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) { - if !assert.NotContainsf(t, s, contains, msg, args...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.NotContainsf(t, s, contains, msg, args...) { + return } + t.FailNow() } // NotEmpty asserts that the specified object is NOT empty. I.e. 
not nil, "", false, 0 or either @@ -607,9 +1029,13 @@ func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, a // assert.Equal(t, "two", obj[1]) // } func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) { - if !assert.NotEmpty(t, object, msgAndArgs...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() } + if assert.NotEmpty(t, object, msgAndArgs...) { + return + } + t.FailNow() } // NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either @@ -619,9 +1045,13 @@ func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) { // assert.Equal(t, "two", obj[1]) // } func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{}) { - if !assert.NotEmptyf(t, object, msg, args...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.NotEmptyf(t, object, msg, args...) { + return } + t.FailNow() } // NotEqual asserts that the specified values are NOT equal. @@ -631,9 +1061,13 @@ func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{}) // Pointer variable equality is determined based on the equality of the // referenced values (as opposed to the memory addresses). func NotEqual(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { - if !assert.NotEqual(t, expected, actual, msgAndArgs...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.NotEqual(t, expected, actual, msgAndArgs...) { + return } + t.FailNow() } // NotEqualf asserts that the specified values are NOT equal. @@ -643,45 +1077,65 @@ func NotEqual(t TestingT, expected interface{}, actual interface{}, msgAndArgs . // Pointer variable equality is determined based on the equality of the // referenced values (as opposed to the memory addresses). func NotEqualf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { - if !assert.NotEqualf(t, expected, actual, msg, args...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() } + if assert.NotEqualf(t, expected, actual, msg, args...) { + return + } + t.FailNow() } // NotNil asserts that the specified object is not nil. // // assert.NotNil(t, err) func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) { - if !assert.NotNil(t, object, msgAndArgs...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.NotNil(t, object, msgAndArgs...) { + return } + t.FailNow() } // NotNilf asserts that the specified object is not nil. // // assert.NotNilf(t, err, "error message %s", "formatted") func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) { - if !assert.NotNilf(t, object, msg, args...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() } + if assert.NotNilf(t, object, msg, args...) { + return + } + t.FailNow() } // NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. // // assert.NotPanics(t, func(){ RemainCalm() }) func NotPanics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) { - if !assert.NotPanics(t, f, msgAndArgs...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.NotPanics(t, f, msgAndArgs...) { + return } + t.FailNow() } // NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic. 
// // assert.NotPanicsf(t, func(){ RemainCalm() }, "error message %s", "formatted") func NotPanicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interface{}) { - if !assert.NotPanicsf(t, f, msg, args...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.NotPanicsf(t, f, msg, args...) { + return } + t.FailNow() } // NotRegexp asserts that a specified regexp does not match a string. @@ -689,9 +1143,13 @@ func NotPanicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interfac // assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting") // assert.NotRegexp(t, "^start", "it's not starting") func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) { - if !assert.NotRegexp(t, rx, str, msgAndArgs...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() } + if assert.NotRegexp(t, rx, str, msgAndArgs...) { + return + } + t.FailNow() } // NotRegexpf asserts that a specified regexp does not match a string. @@ -699,9 +1157,13 @@ func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interf // assert.NotRegexpf(t, regexp.MustCompile("starts", "error message %s", "formatted"), "it's starting") // assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted") func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) { - if !assert.NotRegexpf(t, rx, str, msg, args...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.NotRegexpf(t, rx, str, msg, args...) { + return } + t.FailNow() } // NotSubset asserts that the specified list(array, slice...) contains not all @@ -709,9 +1171,13 @@ func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args .. // // assert.NotSubset(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) { - if !assert.NotSubset(t, list, subset, msgAndArgs...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() } + if assert.NotSubset(t, list, subset, msgAndArgs...) { + return + } + t.FailNow() } // NotSubsetf asserts that the specified list(array, slice...) contains not all @@ -719,32 +1185,48 @@ func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...i // // assert.NotSubsetf(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) { - if !assert.NotSubsetf(t, list, subset, msg, args...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.NotSubsetf(t, list, subset, msg, args...) { + return } + t.FailNow() } // NotZero asserts that i is not the zero value for its type. func NotZero(t TestingT, i interface{}, msgAndArgs ...interface{}) { - if !assert.NotZero(t, i, msgAndArgs...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.NotZero(t, i, msgAndArgs...) { + return } + t.FailNow() } // NotZerof asserts that i is not the zero value for its type. func NotZerof(t TestingT, i interface{}, msg string, args ...interface{}) { - if !assert.NotZerof(t, i, msg, args...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() } + if assert.NotZerof(t, i, msg, args...) { + return + } + t.FailNow() } // Panics asserts that the code inside the specified PanicTestFunc panics. 
// // assert.Panics(t, func(){ GoCrazy() }) func Panics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) { - if !assert.Panics(t, f, msgAndArgs...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.Panics(t, f, msgAndArgs...) { + return } + t.FailNow() } // PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that @@ -752,9 +1234,13 @@ func Panics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) { // // assert.PanicsWithValue(t, "crazy error", func(){ GoCrazy() }) func PanicsWithValue(t TestingT, expected interface{}, f assert.PanicTestFunc, msgAndArgs ...interface{}) { - if !assert.PanicsWithValue(t, expected, f, msgAndArgs...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() } + if assert.PanicsWithValue(t, expected, f, msgAndArgs...) { + return + } + t.FailNow() } // PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that @@ -762,18 +1248,26 @@ func PanicsWithValue(t TestingT, expected interface{}, f assert.PanicTestFunc, m // // assert.PanicsWithValuef(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") func PanicsWithValuef(t TestingT, expected interface{}, f assert.PanicTestFunc, msg string, args ...interface{}) { - if !assert.PanicsWithValuef(t, expected, f, msg, args...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.PanicsWithValuef(t, expected, f, msg, args...) { + return } + t.FailNow() } // Panicsf asserts that the code inside the specified PanicTestFunc panics. // // assert.Panicsf(t, func(){ GoCrazy() }, "error message %s", "formatted") func Panicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interface{}) { - if !assert.Panicsf(t, f, msg, args...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.Panicsf(t, f, msg, args...) { + return } + t.FailNow() } // Regexp asserts that a specified regexp matches a string. @@ -781,9 +1275,13 @@ func Panicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interface{} // assert.Regexp(t, regexp.MustCompile("start"), "it's starting") // assert.Regexp(t, "start...$", "it's not starting") func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) { - if !assert.Regexp(t, rx, str, msgAndArgs...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() } + if assert.Regexp(t, rx, str, msgAndArgs...) { + return + } + t.FailNow() } // Regexpf asserts that a specified regexp matches a string. @@ -791,9 +1289,45 @@ func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface // assert.Regexpf(t, regexp.MustCompile("start", "error message %s", "formatted"), "it's starting") // assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted") func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) { - if !assert.Regexpf(t, rx, str, msg, args...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.Regexpf(t, rx, str, msg, args...) { + return } + t.FailNow() +} + +// Same asserts that two pointers reference the same object. +// +// assert.Same(t, ptr1, ptr2) +// +// Both arguments must be pointer variables. Pointer variable sameness is +// determined based on the equality of both type and value. +func Same(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.Same(t, expected, actual, msgAndArgs...) 
{ + return + } + t.FailNow() +} + +// Samef asserts that two pointers reference the same object. +// +// assert.Samef(t, ptr1, ptr2, "error message %s", "formatted") +// +// Both arguments must be pointer variables. Pointer variable sameness is +// determined based on the equality of both type and value. +func Samef(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.Samef(t, expected, actual, msg, args...) { + return + } + t.FailNow() } // Subset asserts that the specified list(array, slice...) contains all @@ -801,9 +1335,13 @@ func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...in // // assert.Subset(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) { - if !assert.Subset(t, list, subset, msgAndArgs...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.Subset(t, list, subset, msgAndArgs...) { + return } + t.FailNow() } // Subsetf asserts that the specified list(array, slice...) contains all @@ -811,57 +1349,85 @@ func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...inte // // assert.Subsetf(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) { - if !assert.Subsetf(t, list, subset, msg, args...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() } + if assert.Subsetf(t, list, subset, msg, args...) { + return + } + t.FailNow() } // True asserts that the specified value is true. // // assert.True(t, myBool) func True(t TestingT, value bool, msgAndArgs ...interface{}) { - if !assert.True(t, value, msgAndArgs...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.True(t, value, msgAndArgs...) { + return } + t.FailNow() } // Truef asserts that the specified value is true. // // assert.Truef(t, myBool, "error message %s", "formatted") func Truef(t TestingT, value bool, msg string, args ...interface{}) { - if !assert.Truef(t, value, msg, args...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() } + if assert.Truef(t, value, msg, args...) { + return + } + t.FailNow() } // WithinDuration asserts that the two times are within duration delta of each other. // // assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second) func WithinDuration(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) { - if !assert.WithinDuration(t, expected, actual, delta, msgAndArgs...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.WithinDuration(t, expected, actual, delta, msgAndArgs...) { + return } + t.FailNow() } // WithinDurationf asserts that the two times are within duration delta of each other. // // assert.WithinDurationf(t, time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) { - if !assert.WithinDurationf(t, expected, actual, delta, msg, args...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.WithinDurationf(t, expected, actual, delta, msg, args...) { + return } + t.FailNow() } // Zero asserts that i is the zero value for its type. 
func Zero(t TestingT, i interface{}, msgAndArgs ...interface{}) { - if !assert.Zero(t, i, msgAndArgs...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() } + if assert.Zero(t, i, msgAndArgs...) { + return + } + t.FailNow() } // Zerof asserts that i is the zero value for its type. func Zerof(t TestingT, i interface{}, msg string, args ...interface{}) { - if !assert.Zerof(t, i, msg, args...) { - t.FailNow() + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.Zerof(t, i, msg, args...) { + return } + t.FailNow() } diff --git a/vendor/github.com/stretchr/testify/require/require.go.tmpl b/vendor/github.com/stretchr/testify/require/require.go.tmpl index d2c38f6..55e42dd 100644 --- a/vendor/github.com/stretchr/testify/require/require.go.tmpl +++ b/vendor/github.com/stretchr/testify/require/require.go.tmpl @@ -1,6 +1,6 @@ {{.Comment}} func {{.DocInfo.Name}}(t TestingT, {{.Params}}) { - if !assert.{{.DocInfo.Name}}(t, {{.ForwardedParams}}) { - t.FailNow() - } + if h, ok := t.(tHelper); ok { h.Helper() } + if assert.{{.DocInfo.Name}}(t, {{.ForwardedParams}}) { return } + t.FailNow() } diff --git a/vendor/github.com/stretchr/testify/require/require_forward.go b/vendor/github.com/stretchr/testify/require/require_forward.go index 299ceb9..804fae0 100644 --- a/vendor/github.com/stretchr/testify/require/require_forward.go +++ b/vendor/github.com/stretchr/testify/require/require_forward.go @@ -14,11 +14,17 @@ import ( // Condition uses a Comparison to assert a complex condition. func (a *Assertions) Condition(comp assert.Comparison, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } Condition(a.t, comp, msgAndArgs...) } // Conditionf uses a Comparison to assert a complex condition. func (a *Assertions) Conditionf(comp assert.Comparison, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } Conditionf(a.t, comp, msg, args...) } @@ -29,6 +35,9 @@ func (a *Assertions) Conditionf(comp assert.Comparison, msg string, args ...inte // a.Contains(["Hello", "World"], "World") // a.Contains({"Hello": "World"}, "Hello") func (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } Contains(a.t, s, contains, msgAndArgs...) } @@ -39,16 +48,25 @@ func (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs .. // a.Containsf(["Hello", "World"], "World", "error message %s", "formatted") // a.Containsf({"Hello": "World"}, "Hello", "error message %s", "formatted") func (a *Assertions) Containsf(s interface{}, contains interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } Containsf(a.t, s, contains, msg, args...) } // DirExists checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists. func (a *Assertions) DirExists(path string, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } DirExists(a.t, path, msgAndArgs...) } // DirExistsf checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists. func (a *Assertions) DirExistsf(path string, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } DirExistsf(a.t, path, msg, args...) 
} @@ -58,6 +76,9 @@ func (a *Assertions) DirExistsf(path string, msg string, args ...interface{}) { // // a.ElementsMatch([1, 3, 2, 3], [1, 3, 3, 2]) func (a *Assertions) ElementsMatch(listA interface{}, listB interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } ElementsMatch(a.t, listA, listB, msgAndArgs...) } @@ -67,6 +88,9 @@ func (a *Assertions) ElementsMatch(listA interface{}, listB interface{}, msgAndA // // a.ElementsMatchf([1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted") func (a *Assertions) ElementsMatchf(listA interface{}, listB interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } ElementsMatchf(a.t, listA, listB, msg, args...) } @@ -75,6 +99,9 @@ func (a *Assertions) ElementsMatchf(listA interface{}, listB interface{}, msg st // // a.Empty(obj) func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } Empty(a.t, object, msgAndArgs...) } @@ -83,6 +110,9 @@ func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) { // // a.Emptyf(obj, "error message %s", "formatted") func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } Emptyf(a.t, object, msg, args...) } @@ -94,6 +124,9 @@ func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) // referenced values (as opposed to the memory addresses). Function equality // cannot be determined and will always fail. func (a *Assertions) Equal(expected interface{}, actual interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } Equal(a.t, expected, actual, msgAndArgs...) } @@ -103,6 +136,9 @@ func (a *Assertions) Equal(expected interface{}, actual interface{}, msgAndArgs // actualObj, err := SomeFunction() // a.EqualError(err, expectedErrorString) func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } EqualError(a.t, theError, errString, msgAndArgs...) } @@ -112,6 +148,9 @@ func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ... // actualObj, err := SomeFunction() // a.EqualErrorf(err, expectedErrorString, "error message %s", "formatted") func (a *Assertions) EqualErrorf(theError error, errString string, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } EqualErrorf(a.t, theError, errString, msg, args...) } @@ -120,6 +159,9 @@ func (a *Assertions) EqualErrorf(theError error, errString string, msg string, a // // a.EqualValues(uint32(123), int32(123)) func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } EqualValues(a.t, expected, actual, msgAndArgs...) } @@ -128,6 +170,9 @@ func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAn // // a.EqualValuesf(uint32(123, "error message %s", "formatted"), int32(123)) func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } EqualValuesf(a.t, expected, actual, msg, args...) } @@ -139,6 +184,9 @@ func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg // referenced values (as opposed to the memory addresses). Function equality // cannot be determined and will always fail. 
func (a *Assertions) Equalf(expected interface{}, actual interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } Equalf(a.t, expected, actual, msg, args...) } @@ -149,6 +197,9 @@ func (a *Assertions) Equalf(expected interface{}, actual interface{}, msg string // assert.Equal(t, expectedError, err) // } func (a *Assertions) Error(err error, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } Error(a.t, err, msgAndArgs...) } @@ -159,13 +210,41 @@ func (a *Assertions) Error(err error, msgAndArgs ...interface{}) { // assert.Equal(t, expectedErrorf, err) // } func (a *Assertions) Errorf(err error, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } Errorf(a.t, err, msg, args...) } +// Eventually asserts that given condition will be met in waitFor time, +// periodically checking target function each tick. +// +// a.Eventually(func() bool { return true; }, time.Second, 10*time.Millisecond) +func (a *Assertions) Eventually(condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + Eventually(a.t, condition, waitFor, tick, msgAndArgs...) +} + +// Eventuallyf asserts that given condition will be met in waitFor time, +// periodically checking target function each tick. +// +// a.Eventuallyf(func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") +func (a *Assertions) Eventuallyf(condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + Eventuallyf(a.t, condition, waitFor, tick, msg, args...) +} + // Exactly asserts that two objects are equal in value and type. // // a.Exactly(int32(123), int64(123)) func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } Exactly(a.t, expected, actual, msgAndArgs...) } @@ -173,26 +252,41 @@ func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArg // // a.Exactlyf(int32(123, "error message %s", "formatted"), int64(123)) func (a *Assertions) Exactlyf(expected interface{}, actual interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } Exactlyf(a.t, expected, actual, msg, args...) } // Fail reports a failure through func (a *Assertions) Fail(failureMessage string, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } Fail(a.t, failureMessage, msgAndArgs...) } // FailNow fails test func (a *Assertions) FailNow(failureMessage string, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } FailNow(a.t, failureMessage, msgAndArgs...) } // FailNowf fails test func (a *Assertions) FailNowf(failureMessage string, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } FailNowf(a.t, failureMessage, msg, args...) } // Failf reports a failure through func (a *Assertions) Failf(failureMessage string, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } Failf(a.t, failureMessage, msg, args...) } @@ -200,6 +294,9 @@ func (a *Assertions) Failf(failureMessage string, msg string, args ...interface{ // // a.False(myBool) func (a *Assertions) False(value bool, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } False(a.t, value, msgAndArgs...) 
} @@ -207,56 +304,127 @@ func (a *Assertions) False(value bool, msgAndArgs ...interface{}) { // // a.Falsef(myBool, "error message %s", "formatted") func (a *Assertions) Falsef(value bool, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } Falsef(a.t, value, msg, args...) } // FileExists checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file. func (a *Assertions) FileExists(path string, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } FileExists(a.t, path, msgAndArgs...) } // FileExistsf checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file. func (a *Assertions) FileExistsf(path string, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } FileExistsf(a.t, path, msg, args...) } +// Greater asserts that the first element is greater than the second +// +// a.Greater(2, 1) +// a.Greater(float64(2), float64(1)) +// a.Greater("b", "a") +func (a *Assertions) Greater(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + Greater(a.t, e1, e2, msgAndArgs...) +} + +// GreaterOrEqual asserts that the first element is greater than or equal to the second +// +// a.GreaterOrEqual(2, 1) +// a.GreaterOrEqual(2, 2) +// a.GreaterOrEqual("b", "a") +// a.GreaterOrEqual("b", "b") +func (a *Assertions) GreaterOrEqual(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + GreaterOrEqual(a.t, e1, e2, msgAndArgs...) +} + +// GreaterOrEqualf asserts that the first element is greater than or equal to the second +// +// a.GreaterOrEqualf(2, 1, "error message %s", "formatted") +// a.GreaterOrEqualf(2, 2, "error message %s", "formatted") +// a.GreaterOrEqualf("b", "a", "error message %s", "formatted") +// a.GreaterOrEqualf("b", "b", "error message %s", "formatted") +func (a *Assertions) GreaterOrEqualf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + GreaterOrEqualf(a.t, e1, e2, msg, args...) +} + +// Greaterf asserts that the first element is greater than the second +// +// a.Greaterf(2, 1, "error message %s", "formatted") +// a.Greaterf(float64(2, "error message %s", "formatted"), float64(1)) +// a.Greaterf("b", "a", "error message %s", "formatted") +func (a *Assertions) Greaterf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + Greaterf(a.t, e1, e2, msg, args...) +} + // HTTPBodyContains asserts that a specified handler returns a // body that contains a string. // -// a.HTTPBodyContains(myHandler, "www.google.com", nil, "I'm Feeling Lucky") +// a.HTTPBodyContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") // // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } HTTPBodyContains(a.t, handler, method, url, values, str, msgAndArgs...) } // HTTPBodyContainsf asserts that a specified handler returns a // body that contains a string. 
// -// a.HTTPBodyContainsf(myHandler, "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") +// a.HTTPBodyContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") // // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) HTTPBodyContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } HTTPBodyContainsf(a.t, handler, method, url, values, str, msg, args...) } // HTTPBodyNotContains asserts that a specified handler returns a // body that does not contain a string. // -// a.HTTPBodyNotContains(myHandler, "www.google.com", nil, "I'm Feeling Lucky") +// a.HTTPBodyNotContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") // // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } HTTPBodyNotContains(a.t, handler, method, url, values, str, msgAndArgs...) } // HTTPBodyNotContainsf asserts that a specified handler returns a // body that does not contain a string. // -// a.HTTPBodyNotContainsf(myHandler, "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") +// a.HTTPBodyNotContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") // // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) HTTPBodyNotContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } HTTPBodyNotContainsf(a.t, handler, method, url, values, str, msg, args...) } @@ -266,6 +434,9 @@ func (a *Assertions) HTTPBodyNotContainsf(handler http.HandlerFunc, method strin // // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } HTTPError(a.t, handler, method, url, values, msgAndArgs...) } @@ -275,6 +446,9 @@ func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url stri // // Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). func (a *Assertions) HTTPErrorf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } HTTPErrorf(a.t, handler, method, url, values, msg, args...) } @@ -284,6 +458,9 @@ func (a *Assertions) HTTPErrorf(handler http.HandlerFunc, method string, url str // // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } HTTPRedirect(a.t, handler, method, url, values, msgAndArgs...) } @@ -293,6 +470,9 @@ func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url s // // Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). 
func (a *Assertions) HTTPRedirectf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } HTTPRedirectf(a.t, handler, method, url, values, msg, args...) } @@ -302,6 +482,9 @@ func (a *Assertions) HTTPRedirectf(handler http.HandlerFunc, method string, url // // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } HTTPSuccess(a.t, handler, method, url, values, msgAndArgs...) } @@ -311,6 +494,9 @@ func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url st // // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) HTTPSuccessf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } HTTPSuccessf(a.t, handler, method, url, values, msg, args...) } @@ -318,6 +504,9 @@ func (a *Assertions) HTTPSuccessf(handler http.HandlerFunc, method string, url s // // a.Implements((*MyInterface)(nil), new(MyObject)) func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } Implements(a.t, interfaceObject, object, msgAndArgs...) } @@ -325,6 +514,9 @@ func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, // // a.Implementsf((*MyInterface, "error message %s", "formatted")(nil), new(MyObject)) func (a *Assertions) Implementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } Implementsf(a.t, interfaceObject, object, msg, args...) } @@ -332,26 +524,41 @@ func (a *Assertions) Implementsf(interfaceObject interface{}, object interface{} // // a.InDelta(math.Pi, (22 / 7.0), 0.01) func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } InDelta(a.t, expected, actual, delta, msgAndArgs...) } // InDeltaMapValues is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys. func (a *Assertions) InDeltaMapValues(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } InDeltaMapValues(a.t, expected, actual, delta, msgAndArgs...) } // InDeltaMapValuesf is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys. func (a *Assertions) InDeltaMapValuesf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } InDeltaMapValuesf(a.t, expected, actual, delta, msg, args...) } // InDeltaSlice is the same as InDelta, except it compares two slices. func (a *Assertions) InDeltaSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } InDeltaSlice(a.t, expected, actual, delta, msgAndArgs...) } // InDeltaSlicef is the same as InDelta, except it compares two slices. 
func (a *Assertions) InDeltaSlicef(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } InDeltaSlicef(a.t, expected, actual, delta, msg, args...) } @@ -359,36 +566,57 @@ func (a *Assertions) InDeltaSlicef(expected interface{}, actual interface{}, del // // a.InDeltaf(math.Pi, (22 / 7.0, "error message %s", "formatted"), 0.01) func (a *Assertions) InDeltaf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } InDeltaf(a.t, expected, actual, delta, msg, args...) } // InEpsilon asserts that expected and actual have a relative error less than epsilon func (a *Assertions) InEpsilon(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } InEpsilon(a.t, expected, actual, epsilon, msgAndArgs...) } // InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices. func (a *Assertions) InEpsilonSlice(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } InEpsilonSlice(a.t, expected, actual, epsilon, msgAndArgs...) } // InEpsilonSlicef is the same as InEpsilon, except it compares each value from two slices. func (a *Assertions) InEpsilonSlicef(expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } InEpsilonSlicef(a.t, expected, actual, epsilon, msg, args...) } // InEpsilonf asserts that expected and actual have a relative error less than epsilon func (a *Assertions) InEpsilonf(expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } InEpsilonf(a.t, expected, actual, epsilon, msg, args...) } // IsType asserts that the specified objects are of the same type. func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } IsType(a.t, expectedType, object, msgAndArgs...) } // IsTypef asserts that the specified objects are of the same type. func (a *Assertions) IsTypef(expectedType interface{}, object interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } IsTypef(a.t, expectedType, object, msg, args...) } @@ -396,6 +624,9 @@ func (a *Assertions) IsTypef(expectedType interface{}, object interface{}, msg s // // a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } JSONEq(a.t, expected, actual, msgAndArgs...) } @@ -403,14 +634,36 @@ func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interf // // a.JSONEqf(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") func (a *Assertions) JSONEqf(expected string, actual string, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } JSONEqf(a.t, expected, actual, msg, args...) } +// YAMLEq asserts that two YAML strings are equivalent. +func (a *Assertions) YAMLEq(expected string, actual string, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + YAMLEq(a.t, expected, actual, msgAndArgs...) 
+} + +// YAMLEqf asserts that two YAML strings are equivalent. +func (a *Assertions) YAMLEqf(expected string, actual string, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + YAMLEqf(a.t, expected, actual, msg, args...) +} + // Len asserts that the specified object has specific length. // Len also fails if the object has a type that len() not accept. // // a.Len(mySlice, 3) func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } Len(a.t, object, length, msgAndArgs...) } @@ -419,13 +672,69 @@ func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface // // a.Lenf(mySlice, 3, "error message %s", "formatted") func (a *Assertions) Lenf(object interface{}, length int, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } Lenf(a.t, object, length, msg, args...) } +// Less asserts that the first element is less than the second +// +// a.Less(1, 2) +// a.Less(float64(1), float64(2)) +// a.Less("a", "b") +func (a *Assertions) Less(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + Less(a.t, e1, e2, msgAndArgs...) +} + +// LessOrEqual asserts that the first element is less than or equal to the second +// +// a.LessOrEqual(1, 2) +// a.LessOrEqual(2, 2) +// a.LessOrEqual("a", "b") +// a.LessOrEqual("b", "b") +func (a *Assertions) LessOrEqual(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + LessOrEqual(a.t, e1, e2, msgAndArgs...) +} + +// LessOrEqualf asserts that the first element is less than or equal to the second +// +// a.LessOrEqualf(1, 2, "error message %s", "formatted") +// a.LessOrEqualf(2, 2, "error message %s", "formatted") +// a.LessOrEqualf("a", "b", "error message %s", "formatted") +// a.LessOrEqualf("b", "b", "error message %s", "formatted") +func (a *Assertions) LessOrEqualf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + LessOrEqualf(a.t, e1, e2, msg, args...) +} + +// Lessf asserts that the first element is less than the second +// +// a.Lessf(1, 2, "error message %s", "formatted") +// a.Lessf(float64(1, "error message %s", "formatted"), float64(2)) +// a.Lessf("a", "b", "error message %s", "formatted") +func (a *Assertions) Lessf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + Lessf(a.t, e1, e2, msg, args...) +} + // Nil asserts that the specified object is nil. // // a.Nil(err) func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } Nil(a.t, object, msgAndArgs...) } @@ -433,6 +742,9 @@ func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) { // // a.Nilf(err, "error message %s", "formatted") func (a *Assertions) Nilf(object interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } Nilf(a.t, object, msg, args...) } @@ -443,6 +755,9 @@ func (a *Assertions) Nilf(object interface{}, msg string, args ...interface{}) { // assert.Equal(t, expectedObj, actualObj) // } func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } NoError(a.t, err, msgAndArgs...) 
} @@ -453,6 +768,9 @@ func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) { // assert.Equal(t, expectedObj, actualObj) // } func (a *Assertions) NoErrorf(err error, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } NoErrorf(a.t, err, msg, args...) } @@ -463,6 +781,9 @@ func (a *Assertions) NoErrorf(err error, msg string, args ...interface{}) { // a.NotContains(["Hello", "World"], "Earth") // a.NotContains({"Hello": "World"}, "Earth") func (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } NotContains(a.t, s, contains, msgAndArgs...) } @@ -473,6 +794,9 @@ func (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs // a.NotContainsf(["Hello", "World"], "Earth", "error message %s", "formatted") // a.NotContainsf({"Hello": "World"}, "Earth", "error message %s", "formatted") func (a *Assertions) NotContainsf(s interface{}, contains interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } NotContainsf(a.t, s, contains, msg, args...) } @@ -483,6 +807,9 @@ func (a *Assertions) NotContainsf(s interface{}, contains interface{}, msg strin // assert.Equal(t, "two", obj[1]) // } func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } NotEmpty(a.t, object, msgAndArgs...) } @@ -493,6 +820,9 @@ func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) { // assert.Equal(t, "two", obj[1]) // } func (a *Assertions) NotEmptyf(object interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } NotEmptyf(a.t, object, msg, args...) } @@ -503,6 +833,9 @@ func (a *Assertions) NotEmptyf(object interface{}, msg string, args ...interface // Pointer variable equality is determined based on the equality of the // referenced values (as opposed to the memory addresses). func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } NotEqual(a.t, expected, actual, msgAndArgs...) } @@ -513,6 +846,9 @@ func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndAr // Pointer variable equality is determined based on the equality of the // referenced values (as opposed to the memory addresses). func (a *Assertions) NotEqualf(expected interface{}, actual interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } NotEqualf(a.t, expected, actual, msg, args...) } @@ -520,6 +856,9 @@ func (a *Assertions) NotEqualf(expected interface{}, actual interface{}, msg str // // a.NotNil(err) func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } NotNil(a.t, object, msgAndArgs...) } @@ -527,6 +866,9 @@ func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) { // // a.NotNilf(err, "error message %s", "formatted") func (a *Assertions) NotNilf(object interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } NotNilf(a.t, object, msg, args...) 
} @@ -534,6 +876,9 @@ func (a *Assertions) NotNilf(object interface{}, msg string, args ...interface{} // // a.NotPanics(func(){ RemainCalm() }) func (a *Assertions) NotPanics(f assert.PanicTestFunc, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } NotPanics(a.t, f, msgAndArgs...) } @@ -541,6 +886,9 @@ func (a *Assertions) NotPanics(f assert.PanicTestFunc, msgAndArgs ...interface{} // // a.NotPanicsf(func(){ RemainCalm() }, "error message %s", "formatted") func (a *Assertions) NotPanicsf(f assert.PanicTestFunc, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } NotPanicsf(a.t, f, msg, args...) } @@ -549,6 +897,9 @@ func (a *Assertions) NotPanicsf(f assert.PanicTestFunc, msg string, args ...inte // a.NotRegexp(regexp.MustCompile("starts"), "it's starting") // a.NotRegexp("^start", "it's not starting") func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } NotRegexp(a.t, rx, str, msgAndArgs...) } @@ -557,6 +908,9 @@ func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...in // a.NotRegexpf(regexp.MustCompile("starts", "error message %s", "formatted"), "it's starting") // a.NotRegexpf("^start", "it's not starting", "error message %s", "formatted") func (a *Assertions) NotRegexpf(rx interface{}, str interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } NotRegexpf(a.t, rx, str, msg, args...) } @@ -565,6 +919,9 @@ func (a *Assertions) NotRegexpf(rx interface{}, str interface{}, msg string, arg // // a.NotSubset([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } NotSubset(a.t, list, subset, msgAndArgs...) } @@ -573,16 +930,25 @@ func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs // // a.NotSubsetf([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } NotSubsetf(a.t, list, subset, msg, args...) } // NotZero asserts that i is not the zero value for its type. func (a *Assertions) NotZero(i interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } NotZero(a.t, i, msgAndArgs...) } // NotZerof asserts that i is not the zero value for its type. func (a *Assertions) NotZerof(i interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } NotZerof(a.t, i, msg, args...) } @@ -590,6 +956,9 @@ func (a *Assertions) NotZerof(i interface{}, msg string, args ...interface{}) { // // a.Panics(func(){ GoCrazy() }) func (a *Assertions) Panics(f assert.PanicTestFunc, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } Panics(a.t, f, msgAndArgs...) } @@ -598,6 +967,9 @@ func (a *Assertions) Panics(f assert.PanicTestFunc, msgAndArgs ...interface{}) { // // a.PanicsWithValue("crazy error", func(){ GoCrazy() }) func (a *Assertions) PanicsWithValue(expected interface{}, f assert.PanicTestFunc, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } PanicsWithValue(a.t, expected, f, msgAndArgs...) 
} @@ -606,6 +978,9 @@ func (a *Assertions) PanicsWithValue(expected interface{}, f assert.PanicTestFun // // a.PanicsWithValuef("crazy error", func(){ GoCrazy() }, "error message %s", "formatted") func (a *Assertions) PanicsWithValuef(expected interface{}, f assert.PanicTestFunc, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } PanicsWithValuef(a.t, expected, f, msg, args...) } @@ -613,6 +988,9 @@ func (a *Assertions) PanicsWithValuef(expected interface{}, f assert.PanicTestFu // // a.Panicsf(func(){ GoCrazy() }, "error message %s", "formatted") func (a *Assertions) Panicsf(f assert.PanicTestFunc, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } Panicsf(a.t, f, msg, args...) } @@ -621,6 +999,9 @@ func (a *Assertions) Panicsf(f assert.PanicTestFunc, msg string, args ...interfa // a.Regexp(regexp.MustCompile("start"), "it's starting") // a.Regexp("start...$", "it's not starting") func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } Regexp(a.t, rx, str, msgAndArgs...) } @@ -629,14 +1010,46 @@ func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...inter // a.Regexpf(regexp.MustCompile("start", "error message %s", "formatted"), "it's starting") // a.Regexpf("start...$", "it's not starting", "error message %s", "formatted") func (a *Assertions) Regexpf(rx interface{}, str interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } Regexpf(a.t, rx, str, msg, args...) } +// Same asserts that two pointers reference the same object. +// +// a.Same(ptr1, ptr2) +// +// Both arguments must be pointer variables. Pointer variable sameness is +// determined based on the equality of both type and value. +func (a *Assertions) Same(expected interface{}, actual interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + Same(a.t, expected, actual, msgAndArgs...) +} + +// Samef asserts that two pointers reference the same object. +// +// a.Samef(ptr1, ptr2, "error message %s", "formatted") +// +// Both arguments must be pointer variables. Pointer variable sameness is +// determined based on the equality of both type and value. +func (a *Assertions) Samef(expected interface{}, actual interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + Samef(a.t, expected, actual, msg, args...) +} + // Subset asserts that the specified list(array, slice...) contains all // elements given in the specified subset(array, slice...). // // a.Subset([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } Subset(a.t, list, subset, msgAndArgs...) } @@ -645,6 +1058,9 @@ func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ... // // a.Subsetf([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } Subsetf(a.t, list, subset, msg, args...) 
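Same and Samef are new in this vendored testify version. Unlike Equal, which compares the values two pointers refer to, Same only passes when both arguments are the same pointer, i.e. the same type and the same address. A hedged illustration (the user type is invented for the example):

    package example_test

    import (
        "testing"

        "github.com/stretchr/testify/require"
    )

    type user struct{ name string }

    func TestSameVersusEqual(t *testing.T) {
        a := require.New(t)

        u := &user{name: "gopher"}
        alias := u                     // same allocation
        clone := &user{name: "gopher"} // equal contents, different allocation

        a.Same(u, alias)  // passes: identical pointers
        a.Equal(u, clone) // passes: Equal compares the referenced values
        // a.Same(u, clone) would fail: the addresses differ
    }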
} @@ -652,6 +1068,9 @@ func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, a // // a.True(myBool) func (a *Assertions) True(value bool, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } True(a.t, value, msgAndArgs...) } @@ -659,6 +1078,9 @@ func (a *Assertions) True(value bool, msgAndArgs ...interface{}) { // // a.Truef(myBool, "error message %s", "formatted") func (a *Assertions) Truef(value bool, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } Truef(a.t, value, msg, args...) } @@ -666,6 +1088,9 @@ func (a *Assertions) Truef(value bool, msg string, args ...interface{}) { // // a.WithinDuration(time.Now(), time.Now(), 10*time.Second) func (a *Assertions) WithinDuration(expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } WithinDuration(a.t, expected, actual, delta, msgAndArgs...) } @@ -673,15 +1098,24 @@ func (a *Assertions) WithinDuration(expected time.Time, actual time.Time, delta // // a.WithinDurationf(time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } WithinDurationf(a.t, expected, actual, delta, msg, args...) } // Zero asserts that i is the zero value for its type. func (a *Assertions) Zero(i interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } Zero(a.t, i, msgAndArgs...) } // Zerof asserts that i is the zero value for its type. func (a *Assertions) Zerof(i interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } Zerof(a.t, i, msg, args...) } diff --git a/vendor/github.com/stretchr/testify/require/require_forward.go.tmpl b/vendor/github.com/stretchr/testify/require/require_forward.go.tmpl index b93569e..54124df 100644 --- a/vendor/github.com/stretchr/testify/require/require_forward.go.tmpl +++ b/vendor/github.com/stretchr/testify/require/require_forward.go.tmpl @@ -1,4 +1,5 @@ {{.CommentWithoutT "a"}} func (a *Assertions) {{.DocInfo.Name}}({{.Params}}) { + if h, ok := a.t.(tHelper); ok { h.Helper() } {{.DocInfo.Name}}(a.t, {{.ForwardedParams}}) } diff --git a/vendor/github.com/stretchr/testify/require/requirements.go b/vendor/github.com/stretchr/testify/require/requirements.go index e404f01..6b85c5e 100644 --- a/vendor/github.com/stretchr/testify/require/requirements.go +++ b/vendor/github.com/stretchr/testify/require/requirements.go @@ -6,4 +6,24 @@ type TestingT interface { FailNow() } +type tHelper interface { + Helper() +} + +// ComparisonAssertionFunc is a common function prototype when comparing two values. Can be useful +// for table driven tests. +type ComparisonAssertionFunc func(TestingT, interface{}, interface{}, ...interface{}) + +// ValueAssertionFunc is a common function prototype when validating a single value. Can be useful +// for table driven tests. +type ValueAssertionFunc func(TestingT, interface{}, ...interface{}) + +// BoolAssertionFunc is a common function prototype when validating a bool value. Can be useful +// for table driven tests. +type BoolAssertionFunc func(TestingT, bool, ...interface{}) + +// ErrorAssertionFunc is a common function prototype when validating an error value. Can be useful +// for table driven tests. 
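WithinDuration bounds the absolute difference between two times by delta, while Zero and NotZero compare a value against the zero value of its type, so they work uniformly for numbers, strings, structs and pointers. A small sketch under the same assumptions as the examples above:

    package example_test

    import (
        "testing"
        "time"

        "github.com/stretchr/testify/require"
    )

    func TestTimesAndZeroValues(t *testing.T) {
        a := require.New(t)

        start := time.Now()
        later := start.Add(3 * time.Second)
        a.WithinDuration(start, later, 10*time.Second) // |start - later| <= 10s

        a.Zero("")          // zero value of string
        a.Zero(time.Time{}) // zero value of a struct type
        a.NotZero(42)       // any non-zero value
    }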
+type ErrorAssertionFunc func(TestingT, error, ...interface{}) + //go:generate go run ../_codegen/main.go -output-package=require -template=require.go.tmpl -include-format-funcs diff --git a/vendor/github.com/stretchr/testify/suite/suite.go b/vendor/github.com/stretchr/testify/suite/suite.go index e20afbc..d708d7d 100644 --- a/vendor/github.com/stretchr/testify/suite/suite.go +++ b/vendor/github.com/stretchr/testify/suite/suite.go @@ -6,6 +6,7 @@ import ( "os" "reflect" "regexp" + "runtime/debug" "testing" "github.com/stretchr/testify/assert" @@ -55,20 +56,35 @@ func (suite *Suite) Assert() *assert.Assertions { return suite.Assertions } +func failOnPanic(t *testing.T) { + r := recover() + if r != nil { + t.Errorf("test panicked: %v\n%s", r, debug.Stack()) + t.FailNow() + } +} + +// Run provides suite functionality around golang subtests. It should be +// called in place of t.Run(name, func(t *testing.T)) in test suite code. +// The passed-in func will be executed as a subtest with a fresh instance of t. +// Provides compatibility with go test pkg -run TestSuite/TestName/SubTestName. +func (suite *Suite) Run(name string, subtest func()) bool { + oldT := suite.T() + defer suite.SetT(oldT) + return oldT.Run(name, func(t *testing.T) { + suite.SetT(t) + subtest() + }) +} + // Run takes a testing suite and runs all of the tests attached // to it. func Run(t *testing.T, suite TestingSuite) { suite.SetT(t) + defer failOnPanic(t) - if setupAllSuite, ok := suite.(SetupAllSuite); ok { - setupAllSuite.SetupSuite() - } - defer func() { - if tearDownAllSuite, ok := suite.(TearDownAllSuite); ok { - tearDownAllSuite.TearDownSuite() - } - }() - + suiteSetupDone := false + methodFinder := reflect.TypeOf(suite) tests := []testing.InternalTest{} for index := 0; index < methodFinder.NumMethod(); index++ { @@ -78,32 +94,46 @@ func Run(t *testing.T, suite TestingSuite) { fmt.Fprintf(os.Stderr, "testify: invalid regexp for -m: %s\n", err) os.Exit(1) } - if ok { - test := testing.InternalTest{ - Name: method.Name, - F: func(t *testing.T) { - parentT := suite.T() - suite.SetT(t) - if setupTestSuite, ok := suite.(SetupTestSuite); ok { - setupTestSuite.SetupTest() + if !ok { + continue + } + if !suiteSetupDone { + if setupAllSuite, ok := suite.(SetupAllSuite); ok { + setupAllSuite.SetupSuite() + } + defer func() { + if tearDownAllSuite, ok := suite.(TearDownAllSuite); ok { + tearDownAllSuite.TearDownSuite() + } + }() + suiteSetupDone = true + } + test := testing.InternalTest{ + Name: method.Name, + F: func(t *testing.T) { + parentT := suite.T() + suite.SetT(t) + defer failOnPanic(t) + + if setupTestSuite, ok := suite.(SetupTestSuite); ok { + setupTestSuite.SetupTest() + } + if beforeTestSuite, ok := suite.(BeforeTest); ok { + beforeTestSuite.BeforeTest(methodFinder.Elem().Name(), method.Name) + } + defer func() { + if afterTestSuite, ok := suite.(AfterTest); ok { + afterTestSuite.AfterTest(methodFinder.Elem().Name(), method.Name) } - if beforeTestSuite, ok := suite.(BeforeTest); ok { - beforeTestSuite.BeforeTest(methodFinder.Elem().Name(), method.Name) + if tearDownTestSuite, ok := suite.(TearDownTestSuite); ok { + tearDownTestSuite.TearDownTest() } - defer func() { - if afterTestSuite, ok := suite.(AfterTest); ok { - afterTestSuite.AfterTest(methodFinder.Elem().Name(), method.Name) - } - if tearDownTestSuite, ok := suite.(TearDownTestSuite); ok { - tearDownTestSuite.TearDownTest() - } - suite.SetT(parentT) - }() - method.Func.Call([]reflect.Value{reflect.ValueOf(suite)}) - }, - } - tests = append(tests, test) + 
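The ComparisonAssertionFunc, ValueAssertionFunc, BoolAssertionFunc and ErrorAssertionFunc types added above exist so table-driven tests can carry the expected assertion as data: package-level functions such as require.Equal and require.NoError already match these signatures. A hedged sketch (test and case names are invented):

    package example_test

    import (
        "strconv"
        "testing"

        "github.com/stretchr/testify/require"
    )

    func TestAtoiTable(t *testing.T) {
        tests := []struct {
            name      string
            input     string
            assertErr require.ErrorAssertionFunc
            assertVal require.ComparisonAssertionFunc
            expected  int
        }{
            {"valid", "42", require.NoError, require.Equal, 42},
            {"invalid", "not-a-number", require.Error, require.Equal, 0},
        }

        for _, tc := range tests {
            t.Run(tc.name, func(t *testing.T) {
                got, err := strconv.Atoi(tc.input)
                tc.assertErr(t, err)
                tc.assertVal(t, tc.expected, got)
            })
        }
    }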
suite.SetT(parentT) + }() + method.Func.Call([]reflect.Value{reflect.ValueOf(suite)}) + }, } + tests = append(tests, test) } runTests(t, tests) } diff --git a/vendor/golang.org/x/net/AUTHORS b/vendor/golang.org/x/net/AUTHORS deleted file mode 100644 index 15167cd..0000000 --- a/vendor/golang.org/x/net/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/net/CONTRIBUTORS b/vendor/golang.org/x/net/CONTRIBUTORS deleted file mode 100644 index 1c4577e..0000000 --- a/vendor/golang.org/x/net/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/net/LICENSE b/vendor/golang.org/x/net/LICENSE deleted file mode 100644 index 6a66aea..0000000 --- a/vendor/golang.org/x/net/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/net/PATENTS b/vendor/golang.org/x/net/PATENTS deleted file mode 100644 index 7330990..0000000 --- a/vendor/golang.org/x/net/PATENTS +++ /dev/null @@ -1,22 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the Go project. - -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of Go, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of Go. 
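Two behavioural changes to the suite package are visible in this hunk: a panic inside a suite method now fails just that test (with a stack trace from failOnPanic) instead of aborting the whole run, and the new (suite *Suite) Run method wraps t.Run so a suite test can spawn subtests addressable via go test -run TestSuite/TestName/SubTestName. A hedged usage sketch with an invented MathSuite:

    package example_test

    import (
        "testing"

        "github.com/stretchr/testify/suite"
    )

    type MathSuite struct {
        suite.Suite
    }

    func (s *MathSuite) TestAddition() {
        // Each case becomes a real subtest with its own *testing.T.
        s.Run("small numbers", func() {
            s.Equal(4, 2+2)
        })
        s.Run("negatives", func() {
            s.Equal(-1, 2-3)
        })
    }

    func TestMathSuite(t *testing.T) {
        suite.Run(t, new(MathSuite))
    }

The refactored runner also makes SetupSuite and TearDownSuite lazy: they are only invoked once at least one method matches the method-name filter, rather than unconditionally.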
This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of Go or any code incorporated within this -implementation of Go constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of Go -shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go deleted file mode 100644 index a3c021d..0000000 --- a/vendor/golang.org/x/net/context/context.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package context defines the Context type, which carries deadlines, -// cancelation signals, and other request-scoped values across API boundaries -// and between processes. -// As of Go 1.7 this package is available in the standard library under the -// name context. https://golang.org/pkg/context. -// -// Incoming requests to a server should create a Context, and outgoing calls to -// servers should accept a Context. The chain of function calls between must -// propagate the Context, optionally replacing it with a modified copy created -// using WithDeadline, WithTimeout, WithCancel, or WithValue. -// -// Programs that use Contexts should follow these rules to keep interfaces -// consistent across packages and enable static analysis tools to check context -// propagation: -// -// Do not store Contexts inside a struct type; instead, pass a Context -// explicitly to each function that needs it. The Context should be the first -// parameter, typically named ctx: -// -// func DoSomething(ctx context.Context, arg Arg) error { -// // ... use ctx ... -// } -// -// Do not pass a nil Context, even if a function permits it. Pass context.TODO -// if you are unsure about which Context to use. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -// -// The same Context may be passed to functions running in different goroutines; -// Contexts are safe for simultaneous use by multiple goroutines. -// -// See http://blog.golang.org/context for example code for a server that uses -// Contexts. -package context // import "golang.org/x/net/context" - -// Background returns a non-nil, empty Context. It is never canceled, has no -// values, and has no deadline. It is typically used by the main function, -// initialization, and tests, and as the top-level Context for incoming -// requests. -func Background() Context { - return background -} - -// TODO returns a non-nil, empty Context. Code should use context.TODO when -// it's unclear which Context to use or it is not yet available (because the -// surrounding function has not yet been extended to accept a Context -// parameter). TODO is recognized by static analysis tools that determine -// whether Contexts are propagated correctly in a program. 
-func TODO() Context { - return todo -} diff --git a/vendor/golang.org/x/net/context/go17.go b/vendor/golang.org/x/net/context/go17.go deleted file mode 100644 index d20f52b..0000000 --- a/vendor/golang.org/x/net/context/go17.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.7 - -package context - -import ( - "context" // standard library's context, as of Go 1.7 - "time" -) - -var ( - todo = context.TODO() - background = context.Background() -) - -// Canceled is the error returned by Context.Err when the context is canceled. -var Canceled = context.Canceled - -// DeadlineExceeded is the error returned by Context.Err when the context's -// deadline passes. -var DeadlineExceeded = context.DeadlineExceeded - -// WithCancel returns a copy of parent with a new Done channel. The returned -// context's Done channel is closed when the returned cancel function is called -// or when the parent context's Done channel is closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { - ctx, f := context.WithCancel(parent) - return ctx, CancelFunc(f) -} - -// WithDeadline returns a copy of the parent context with the deadline adjusted -// to be no later than d. If the parent's deadline is already earlier than d, -// WithDeadline(parent, d) is semantically equivalent to parent. The returned -// context's Done channel is closed when the deadline expires, when the returned -// cancel function is called, or when the parent context's Done channel is -// closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { - ctx, f := context.WithDeadline(parent, deadline) - return ctx, CancelFunc(f) -} - -// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete: -// -// func slowOperationWithTimeout(ctx context.Context) (Result, error) { -// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) -// defer cancel() // releases resources if slowOperation completes before timeout elapses -// return slowOperation(ctx) -// } -func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { - return WithDeadline(parent, time.Now().Add(timeout)) -} - -// WithValue returns a copy of parent in which the value associated with key is -// val. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -func WithValue(parent Context, key interface{}, val interface{}) Context { - return context.WithValue(parent, key, val) -} diff --git a/vendor/golang.org/x/net/context/go19.go b/vendor/golang.org/x/net/context/go19.go deleted file mode 100644 index d88bd1d..0000000 --- a/vendor/golang.org/x/net/context/go19.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.9 - -package context - -import "context" // standard library's context, as of Go 1.7 - -// A Context carries a deadline, a cancelation signal, and other values across -// API boundaries. -// -// Context's methods may be called by multiple goroutines simultaneously. -type Context = context.Context - -// A CancelFunc tells an operation to abandon its work. -// A CancelFunc does not wait for the work to stop. -// After the first call, subsequent calls to a CancelFunc do nothing. -type CancelFunc = context.CancelFunc diff --git a/vendor/golang.org/x/net/context/pre_go17.go b/vendor/golang.org/x/net/context/pre_go17.go deleted file mode 100644 index 0f35592..0000000 --- a/vendor/golang.org/x/net/context/pre_go17.go +++ /dev/null @@ -1,300 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.7 - -package context - -import ( - "errors" - "fmt" - "sync" - "time" -) - -// An emptyCtx is never canceled, has no values, and has no deadline. It is not -// struct{}, since vars of this type must have distinct addresses. -type emptyCtx int - -func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { - return -} - -func (*emptyCtx) Done() <-chan struct{} { - return nil -} - -func (*emptyCtx) Err() error { - return nil -} - -func (*emptyCtx) Value(key interface{}) interface{} { - return nil -} - -func (e *emptyCtx) String() string { - switch e { - case background: - return "context.Background" - case todo: - return "context.TODO" - } - return "unknown empty Context" -} - -var ( - background = new(emptyCtx) - todo = new(emptyCtx) -) - -// Canceled is the error returned by Context.Err when the context is canceled. -var Canceled = errors.New("context canceled") - -// DeadlineExceeded is the error returned by Context.Err when the context's -// deadline passes. -var DeadlineExceeded = errors.New("context deadline exceeded") - -// WithCancel returns a copy of parent with a new Done channel. The returned -// context's Done channel is closed when the returned cancel function is called -// or when the parent context's Done channel is closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { - c := newCancelCtx(parent) - propagateCancel(parent, c) - return c, func() { c.cancel(true, Canceled) } -} - -// newCancelCtx returns an initialized cancelCtx. -func newCancelCtx(parent Context) *cancelCtx { - return &cancelCtx{ - Context: parent, - done: make(chan struct{}), - } -} - -// propagateCancel arranges for child to be canceled when parent is. -func propagateCancel(parent Context, child canceler) { - if parent.Done() == nil { - return // parent is never canceled - } - if p, ok := parentCancelCtx(parent); ok { - p.mu.Lock() - if p.err != nil { - // parent has already been canceled - child.cancel(false, p.err) - } else { - if p.children == nil { - p.children = make(map[canceler]bool) - } - p.children[child] = true - } - p.mu.Unlock() - } else { - go func() { - select { - case <-parent.Done(): - child.cancel(false, parent.Err()) - case <-child.Done(): - } - }() - } -} - -// parentCancelCtx follows a chain of parent references until it finds a -// *cancelCtx. 
This function understands how each of the concrete types in this -// package represents its parent. -func parentCancelCtx(parent Context) (*cancelCtx, bool) { - for { - switch c := parent.(type) { - case *cancelCtx: - return c, true - case *timerCtx: - return c.cancelCtx, true - case *valueCtx: - parent = c.Context - default: - return nil, false - } - } -} - -// removeChild removes a context from its parent. -func removeChild(parent Context, child canceler) { - p, ok := parentCancelCtx(parent) - if !ok { - return - } - p.mu.Lock() - if p.children != nil { - delete(p.children, child) - } - p.mu.Unlock() -} - -// A canceler is a context type that can be canceled directly. The -// implementations are *cancelCtx and *timerCtx. -type canceler interface { - cancel(removeFromParent bool, err error) - Done() <-chan struct{} -} - -// A cancelCtx can be canceled. When canceled, it also cancels any children -// that implement canceler. -type cancelCtx struct { - Context - - done chan struct{} // closed by the first cancel call. - - mu sync.Mutex - children map[canceler]bool // set to nil by the first cancel call - err error // set to non-nil by the first cancel call -} - -func (c *cancelCtx) Done() <-chan struct{} { - return c.done -} - -func (c *cancelCtx) Err() error { - c.mu.Lock() - defer c.mu.Unlock() - return c.err -} - -func (c *cancelCtx) String() string { - return fmt.Sprintf("%v.WithCancel", c.Context) -} - -// cancel closes c.done, cancels each of c's children, and, if -// removeFromParent is true, removes c from its parent's children. -func (c *cancelCtx) cancel(removeFromParent bool, err error) { - if err == nil { - panic("context: internal error: missing cancel error") - } - c.mu.Lock() - if c.err != nil { - c.mu.Unlock() - return // already canceled - } - c.err = err - close(c.done) - for child := range c.children { - // NOTE: acquiring the child's lock while holding parent's lock. - child.cancel(false, err) - } - c.children = nil - c.mu.Unlock() - - if removeFromParent { - removeChild(c.Context, c) - } -} - -// WithDeadline returns a copy of the parent context with the deadline adjusted -// to be no later than d. If the parent's deadline is already earlier than d, -// WithDeadline(parent, d) is semantically equivalent to parent. The returned -// context's Done channel is closed when the deadline expires, when the returned -// cancel function is called, or when the parent context's Done channel is -// closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { - if cur, ok := parent.Deadline(); ok && cur.Before(deadline) { - // The current deadline is already sooner than the new one. - return WithCancel(parent) - } - c := &timerCtx{ - cancelCtx: newCancelCtx(parent), - deadline: deadline, - } - propagateCancel(parent, c) - d := deadline.Sub(time.Now()) - if d <= 0 { - c.cancel(true, DeadlineExceeded) // deadline has already passed - return c, func() { c.cancel(true, Canceled) } - } - c.mu.Lock() - defer c.mu.Unlock() - if c.err == nil { - c.timer = time.AfterFunc(d, func() { - c.cancel(true, DeadlineExceeded) - }) - } - return c, func() { c.cancel(true, Canceled) } -} - -// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to -// implement Done and Err. It implements cancel by stopping its timer then -// delegating to cancelCtx.cancel. 
-type timerCtx struct { - *cancelCtx - timer *time.Timer // Under cancelCtx.mu. - - deadline time.Time -} - -func (c *timerCtx) Deadline() (deadline time.Time, ok bool) { - return c.deadline, true -} - -func (c *timerCtx) String() string { - return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now())) -} - -func (c *timerCtx) cancel(removeFromParent bool, err error) { - c.cancelCtx.cancel(false, err) - if removeFromParent { - // Remove this timerCtx from its parent cancelCtx's children. - removeChild(c.cancelCtx.Context, c) - } - c.mu.Lock() - if c.timer != nil { - c.timer.Stop() - c.timer = nil - } - c.mu.Unlock() -} - -// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete: -// -// func slowOperationWithTimeout(ctx context.Context) (Result, error) { -// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) -// defer cancel() // releases resources if slowOperation completes before timeout elapses -// return slowOperation(ctx) -// } -func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { - return WithDeadline(parent, time.Now().Add(timeout)) -} - -// WithValue returns a copy of parent in which the value associated with key is -// val. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -func WithValue(parent Context, key interface{}, val interface{}) Context { - return &valueCtx{parent, key, val} -} - -// A valueCtx carries a key-value pair. It implements Value for that key and -// delegates all other calls to the embedded Context. -type valueCtx struct { - Context - key, val interface{} -} - -func (c *valueCtx) String() string { - return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val) -} - -func (c *valueCtx) Value(key interface{}) interface{} { - if c.key == key { - return c.val - } - return c.Context.Value(key) -} diff --git a/vendor/golang.org/x/net/context/pre_go19.go b/vendor/golang.org/x/net/context/pre_go19.go deleted file mode 100644 index b105f80..0000000 --- a/vendor/golang.org/x/net/context/pre_go19.go +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.9 - -package context - -import "time" - -// A Context carries a deadline, a cancelation signal, and other values across -// API boundaries. -// -// Context's methods may be called by multiple goroutines simultaneously. -type Context interface { - // Deadline returns the time when work done on behalf of this context - // should be canceled. Deadline returns ok==false when no deadline is - // set. Successive calls to Deadline return the same results. - Deadline() (deadline time.Time, ok bool) - - // Done returns a channel that's closed when work done on behalf of this - // context should be canceled. Done may return nil if this context can - // never be canceled. Successive calls to Done return the same value. - // - // WithCancel arranges for Done to be closed when cancel is called; - // WithDeadline arranges for Done to be closed when the deadline - // expires; WithTimeout arranges for Done to be closed when the timeout - // elapses. 
- // - // Done is provided for use in select statements: - // - // // Stream generates values with DoSomething and sends them to out - // // until DoSomething returns an error or ctx.Done is closed. - // func Stream(ctx context.Context, out chan<- Value) error { - // for { - // v, err := DoSomething(ctx) - // if err != nil { - // return err - // } - // select { - // case <-ctx.Done(): - // return ctx.Err() - // case out <- v: - // } - // } - // } - // - // See http://blog.golang.org/pipelines for more examples of how to use - // a Done channel for cancelation. - Done() <-chan struct{} - - // Err returns a non-nil error value after Done is closed. Err returns - // Canceled if the context was canceled or DeadlineExceeded if the - // context's deadline passed. No other values for Err are defined. - // After Done is closed, successive calls to Err return the same value. - Err() error - - // Value returns the value associated with this context for key, or nil - // if no value is associated with key. Successive calls to Value with - // the same key returns the same result. - // - // Use context values only for request-scoped data that transits - // processes and API boundaries, not for passing optional parameters to - // functions. - // - // A key identifies a specific value in a Context. Functions that wish - // to store values in Context typically allocate a key in a global - // variable then use that key as the argument to context.WithValue and - // Context.Value. A key can be any type that supports equality; - // packages should define keys as an unexported type to avoid - // collisions. - // - // Packages that define a Context key should provide type-safe accessors - // for the values stores using that key: - // - // // Package user defines a User type that's stored in Contexts. - // package user - // - // import "golang.org/x/net/context" - // - // // User is the type of value stored in the Contexts. - // type User struct {...} - // - // // key is an unexported type for keys defined in this package. - // // This prevents collisions with keys defined in other packages. - // type key int - // - // // userKey is the key for user.User values in Contexts. It is - // // unexported; clients use user.NewContext and user.FromContext - // // instead of using this key directly. - // var userKey key = 0 - // - // // NewContext returns a new Context that carries value u. - // func NewContext(ctx context.Context, u *User) context.Context { - // return context.WithValue(ctx, userKey, u) - // } - // - // // FromContext returns the User value stored in ctx, if any. - // func FromContext(ctx context.Context) (*User, bool) { - // u, ok := ctx.Value(userKey).(*User) - // return u, ok - // } - Value(key interface{}) interface{} -} - -// A CancelFunc tells an operation to abandon its work. -// A CancelFunc does not wait for the work to stop. -// After the first call, subsequent calls to a CancelFunc do nothing. -type CancelFunc func() diff --git a/vendor/golang.org/x/tools/AUTHORS b/vendor/golang.org/x/tools/AUTHORS deleted file mode 100644 index 15167cd..0000000 --- a/vendor/golang.org/x/tools/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. 
diff --git a/vendor/golang.org/x/tools/CONTRIBUTORS b/vendor/golang.org/x/tools/CONTRIBUTORS deleted file mode 100644 index 1c4577e..0000000 --- a/vendor/golang.org/x/tools/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/tools/LICENSE b/vendor/golang.org/x/tools/LICENSE deleted file mode 100644 index 6a66aea..0000000 --- a/vendor/golang.org/x/tools/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/tools/PATENTS b/vendor/golang.org/x/tools/PATENTS deleted file mode 100644 index 7330990..0000000 --- a/vendor/golang.org/x/tools/PATENTS +++ /dev/null @@ -1,22 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the Go project. - -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of Go, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of Go. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. 
If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of Go or any code incorporated within this -implementation of Go constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of Go -shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/tools/cmd/getgo/LICENSE b/vendor/golang.org/x/tools/cmd/getgo/LICENSE deleted file mode 100644 index 32017f8..0000000 --- a/vendor/golang.org/x/tools/cmd/getgo/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2017 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go b/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go deleted file mode 100644 index 6b7052b..0000000 --- a/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go +++ /dev/null @@ -1,627 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package astutil - -// This file defines utilities for working with source positions. - -import ( - "fmt" - "go/ast" - "go/token" - "sort" -) - -// PathEnclosingInterval returns the node that encloses the source -// interval [start, end), and all its ancestors up to the AST root. -// -// The definition of "enclosing" used by this function considers -// additional whitespace abutting a node to be enclosed by it. -// In this example: -// -// z := x + y // add them -// <-A-> -// <----B-----> -// -// the ast.BinaryExpr(+) node is considered to enclose interval B -// even though its [Pos()..End()) is actually only interval A. -// This behaviour makes user interfaces more tolerant of imperfect -// input. -// -// This function treats tokens as nodes, though they are not included -// in the result. e.g. 
PathEnclosingInterval("+") returns the -// enclosing ast.BinaryExpr("x + y"). -// -// If start==end, the 1-char interval following start is used instead. -// -// The 'exact' result is true if the interval contains only path[0] -// and perhaps some adjacent whitespace. It is false if the interval -// overlaps multiple children of path[0], or if it contains only -// interior whitespace of path[0]. -// In this example: -// -// z := x + y // add them -// <--C--> <---E--> -// ^ -// D -// -// intervals C, D and E are inexact. C is contained by the -// z-assignment statement, because it spans three of its children (:=, -// x, +). So too is the 1-char interval D, because it contains only -// interior whitespace of the assignment. E is considered interior -// whitespace of the BlockStmt containing the assignment. -// -// Precondition: [start, end) both lie within the same file as root. -// TODO(adonovan): return (nil, false) in this case and remove precond. -// Requires FileSet; see loader.tokenFileContainsPos. -// -// Postcondition: path is never nil; it always contains at least 'root'. -// -func PathEnclosingInterval(root *ast.File, start, end token.Pos) (path []ast.Node, exact bool) { - // fmt.Printf("EnclosingInterval %d %d\n", start, end) // debugging - - // Precondition: node.[Pos..End) and adjoining whitespace contain [start, end). - var visit func(node ast.Node) bool - visit = func(node ast.Node) bool { - path = append(path, node) - - nodePos := node.Pos() - nodeEnd := node.End() - - // fmt.Printf("visit(%T, %d, %d)\n", node, nodePos, nodeEnd) // debugging - - // Intersect [start, end) with interval of node. - if start < nodePos { - start = nodePos - } - if end > nodeEnd { - end = nodeEnd - } - - // Find sole child that contains [start, end). - children := childrenOf(node) - l := len(children) - for i, child := range children { - // [childPos, childEnd) is unaugmented interval of child. - childPos := child.Pos() - childEnd := child.End() - - // [augPos, augEnd) is whitespace-augmented interval of child. - augPos := childPos - augEnd := childEnd - if i > 0 { - augPos = children[i-1].End() // start of preceding whitespace - } - if i < l-1 { - nextChildPos := children[i+1].Pos() - // Does [start, end) lie between child and next child? - if start >= augEnd && end <= nextChildPos { - return false // inexact match - } - augEnd = nextChildPos // end of following whitespace - } - - // fmt.Printf("\tchild %d: [%d..%d)\tcontains interval [%d..%d)?\n", - // i, augPos, augEnd, start, end) // debugging - - // Does augmented child strictly contain [start, end)? - if augPos <= start && end <= augEnd { - _, isToken := child.(tokenNode) - return isToken || visit(child) - } - - // Does [start, end) overlap multiple children? - // i.e. left-augmented child contains start - // but LR-augmented child does not contain end. - if start < childEnd && end > augEnd { - break - } - } - - // No single child contained [start, end), - // so node is the result. Is it exact? - - // (It's tempting to put this condition before the - // child loop, but it gives the wrong result in the - // case where a node (e.g. ExprStmt) and its sole - // child have equal intervals.) 
- if start == nodePos && end == nodeEnd { - return true // exact match - } - - return false // inexact: overlaps multiple children - } - - if start > end { - start, end = end, start - } - - if start < root.End() && end > root.Pos() { - if start == end { - end = start + 1 // empty interval => interval of size 1 - } - exact = visit(root) - - // Reverse the path: - for i, l := 0, len(path); i < l/2; i++ { - path[i], path[l-1-i] = path[l-1-i], path[i] - } - } else { - // Selection lies within whitespace preceding the - // first (or following the last) declaration in the file. - // The result nonetheless always includes the ast.File. - path = append(path, root) - } - - return -} - -// tokenNode is a dummy implementation of ast.Node for a single token. -// They are used transiently by PathEnclosingInterval but never escape -// this package. -// -type tokenNode struct { - pos token.Pos - end token.Pos -} - -func (n tokenNode) Pos() token.Pos { - return n.pos -} - -func (n tokenNode) End() token.Pos { - return n.end -} - -func tok(pos token.Pos, len int) ast.Node { - return tokenNode{pos, pos + token.Pos(len)} -} - -// childrenOf returns the direct non-nil children of ast.Node n. -// It may include fake ast.Node implementations for bare tokens. -// it is not safe to call (e.g.) ast.Walk on such nodes. -// -func childrenOf(n ast.Node) []ast.Node { - var children []ast.Node - - // First add nodes for all true subtrees. - ast.Inspect(n, func(node ast.Node) bool { - if node == n { // push n - return true // recur - } - if node != nil { // push child - children = append(children, node) - } - return false // no recursion - }) - - // Then add fake Nodes for bare tokens. - switch n := n.(type) { - case *ast.ArrayType: - children = append(children, - tok(n.Lbrack, len("[")), - tok(n.Elt.End(), len("]"))) - - case *ast.AssignStmt: - children = append(children, - tok(n.TokPos, len(n.Tok.String()))) - - case *ast.BasicLit: - children = append(children, - tok(n.ValuePos, len(n.Value))) - - case *ast.BinaryExpr: - children = append(children, tok(n.OpPos, len(n.Op.String()))) - - case *ast.BlockStmt: - children = append(children, - tok(n.Lbrace, len("{")), - tok(n.Rbrace, len("}"))) - - case *ast.BranchStmt: - children = append(children, - tok(n.TokPos, len(n.Tok.String()))) - - case *ast.CallExpr: - children = append(children, - tok(n.Lparen, len("(")), - tok(n.Rparen, len(")"))) - if n.Ellipsis != 0 { - children = append(children, tok(n.Ellipsis, len("..."))) - } - - case *ast.CaseClause: - if n.List == nil { - children = append(children, - tok(n.Case, len("default"))) - } else { - children = append(children, - tok(n.Case, len("case"))) - } - children = append(children, tok(n.Colon, len(":"))) - - case *ast.ChanType: - switch n.Dir { - case ast.RECV: - children = append(children, tok(n.Begin, len("<-chan"))) - case ast.SEND: - children = append(children, tok(n.Begin, len("chan<-"))) - case ast.RECV | ast.SEND: - children = append(children, tok(n.Begin, len("chan"))) - } - - case *ast.CommClause: - if n.Comm == nil { - children = append(children, - tok(n.Case, len("default"))) - } else { - children = append(children, - tok(n.Case, len("case"))) - } - children = append(children, tok(n.Colon, len(":"))) - - case *ast.Comment: - // nop - - case *ast.CommentGroup: - // nop - - case *ast.CompositeLit: - children = append(children, - tok(n.Lbrace, len("{")), - tok(n.Rbrace, len("{"))) - - case *ast.DeclStmt: - // nop - - case *ast.DeferStmt: - children = append(children, - tok(n.Defer, len("defer"))) - - case 
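Although the vendored copy of astutil is being removed here, the PathEnclosingInterval documentation above describes the API as it still exists in golang.org/x/tools: given a parsed file and a [start, end) position interval, it returns the innermost enclosing node first, followed by its ancestors up to the *ast.File. A hedged sketch of typical use (the source snippet is invented):

    package main

    import (
        "fmt"
        "go/parser"
        "go/token"
        "strings"

        "golang.org/x/tools/go/ast/astutil"
    )

    const src = `package p

    func add(x, y int) int { return x + y }
    `

    func main() {
        fset := token.NewFileSet()
        f, err := parser.ParseFile(fset, "p.go", src, 0)
        if err != nil {
            panic(err)
        }

        // Translate the byte offsets of "x + y" into token.Pos values.
        file := fset.File(f.Pos())
        off := strings.Index(src, "x + y")
        start, end := file.Pos(off), file.Pos(off+len("x + y"))

        path, exact := astutil.PathEnclosingInterval(f, start, end)
        fmt.Println("exact:", exact)
        for _, n := range path { // innermost node first: *ast.BinaryExpr, *ast.ReturnStmt, ...
            fmt.Printf("%T\n", n)
        }
    }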
*ast.Ellipsis: - children = append(children, - tok(n.Ellipsis, len("..."))) - - case *ast.EmptyStmt: - // nop - - case *ast.ExprStmt: - // nop - - case *ast.Field: - // TODO(adonovan): Field.{Doc,Comment,Tag}? - - case *ast.FieldList: - children = append(children, - tok(n.Opening, len("(")), - tok(n.Closing, len(")"))) - - case *ast.File: - // TODO test: Doc - children = append(children, - tok(n.Package, len("package"))) - - case *ast.ForStmt: - children = append(children, - tok(n.For, len("for"))) - - case *ast.FuncDecl: - // TODO(adonovan): FuncDecl.Comment? - - // Uniquely, FuncDecl breaks the invariant that - // preorder traversal yields tokens in lexical order: - // in fact, FuncDecl.Recv precedes FuncDecl.Type.Func. - // - // As a workaround, we inline the case for FuncType - // here and order things correctly. - // - children = nil // discard ast.Walk(FuncDecl) info subtrees - children = append(children, tok(n.Type.Func, len("func"))) - if n.Recv != nil { - children = append(children, n.Recv) - } - children = append(children, n.Name) - if n.Type.Params != nil { - children = append(children, n.Type.Params) - } - if n.Type.Results != nil { - children = append(children, n.Type.Results) - } - if n.Body != nil { - children = append(children, n.Body) - } - - case *ast.FuncLit: - // nop - - case *ast.FuncType: - if n.Func != 0 { - children = append(children, - tok(n.Func, len("func"))) - } - - case *ast.GenDecl: - children = append(children, - tok(n.TokPos, len(n.Tok.String()))) - if n.Lparen != 0 { - children = append(children, - tok(n.Lparen, len("(")), - tok(n.Rparen, len(")"))) - } - - case *ast.GoStmt: - children = append(children, - tok(n.Go, len("go"))) - - case *ast.Ident: - children = append(children, - tok(n.NamePos, len(n.Name))) - - case *ast.IfStmt: - children = append(children, - tok(n.If, len("if"))) - - case *ast.ImportSpec: - // TODO(adonovan): ImportSpec.{Doc,EndPos}? 
- - case *ast.IncDecStmt: - children = append(children, - tok(n.TokPos, len(n.Tok.String()))) - - case *ast.IndexExpr: - children = append(children, - tok(n.Lbrack, len("{")), - tok(n.Rbrack, len("}"))) - - case *ast.InterfaceType: - children = append(children, - tok(n.Interface, len("interface"))) - - case *ast.KeyValueExpr: - children = append(children, - tok(n.Colon, len(":"))) - - case *ast.LabeledStmt: - children = append(children, - tok(n.Colon, len(":"))) - - case *ast.MapType: - children = append(children, - tok(n.Map, len("map"))) - - case *ast.ParenExpr: - children = append(children, - tok(n.Lparen, len("(")), - tok(n.Rparen, len(")"))) - - case *ast.RangeStmt: - children = append(children, - tok(n.For, len("for")), - tok(n.TokPos, len(n.Tok.String()))) - - case *ast.ReturnStmt: - children = append(children, - tok(n.Return, len("return"))) - - case *ast.SelectStmt: - children = append(children, - tok(n.Select, len("select"))) - - case *ast.SelectorExpr: - // nop - - case *ast.SendStmt: - children = append(children, - tok(n.Arrow, len("<-"))) - - case *ast.SliceExpr: - children = append(children, - tok(n.Lbrack, len("[")), - tok(n.Rbrack, len("]"))) - - case *ast.StarExpr: - children = append(children, tok(n.Star, len("*"))) - - case *ast.StructType: - children = append(children, tok(n.Struct, len("struct"))) - - case *ast.SwitchStmt: - children = append(children, tok(n.Switch, len("switch"))) - - case *ast.TypeAssertExpr: - children = append(children, - tok(n.Lparen-1, len(".")), - tok(n.Lparen, len("(")), - tok(n.Rparen, len(")"))) - - case *ast.TypeSpec: - // TODO(adonovan): TypeSpec.{Doc,Comment}? - - case *ast.TypeSwitchStmt: - children = append(children, tok(n.Switch, len("switch"))) - - case *ast.UnaryExpr: - children = append(children, tok(n.OpPos, len(n.Op.String()))) - - case *ast.ValueSpec: - // TODO(adonovan): ValueSpec.{Doc,Comment}? - - case *ast.BadDecl, *ast.BadExpr, *ast.BadStmt: - // nop - } - - // TODO(adonovan): opt: merge the logic of ast.Inspect() into - // the switch above so we can make interleaved callbacks for - // both Nodes and Tokens in the right order and avoid the need - // to sort. - sort.Sort(byPos(children)) - - return children -} - -type byPos []ast.Node - -func (sl byPos) Len() int { - return len(sl) -} -func (sl byPos) Less(i, j int) bool { - return sl[i].Pos() < sl[j].Pos() -} -func (sl byPos) Swap(i, j int) { - sl[i], sl[j] = sl[j], sl[i] -} - -// NodeDescription returns a description of the concrete type of n suitable -// for a user interface. -// -// TODO(adonovan): in some cases (e.g. Field, FieldList, Ident, -// StarExpr) we could be much more specific given the path to the AST -// root. Perhaps we should do that. 
-// -func NodeDescription(n ast.Node) string { - switch n := n.(type) { - case *ast.ArrayType: - return "array type" - case *ast.AssignStmt: - return "assignment" - case *ast.BadDecl: - return "bad declaration" - case *ast.BadExpr: - return "bad expression" - case *ast.BadStmt: - return "bad statement" - case *ast.BasicLit: - return "basic literal" - case *ast.BinaryExpr: - return fmt.Sprintf("binary %s operation", n.Op) - case *ast.BlockStmt: - return "block" - case *ast.BranchStmt: - switch n.Tok { - case token.BREAK: - return "break statement" - case token.CONTINUE: - return "continue statement" - case token.GOTO: - return "goto statement" - case token.FALLTHROUGH: - return "fall-through statement" - } - case *ast.CallExpr: - if len(n.Args) == 1 && !n.Ellipsis.IsValid() { - return "function call (or conversion)" - } - return "function call" - case *ast.CaseClause: - return "case clause" - case *ast.ChanType: - return "channel type" - case *ast.CommClause: - return "communication clause" - case *ast.Comment: - return "comment" - case *ast.CommentGroup: - return "comment group" - case *ast.CompositeLit: - return "composite literal" - case *ast.DeclStmt: - return NodeDescription(n.Decl) + " statement" - case *ast.DeferStmt: - return "defer statement" - case *ast.Ellipsis: - return "ellipsis" - case *ast.EmptyStmt: - return "empty statement" - case *ast.ExprStmt: - return "expression statement" - case *ast.Field: - // Can be any of these: - // struct {x, y int} -- struct field(s) - // struct {T} -- anon struct field - // interface {I} -- interface embedding - // interface {f()} -- interface method - // func (A) func(B) C -- receiver, param(s), result(s) - return "field/method/parameter" - case *ast.FieldList: - return "field/method/parameter list" - case *ast.File: - return "source file" - case *ast.ForStmt: - return "for loop" - case *ast.FuncDecl: - return "function declaration" - case *ast.FuncLit: - return "function literal" - case *ast.FuncType: - return "function type" - case *ast.GenDecl: - switch n.Tok { - case token.IMPORT: - return "import declaration" - case token.CONST: - return "constant declaration" - case token.TYPE: - return "type declaration" - case token.VAR: - return "variable declaration" - } - case *ast.GoStmt: - return "go statement" - case *ast.Ident: - return "identifier" - case *ast.IfStmt: - return "if statement" - case *ast.ImportSpec: - return "import specification" - case *ast.IncDecStmt: - if n.Tok == token.INC { - return "increment statement" - } - return "decrement statement" - case *ast.IndexExpr: - return "index expression" - case *ast.InterfaceType: - return "interface type" - case *ast.KeyValueExpr: - return "key/value association" - case *ast.LabeledStmt: - return "statement label" - case *ast.MapType: - return "map type" - case *ast.Package: - return "package" - case *ast.ParenExpr: - return "parenthesized " + NodeDescription(n.X) - case *ast.RangeStmt: - return "range loop" - case *ast.ReturnStmt: - return "return statement" - case *ast.SelectStmt: - return "select statement" - case *ast.SelectorExpr: - return "selector" - case *ast.SendStmt: - return "channel send" - case *ast.SliceExpr: - return "slice expression" - case *ast.StarExpr: - return "*-operation" // load/store expr or pointer type - case *ast.StructType: - return "struct type" - case *ast.SwitchStmt: - return "switch statement" - case *ast.TypeAssertExpr: - return "type assertion" - case *ast.TypeSpec: - return "type specification" - case *ast.TypeSwitchStmt: - return "type switch" - case 
*ast.UnaryExpr: - return fmt.Sprintf("unary %s operation", n.Op) - case *ast.ValueSpec: - return "value specification" - - } - panic(fmt.Sprintf("unexpected node type: %T", n)) -} diff --git a/vendor/golang.org/x/tools/go/ast/astutil/imports.go b/vendor/golang.org/x/tools/go/ast/astutil/imports.go deleted file mode 100644 index 83f196c..0000000 --- a/vendor/golang.org/x/tools/go/ast/astutil/imports.go +++ /dev/null @@ -1,470 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package astutil contains common utilities for working with the Go AST. -package astutil // import "golang.org/x/tools/go/ast/astutil" - -import ( - "fmt" - "go/ast" - "go/token" - "strconv" - "strings" -) - -// AddImport adds the import path to the file f, if absent. -func AddImport(fset *token.FileSet, f *ast.File, ipath string) (added bool) { - return AddNamedImport(fset, f, "", ipath) -} - -// AddNamedImport adds the import path to the file f, if absent. -// If name is not empty, it is used to rename the import. -// -// For example, calling -// AddNamedImport(fset, f, "pathpkg", "path") -// adds -// import pathpkg "path" -func AddNamedImport(fset *token.FileSet, f *ast.File, name, ipath string) (added bool) { - if imports(f, ipath) { - return false - } - - newImport := &ast.ImportSpec{ - Path: &ast.BasicLit{ - Kind: token.STRING, - Value: strconv.Quote(ipath), - }, - } - if name != "" { - newImport.Name = &ast.Ident{Name: name} - } - - // Find an import decl to add to. - // The goal is to find an existing import - // whose import path has the longest shared - // prefix with ipath. - var ( - bestMatch = -1 // length of longest shared prefix - lastImport = -1 // index in f.Decls of the file's final import decl - impDecl *ast.GenDecl // import decl containing the best match - impIndex = -1 // spec index in impDecl containing the best match - - isThirdPartyPath = isThirdParty(ipath) - ) - for i, decl := range f.Decls { - gen, ok := decl.(*ast.GenDecl) - if ok && gen.Tok == token.IMPORT { - lastImport = i - // Do not add to import "C", to avoid disrupting the - // association with its doc comment, breaking cgo. - if declImports(gen, "C") { - continue - } - - // Match an empty import decl if that's all that is available. - if len(gen.Specs) == 0 && bestMatch == -1 { - impDecl = gen - } - - // Compute longest shared prefix with imports in this group and find best - // matched import spec. - // 1. Always prefer import spec with longest shared prefix. - // 2. While match length is 0, - // - for stdlib package: prefer first import spec. - // - for third party package: prefer first third party import spec. - // We cannot use last import spec as best match for third party package - // because grouped imports are usually placed last by goimports -local - // flag. - // See issue #19190. - seenAnyThirdParty := false - for j, spec := range gen.Specs { - impspec := spec.(*ast.ImportSpec) - p := importPath(impspec) - n := matchLen(p, ipath) - if n > bestMatch || (bestMatch == 0 && !seenAnyThirdParty && isThirdPartyPath) { - bestMatch = n - impDecl = gen - impIndex = j - } - seenAnyThirdParty = seenAnyThirdParty || isThirdParty(p) - } - } - } - - // If no import decl found, add one after the last import. - if impDecl == nil { - impDecl = &ast.GenDecl{ - Tok: token.IMPORT, - } - if lastImport >= 0 { - impDecl.TokPos = f.Decls[lastImport].End() - } else { - // There are no existing imports. 
- // Our new import goes after the package declaration and after - // the comment, if any, that starts on the same line as the - // package declaration. - impDecl.TokPos = f.Package - - file := fset.File(f.Package) - pkgLine := file.Line(f.Package) - for _, c := range f.Comments { - if file.Line(c.Pos()) > pkgLine { - break - } - impDecl.TokPos = c.End() - } - } - f.Decls = append(f.Decls, nil) - copy(f.Decls[lastImport+2:], f.Decls[lastImport+1:]) - f.Decls[lastImport+1] = impDecl - } - - // Insert new import at insertAt. - insertAt := 0 - if impIndex >= 0 { - // insert after the found import - insertAt = impIndex + 1 - } - impDecl.Specs = append(impDecl.Specs, nil) - copy(impDecl.Specs[insertAt+1:], impDecl.Specs[insertAt:]) - impDecl.Specs[insertAt] = newImport - pos := impDecl.Pos() - if insertAt > 0 { - // If there is a comment after an existing import, preserve the comment - // position by adding the new import after the comment. - if spec, ok := impDecl.Specs[insertAt-1].(*ast.ImportSpec); ok && spec.Comment != nil { - pos = spec.Comment.End() - } else { - // Assign same position as the previous import, - // so that the sorter sees it as being in the same block. - pos = impDecl.Specs[insertAt-1].Pos() - } - } - if newImport.Name != nil { - newImport.Name.NamePos = pos - } - newImport.Path.ValuePos = pos - newImport.EndPos = pos - - // Clean up parens. impDecl contains at least one spec. - if len(impDecl.Specs) == 1 { - // Remove unneeded parens. - impDecl.Lparen = token.NoPos - } else if !impDecl.Lparen.IsValid() { - // impDecl needs parens added. - impDecl.Lparen = impDecl.Specs[0].Pos() - } - - f.Imports = append(f.Imports, newImport) - - if len(f.Decls) <= 1 { - return true - } - - // Merge all the import declarations into the first one. - var first *ast.GenDecl - for i := 0; i < len(f.Decls); i++ { - decl := f.Decls[i] - gen, ok := decl.(*ast.GenDecl) - if !ok || gen.Tok != token.IMPORT || declImports(gen, "C") { - continue - } - if first == nil { - first = gen - continue // Don't touch the first one. - } - // We now know there is more than one package in this import - // declaration. Ensure that it ends up parenthesized. - first.Lparen = first.Pos() - // Move the imports of the other import declaration to the first one. - for _, spec := range gen.Specs { - spec.(*ast.ImportSpec).Path.ValuePos = first.Pos() - first.Specs = append(first.Specs, spec) - } - f.Decls = append(f.Decls[:i], f.Decls[i+1:]...) - i-- - } - - return true -} - -func isThirdParty(importPath string) bool { - // Third party package import path usually contains "." (".com", ".org", ...) - // This logic is taken from golang.org/x/tools/imports package. - return strings.Contains(importPath, ".") -} - -// DeleteImport deletes the import path from the file f, if present. -func DeleteImport(fset *token.FileSet, f *ast.File, path string) (deleted bool) { - return DeleteNamedImport(fset, f, "", path) -} - -// DeleteNamedImport deletes the import with the given name and path from the file f, if present. -func DeleteNamedImport(fset *token.FileSet, f *ast.File, name, path string) (deleted bool) { - var delspecs []*ast.ImportSpec - var delcomments []*ast.CommentGroup - - // Find the import nodes that import path, if any. 
- for i := 0; i < len(f.Decls); i++ { - decl := f.Decls[i] - gen, ok := decl.(*ast.GenDecl) - if !ok || gen.Tok != token.IMPORT { - continue - } - for j := 0; j < len(gen.Specs); j++ { - spec := gen.Specs[j] - impspec := spec.(*ast.ImportSpec) - if impspec.Name == nil && name != "" { - continue - } - if impspec.Name != nil && impspec.Name.Name != name { - continue - } - if importPath(impspec) != path { - continue - } - - // We found an import spec that imports path. - // Delete it. - delspecs = append(delspecs, impspec) - deleted = true - copy(gen.Specs[j:], gen.Specs[j+1:]) - gen.Specs = gen.Specs[:len(gen.Specs)-1] - - // If this was the last import spec in this decl, - // delete the decl, too. - if len(gen.Specs) == 0 { - copy(f.Decls[i:], f.Decls[i+1:]) - f.Decls = f.Decls[:len(f.Decls)-1] - i-- - break - } else if len(gen.Specs) == 1 { - if impspec.Doc != nil { - delcomments = append(delcomments, impspec.Doc) - } - if impspec.Comment != nil { - delcomments = append(delcomments, impspec.Comment) - } - for _, cg := range f.Comments { - // Found comment on the same line as the import spec. - if cg.End() < impspec.Pos() && fset.Position(cg.End()).Line == fset.Position(impspec.Pos()).Line { - delcomments = append(delcomments, cg) - break - } - } - - spec := gen.Specs[0].(*ast.ImportSpec) - - // Move the documentation right after the import decl. - if spec.Doc != nil { - for fset.Position(gen.TokPos).Line+1 < fset.Position(spec.Doc.Pos()).Line { - fset.File(gen.TokPos).MergeLine(fset.Position(gen.TokPos).Line) - } - } - for _, cg := range f.Comments { - if cg.End() < spec.Pos() && fset.Position(cg.End()).Line == fset.Position(spec.Pos()).Line { - for fset.Position(gen.TokPos).Line+1 < fset.Position(spec.Pos()).Line { - fset.File(gen.TokPos).MergeLine(fset.Position(gen.TokPos).Line) - } - break - } - } - } - if j > 0 { - lastImpspec := gen.Specs[j-1].(*ast.ImportSpec) - lastLine := fset.Position(lastImpspec.Path.ValuePos).Line - line := fset.Position(impspec.Path.ValuePos).Line - - // We deleted an entry but now there may be - // a blank line-sized hole where the import was. - if line-lastLine > 1 { - // There was a blank line immediately preceding the deleted import, - // so there's no need to close the hole. - // Do nothing. - } else if line != fset.File(gen.Rparen).LineCount() { - // There was no blank line. Close the hole. - fset.File(gen.Rparen).MergeLine(line) - } - } - j-- - } - } - - // Delete imports from f.Imports. - for i := 0; i < len(f.Imports); i++ { - imp := f.Imports[i] - for j, del := range delspecs { - if imp == del { - copy(f.Imports[i:], f.Imports[i+1:]) - f.Imports = f.Imports[:len(f.Imports)-1] - copy(delspecs[j:], delspecs[j+1:]) - delspecs = delspecs[:len(delspecs)-1] - i-- - break - } - } - } - - // Delete comments from f.Comments. - for i := 0; i < len(f.Comments); i++ { - cg := f.Comments[i] - for j, del := range delcomments { - if cg == del { - copy(f.Comments[i:], f.Comments[i+1:]) - f.Comments = f.Comments[:len(f.Comments)-1] - copy(delcomments[j:], delcomments[j+1:]) - delcomments = delcomments[:len(delcomments)-1] - i-- - break - } - } - } - - if len(delspecs) > 0 { - panic(fmt.Sprintf("deleted specs from Decls but not Imports: %v", delspecs)) - } - - return -} - -// RewriteImport rewrites any import of path oldPath to path newPath. 
-func RewriteImport(fset *token.FileSet, f *ast.File, oldPath, newPath string) (rewrote bool) { - for _, imp := range f.Imports { - if importPath(imp) == oldPath { - rewrote = true - // record old End, because the default is to compute - // it using the length of imp.Path.Value. - imp.EndPos = imp.End() - imp.Path.Value = strconv.Quote(newPath) - } - } - return -} - -// UsesImport reports whether a given import is used. -func UsesImport(f *ast.File, path string) (used bool) { - spec := importSpec(f, path) - if spec == nil { - return - } - - name := spec.Name.String() - switch name { - case "": - // If the package name is not explicitly specified, - // make an educated guess. This is not guaranteed to be correct. - lastSlash := strings.LastIndex(path, "/") - if lastSlash == -1 { - name = path - } else { - name = path[lastSlash+1:] - } - case "_", ".": - // Not sure if this import is used - err on the side of caution. - return true - } - - ast.Walk(visitFn(func(n ast.Node) { - sel, ok := n.(*ast.SelectorExpr) - if ok && isTopName(sel.X, name) { - used = true - } - }), f) - - return -} - -type visitFn func(node ast.Node) - -func (fn visitFn) Visit(node ast.Node) ast.Visitor { - fn(node) - return fn -} - -// imports returns true if f imports path. -func imports(f *ast.File, path string) bool { - return importSpec(f, path) != nil -} - -// importSpec returns the import spec if f imports path, -// or nil otherwise. -func importSpec(f *ast.File, path string) *ast.ImportSpec { - for _, s := range f.Imports { - if importPath(s) == path { - return s - } - } - return nil -} - -// importPath returns the unquoted import path of s, -// or "" if the path is not properly quoted. -func importPath(s *ast.ImportSpec) string { - t, err := strconv.Unquote(s.Path.Value) - if err == nil { - return t - } - return "" -} - -// declImports reports whether gen contains an import of path. -func declImports(gen *ast.GenDecl, path string) bool { - if gen.Tok != token.IMPORT { - return false - } - for _, spec := range gen.Specs { - impspec := spec.(*ast.ImportSpec) - if importPath(impspec) == path { - return true - } - } - return false -} - -// matchLen returns the length of the longest path segment prefix shared by x and y. -func matchLen(x, y string) int { - n := 0 - for i := 0; i < len(x) && i < len(y) && x[i] == y[i]; i++ { - if x[i] == '/' { - n++ - } - } - return n -} - -// isTopName returns true if n is a top-level unresolved identifier with the given name. -func isTopName(n ast.Expr, name string) bool { - id, ok := n.(*ast.Ident) - return ok && id.Name == name && id.Obj == nil -} - -// Imports returns the file imports grouped by paragraph. 
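Editor's aside on the astutil import helpers deleted above: a minimal, illustrative sketch of how AddNamedImport and DeleteImport are typically called. The file name and source text are invented for the example; the signatures are the ones shown in this vendored copy of the package.

    package main

    import (
        "go/format"
        "go/parser"
        "go/token"
        "os"

        "golang.org/x/tools/go/ast/astutil"
    )

    func main() {
        const src = "package demo\n\nimport \"fmt\"\n\nfunc demo() { fmt.Println() }\n"
        fset := token.NewFileSet()
        f, err := parser.ParseFile(fset, "demo.go", src, parser.ParseComments)
        if err != nil {
            panic(err)
        }
        // Add `import pathpkg "path"`; returns false if the path is already imported.
        astutil.AddNamedImport(fset, f, "pathpkg", "path")
        // Remove an import again (a no-op here, since "os" is not imported by src).
        astutil.DeleteImport(fset, f, "os")
        // Print the rewritten file.
        if err := format.Node(os.Stdout, fset, f); err != nil {
            panic(err)
        }
    }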
-func Imports(fset *token.FileSet, f *ast.File) [][]*ast.ImportSpec { - var groups [][]*ast.ImportSpec - - for _, decl := range f.Decls { - genDecl, ok := decl.(*ast.GenDecl) - if !ok || genDecl.Tok != token.IMPORT { - break - } - - group := []*ast.ImportSpec{} - - var lastLine int - for _, spec := range genDecl.Specs { - importSpec := spec.(*ast.ImportSpec) - pos := importSpec.Path.ValuePos - line := fset.Position(pos).Line - if lastLine > 0 && pos > 0 && line-lastLine > 1 { - groups = append(groups, group) - group = []*ast.ImportSpec{} - } - group = append(group, importSpec) - lastLine = line - } - groups = append(groups, group) - } - - return groups -} diff --git a/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go b/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go deleted file mode 100644 index cf72ea9..0000000 --- a/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go +++ /dev/null @@ -1,477 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package astutil - -import ( - "fmt" - "go/ast" - "reflect" - "sort" -) - -// An ApplyFunc is invoked by Apply for each node n, even if n is nil, -// before and/or after the node's children, using a Cursor describing -// the current node and providing operations on it. -// -// The return value of ApplyFunc controls the syntax tree traversal. -// See Apply for details. -type ApplyFunc func(*Cursor) bool - -// Apply traverses a syntax tree recursively, starting with root, -// and calling pre and post for each node as described below. -// Apply returns the syntax tree, possibly modified. -// -// If pre is not nil, it is called for each node before the node's -// children are traversed (pre-order). If pre returns false, no -// children are traversed, and post is not called for that node. -// -// If post is not nil, and a prior call of pre didn't return false, -// post is called for each node after its children are traversed -// (post-order). If post returns false, traversal is terminated and -// Apply returns immediately. -// -// Only fields that refer to AST nodes are considered children; -// i.e., token.Pos, Scopes, Objects, and fields of basic types -// (strings, etc.) are ignored. -// -// Children are traversed in the order in which they appear in the -// respective node's struct definition. A package's files are -// traversed in the filenames' alphabetical order. -// -func Apply(root ast.Node, pre, post ApplyFunc) (result ast.Node) { - parent := &struct{ ast.Node }{root} - defer func() { - if r := recover(); r != nil && r != abort { - panic(r) - } - result = parent.Node - }() - a := &application{pre: pre, post: post} - a.apply(parent, "Node", nil, root) - return -} - -var abort = new(int) // singleton, to signal termination of Apply - -// A Cursor describes a node encountered during Apply. -// Information about the node and its parent is available -// from the Node, Parent, Name, and Index methods. -// -// If p is a variable of type and value of the current parent node -// c.Parent(), and f is the field identifier with name c.Name(), -// the following invariants hold: -// -// p.f == c.Node() if c.Index() < 0 -// p.f[c.Index()] == c.Node() if c.Index() >= 0 -// -// The methods Replace, Delete, InsertBefore, and InsertAfter -// can be used to change the AST without disrupting Apply. -type Cursor struct { - parent ast.Node - name string - iter *iterator // valid if non-nil - node ast.Node -} - -// Node returns the current Node. 
-func (c *Cursor) Node() ast.Node { return c.node } - -// Parent returns the parent of the current Node. -func (c *Cursor) Parent() ast.Node { return c.parent } - -// Name returns the name of the parent Node field that contains the current Node. -// If the parent is a *ast.Package and the current Node is a *ast.File, Name returns -// the filename for the current Node. -func (c *Cursor) Name() string { return c.name } - -// Index reports the index >= 0 of the current Node in the slice of Nodes that -// contains it, or a value < 0 if the current Node is not part of a slice. -// The index of the current node changes if InsertBefore is called while -// processing the current node. -func (c *Cursor) Index() int { - if c.iter != nil { - return c.iter.index - } - return -1 -} - -// field returns the current node's parent field value. -func (c *Cursor) field() reflect.Value { - return reflect.Indirect(reflect.ValueOf(c.parent)).FieldByName(c.name) -} - -// Replace replaces the current Node with n. -// The replacement node is not walked by Apply. -func (c *Cursor) Replace(n ast.Node) { - if _, ok := c.node.(*ast.File); ok { - file, ok := n.(*ast.File) - if !ok { - panic("attempt to replace *ast.File with non-*ast.File") - } - c.parent.(*ast.Package).Files[c.name] = file - return - } - - v := c.field() - if i := c.Index(); i >= 0 { - v = v.Index(i) - } - v.Set(reflect.ValueOf(n)) -} - -// Delete deletes the current Node from its containing slice. -// If the current Node is not part of a slice, Delete panics. -// As a special case, if the current node is a package file, -// Delete removes it from the package's Files map. -func (c *Cursor) Delete() { - if _, ok := c.node.(*ast.File); ok { - delete(c.parent.(*ast.Package).Files, c.name) - return - } - - i := c.Index() - if i < 0 { - panic("Delete node not contained in slice") - } - v := c.field() - l := v.Len() - reflect.Copy(v.Slice(i, l), v.Slice(i+1, l)) - v.Index(l - 1).Set(reflect.Zero(v.Type().Elem())) - v.SetLen(l - 1) - c.iter.step-- -} - -// InsertAfter inserts n after the current Node in its containing slice. -// If the current Node is not part of a slice, InsertAfter panics. -// Apply does not walk n. -func (c *Cursor) InsertAfter(n ast.Node) { - i := c.Index() - if i < 0 { - panic("InsertAfter node not contained in slice") - } - v := c.field() - v.Set(reflect.Append(v, reflect.Zero(v.Type().Elem()))) - l := v.Len() - reflect.Copy(v.Slice(i+2, l), v.Slice(i+1, l)) - v.Index(i + 1).Set(reflect.ValueOf(n)) - c.iter.step++ -} - -// InsertBefore inserts n before the current Node in its containing slice. -// If the current Node is not part of a slice, InsertBefore panics. -// Apply will not walk n. -func (c *Cursor) InsertBefore(n ast.Node) { - i := c.Index() - if i < 0 { - panic("InsertBefore node not contained in slice") - } - v := c.field() - v.Set(reflect.Append(v, reflect.Zero(v.Type().Elem()))) - l := v.Len() - reflect.Copy(v.Slice(i+1, l), v.Slice(i, l)) - v.Index(i).Set(reflect.ValueOf(n)) - c.iter.index++ -} - -// application carries all the shared data so we can pass it around cheaply. 
-type application struct { - pre, post ApplyFunc - cursor Cursor - iter iterator -} - -func (a *application) apply(parent ast.Node, name string, iter *iterator, n ast.Node) { - // convert typed nil into untyped nil - if v := reflect.ValueOf(n); v.Kind() == reflect.Ptr && v.IsNil() { - n = nil - } - - // avoid heap-allocating a new cursor for each apply call; reuse a.cursor instead - saved := a.cursor - a.cursor.parent = parent - a.cursor.name = name - a.cursor.iter = iter - a.cursor.node = n - - if a.pre != nil && !a.pre(&a.cursor) { - a.cursor = saved - return - } - - // walk children - // (the order of the cases matches the order of the corresponding node types in go/ast) - switch n := n.(type) { - case nil: - // nothing to do - - // Comments and fields - case *ast.Comment: - // nothing to do - - case *ast.CommentGroup: - if n != nil { - a.applyList(n, "List") - } - - case *ast.Field: - a.apply(n, "Doc", nil, n.Doc) - a.applyList(n, "Names") - a.apply(n, "Type", nil, n.Type) - a.apply(n, "Tag", nil, n.Tag) - a.apply(n, "Comment", nil, n.Comment) - - case *ast.FieldList: - a.applyList(n, "List") - - // Expressions - case *ast.BadExpr, *ast.Ident, *ast.BasicLit: - // nothing to do - - case *ast.Ellipsis: - a.apply(n, "Elt", nil, n.Elt) - - case *ast.FuncLit: - a.apply(n, "Type", nil, n.Type) - a.apply(n, "Body", nil, n.Body) - - case *ast.CompositeLit: - a.apply(n, "Type", nil, n.Type) - a.applyList(n, "Elts") - - case *ast.ParenExpr: - a.apply(n, "X", nil, n.X) - - case *ast.SelectorExpr: - a.apply(n, "X", nil, n.X) - a.apply(n, "Sel", nil, n.Sel) - - case *ast.IndexExpr: - a.apply(n, "X", nil, n.X) - a.apply(n, "Index", nil, n.Index) - - case *ast.SliceExpr: - a.apply(n, "X", nil, n.X) - a.apply(n, "Low", nil, n.Low) - a.apply(n, "High", nil, n.High) - a.apply(n, "Max", nil, n.Max) - - case *ast.TypeAssertExpr: - a.apply(n, "X", nil, n.X) - a.apply(n, "Type", nil, n.Type) - - case *ast.CallExpr: - a.apply(n, "Fun", nil, n.Fun) - a.applyList(n, "Args") - - case *ast.StarExpr: - a.apply(n, "X", nil, n.X) - - case *ast.UnaryExpr: - a.apply(n, "X", nil, n.X) - - case *ast.BinaryExpr: - a.apply(n, "X", nil, n.X) - a.apply(n, "Y", nil, n.Y) - - case *ast.KeyValueExpr: - a.apply(n, "Key", nil, n.Key) - a.apply(n, "Value", nil, n.Value) - - // Types - case *ast.ArrayType: - a.apply(n, "Len", nil, n.Len) - a.apply(n, "Elt", nil, n.Elt) - - case *ast.StructType: - a.apply(n, "Fields", nil, n.Fields) - - case *ast.FuncType: - a.apply(n, "Params", nil, n.Params) - a.apply(n, "Results", nil, n.Results) - - case *ast.InterfaceType: - a.apply(n, "Methods", nil, n.Methods) - - case *ast.MapType: - a.apply(n, "Key", nil, n.Key) - a.apply(n, "Value", nil, n.Value) - - case *ast.ChanType: - a.apply(n, "Value", nil, n.Value) - - // Statements - case *ast.BadStmt: - // nothing to do - - case *ast.DeclStmt: - a.apply(n, "Decl", nil, n.Decl) - - case *ast.EmptyStmt: - // nothing to do - - case *ast.LabeledStmt: - a.apply(n, "Label", nil, n.Label) - a.apply(n, "Stmt", nil, n.Stmt) - - case *ast.ExprStmt: - a.apply(n, "X", nil, n.X) - - case *ast.SendStmt: - a.apply(n, "Chan", nil, n.Chan) - a.apply(n, "Value", nil, n.Value) - - case *ast.IncDecStmt: - a.apply(n, "X", nil, n.X) - - case *ast.AssignStmt: - a.applyList(n, "Lhs") - a.applyList(n, "Rhs") - - case *ast.GoStmt: - a.apply(n, "Call", nil, n.Call) - - case *ast.DeferStmt: - a.apply(n, "Call", nil, n.Call) - - case *ast.ReturnStmt: - a.applyList(n, "Results") - - case *ast.BranchStmt: - a.apply(n, "Label", nil, n.Label) - - case *ast.BlockStmt: - 
a.applyList(n, "List") - - case *ast.IfStmt: - a.apply(n, "Init", nil, n.Init) - a.apply(n, "Cond", nil, n.Cond) - a.apply(n, "Body", nil, n.Body) - a.apply(n, "Else", nil, n.Else) - - case *ast.CaseClause: - a.applyList(n, "List") - a.applyList(n, "Body") - - case *ast.SwitchStmt: - a.apply(n, "Init", nil, n.Init) - a.apply(n, "Tag", nil, n.Tag) - a.apply(n, "Body", nil, n.Body) - - case *ast.TypeSwitchStmt: - a.apply(n, "Init", nil, n.Init) - a.apply(n, "Assign", nil, n.Assign) - a.apply(n, "Body", nil, n.Body) - - case *ast.CommClause: - a.apply(n, "Comm", nil, n.Comm) - a.applyList(n, "Body") - - case *ast.SelectStmt: - a.apply(n, "Body", nil, n.Body) - - case *ast.ForStmt: - a.apply(n, "Init", nil, n.Init) - a.apply(n, "Cond", nil, n.Cond) - a.apply(n, "Post", nil, n.Post) - a.apply(n, "Body", nil, n.Body) - - case *ast.RangeStmt: - a.apply(n, "Key", nil, n.Key) - a.apply(n, "Value", nil, n.Value) - a.apply(n, "X", nil, n.X) - a.apply(n, "Body", nil, n.Body) - - // Declarations - case *ast.ImportSpec: - a.apply(n, "Doc", nil, n.Doc) - a.apply(n, "Name", nil, n.Name) - a.apply(n, "Path", nil, n.Path) - a.apply(n, "Comment", nil, n.Comment) - - case *ast.ValueSpec: - a.apply(n, "Doc", nil, n.Doc) - a.applyList(n, "Names") - a.apply(n, "Type", nil, n.Type) - a.applyList(n, "Values") - a.apply(n, "Comment", nil, n.Comment) - - case *ast.TypeSpec: - a.apply(n, "Doc", nil, n.Doc) - a.apply(n, "Name", nil, n.Name) - a.apply(n, "Type", nil, n.Type) - a.apply(n, "Comment", nil, n.Comment) - - case *ast.BadDecl: - // nothing to do - - case *ast.GenDecl: - a.apply(n, "Doc", nil, n.Doc) - a.applyList(n, "Specs") - - case *ast.FuncDecl: - a.apply(n, "Doc", nil, n.Doc) - a.apply(n, "Recv", nil, n.Recv) - a.apply(n, "Name", nil, n.Name) - a.apply(n, "Type", nil, n.Type) - a.apply(n, "Body", nil, n.Body) - - // Files and packages - case *ast.File: - a.apply(n, "Doc", nil, n.Doc) - a.apply(n, "Name", nil, n.Name) - a.applyList(n, "Decls") - // Don't walk n.Comments; they have either been walked already if - // they are Doc comments, or they can be easily walked explicitly. - - case *ast.Package: - // collect and sort names for reproducible behavior - var names []string - for name := range n.Files { - names = append(names, name) - } - sort.Strings(names) - for _, name := range names { - a.apply(n, name, nil, n.Files[name]) - } - - default: - panic(fmt.Sprintf("Apply: unexpected node type %T", n)) - } - - if a.post != nil && !a.post(&a.cursor) { - panic(abort) - } - - a.cursor = saved -} - -// An iterator controls iteration over a slice of nodes. 
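For context on the Apply/Cursor traversal being deleted here, a small illustrative sketch. The identifier rename is an invented example (it does scope-unaware string matching), not something this repository actually does; it only demonstrates the pre-order callback and Cursor.Replace as defined in this vendored copy.

    package main

    import (
        "go/ast"
        "go/format"
        "go/parser"
        "go/token"
        "os"

        "golang.org/x/tools/go/ast/astutil"
    )

    func main() {
        const src = "package demo\n\nvar foo = 1\n\nfunc use() int { return foo }\n"
        fset := token.NewFileSet()
        f, err := parser.ParseFile(fset, "demo.go", src, 0)
        if err != nil {
            panic(err)
        }
        // Pre-order callback: rename every identifier "foo" to "bar".
        // Returning true keeps walking into the node's children.
        astutil.Apply(f, func(c *astutil.Cursor) bool {
            if id, ok := c.Node().(*ast.Ident); ok && id.Name == "foo" {
                c.Replace(ast.NewIdent("bar"))
            }
            return true
        }, nil)
        _ = format.Node(os.Stdout, fset, f)
    }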
-type iterator struct { - index, step int -} - -func (a *application) applyList(parent ast.Node, name string) { - // avoid heap-allocating a new iterator for each applyList call; reuse a.iter instead - saved := a.iter - a.iter.index = 0 - for { - // must reload parent.name each time, since cursor modifications might change it - v := reflect.Indirect(reflect.ValueOf(parent)).FieldByName(name) - if a.iter.index >= v.Len() { - break - } - - // element x may be nil in a bad AST - be cautious - var x ast.Node - if e := v.Index(a.iter.index); e.IsValid() { - x = e.Interface().(ast.Node) - } - - a.iter.step = 1 - a.apply(parent, name, &a.iter, x) - a.iter.index += a.iter.step - } - a.iter = saved -} diff --git a/vendor/golang.org/x/tools/go/ast/astutil/util.go b/vendor/golang.org/x/tools/go/ast/astutil/util.go deleted file mode 100644 index 7630629..0000000 --- a/vendor/golang.org/x/tools/go/ast/astutil/util.go +++ /dev/null @@ -1,14 +0,0 @@ -package astutil - -import "go/ast" - -// Unparen returns e with any enclosing parentheses stripped. -func Unparen(e ast.Expr) ast.Expr { - for { - p, ok := e.(*ast.ParenExpr) - if !ok { - return e - } - e = p.X - } -} diff --git a/vendor/golang.org/x/tools/go/buildutil/allpackages.go b/vendor/golang.org/x/tools/go/buildutil/allpackages.go deleted file mode 100644 index c0cb03e..0000000 --- a/vendor/golang.org/x/tools/go/buildutil/allpackages.go +++ /dev/null @@ -1,198 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package buildutil provides utilities related to the go/build -// package in the standard library. -// -// All I/O is done via the build.Context file system interface, which must -// be concurrency-safe. -package buildutil // import "golang.org/x/tools/go/buildutil" - -import ( - "go/build" - "os" - "path/filepath" - "sort" - "strings" - "sync" -) - -// AllPackages returns the package path of each Go package in any source -// directory of the specified build context (e.g. $GOROOT or an element -// of $GOPATH). Errors are ignored. The results are sorted. -// All package paths are canonical, and thus may contain "/vendor/". -// -// The result may include import paths for directories that contain no -// *.go files, such as "archive" (in $GOROOT/src). -// -// All I/O is done via the build.Context file system interface, -// which must be concurrency-safe. -// -func AllPackages(ctxt *build.Context) []string { - var list []string - ForEachPackage(ctxt, func(pkg string, _ error) { - list = append(list, pkg) - }) - sort.Strings(list) - return list -} - -// ForEachPackage calls the found function with the package path of -// each Go package it finds in any source directory of the specified -// build context (e.g. $GOROOT or an element of $GOPATH). -// All package paths are canonical, and thus may contain "/vendor/". -// -// If the package directory exists but could not be read, the second -// argument to the found function provides the error. -// -// All I/O is done via the build.Context file system interface, -// which must be concurrency-safe. -// -func ForEachPackage(ctxt *build.Context, found func(importPath string, err error)) { - ch := make(chan item) - - var wg sync.WaitGroup - for _, root := range ctxt.SrcDirs() { - root := root - wg.Add(1) - go func() { - allPackages(ctxt, root, ch) - wg.Done() - }() - } - go func() { - wg.Wait() - close(ch) - }() - - // All calls to found occur in the caller's goroutine. 
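A brief usage sketch for the buildutil enumeration helpers documented above (illustrative only; it simply walks the default build context):

    package main

    import (
        "fmt"
        "go/build"

        "golang.org/x/tools/go/buildutil"
    )

    func main() {
        // Enumerate every package importable from the default build context
        // (GOROOT plus GOPATH); the result is a sorted list of import paths.
        for _, pkg := range buildutil.AllPackages(&build.Default) {
            fmt.Println(pkg)
        }
    }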
- for i := range ch { - found(i.importPath, i.err) - } -} - -type item struct { - importPath string - err error // (optional) -} - -// We use a process-wide counting semaphore to limit -// the number of parallel calls to ReadDir. -var ioLimit = make(chan bool, 20) - -func allPackages(ctxt *build.Context, root string, ch chan<- item) { - root = filepath.Clean(root) + string(os.PathSeparator) - - var wg sync.WaitGroup - - var walkDir func(dir string) - walkDir = func(dir string) { - // Avoid .foo, _foo, and testdata directory trees. - base := filepath.Base(dir) - if base == "" || base[0] == '.' || base[0] == '_' || base == "testdata" { - return - } - - pkg := filepath.ToSlash(strings.TrimPrefix(dir, root)) - - // Prune search if we encounter any of these import paths. - switch pkg { - case "builtin": - return - } - - ioLimit <- true - files, err := ReadDir(ctxt, dir) - <-ioLimit - if pkg != "" || err != nil { - ch <- item{pkg, err} - } - for _, fi := range files { - fi := fi - if fi.IsDir() { - wg.Add(1) - go func() { - walkDir(filepath.Join(dir, fi.Name())) - wg.Done() - }() - } - } - } - - walkDir(root) - wg.Wait() -} - -// ExpandPatterns returns the set of packages matched by patterns, -// which may have the following forms: -// -// golang.org/x/tools/cmd/guru # a single package -// golang.org/x/tools/... # all packages beneath dir -// ... # the entire workspace. -// -// Order is significant: a pattern preceded by '-' removes matching -// packages from the set. For example, these patterns match all encoding -// packages except encoding/xml: -// -// encoding/... -encoding/xml -// -// A trailing slash in a pattern is ignored. (Path components of Go -// package names are separated by slash, not the platform's path separator.) -// -func ExpandPatterns(ctxt *build.Context, patterns []string) map[string]bool { - // TODO(adonovan): support other features of 'go list': - // - "std"/"cmd"/"all" meta-packages - // - "..." not at the end of a pattern - // - relative patterns using "./" or "../" prefix - - pkgs := make(map[string]bool) - doPkg := func(pkg string, neg bool) { - if neg { - delete(pkgs, pkg) - } else { - pkgs[pkg] = true - } - } - - // Scan entire workspace if wildcards are present. - // TODO(adonovan): opt: scan only the necessary subtrees of the workspace. - var all []string - for _, arg := range patterns { - if strings.HasSuffix(arg, "...") { - all = AllPackages(ctxt) - break - } - } - - for _, arg := range patterns { - if arg == "" { - continue - } - - neg := arg[0] == '-' - if neg { - arg = arg[1:] - } - - if arg == "..." { - // ... matches all packages - for _, pkg := range all { - doPkg(pkg, neg) - } - } else if dir := strings.TrimSuffix(arg, "/..."); dir != arg { - // dir/... 
matches all packages beneath dir - for _, pkg := range all { - if strings.HasPrefix(pkg, dir) && - (len(pkg) == len(dir) || pkg[len(dir)] == '/') { - doPkg(pkg, neg) - } - } - } else { - // single package - doPkg(strings.TrimSuffix(arg, "/"), neg) - } - } - - return pkgs -} diff --git a/vendor/golang.org/x/tools/go/buildutil/fakecontext.go b/vendor/golang.org/x/tools/go/buildutil/fakecontext.go deleted file mode 100644 index 24cbcbe..0000000 --- a/vendor/golang.org/x/tools/go/buildutil/fakecontext.go +++ /dev/null @@ -1,108 +0,0 @@ -package buildutil - -import ( - "fmt" - "go/build" - "io" - "io/ioutil" - "os" - "path" - "path/filepath" - "sort" - "strings" - "time" -) - -// FakeContext returns a build.Context for the fake file tree specified -// by pkgs, which maps package import paths to a mapping from file base -// names to contents. -// -// The fake Context has a GOROOT of "/go" and no GOPATH, and overrides -// the necessary file access methods to read from memory instead of the -// real file system. -// -// Unlike a real file tree, the fake one has only two levels---packages -// and files---so ReadDir("/go/src/") returns all packages under -// /go/src/ including, for instance, "math" and "math/big". -// ReadDir("/go/src/math/big") would return all the files in the -// "math/big" package. -// -func FakeContext(pkgs map[string]map[string]string) *build.Context { - clean := func(filename string) string { - f := path.Clean(filepath.ToSlash(filename)) - // Removing "/go/src" while respecting segment - // boundaries has this unfortunate corner case: - if f == "/go/src" { - return "" - } - return strings.TrimPrefix(f, "/go/src/") - } - - ctxt := build.Default // copy - ctxt.GOROOT = "/go" - ctxt.GOPATH = "" - ctxt.IsDir = func(dir string) bool { - dir = clean(dir) - if dir == "" { - return true // needed by (*build.Context).SrcDirs - } - return pkgs[dir] != nil - } - ctxt.ReadDir = func(dir string) ([]os.FileInfo, error) { - dir = clean(dir) - var fis []os.FileInfo - if dir == "" { - // enumerate packages - for importPath := range pkgs { - fis = append(fis, fakeDirInfo(importPath)) - } - } else { - // enumerate files of package - for basename := range pkgs[dir] { - fis = append(fis, fakeFileInfo(basename)) - } - } - sort.Sort(byName(fis)) - return fis, nil - } - ctxt.OpenFile = func(filename string) (io.ReadCloser, error) { - filename = clean(filename) - dir, base := path.Split(filename) - content, ok := pkgs[path.Clean(dir)][base] - if !ok { - return nil, fmt.Errorf("file not found: %s", filename) - } - return ioutil.NopCloser(strings.NewReader(content)), nil - } - ctxt.IsAbsPath = func(path string) bool { - path = filepath.ToSlash(path) - // Don't rely on the default (filepath.Path) since on - // Windows, it reports virtual paths as non-absolute. 
- return strings.HasPrefix(path, "/") - } - return &ctxt -} - -type byName []os.FileInfo - -func (s byName) Len() int { return len(s) } -func (s byName) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s byName) Less(i, j int) bool { return s[i].Name() < s[j].Name() } - -type fakeFileInfo string - -func (fi fakeFileInfo) Name() string { return string(fi) } -func (fakeFileInfo) Sys() interface{} { return nil } -func (fakeFileInfo) ModTime() time.Time { return time.Time{} } -func (fakeFileInfo) IsDir() bool { return false } -func (fakeFileInfo) Size() int64 { return 0 } -func (fakeFileInfo) Mode() os.FileMode { return 0644 } - -type fakeDirInfo string - -func (fd fakeDirInfo) Name() string { return string(fd) } -func (fakeDirInfo) Sys() interface{} { return nil } -func (fakeDirInfo) ModTime() time.Time { return time.Time{} } -func (fakeDirInfo) IsDir() bool { return true } -func (fakeDirInfo) Size() int64 { return 0 } -func (fakeDirInfo) Mode() os.FileMode { return 0755 } diff --git a/vendor/golang.org/x/tools/go/buildutil/overlay.go b/vendor/golang.org/x/tools/go/buildutil/overlay.go deleted file mode 100644 index 3f71c4f..0000000 --- a/vendor/golang.org/x/tools/go/buildutil/overlay.go +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package buildutil - -import ( - "bufio" - "bytes" - "fmt" - "go/build" - "io" - "io/ioutil" - "path/filepath" - "strconv" - "strings" -) - -// OverlayContext overlays a build.Context with additional files from -// a map. Files in the map take precedence over other files. -// -// In addition to plain string comparison, two file names are -// considered equal if their base names match and their directory -// components point at the same directory on the file system. That is, -// symbolic links are followed for directories, but not files. -// -// A common use case for OverlayContext is to allow editors to pass in -// a set of unsaved, modified files. -// -// Currently, only the Context.OpenFile function will respect the -// overlay. This may change in the future. -func OverlayContext(orig *build.Context, overlay map[string][]byte) *build.Context { - // TODO(dominikh): Implement IsDir, HasSubdir and ReadDir - - rc := func(data []byte) (io.ReadCloser, error) { - return ioutil.NopCloser(bytes.NewBuffer(data)), nil - } - - copy := *orig // make a copy - ctxt := &copy - ctxt.OpenFile = func(path string) (io.ReadCloser, error) { - // Fast path: names match exactly. - if content, ok := overlay[path]; ok { - return rc(content) - } - - // Slow path: check for same file under a different - // alias, perhaps due to a symbolic link. - for filename, content := range overlay { - if sameFile(path, filename) { - return rc(content) - } - } - - return OpenFile(orig, path) - } - return ctxt -} - -// ParseOverlayArchive parses an archive containing Go files and their -// contents. The result is intended to be used with OverlayContext. -// -// -// Archive format -// -// The archive consists of a series of files. Each file consists of a -// name, a decimal file size and the file contents, separated by -// newlinews. No newline follows after the file contents. -func ParseOverlayArchive(archive io.Reader) (map[string][]byte, error) { - overlay := make(map[string][]byte) - r := bufio.NewReader(archive) - for { - // Read file name.
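An illustrative sketch of OverlayContext, whose removal begins above: it shadows one on-disk file with in-memory contents. The file path and contents are hypothetical, and per the doc comment only OpenFile consults the overlay in this version of the package.

    package main

    import (
        "fmt"
        "go/build"
        "io/ioutil"

        "golang.org/x/tools/go/buildutil"
    )

    func main() {
        // Pretend an editor holds unsaved contents for this (hypothetical) file.
        overlay := map[string][]byte{
            "/home/user/src/demo/demo.go": []byte("package demo // unsaved edit\n"),
        }
        ctxt := buildutil.OverlayContext(&build.Default, overlay)

        // OpenFile returns the overlaid contents instead of reading from disk.
        rc, err := ctxt.OpenFile("/home/user/src/demo/demo.go")
        if err != nil {
            panic(err)
        }
        defer rc.Close()
        data, _ := ioutil.ReadAll(rc)
        fmt.Printf("%s", data)
    }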
- filename, err := r.ReadString('\n') - if err != nil { - if err == io.EOF { - break // OK - } - return nil, fmt.Errorf("reading archive file name: %v", err) - } - filename = filepath.Clean(strings.TrimSpace(filename)) - - // Read file size. - sz, err := r.ReadString('\n') - if err != nil { - return nil, fmt.Errorf("reading size of archive file %s: %v", filename, err) - } - sz = strings.TrimSpace(sz) - size, err := strconv.ParseUint(sz, 10, 32) - if err != nil { - return nil, fmt.Errorf("parsing size of archive file %s: %v", filename, err) - } - - // Read file content. - content := make([]byte, size) - if _, err := io.ReadFull(r, content); err != nil { - return nil, fmt.Errorf("reading archive file %s: %v", filename, err) - } - overlay[filename] = content - } - - return overlay, nil -} diff --git a/vendor/golang.org/x/tools/go/buildutil/tags.go b/vendor/golang.org/x/tools/go/buildutil/tags.go deleted file mode 100644 index 486606f..0000000 --- a/vendor/golang.org/x/tools/go/buildutil/tags.go +++ /dev/null @@ -1,75 +0,0 @@ -package buildutil - -// This logic was copied from stringsFlag from $GOROOT/src/cmd/go/build.go. - -import "fmt" - -const TagsFlagDoc = "a list of `build tags` to consider satisfied during the build. " + - "For more information about build tags, see the description of " + - "build constraints in the documentation for the go/build package" - -// TagsFlag is an implementation of the flag.Value and flag.Getter interfaces that parses -// a flag value in the same manner as go build's -tags flag and -// populates a []string slice. -// -// See $GOROOT/src/go/build/doc.go for description of build tags. -// See $GOROOT/src/cmd/go/doc.go for description of 'go build -tags' flag. -// -// Example: -// flag.Var((*buildutil.TagsFlag)(&build.Default.BuildTags), "tags", buildutil.TagsFlagDoc) -type TagsFlag []string - -func (v *TagsFlag) Set(s string) error { - var err error - *v, err = splitQuotedFields(s) - if *v == nil { - *v = []string{} - } - return err -} - -func (v *TagsFlag) Get() interface{} { return *v } - -func splitQuotedFields(s string) ([]string, error) { - // Split fields allowing '' or "" around elements. - // Quotes further inside the string do not count. - var f []string - for len(s) > 0 { - for len(s) > 0 && isSpaceByte(s[0]) { - s = s[1:] - } - if len(s) == 0 { - break - } - // Accepted quoted string. No unescaping inside. - if s[0] == '"' || s[0] == '\'' { - quote := s[0] - s = s[1:] - i := 0 - for i < len(s) && s[i] != quote { - i++ - } - if i >= len(s) { - return nil, fmt.Errorf("unterminated %c string", quote) - } - f = append(f, s[:i]) - s = s[i+1:] - continue - } - i := 0 - for i < len(s) && !isSpaceByte(s[i]) { - i++ - } - f = append(f, s[:i]) - s = s[i:] - } - return f, nil -} - -func (v *TagsFlag) String() string { - return "" -} - -func isSpaceByte(c byte) bool { - return c == ' ' || c == '\t' || c == '\n' || c == '\r' -} diff --git a/vendor/golang.org/x/tools/go/buildutil/util.go b/vendor/golang.org/x/tools/go/buildutil/util.go deleted file mode 100644 index fc923d7..0000000 --- a/vendor/golang.org/x/tools/go/buildutil/util.go +++ /dev/null @@ -1,212 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package buildutil - -import ( - "fmt" - "go/ast" - "go/build" - "go/parser" - "go/token" - "io" - "io/ioutil" - "os" - "path" - "path/filepath" - "strings" -) - -// ParseFile behaves like parser.ParseFile, -// but uses the build context's file system interface, if any. -// -// If file is not absolute (as defined by IsAbsPath), the (dir, file) -// components are joined using JoinPath; dir must be absolute. -// -// The displayPath function, if provided, is used to transform the -// filename that will be attached to the ASTs. -// -// TODO(adonovan): call this from go/loader.parseFiles when the tree thaws. -// -func ParseFile(fset *token.FileSet, ctxt *build.Context, displayPath func(string) string, dir string, file string, mode parser.Mode) (*ast.File, error) { - if !IsAbsPath(ctxt, file) { - file = JoinPath(ctxt, dir, file) - } - rd, err := OpenFile(ctxt, file) - if err != nil { - return nil, err - } - defer rd.Close() // ignore error - if displayPath != nil { - file = displayPath(file) - } - return parser.ParseFile(fset, file, rd, mode) -} - -// ContainingPackage returns the package containing filename. -// -// If filename is not absolute, it is interpreted relative to working directory dir. -// All I/O is via the build context's file system interface, if any. -// -// The '...Files []string' fields of the resulting build.Package are not -// populated (build.FindOnly mode). -// -func ContainingPackage(ctxt *build.Context, dir, filename string) (*build.Package, error) { - if !IsAbsPath(ctxt, filename) { - filename = JoinPath(ctxt, dir, filename) - } - - // We must not assume the file tree uses - // "/" always, - // `\` always, - // or os.PathSeparator (which varies by platform), - // but to make any progress, we are forced to assume that - // paths will not use `\` unless the PathSeparator - // is also `\`, thus we can rely on filepath.ToSlash for some sanity. - - dirSlash := path.Dir(filepath.ToSlash(filename)) + "/" - - // We assume that no source root (GOPATH[i] or GOROOT) contains any other. - for _, srcdir := range ctxt.SrcDirs() { - srcdirSlash := filepath.ToSlash(srcdir) + "/" - if importPath, ok := HasSubdir(ctxt, srcdirSlash, dirSlash); ok { - return ctxt.Import(importPath, dir, build.FindOnly) - } - } - - return nil, fmt.Errorf("can't find package containing %s", filename) -} - -// -- Effective methods of file system interface ------------------------- - -// (go/build.Context defines these as methods, but does not export them.) - -// hasSubdir calls ctxt.HasSubdir (if not nil) or else uses -// the local file system to answer the question. -func HasSubdir(ctxt *build.Context, root, dir string) (rel string, ok bool) { - if f := ctxt.HasSubdir; f != nil { - return f(root, dir) - } - - // Try using paths we received. - if rel, ok = hasSubdir(root, dir); ok { - return - } - - // Try expanding symlinks and comparing - // expanded against unexpanded and - // expanded against expanded. 
- rootSym, _ := filepath.EvalSymlinks(root) - dirSym, _ := filepath.EvalSymlinks(dir) - - if rel, ok = hasSubdir(rootSym, dir); ok { - return - } - if rel, ok = hasSubdir(root, dirSym); ok { - return - } - return hasSubdir(rootSym, dirSym) -} - -func hasSubdir(root, dir string) (rel string, ok bool) { - const sep = string(filepath.Separator) - root = filepath.Clean(root) - if !strings.HasSuffix(root, sep) { - root += sep - } - - dir = filepath.Clean(dir) - if !strings.HasPrefix(dir, root) { - return "", false - } - - return filepath.ToSlash(dir[len(root):]), true -} - -// FileExists returns true if the specified file exists, -// using the build context's file system interface. -func FileExists(ctxt *build.Context, path string) bool { - if ctxt.OpenFile != nil { - r, err := ctxt.OpenFile(path) - if err != nil { - return false - } - r.Close() // ignore error - return true - } - _, err := os.Stat(path) - return err == nil -} - -// OpenFile behaves like os.Open, -// but uses the build context's file system interface, if any. -func OpenFile(ctxt *build.Context, path string) (io.ReadCloser, error) { - if ctxt.OpenFile != nil { - return ctxt.OpenFile(path) - } - return os.Open(path) -} - -// IsAbsPath behaves like filepath.IsAbs, -// but uses the build context's file system interface, if any. -func IsAbsPath(ctxt *build.Context, path string) bool { - if ctxt.IsAbsPath != nil { - return ctxt.IsAbsPath(path) - } - return filepath.IsAbs(path) -} - -// JoinPath behaves like filepath.Join, -// but uses the build context's file system interface, if any. -func JoinPath(ctxt *build.Context, path ...string) string { - if ctxt.JoinPath != nil { - return ctxt.JoinPath(path...) - } - return filepath.Join(path...) -} - -// IsDir behaves like os.Stat plus IsDir, -// but uses the build context's file system interface, if any. -func IsDir(ctxt *build.Context, path string) bool { - if ctxt.IsDir != nil { - return ctxt.IsDir(path) - } - fi, err := os.Stat(path) - return err == nil && fi.IsDir() -} - -// ReadDir behaves like ioutil.ReadDir, -// but uses the build context's file system interface, if any. -func ReadDir(ctxt *build.Context, path string) ([]os.FileInfo, error) { - if ctxt.ReadDir != nil { - return ctxt.ReadDir(path) - } - return ioutil.ReadDir(path) -} - -// SplitPathList behaves like filepath.SplitList, -// but uses the build context's file system interface, if any. -func SplitPathList(ctxt *build.Context, s string) []string { - if ctxt.SplitPathList != nil { - return ctxt.SplitPathList(s) - } - return filepath.SplitList(s) -} - -// sameFile returns true if x and y have the same basename and denote -// the same file. -// -func sameFile(x, y string) bool { - if path.Clean(x) == path.Clean(y) { - return true - } - if filepath.Base(x) == filepath.Base(y) { // (optimisation) - if xi, err := os.Stat(x); err == nil { - if yi, err := os.Stat(y); err == nil { - return os.SameFile(xi, yi) - } - } - } - return false -} diff --git a/vendor/golang.org/x/tools/go/gccgoexportdata/gccgoexportdata.go b/vendor/golang.org/x/tools/go/gccgoexportdata/gccgoexportdata.go deleted file mode 100644 index 30ed521..0000000 --- a/vendor/golang.org/x/tools/go/gccgoexportdata/gccgoexportdata.go +++ /dev/null @@ -1,129 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package gccgoexportdata provides functions for reading export data -// files containing type information produced by the gccgo compiler. 
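As a usage note for ContainingPackage and the file-system helpers above, a hedged sketch; the relative file path is invented for the example and error handling is minimal.

    package main

    import (
        "fmt"
        "go/build"
        "os"

        "golang.org/x/tools/go/buildutil"
    )

    func main() {
        wd, err := os.Getwd()
        if err != nil {
            panic(err)
        }
        // Resolve the package that contains a (hypothetical) file relative to wd.
        pkg, err := buildutil.ContainingPackage(&build.Default, wd, "jobfile/parse.go")
        if err != nil {
            panic(err)
        }
        fmt.Println(pkg.ImportPath, pkg.Dir)
    }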
-// -// This package is a stop-gap until such time as gccgo uses the same -// export data format as gc; see Go issue 17573. Once that occurs, this -// package will be deprecated and eventually deleted. -package gccgoexportdata - -// TODO(adonovan): add Find, Write, Importer to the API, -// for symmetry with gcexportdata. - -import ( - "bytes" - "debug/elf" - "fmt" - "go/token" - "go/types" - "io" - "io/ioutil" - "strconv" - "strings" - - "golang.org/x/tools/go/internal/gccgoimporter" -) - -// CompilerInfo executes the specified gccgo compiler and returns -// information about it: its version (e.g. "4.8.0"), its target triple -// (e.g. "x86_64-unknown-linux-gnu"), and the list of directories it -// searches to find standard packages. The given arguments are passed -// directly to calls to the specified gccgo compiler. -func CompilerInfo(gccgo string, args ...string) (version, triple string, dirs []string, err error) { - var inst gccgoimporter.GccgoInstallation - err = inst.InitFromDriver(gccgo, args...) - if err == nil { - version = inst.GccVersion - triple = inst.TargetTriple - dirs = inst.SearchPaths() - } - return -} - -// NewReader returns a reader for the export data section of an object -// (.o) or archive (.a) file read from r. -func NewReader(r io.Reader) (io.Reader, error) { - data, err := ioutil.ReadAll(r) - if err != nil { - return nil, err - } - - // If the file is an archive, extract the first section. - const archiveMagic = "!\n" - if bytes.HasPrefix(data, []byte(archiveMagic)) { - section, err := firstSection(data[len(archiveMagic):]) - if err != nil { - return nil, err - } - data = section - } - - // Data contains an ELF file with a .go_export section. - // ELF magic number is "\x7fELF". - ef, err := elf.NewFile(bytes.NewReader(data)) - if err != nil { - return nil, err - } - sec := ef.Section(".go_export") - if sec == nil { - return nil, fmt.Errorf("no .go_export section") - } - return sec.Open(), nil -} - -// firstSection returns the contents of the first regular file in an ELF -// archive (http://www.sco.com/developers/devspecs/gabi41.pdf, §7.2). -func firstSection(a []byte) ([]byte, error) { - for len(a) >= 60 { - var hdr []byte - hdr, a = a[:60], a[60:] - - name := strings.TrimSpace(string(hdr[:16])) - - sizeStr := string(hdr[48:58]) - size, err := strconv.Atoi(strings.TrimSpace(sizeStr)) - if err != nil { - return nil, fmt.Errorf("invalid size: %q", sizeStr) - } - - if len(a) < size { - return nil, fmt.Errorf("invalid section size: %d", size) - } - - // The payload is padded to an even number of bytes. - var payload []byte - payload, a = a[:size], a[size+size&1:] - - // Skip special files: - // "/" archive symbol table - // "/SYM64/" archive symbol table on e.g. s390x - // "//" archive string table (if any filename is >15 bytes) - if name == "/" || name == "/SYM64/" || name == "//" { - continue - } - - return payload, nil - } - return nil, fmt.Errorf("archive has no regular sections") -} - -// Read reads export data from in, decodes it, and returns type -// information for the package. -// The package name is specified by path. -// -// The FileSet parameter is currently unused but exists for symmetry -// with gcexportdata. -// -// Read may inspect and add to the imports map to ensure that references -// within the export data to other packages are consistent. The caller -// must ensure that imports[path] does not exist, or exists but is -// incomplete (see types.Package.Complete), and Read inserts the -// resulting package into this map entry. 
-// -// On return, the state of the reader is undefined. -func Read(in io.Reader, _ *token.FileSet, imports map[string]*types.Package, path string) (*types.Package, error) { - return gccgoimporter.Parse(in, imports, path) -} diff --git a/vendor/golang.org/x/tools/go/internal/gccgoimporter/backdoor.go b/vendor/golang.org/x/tools/go/internal/gccgoimporter/backdoor.go deleted file mode 100644 index 4feb0dc..0000000 --- a/vendor/golang.org/x/tools/go/internal/gccgoimporter/backdoor.go +++ /dev/null @@ -1,28 +0,0 @@ -package gccgoimporter - -// This file opens a back door to the parser for golang.org/x/tools/go/gccgoexportdata. - -import ( - "go/types" - "io" -) - -// Parse reads and parses gccgo export data from in and constructs a -// Package, inserting it into the imports map. -func Parse(in io.Reader, imports map[string]*types.Package, path string) (_ *types.Package, err error) { - var p parser - p.init(path, in, imports) - defer func() { - switch x := recover().(type) { - case nil: - // success - case importError: - err = x - default: - panic(x) // resume unexpected panic - } - }() - pkg := p.parsePackage() - imports[path] = pkg - return pkg, err -} diff --git a/vendor/golang.org/x/tools/go/internal/gccgoimporter/gccgoinstallation.go b/vendor/golang.org/x/tools/go/internal/gccgoimporter/gccgoinstallation.go deleted file mode 100644 index cebfc57..0000000 --- a/vendor/golang.org/x/tools/go/internal/gccgoimporter/gccgoinstallation.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gccgoimporter - -// This is a verbatim copy of $GOROOT/src/go/internal/gccgoimporter/gccgoinstallation.go. - -import ( - "bufio" - "go/types" - "os" - "os/exec" - "path/filepath" - "strings" -) - -// Information about a specific installation of gccgo. -type GccgoInstallation struct { - // Version of gcc (e.g. 4.8.0). - GccVersion string - - // Target triple (e.g. x86_64-unknown-linux-gnu). - TargetTriple string - - // Built-in library paths used by this installation. - LibPaths []string -} - -// Ask the driver at the given path for information for this GccgoInstallation. -// The given arguments are passed directly to the call to the driver. -func (inst *GccgoInstallation) InitFromDriver(gccgoPath string, args ...string) (err error) { - argv := append([]string{"-###", "-S", "-x", "go", "-"}, args...) - cmd := exec.Command(gccgoPath, argv...) - stderr, err := cmd.StderrPipe() - if err != nil { - return - } - - err = cmd.Start() - if err != nil { - return - } - - scanner := bufio.NewScanner(stderr) - for scanner.Scan() { - line := scanner.Text() - switch { - case strings.HasPrefix(line, "Target: "): - inst.TargetTriple = line[8:] - - case line[0] == ' ': - args := strings.Fields(line) - for _, arg := range args[1:] { - if strings.HasPrefix(arg, "-L") { - inst.LibPaths = append(inst.LibPaths, arg[2:]) - } - } - } - } - - argv = append([]string{"-dumpversion"}, args...) - stdout, err := exec.Command(gccgoPath, argv...).Output() - if err != nil { - return - } - inst.GccVersion = strings.TrimSpace(string(stdout)) - - return -} - -// Return the list of export search paths for this GccgoInstallation. 
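And a sketch of the gccgoexportdata entry points being removed: NewReader extracts the export-data section from an object or archive, and Read decodes it into a *types.Package. The file name "hello.o" is a hypothetical gccgo-compiled object; this will fail on any other input.

    package main

    import (
        "fmt"
        "go/token"
        "go/types"
        "os"

        "golang.org/x/tools/go/gccgoexportdata"
    )

    func main() {
        f, err := os.Open("hello.o") // hypothetical gccgo object file
        if err != nil {
            panic(err)
        }
        defer f.Close()

        // NewReader locates the .go_export section inside the object/archive.
        r, err := gccgoexportdata.NewReader(f)
        if err != nil {
            panic(err)
        }
        // Read decodes the export data into a *types.Package.
        imports := make(map[string]*types.Package)
        pkg, err := gccgoexportdata.Read(r, token.NewFileSet(), imports, "hello")
        if err != nil {
            panic(err)
        }
        fmt.Println(pkg.Path(), pkg.Name())
    }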
-func (inst *GccgoInstallation) SearchPaths() (paths []string) { - for _, lpath := range inst.LibPaths { - spath := filepath.Join(lpath, "go", inst.GccVersion) - fi, err := os.Stat(spath) - if err != nil || !fi.IsDir() { - continue - } - paths = append(paths, spath) - - spath = filepath.Join(spath, inst.TargetTriple) - fi, err = os.Stat(spath) - if err != nil || !fi.IsDir() { - continue - } - paths = append(paths, spath) - } - - paths = append(paths, inst.LibPaths...) - - return -} - -// Return an importer that searches incpaths followed by the gcc installation's -// built-in search paths and the current directory. -func (inst *GccgoInstallation) GetImporter(incpaths []string, initmap map[*types.Package]InitData) Importer { - return GetImporter(append(append(incpaths, inst.SearchPaths()...), "."), initmap) -} diff --git a/vendor/golang.org/x/tools/go/internal/gccgoimporter/importer.go b/vendor/golang.org/x/tools/go/internal/gccgoimporter/importer.go deleted file mode 100644 index a3fae9a..0000000 --- a/vendor/golang.org/x/tools/go/internal/gccgoimporter/importer.go +++ /dev/null @@ -1,209 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package gccgoimporter implements Import for gccgo-generated object files. -package gccgoimporter // import "golang.org/x/tools/go/internal/gccgoimporter" - -// This is a verbatim copy of $GOROOT/src/go/internal/gccgoimporter/importer.go. - -import ( - "bytes" - "debug/elf" - "fmt" - "go/types" - "io" - "os" - "os/exec" - "path/filepath" - "strings" -) - -// A PackageInit describes an imported package that needs initialization. -type PackageInit struct { - Name string // short package name - InitFunc string // name of init function - Priority int // priority of init function, see InitData.Priority -} - -// The gccgo-specific init data for a package. -type InitData struct { - // Initialization priority of this package relative to other packages. - // This is based on the maximum depth of the package's dependency graph; - // it is guaranteed to be greater than that of its dependencies. - Priority int - - // The list of packages which this package depends on to be initialized, - // including itself if needed. This is the subset of the transitive closure of - // the package's dependencies that need initialization. - Inits []PackageInit -} - -// Locate the file from which to read export data. -// This is intended to replicate the logic in gofrontend. -func findExportFile(searchpaths []string, pkgpath string) (string, error) { - for _, spath := range searchpaths { - pkgfullpath := filepath.Join(spath, pkgpath) - pkgdir, name := filepath.Split(pkgfullpath) - - for _, filepath := range [...]string{ - pkgfullpath, - pkgfullpath + ".gox", - pkgdir + "lib" + name + ".so", - pkgdir + "lib" + name + ".a", - pkgfullpath + ".o", - } { - fi, err := os.Stat(filepath) - if err == nil && !fi.IsDir() { - return filepath, nil - } - } - } - - return "", fmt.Errorf("%s: could not find export data (tried %s)", pkgpath, strings.Join(searchpaths, ":")) -} - -const ( - gccgov1Magic = "v1;\n" - gccgov2Magic = "v2;\n" - goimporterMagic = "\n$$ " - archiveMagic = "! 
package object - typeMap map[int]types.Type // type number -> type - initdata InitData // package init priority data -} - -func (p *parser) init(filename string, src io.Reader, imports map[string]*types.Package) { - p.scanner.Init(src) - p.scanner.Error = func(_ *scanner.Scanner, msg string) { p.error(msg) } - p.scanner.Mode = scanner.ScanIdents | scanner.ScanInts | scanner.ScanFloats | scanner.ScanStrings | scanner.ScanComments | scanner.SkipComments - p.scanner.Whitespace = 1<<'\t' | 1<<'\n' | 1<<' ' - p.scanner.Filename = filename // for good error messages - p.next() - p.imports = imports - p.typeMap = make(map[int]types.Type) -} - -type importError struct { - pos scanner.Position - err error -} - -func (e importError) Error() string { - return fmt.Sprintf("import error %s (byte offset = %d): %s", e.pos, e.pos.Offset, e.err) -} - -func (p *parser) error(err interface{}) { - if s, ok := err.(string); ok { - err = errors.New(s) - } - // panic with a runtime.Error if err is not an error - panic(importError{p.scanner.Pos(), err.(error)}) -} - -func (p *parser) errorf(format string, args ...interface{}) { - p.error(fmt.Errorf(format, args...)) -} - -func (p *parser) expect(tok rune) string { - lit := p.lit - if p.tok != tok { - p.errorf("expected %s, got %s (%s)", scanner.TokenString(tok), scanner.TokenString(p.tok), lit) - } - p.next() - return lit -} - -func (p *parser) expectKeyword(keyword string) { - lit := p.expect(scanner.Ident) - if lit != keyword { - p.errorf("expected keyword %s, got %q", keyword, lit) - } -} - -func (p *parser) parseString() string { - str, err := strconv.Unquote(p.expect(scanner.String)) - if err != nil { - p.error(err) - } - return str -} - -// unquotedString = { unquotedStringChar } . -// unquotedStringChar = . -func (p *parser) parseUnquotedString() string { - if p.tok == scanner.EOF { - p.error("unexpected EOF") - } - var buf bytes.Buffer - buf.WriteString(p.scanner.TokenText()) - // This loop needs to examine each character before deciding whether to consume it. If we see a semicolon, - // we need to let it be consumed by p.next(). - for ch := p.scanner.Peek(); ch != ';' && ch != scanner.EOF && p.scanner.Whitespace&(1<" . (optional and ignored) - p.next() - p.expectKeyword("esc") - p.expect(':') - p.expect(scanner.Int) - p.expect('>') - } - if p.tok == '.' { - p.next() - p.expect('.') - p.expect('.') - isVariadic = true - } - typ := p.parseType(pkg) - if isVariadic { - typ = types.NewSlice(typ) - } - param = types.NewParam(token.NoPos, pkg, name, typ) - return -} - -// Var = Name Type . -func (p *parser) parseVar(pkg *types.Package) *types.Var { - name := p.parseName() - return types.NewVar(token.NoPos, pkg, name, p.parseType(pkg)) -} - -// Conversion = "convert" "(" Type "," ConstValue ")" . -func (p *parser) parseConversion(pkg *types.Package) (val constant.Value, typ types.Type) { - p.expectKeyword("convert") - p.expect('(') - typ = p.parseType(pkg) - p.expect(',') - val, _ = p.parseConstValue(pkg) - p.expect(')') - return -} - -// ConstValue = string | "false" | "true" | ["-"] (int ["'"] | FloatOrComplex) | Conversion . -// FloatOrComplex = float ["i" | ("+"|"-") float "i"] . 
-func (p *parser) parseConstValue(pkg *types.Package) (val constant.Value, typ types.Type) { - switch p.tok { - case scanner.String: - str := p.parseString() - val = constant.MakeString(str) - typ = types.Typ[types.UntypedString] - return - - case scanner.Ident: - b := false - switch p.lit { - case "false": - case "true": - b = true - - case "convert": - return p.parseConversion(pkg) - - default: - p.errorf("expected const value, got %s (%q)", scanner.TokenString(p.tok), p.lit) - } - - p.next() - val = constant.MakeBool(b) - typ = types.Typ[types.UntypedBool] - return - } - - sign := "" - if p.tok == '-' { - p.next() - sign = "-" - } - - switch p.tok { - case scanner.Int: - val = constant.MakeFromLiteral(sign+p.lit, token.INT, 0) - if val == nil { - p.error("could not parse integer literal") - } - - p.next() - if p.tok == '\'' { - p.next() - typ = types.Typ[types.UntypedRune] - } else { - typ = types.Typ[types.UntypedInt] - } - - case scanner.Float: - re := sign + p.lit - p.next() - - var im string - switch p.tok { - case '+': - p.next() - im = p.expect(scanner.Float) - - case '-': - p.next() - im = "-" + p.expect(scanner.Float) - - case scanner.Ident: - // re is in fact the imaginary component. Expect "i" below. - im = re - re = "0" - - default: - val = constant.MakeFromLiteral(re, token.FLOAT, 0) - if val == nil { - p.error("could not parse float literal") - } - typ = types.Typ[types.UntypedFloat] - return - } - - p.expectKeyword("i") - reval := constant.MakeFromLiteral(re, token.FLOAT, 0) - if reval == nil { - p.error("could not parse real component of complex literal") - } - imval := constant.MakeFromLiteral(im+"i", token.IMAG, 0) - if imval == nil { - p.error("could not parse imag component of complex literal") - } - val = constant.BinaryOp(reval, token.ADD, imval) - typ = types.Typ[types.UntypedComplex] - - default: - p.errorf("expected const value, got %s (%q)", scanner.TokenString(p.tok), p.lit) - } - - return -} - -// Const = Name [Type] "=" ConstValue . -func (p *parser) parseConst(pkg *types.Package) *types.Const { - name := p.parseName() - var typ types.Type - if p.tok == '<' { - typ = p.parseType(pkg) - } - p.expect('=') - val, vtyp := p.parseConstValue(pkg) - if typ == nil { - typ = vtyp - } - return types.NewConst(token.NoPos, pkg, name, typ, val) -} - -// NamedType = TypeName [ "=" ] Type { Method } . -// TypeName = ExportedName . -// Method = "func" "(" Param ")" Name ParamList ResultList ";" . -func (p *parser) parseNamedType(n int) types.Type { - pkg, name := p.parseExportedName() - scope := pkg.Scope() - - if p.tok == '=' { - // type alias - p.next() - typ := p.parseType(pkg) - if obj := scope.Lookup(name); obj != nil { - typ = obj.Type() // use previously imported type - if typ == nil { - p.errorf("%v (type alias) used in cycle", obj) - } - } else { - obj = types.NewTypeName(token.NoPos, pkg, name, typ) - scope.Insert(obj) - } - p.typeMap[n] = typ - return typ - } - - // named type - obj := scope.Lookup(name) - if obj == nil { - // a named type may be referred to before the underlying type - // is known - set it up - tname := types.NewTypeName(token.NoPos, pkg, name, nil) - types.NewNamed(tname, nil, nil) - scope.Insert(tname) - obj = tname - } - - typ := obj.Type() - p.typeMap[n] = typ - - nt, ok := typ.(*types.Named) - if !ok { - // This can happen for unsafe.Pointer, which is a TypeName holding a Basic type. 
- pt := p.parseType(pkg) - if pt != typ { - p.error("unexpected underlying type for non-named TypeName") - } - return typ - } - - underlying := p.parseType(pkg) - if nt.Underlying() == nil { - nt.SetUnderlying(underlying.Underlying()) - } - - // collect associated methods - for p.tok == scanner.Ident { - p.expectKeyword("func") - p.expect('(') - receiver, _ := p.parseParam(pkg) - p.expect(')') - name := p.parseName() - params, isVariadic := p.parseParamList(pkg) - results := p.parseResultList(pkg) - p.expect(';') - - sig := types.NewSignature(receiver, params, results, isVariadic) - nt.AddMethod(types.NewFunc(token.NoPos, pkg, name, sig)) - } - - return nt -} - -func (p *parser) parseInt() int64 { - lit := p.expect(scanner.Int) - n, err := strconv.ParseInt(lit, 10, 0) - if err != nil { - p.error(err) - } - return n -} - -// ArrayOrSliceType = "[" [ int ] "]" Type . -func (p *parser) parseArrayOrSliceType(pkg *types.Package) types.Type { - p.expect('[') - if p.tok == ']' { - p.next() - return types.NewSlice(p.parseType(pkg)) - } - - n := p.parseInt() - p.expect(']') - return types.NewArray(p.parseType(pkg), n) -} - -// MapType = "map" "[" Type "]" Type . -func (p *parser) parseMapType(pkg *types.Package) types.Type { - p.expectKeyword("map") - p.expect('[') - key := p.parseType(pkg) - p.expect(']') - elem := p.parseType(pkg) - return types.NewMap(key, elem) -} - -// ChanType = "chan" ["<-" | "-<"] Type . -func (p *parser) parseChanType(pkg *types.Package) types.Type { - p.expectKeyword("chan") - dir := types.SendRecv - switch p.tok { - case '-': - p.next() - p.expect('<') - dir = types.SendOnly - - case '<': - // don't consume '<' if it belongs to Type - if p.scanner.Peek() == '-' { - p.next() - p.expect('-') - dir = types.RecvOnly - } - } - - return types.NewChan(dir, p.parseType(pkg)) -} - -// StructType = "struct" "{" { Field } "}" . -func (p *parser) parseStructType(pkg *types.Package) types.Type { - p.expectKeyword("struct") - - var fields []*types.Var - var tags []string - - p.expect('{') - for p.tok != '}' && p.tok != scanner.EOF { - field, tag := p.parseField(pkg) - p.expect(';') - fields = append(fields, field) - tags = append(tags, tag) - } - p.expect('}') - - return types.NewStruct(fields, tags) -} - -// ParamList = "(" [ { Parameter "," } Parameter ] ")" . -func (p *parser) parseParamList(pkg *types.Package) (*types.Tuple, bool) { - var list []*types.Var - isVariadic := false - - p.expect('(') - for p.tok != ')' && p.tok != scanner.EOF { - if len(list) > 0 { - p.expect(',') - } - par, variadic := p.parseParam(pkg) - list = append(list, par) - if variadic { - if isVariadic { - p.error("... not on final argument") - } - isVariadic = true - } - } - p.expect(')') - - return types.NewTuple(list...), isVariadic -} - -// ResultList = Type | ParamList . -func (p *parser) parseResultList(pkg *types.Package) *types.Tuple { - switch p.tok { - case '<': - return types.NewTuple(types.NewParam(token.NoPos, pkg, "", p.parseType(pkg))) - - case '(': - params, _ := p.parseParamList(pkg) - return params - - default: - return nil - } -} - -// FunctionType = ParamList ResultList . -func (p *parser) parseFunctionType(pkg *types.Package) *types.Signature { - params, isVariadic := p.parseParamList(pkg) - results := p.parseResultList(pkg) - return types.NewSignature(nil, params, results, isVariadic) -} - -// Func = Name FunctionType . 
-func (p *parser) parseFunc(pkg *types.Package) *types.Func { - name := p.parseName() - if strings.ContainsRune(name, '$') { - // This is a Type$equal or Type$hash function, which we don't want to parse, - // except for the types. - p.discardDirectiveWhileParsingTypes(pkg) - return nil - } - return types.NewFunc(token.NoPos, pkg, name, p.parseFunctionType(pkg)) -} - -// InterfaceType = "interface" "{" { ("?" Type | Func) ";" } "}" . -func (p *parser) parseInterfaceType(pkg *types.Package) types.Type { - p.expectKeyword("interface") - - var methods []*types.Func - var typs []*types.Named - - p.expect('{') - for p.tok != '}' && p.tok != scanner.EOF { - if p.tok == '?' { - p.next() - typs = append(typs, p.parseType(pkg).(*types.Named)) - } else { - method := p.parseFunc(pkg) - methods = append(methods, method) - } - p.expect(';') - } - p.expect('}') - - return types.NewInterface(methods, typs) -} - -// PointerType = "*" ("any" | Type) . -func (p *parser) parsePointerType(pkg *types.Package) types.Type { - p.expect('*') - if p.tok == scanner.Ident { - p.expectKeyword("any") - return types.Typ[types.UnsafePointer] - } - return types.NewPointer(p.parseType(pkg)) -} - -// TypeDefinition = NamedType | MapType | ChanType | StructType | InterfaceType | PointerType | ArrayOrSliceType | FunctionType . -func (p *parser) parseTypeDefinition(pkg *types.Package, n int) types.Type { - var t types.Type - switch p.tok { - case scanner.String: - t = p.parseNamedType(n) - - case scanner.Ident: - switch p.lit { - case "map": - t = p.parseMapType(pkg) - - case "chan": - t = p.parseChanType(pkg) - - case "struct": - t = p.parseStructType(pkg) - - case "interface": - t = p.parseInterfaceType(pkg) - } - - case '*': - t = p.parsePointerType(pkg) - - case '[': - t = p.parseArrayOrSliceType(pkg) - - case '(': - t = p.parseFunctionType(pkg) - } - - p.typeMap[n] = t - return t -} - -const ( - // From gofrontend/go/export.h - // Note that these values are negative in the gofrontend and have been made positive - // in the gccgoimporter. 
- gccgoBuiltinINT8 = 1 - gccgoBuiltinINT16 = 2 - gccgoBuiltinINT32 = 3 - gccgoBuiltinINT64 = 4 - gccgoBuiltinUINT8 = 5 - gccgoBuiltinUINT16 = 6 - gccgoBuiltinUINT32 = 7 - gccgoBuiltinUINT64 = 8 - gccgoBuiltinFLOAT32 = 9 - gccgoBuiltinFLOAT64 = 10 - gccgoBuiltinINT = 11 - gccgoBuiltinUINT = 12 - gccgoBuiltinUINTPTR = 13 - gccgoBuiltinBOOL = 15 - gccgoBuiltinSTRING = 16 - gccgoBuiltinCOMPLEX64 = 17 - gccgoBuiltinCOMPLEX128 = 18 - gccgoBuiltinERROR = 19 - gccgoBuiltinBYTE = 20 - gccgoBuiltinRUNE = 21 -) - -func lookupBuiltinType(typ int) types.Type { - return [...]types.Type{ - gccgoBuiltinINT8: types.Typ[types.Int8], - gccgoBuiltinINT16: types.Typ[types.Int16], - gccgoBuiltinINT32: types.Typ[types.Int32], - gccgoBuiltinINT64: types.Typ[types.Int64], - gccgoBuiltinUINT8: types.Typ[types.Uint8], - gccgoBuiltinUINT16: types.Typ[types.Uint16], - gccgoBuiltinUINT32: types.Typ[types.Uint32], - gccgoBuiltinUINT64: types.Typ[types.Uint64], - gccgoBuiltinFLOAT32: types.Typ[types.Float32], - gccgoBuiltinFLOAT64: types.Typ[types.Float64], - gccgoBuiltinINT: types.Typ[types.Int], - gccgoBuiltinUINT: types.Typ[types.Uint], - gccgoBuiltinUINTPTR: types.Typ[types.Uintptr], - gccgoBuiltinBOOL: types.Typ[types.Bool], - gccgoBuiltinSTRING: types.Typ[types.String], - gccgoBuiltinCOMPLEX64: types.Typ[types.Complex64], - gccgoBuiltinCOMPLEX128: types.Typ[types.Complex128], - gccgoBuiltinERROR: types.Universe.Lookup("error").Type(), - gccgoBuiltinBYTE: types.Universe.Lookup("byte").Type(), - gccgoBuiltinRUNE: types.Universe.Lookup("rune").Type(), - }[typ] -} - -// Type = "<" "type" ( "-" int | int [ TypeDefinition ] ) ">" . -func (p *parser) parseType(pkg *types.Package) (t types.Type) { - p.expect('<') - p.expectKeyword("type") - - switch p.tok { - case scanner.Int: - n := p.parseInt() - - if p.tok == '>' { - t = p.typeMap[int(n)] - } else { - t = p.parseTypeDefinition(pkg, int(n)) - } - - case '-': - p.next() - n := p.parseInt() - t = lookupBuiltinType(int(n)) - - default: - p.errorf("expected type number, got %s (%q)", scanner.TokenString(p.tok), p.lit) - return nil - } - - p.expect('>') - return -} - -// PackageInit = unquotedString unquotedString int . -func (p *parser) parsePackageInit() PackageInit { - name := p.parseUnquotedString() - initfunc := p.parseUnquotedString() - priority := -1 - if p.version == "v1" { - priority = int(p.parseInt()) - } - return PackageInit{Name: name, InitFunc: initfunc, Priority: priority} -} - -// Throw away tokens until we see a ';'. If we see a '<', attempt to parse as a type. -func (p *parser) discardDirectiveWhileParsingTypes(pkg *types.Package) { - for { - switch p.tok { - case ';': - return - case '<': - p.parseType(pkg) - case scanner.EOF: - p.error("unexpected EOF") - default: - p.next() - } - } -} - -// Create the package if we have parsed both the package path and package name. -func (p *parser) maybeCreatePackage() { - if p.pkgname != "" && p.pkgpath != "" { - p.pkg = p.getPkg(p.pkgpath, p.pkgname) - } -} - -// InitDataDirective = ( "v1" | "v2" ) ";" | -// "priority" int ";" | -// "init" { PackageInit } ";" | -// "checksum" unquotedString ";" . 
-func (p *parser) parseInitDataDirective() { - if p.tok != scanner.Ident { - // unexpected token kind; panic - p.expect(scanner.Ident) - } - - switch p.lit { - case "v1", "v2": - p.version = p.lit - p.next() - p.expect(';') - - case "priority": - p.next() - p.initdata.Priority = int(p.parseInt()) - p.expect(';') - - case "init": - p.next() - for p.tok != ';' && p.tok != scanner.EOF { - p.initdata.Inits = append(p.initdata.Inits, p.parsePackageInit()) - } - p.expect(';') - - case "init_graph": - p.next() - // The graph data is thrown away for now. - for p.tok != ';' && p.tok != scanner.EOF { - p.parseInt() - p.parseInt() - } - p.expect(';') - - case "checksum": - // Don't let the scanner try to parse the checksum as a number. - defer func(mode uint) { - p.scanner.Mode = mode - }(p.scanner.Mode) - p.scanner.Mode &^= scanner.ScanInts | scanner.ScanFloats - p.next() - p.parseUnquotedString() - p.expect(';') - - default: - p.errorf("unexpected identifier: %q", p.lit) - } -} - -// Directive = InitDataDirective | -// "package" unquotedString [ unquotedString ] [ unquotedString ] ";" | -// "pkgpath" unquotedString ";" | -// "prefix" unquotedString ";" | -// "import" unquotedString unquotedString string ";" | -// "func" Func ";" | -// "type" Type ";" | -// "var" Var ";" | -// "const" Const ";" . -func (p *parser) parseDirective() { - if p.tok != scanner.Ident { - // unexpected token kind; panic - p.expect(scanner.Ident) - } - - switch p.lit { - case "v1", "v2", "priority", "init", "init_graph", "checksum": - p.parseInitDataDirective() - - case "package": - p.next() - p.pkgname = p.parseUnquotedString() - p.maybeCreatePackage() - if p.version == "v2" && p.tok != ';' { - p.parseUnquotedString() - p.parseUnquotedString() - } - p.expect(';') - - case "pkgpath": - p.next() - p.pkgpath = p.parseUnquotedString() - p.maybeCreatePackage() - p.expect(';') - - case "prefix": - p.next() - p.pkgpath = p.parseUnquotedString() - p.expect(';') - - case "import": - p.next() - pkgname := p.parseUnquotedString() - pkgpath := p.parseUnquotedString() - p.getPkg(pkgpath, pkgname) - p.parseString() - p.expect(';') - - case "func": - p.next() - fun := p.parseFunc(p.pkg) - if fun != nil { - p.pkg.Scope().Insert(fun) - } - p.expect(';') - - case "type": - p.next() - p.parseType(p.pkg) - p.expect(';') - - case "var": - p.next() - v := p.parseVar(p.pkg) - p.pkg.Scope().Insert(v) - p.expect(';') - - case "const": - p.next() - c := p.parseConst(p.pkg) - p.pkg.Scope().Insert(c) - p.expect(';') - - default: - p.errorf("unexpected identifier: %q", p.lit) - } -} - -// Package = { Directive } . -func (p *parser) parsePackage() *types.Package { - for p.tok != scanner.EOF { - p.parseDirective() - } - for _, typ := range p.typeMap { - if it, ok := typ.(*types.Interface); ok { - it.Complete() - } - } - p.pkg.MarkComplete() - return p.pkg -} diff --git a/vendor/golang.org/x/tools/go/loader/cgo.go b/vendor/golang.org/x/tools/go/loader/cgo.go deleted file mode 100644 index 72c6f50..0000000 --- a/vendor/golang.org/x/tools/go/loader/cgo.go +++ /dev/null @@ -1,207 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package loader - -// This file handles cgo preprocessing of files containing `import "C"`. 
-// -// DESIGN -// -// The approach taken is to run the cgo processor on the package's -// CgoFiles and parse the output, faking the filenames of the -// resulting ASTs so that the synthetic file containing the C types is -// called "C" (e.g. "~/go/src/net/C") and the preprocessed files -// have their original names (e.g. "~/go/src/net/cgo_unix.go"), -// not the names of the actual temporary files. -// -// The advantage of this approach is its fidelity to 'go build'. The -// downside is that the token.Position.Offset for each AST node is -// incorrect, being an offset within the temporary file. Line numbers -// should still be correct because of the //line comments. -// -// The logic of this file is mostly plundered from the 'go build' -// tool, which also invokes the cgo preprocessor. -// -// -// REJECTED ALTERNATIVE -// -// An alternative approach that we explored is to extend go/types' -// Importer mechanism to provide the identity of the importing package -// so that each time `import "C"` appears it resolves to a different -// synthetic package containing just the objects needed in that case. -// The loader would invoke cgo but parse only the cgo_types.go file -// defining the package-level objects, discarding the other files -// resulting from preprocessing. -// -// The benefit of this approach would have been that source-level -// syntax information would correspond exactly to the original cgo -// file, with no preprocessing involved, making source tools like -// godoc, guru, and eg happy. However, the approach was rejected -// due to the additional complexity it would impose on go/types. (It -// made for a beautiful demo, though.) -// -// cgo files, despite their *.go extension, are not legal Go source -// files per the specification since they may refer to unexported -// members of package "C" such as C.int. Also, a function such as -// C.getpwent has in effect two types, one matching its C type and one -// which additionally returns (errno C.int). The cgo preprocessor -// uses name mangling to distinguish these two functions in the -// processed code, but go/types would need to duplicate this logic in -// its handling of function calls, analogous to the treatment of map -// lookups in which y=m[k] and y,ok=m[k] are both legal. - -import ( - "fmt" - "go/ast" - "go/build" - "go/parser" - "go/token" - "io/ioutil" - "log" - "os" - "os/exec" - "path/filepath" - "regexp" - "strings" -) - -// processCgoFiles invokes the cgo preprocessor on bp.CgoFiles, parses -// the output and returns the resulting ASTs. 
-// -func processCgoFiles(bp *build.Package, fset *token.FileSet, DisplayPath func(path string) string, mode parser.Mode) ([]*ast.File, error) { - tmpdir, err := ioutil.TempDir("", strings.Replace(bp.ImportPath, "/", "_", -1)+"_C") - if err != nil { - return nil, err - } - defer os.RemoveAll(tmpdir) - - pkgdir := bp.Dir - if DisplayPath != nil { - pkgdir = DisplayPath(pkgdir) - } - - cgoFiles, cgoDisplayFiles, err := runCgo(bp, pkgdir, tmpdir) - if err != nil { - return nil, err - } - var files []*ast.File - for i := range cgoFiles { - rd, err := os.Open(cgoFiles[i]) - if err != nil { - return nil, err - } - display := filepath.Join(bp.Dir, cgoDisplayFiles[i]) - f, err := parser.ParseFile(fset, display, rd, mode) - rd.Close() - if err != nil { - return nil, err - } - files = append(files, f) - } - return files, nil -} - -var cgoRe = regexp.MustCompile(`[/\\:]`) - -// runCgo invokes the cgo preprocessor on bp.CgoFiles and returns two -// lists of files: the resulting processed files (in temporary -// directory tmpdir) and the corresponding names of the unprocessed files. -// -// runCgo is adapted from (*builder).cgo in -// $GOROOT/src/cmd/go/build.go, but these features are unsupported: -// Objective C, CGOPKGPATH, CGO_FLAGS. -// -func runCgo(bp *build.Package, pkgdir, tmpdir string) (files, displayFiles []string, err error) { - cgoCPPFLAGS, _, _, _ := cflags(bp, true) - _, cgoexeCFLAGS, _, _ := cflags(bp, false) - - if len(bp.CgoPkgConfig) > 0 { - pcCFLAGS, err := pkgConfigFlags(bp) - if err != nil { - return nil, nil, err - } - cgoCPPFLAGS = append(cgoCPPFLAGS, pcCFLAGS...) - } - - // Allows including _cgo_export.h from .[ch] files in the package. - cgoCPPFLAGS = append(cgoCPPFLAGS, "-I", tmpdir) - - // _cgo_gotypes.go (displayed "C") contains the type definitions. - files = append(files, filepath.Join(tmpdir, "_cgo_gotypes.go")) - displayFiles = append(displayFiles, "C") - for _, fn := range bp.CgoFiles { - // "foo.cgo1.go" (displayed "foo.go") is the processed Go source. - f := cgoRe.ReplaceAllString(fn[:len(fn)-len("go")], "_") - files = append(files, filepath.Join(tmpdir, f+"cgo1.go")) - displayFiles = append(displayFiles, fn) - } - - var cgoflags []string - if bp.Goroot && bp.ImportPath == "runtime/cgo" { - cgoflags = append(cgoflags, "-import_runtime_cgo=false") - } - if bp.Goroot && bp.ImportPath == "runtime/race" || bp.ImportPath == "runtime/cgo" { - cgoflags = append(cgoflags, "-import_syscall=false") - } - - args := stringList( - "go", "tool", "cgo", "-objdir", tmpdir, cgoflags, "--", - cgoCPPFLAGS, cgoexeCFLAGS, bp.CgoFiles, - ) - if false { - log.Printf("Running cgo for package %q: %s (dir=%s)", bp.ImportPath, args, pkgdir) - } - cmd := exec.Command(args[0], args[1:]...) - cmd.Dir = pkgdir - cmd.Stdout = os.Stderr - cmd.Stderr = os.Stderr - if err := cmd.Run(); err != nil { - return nil, nil, fmt.Errorf("cgo failed: %s: %s", args, err) - } - - return files, displayFiles, nil -} - -// -- unmodified from 'go build' --------------------------------------- - -// Return the flags to use when invoking the C or C++ compilers, or cgo. 
-func cflags(p *build.Package, def bool) (cppflags, cflags, cxxflags, ldflags []string) { - var defaults string - if def { - defaults = "-g -O2" - } - - cppflags = stringList(envList("CGO_CPPFLAGS", ""), p.CgoCPPFLAGS) - cflags = stringList(envList("CGO_CFLAGS", defaults), p.CgoCFLAGS) - cxxflags = stringList(envList("CGO_CXXFLAGS", defaults), p.CgoCXXFLAGS) - ldflags = stringList(envList("CGO_LDFLAGS", defaults), p.CgoLDFLAGS) - return -} - -// envList returns the value of the given environment variable broken -// into fields, using the default value when the variable is empty. -func envList(key, def string) []string { - v := os.Getenv(key) - if v == "" { - v = def - } - return strings.Fields(v) -} - -// stringList's arguments should be a sequence of string or []string values. -// stringList flattens them into a single []string. -func stringList(args ...interface{}) []string { - var x []string - for _, arg := range args { - switch arg := arg.(type) { - case []string: - x = append(x, arg...) - case string: - x = append(x, arg) - default: - panic("stringList: invalid argument") - } - } - return x -} diff --git a/vendor/golang.org/x/tools/go/loader/cgo_pkgconfig.go b/vendor/golang.org/x/tools/go/loader/cgo_pkgconfig.go deleted file mode 100644 index de57422..0000000 --- a/vendor/golang.org/x/tools/go/loader/cgo_pkgconfig.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package loader - -import ( - "errors" - "fmt" - "go/build" - "os/exec" - "strings" -) - -// pkgConfig runs pkg-config with the specified arguments and returns the flags it prints. -func pkgConfig(mode string, pkgs []string) (flags []string, err error) { - cmd := exec.Command("pkg-config", append([]string{mode}, pkgs...)...) - out, err := cmd.CombinedOutput() - if err != nil { - s := fmt.Sprintf("%s failed: %v", strings.Join(cmd.Args, " "), err) - if len(out) > 0 { - s = fmt.Sprintf("%s: %s", s, out) - } - return nil, errors.New(s) - } - if len(out) > 0 { - flags = strings.Fields(string(out)) - } - return -} - -// pkgConfigFlags calls pkg-config if needed and returns the cflags -// needed to build the package. -func pkgConfigFlags(p *build.Package) (cflags []string, err error) { - if len(p.CgoPkgConfig) == 0 { - return nil, nil - } - return pkgConfig("--cflags", p.CgoPkgConfig) -} diff --git a/vendor/golang.org/x/tools/go/loader/doc.go b/vendor/golang.org/x/tools/go/loader/doc.go deleted file mode 100644 index 9b51c9e..0000000 --- a/vendor/golang.org/x/tools/go/loader/doc.go +++ /dev/null @@ -1,205 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package loader loads a complete Go program from source code, parsing -// and type-checking the initial packages plus their transitive closure -// of dependencies. The ASTs and the derived facts are retained for -// later use. -// -// THIS INTERFACE IS EXPERIMENTAL AND IS LIKELY TO CHANGE. -// -// The package defines two primary types: Config, which specifies a -// set of initial packages to load and various other options; and -// Program, which is the result of successfully loading the packages -// specified by a configuration. -// -// The configuration can be set directly, but *Config provides various -// convenience methods to simplify the common cases, each of which can -// be called any number of times. 
Finally, these are followed by a -// call to Load() to actually load and type-check the program. -// -// var conf loader.Config -// -// // Use the command-line arguments to specify -// // a set of initial packages to load from source. -// // See FromArgsUsage for help. -// rest, err := conf.FromArgs(os.Args[1:], wantTests) -// -// // Parse the specified files and create an ad hoc package with path "foo". -// // All files must have the same 'package' declaration. -// conf.CreateFromFilenames("foo", "foo.go", "bar.go") -// -// // Create an ad hoc package with path "foo" from -// // the specified already-parsed files. -// // All ASTs must have the same 'package' declaration. -// conf.CreateFromFiles("foo", parsedFiles) -// -// // Add "runtime" to the set of packages to be loaded. -// conf.Import("runtime") -// -// // Adds "fmt" and "fmt_test" to the set of packages -// // to be loaded. "fmt" will include *_test.go files. -// conf.ImportWithTests("fmt") -// -// // Finally, load all the packages specified by the configuration. -// prog, err := conf.Load() -// -// See examples_test.go for examples of API usage. -// -// -// CONCEPTS AND TERMINOLOGY -// -// The WORKSPACE is the set of packages accessible to the loader. The -// workspace is defined by Config.Build, a *build.Context. The -// default context treats subdirectories of $GOROOT and $GOPATH as -// packages, but this behavior may be overridden. -// -// An AD HOC package is one specified as a set of source files on the -// command line. In the simplest case, it may consist of a single file -// such as $GOROOT/src/net/http/triv.go. -// -// EXTERNAL TEST packages are those comprised of a set of *_test.go -// files all with the same 'package foo_test' declaration, all in the -// same directory. (go/build.Package calls these files XTestFiles.) -// -// An IMPORTABLE package is one that can be referred to by some import -// spec. Every importable package is uniquely identified by its -// PACKAGE PATH or just PATH, a string such as "fmt", "encoding/json", -// or "cmd/vendor/golang.org/x/arch/x86/x86asm". A package path -// typically denotes a subdirectory of the workspace. -// -// An import declaration uses an IMPORT PATH to refer to a package. -// Most import declarations use the package path as the import path. -// -// Due to VENDORING (https://golang.org/s/go15vendor), the -// interpretation of an import path may depend on the directory in which -// it appears. To resolve an import path to a package path, go/build -// must search the enclosing directories for a subdirectory named -// "vendor". -// -// ad hoc packages and external test packages are NON-IMPORTABLE. The -// path of an ad hoc package is inferred from the package -// declarations of its files and is therefore not a unique package key. -// For example, Config.CreatePkgs may specify two initial ad hoc -// packages, both with path "main". -// -// An AUGMENTED package is an importable package P plus all the -// *_test.go files with same 'package foo' declaration as P. -// (go/build.Package calls these files TestFiles.) -// -// The INITIAL packages are those specified in the configuration. A -// DEPENDENCY is a package loaded to satisfy an import in an initial -// package or another dependency. 
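For reference, the API whose documentation is being removed above can be exercised with a very small program. The sketch below is illustrative only (not part of this patch); it uses only the loader.Config methods described in this comment, and the package paths "fmt" and "runtime" are arbitrary examples:

package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/go/loader"
)

func main() {
	var conf loader.Config

	// ImportWithTests also pulls in fmt's in-package *_test.go files and
	// appends the external test package (fmt_test), if any, to prog.Created,
	// as described in the package comment.
	conf.ImportWithTests("fmt")
	conf.Import("runtime")

	prog, err := conf.Load()
	if err != nil {
		log.Fatal(err)
	}

	// InitialPackages returns the Created and Imported packages.
	for _, info := range prog.InitialPackages() {
		fmt.Printf("%s: %d files, %d errors\n",
			info.Pkg.Path(), len(info.Files), len(info.Errors))
	}
}

With AllowErrors left at its zero value, Load fails if any package had an I/O, parse, or type error; setting it to true instead records such errors in each PackageInfo.Errors.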
-// -package loader - -// IMPLEMENTATION NOTES -// -// 'go test', in-package test files, and import cycles -// --------------------------------------------------- -// -// An external test package may depend upon members of the augmented -// package that are not in the unaugmented package, such as functions -// that expose internals. (See bufio/export_test.go for an example.) -// So, the loader must ensure that for each external test package -// it loads, it also augments the corresponding non-test package. -// -// The import graph over n unaugmented packages must be acyclic; the -// import graph over n-1 unaugmented packages plus one augmented -// package must also be acyclic. ('go test' relies on this.) But the -// import graph over n augmented packages may contain cycles. -// -// First, all the (unaugmented) non-test packages and their -// dependencies are imported in the usual way; the loader reports an -// error if it detects an import cycle. -// -// Then, each package P for which testing is desired is augmented by -// the list P' of its in-package test files, by calling -// (*types.Checker).Files. This arrangement ensures that P' may -// reference definitions within P, but P may not reference definitions -// within P'. Furthermore, P' may import any other package, including -// ones that depend upon P, without an import cycle error. -// -// Consider two packages A and B, both of which have lists of -// in-package test files we'll call A' and B', and which have the -// following import graph edges: -// B imports A -// B' imports A -// A' imports B -// This last edge would be expected to create an error were it not -// for the special type-checking discipline above. -// Cycles of size greater than two are possible. For example: -// compress/bzip2/bzip2_test.go (package bzip2) imports "io/ioutil" -// io/ioutil/tempfile_test.go (package ioutil) imports "regexp" -// regexp/exec_test.go (package regexp) imports "compress/bzip2" -// -// -// Concurrency -// ----------- -// -// Let us define the import dependency graph as follows. Each node is a -// list of files passed to (Checker).Files at once. Many of these lists -// are the production code of an importable Go package, so those nodes -// are labelled by the package's path. The remaining nodes are -// ad hoc packages and lists of in-package *_test.go files that augment -// an importable package; those nodes have no label. -// -// The edges of the graph represent import statements appearing within a -// file. An edge connects a node (a list of files) to the node it -// imports, which is importable and thus always labelled. -// -// Loading is controlled by this dependency graph. -// -// To reduce I/O latency, we start loading a package's dependencies -// asynchronously as soon as we've parsed its files and enumerated its -// imports (scanImports). This performs a preorder traversal of the -// import dependency graph. -// -// To exploit hardware parallelism, we type-check unrelated packages in -// parallel, where "unrelated" means not ordered by the partial order of -// the import dependency graph. -// -// We use a concurrency-safe non-blocking cache (importer.imported) to -// record the results of type-checking, whether success or failure. An -// entry is created in this cache by startLoad the first time the -// package is imported. The first goroutine to request an entry becomes -// responsible for completing the task and broadcasting completion to -// subsequent requestors, which block until then. 
-// -// Type checking occurs in (parallel) postorder: we cannot type-check a -// set of files until we have loaded and type-checked all of their -// immediate dependencies (and thus all of their transitive -// dependencies). If the input were guaranteed free of import cycles, -// this would be trivial: we could simply wait for completion of the -// dependencies and then invoke the typechecker. -// -// But as we saw in the 'go test' section above, some cycles in the -// import graph over packages are actually legal, so long as the -// cycle-forming edge originates in the in-package test files that -// augment the package. This explains why the nodes of the import -// dependency graph are not packages, but lists of files: the unlabelled -// nodes avoid the cycles. Consider packages A and B where B imports A -// and A's in-package tests AT import B. The naively constructed import -// graph over packages would contain a cycle (A+AT) --> B --> (A+AT) but -// the graph over lists of files is AT --> B --> A, where AT is an -// unlabelled node. -// -// Awaiting completion of the dependencies in a cyclic graph would -// deadlock, so we must materialize the import dependency graph (as -// importer.graph) and check whether each import edge forms a cycle. If -// x imports y, and the graph already contains a path from y to x, then -// there is an import cycle, in which case the processing of x must not -// wait for the completion of processing of y. -// -// When the type-checker makes a callback (doImport) to the loader for a -// given import edge, there are two possible cases. In the normal case, -// the dependency has already been completely type-checked; doImport -// does a cache lookup and returns it. In the cyclic case, the entry in -// the cache is still necessarily incomplete, indicating a cycle. We -// perform the cycle check again to obtain the error message, and return -// the error. -// -// The result of using concurrency is about a 2.5x speedup for stdlib_test. - -// TODO(adonovan): overhaul the package documentation. diff --git a/vendor/golang.org/x/tools/go/loader/loader.go b/vendor/golang.org/x/tools/go/loader/loader.go deleted file mode 100644 index de756f7..0000000 --- a/vendor/golang.org/x/tools/go/loader/loader.go +++ /dev/null @@ -1,1077 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package loader - -// See doc.go for package documentation and implementation notes. - -import ( - "errors" - "fmt" - "go/ast" - "go/build" - "go/parser" - "go/token" - "go/types" - "os" - "path/filepath" - "sort" - "strings" - "sync" - "time" - - "golang.org/x/tools/go/ast/astutil" -) - -var ignoreVendor build.ImportMode - -const trace = false // show timing info for type-checking - -// Config specifies the configuration for loading a whole program from -// Go source code. -// The zero value for Config is a ready-to-use default configuration. -type Config struct { - // Fset is the file set for the parser to use when loading the - // program. If nil, it may be lazily initialized by any - // method of Config. - Fset *token.FileSet - - // ParserMode specifies the mode to be used by the parser when - // loading source packages. - ParserMode parser.Mode - - // TypeChecker contains options relating to the type checker. - // - // The supplied IgnoreFuncBodies is not used; the effective - // value comes from the TypeCheckFuncBodies func below. 
- // The supplied Import function is not used either. - TypeChecker types.Config - - // TypeCheckFuncBodies is a predicate over package paths. - // A package for which the predicate is false will - // have its package-level declarations type checked, but not - // its function bodies; this can be used to quickly load - // dependencies from source. If nil, all func bodies are type - // checked. - TypeCheckFuncBodies func(path string) bool - - // If Build is non-nil, it is used to locate source packages. - // Otherwise &build.Default is used. - // - // By default, cgo is invoked to preprocess Go files that - // import the fake package "C". This behaviour can be - // disabled by setting CGO_ENABLED=0 in the environment prior - // to startup, or by setting Build.CgoEnabled=false. - Build *build.Context - - // The current directory, used for resolving relative package - // references such as "./go/loader". If empty, os.Getwd will be - // used instead. - Cwd string - - // If DisplayPath is non-nil, it is used to transform each - // file name obtained from Build.Import(). This can be used - // to prevent a virtualized build.Config's file names from - // leaking into the user interface. - DisplayPath func(path string) string - - // If AllowErrors is true, Load will return a Program even - // if some of the its packages contained I/O, parser or type - // errors; such errors are accessible via PackageInfo.Errors. If - // false, Load will fail if any package had an error. - AllowErrors bool - - // CreatePkgs specifies a list of non-importable initial - // packages to create. The resulting packages will appear in - // the corresponding elements of the Program.Created slice. - CreatePkgs []PkgSpec - - // ImportPkgs specifies a set of initial packages to load. - // The map keys are package paths. - // - // The map value indicates whether to load tests. If true, Load - // will add and type-check two lists of files to the package: - // non-test files followed by in-package *_test.go files. In - // addition, it will append the external test package (if any) - // to Program.Created. - ImportPkgs map[string]bool - - // FindPackage is called during Load to create the build.Package - // for a given import path from a given directory. - // If FindPackage is nil, (*build.Context).Import is used. - // A client may use this hook to adapt to a proprietary build - // system that does not follow the "go build" layout - // conventions, for example. - // - // It must be safe to call concurrently from multiple goroutines. - FindPackage func(ctxt *build.Context, importPath, fromDir string, mode build.ImportMode) (*build.Package, error) - - // AfterTypeCheck is called immediately after a list of files - // has been type-checked and appended to info.Files. - // - // This optional hook function is the earliest opportunity for - // the client to observe the output of the type checker, - // which may be useful to reduce analysis latency when loading - // a large program. - // - // The function is permitted to modify info.Info, for instance - // to clear data structures that are no longer needed, which can - // dramatically reduce peak memory consumption. - // - // The function may be called twice for the same PackageInfo: - // once for the files of the package and again for the - // in-package test files. - // - // It must be safe to call concurrently from multiple goroutines. - AfterTypeCheck func(info *PackageInfo, files []*ast.File) -} - -// A PkgSpec specifies a non-importable package to be created by Load. 
-// Files are processed first, but typically only one of Files and -// Filenames is provided. The path needn't be globally unique. -// -// For vendoring purposes, the package's directory is the one that -// contains the first file. -type PkgSpec struct { - Path string // package path ("" => use package declaration) - Files []*ast.File // ASTs of already-parsed files - Filenames []string // names of files to be parsed -} - -// A Program is a Go program loaded from source as specified by a Config. -type Program struct { - Fset *token.FileSet // the file set for this program - - // Created[i] contains the initial package whose ASTs or - // filenames were supplied by Config.CreatePkgs[i], followed by - // the external test package, if any, of each package in - // Config.ImportPkgs ordered by ImportPath. - // - // NOTE: these files must not import "C". Cgo preprocessing is - // only performed on imported packages, not ad hoc packages. - // - // TODO(adonovan): we need to copy and adapt the logic of - // goFilesPackage (from $GOROOT/src/cmd/go/build.go) and make - // Config.Import and Config.Create methods return the same kind - // of entity, essentially a build.Package. - // Perhaps we can even reuse that type directly. - Created []*PackageInfo - - // Imported contains the initially imported packages, - // as specified by Config.ImportPkgs. - Imported map[string]*PackageInfo - - // AllPackages contains the PackageInfo of every package - // encountered by Load: all initial packages and all - // dependencies, including incomplete ones. - AllPackages map[*types.Package]*PackageInfo - - // importMap is the canonical mapping of package paths to - // packages. It contains all Imported initial packages, but not - // Created ones, and all imported dependencies. - importMap map[string]*types.Package -} - -// PackageInfo holds the ASTs and facts derived by the type-checker -// for a single package. -// -// Not mutated once exposed via the API. -// -type PackageInfo struct { - Pkg *types.Package - Importable bool // true if 'import "Pkg.Path()"' would resolve to this - TransitivelyErrorFree bool // true if Pkg and all its dependencies are free of errors - Files []*ast.File // syntax trees for the package's files - Errors []error // non-nil if the package had errors - types.Info // type-checker deductions. - dir string // package directory - - checker *types.Checker // transient type-checker state - errorFunc func(error) -} - -func (info *PackageInfo) String() string { return info.Pkg.Path() } - -func (info *PackageInfo) appendError(err error) { - if info.errorFunc != nil { - info.errorFunc(err) - } else { - fmt.Fprintln(os.Stderr, err) - } - info.Errors = append(info.Errors, err) -} - -func (conf *Config) fset() *token.FileSet { - if conf.Fset == nil { - conf.Fset = token.NewFileSet() - } - return conf.Fset -} - -// ParseFile is a convenience function (intended for testing) that invokes -// the parser using the Config's FileSet, which is initialized if nil. -// -// src specifies the parser input as a string, []byte, or io.Reader, and -// filename is its apparent name. If src is nil, the contents of -// filename are read from the file system. -// -func (conf *Config) ParseFile(filename string, src interface{}) (*ast.File, error) { - // TODO(adonovan): use conf.build() etc like parseFiles does. - return parser.ParseFile(conf.fset(), filename, src, conf.ParserMode) -} - -// FromArgsUsage is a partial usage message that applications calling -// FromArgs may wish to include in their -help output. 
-const FromArgsUsage = ` - is a list of arguments denoting a set of initial packages. -It may take one of two forms: - -1. A list of *.go source files. - - All of the specified files are loaded, parsed and type-checked - as a single package. All the files must belong to the same directory. - -2. A list of import paths, each denoting a package. - - The package's directory is found relative to the $GOROOT and - $GOPATH using similar logic to 'go build', and the *.go files in - that directory are loaded, parsed and type-checked as a single - package. - - In addition, all *_test.go files in the directory are then loaded - and parsed. Those files whose package declaration equals that of - the non-*_test.go files are included in the primary package. Test - files whose package declaration ends with "_test" are type-checked - as another package, the 'external' test package, so that a single - import path may denote two packages. (Whether this behaviour is - enabled is tool-specific, and may depend on additional flags.) - -A '--' argument terminates the list of packages. -` - -// FromArgs interprets args as a set of initial packages to load from -// source and updates the configuration. It returns the list of -// unconsumed arguments. -// -// It is intended for use in command-line interfaces that require a -// set of initial packages to be specified; see FromArgsUsage message -// for details. -// -// Only superficial errors are reported at this stage; errors dependent -// on I/O are detected during Load. -// -func (conf *Config) FromArgs(args []string, xtest bool) ([]string, error) { - var rest []string - for i, arg := range args { - if arg == "--" { - rest = args[i+1:] - args = args[:i] - break // consume "--" and return the remaining args - } - } - - if len(args) > 0 && strings.HasSuffix(args[0], ".go") { - // Assume args is a list of a *.go files - // denoting a single ad hoc package. - for _, arg := range args { - if !strings.HasSuffix(arg, ".go") { - return nil, fmt.Errorf("named files must be .go files: %s", arg) - } - } - conf.CreateFromFilenames("", args...) - } else { - // Assume args are directories each denoting a - // package and (perhaps) an external test, iff xtest. - for _, arg := range args { - if xtest { - conf.ImportWithTests(arg) - } else { - conf.Import(arg) - } - } - } - - return rest, nil -} - -// CreateFromFilenames is a convenience function that adds -// a conf.CreatePkgs entry to create a package of the specified *.go -// files. -// -func (conf *Config) CreateFromFilenames(path string, filenames ...string) { - conf.CreatePkgs = append(conf.CreatePkgs, PkgSpec{Path: path, Filenames: filenames}) -} - -// CreateFromFiles is a convenience function that adds a conf.CreatePkgs -// entry to create package of the specified path and parsed files. -// -func (conf *Config) CreateFromFiles(path string, files ...*ast.File) { - conf.CreatePkgs = append(conf.CreatePkgs, PkgSpec{Path: path, Files: files}) -} - -// ImportWithTests is a convenience function that adds path to -// ImportPkgs, the set of initial source packages located relative to -// $GOPATH. The package will be augmented by any *_test.go files in -// its directory that contain a "package x" (not "package x_test") -// declaration. -// -// In addition, if any *_test.go files contain a "package x_test" -// declaration, an additional package comprising just those files will -// be added to CreatePkgs. 
-// -func (conf *Config) ImportWithTests(path string) { conf.addImport(path, true) } - -// Import is a convenience function that adds path to ImportPkgs, the -// set of initial packages that will be imported from source. -// -func (conf *Config) Import(path string) { conf.addImport(path, false) } - -func (conf *Config) addImport(path string, tests bool) { - if path == "C" { - return // ignore; not a real package - } - if conf.ImportPkgs == nil { - conf.ImportPkgs = make(map[string]bool) - } - conf.ImportPkgs[path] = conf.ImportPkgs[path] || tests -} - -// PathEnclosingInterval returns the PackageInfo and ast.Node that -// contain source interval [start, end), and all the node's ancestors -// up to the AST root. It searches all ast.Files of all packages in prog. -// exact is defined as for astutil.PathEnclosingInterval. -// -// The zero value is returned if not found. -// -func (prog *Program) PathEnclosingInterval(start, end token.Pos) (pkg *PackageInfo, path []ast.Node, exact bool) { - for _, info := range prog.AllPackages { - for _, f := range info.Files { - if f.Pos() == token.NoPos { - // This can happen if the parser saw - // too many errors and bailed out. - // (Use parser.AllErrors to prevent that.) - continue - } - if !tokenFileContainsPos(prog.Fset.File(f.Pos()), start) { - continue - } - if path, exact := astutil.PathEnclosingInterval(f, start, end); path != nil { - return info, path, exact - } - } - } - return nil, nil, false -} - -// InitialPackages returns a new slice containing the set of initial -// packages (Created + Imported) in unspecified order. -// -func (prog *Program) InitialPackages() []*PackageInfo { - infos := make([]*PackageInfo, 0, len(prog.Created)+len(prog.Imported)) - infos = append(infos, prog.Created...) - for _, info := range prog.Imported { - infos = append(infos, info) - } - return infos -} - -// Package returns the ASTs and results of type checking for the -// specified package. -func (prog *Program) Package(path string) *PackageInfo { - if info, ok := prog.AllPackages[prog.importMap[path]]; ok { - return info - } - for _, info := range prog.Created { - if path == info.Pkg.Path() { - return info - } - } - return nil -} - -// ---------- Implementation ---------- - -// importer holds the working state of the algorithm. -type importer struct { - conf *Config // the client configuration - start time.Time // for logging - - progMu sync.Mutex // guards prog - prog *Program // the resulting program - - // findpkg is a memoization of FindPackage. - findpkgMu sync.Mutex // guards findpkg - findpkg map[findpkgKey]*findpkgValue - - importedMu sync.Mutex // guards imported - imported map[string]*importInfo // all imported packages (incl. failures) by import path - - // import dependency graph: graph[x][y] => x imports y - // - // Since non-importable packages cannot be cyclic, we ignore - // their imports, thus we only need the subgraph over importable - // packages. Nodes are identified by their import paths. - graphMu sync.Mutex - graph map[string]map[string]bool -} - -type findpkgKey struct { - importPath string - fromDir string - mode build.ImportMode -} - -type findpkgValue struct { - ready chan struct{} // closed to broadcast readiness - bp *build.Package - err error -} - -// importInfo tracks the success or failure of a single import. -// -// Upon completion, exactly one of info and err is non-nil: -// info on successful creation of a package, err otherwise. -// A successful package may still contain type errors. 
-// -type importInfo struct { - path string // import path - info *PackageInfo // results of typechecking (including errors) - complete chan struct{} // closed to broadcast that info is set. -} - -// awaitCompletion blocks until ii is complete, -// i.e. the info field is safe to inspect. -func (ii *importInfo) awaitCompletion() { - <-ii.complete // wait for close -} - -// Complete marks ii as complete. -// Its info and err fields will not be subsequently updated. -func (ii *importInfo) Complete(info *PackageInfo) { - if info == nil { - panic("info == nil") - } - ii.info = info - close(ii.complete) -} - -type importError struct { - path string // import path - err error // reason for failure to create a package -} - -// Load creates the initial packages specified by conf.{Create,Import}Pkgs, -// loading their dependencies packages as needed. -// -// On success, Load returns a Program containing a PackageInfo for -// each package. On failure, it returns an error. -// -// If AllowErrors is true, Load will return a Program even if some -// packages contained I/O, parser or type errors, or if dependencies -// were missing. (Such errors are accessible via PackageInfo.Errors. If -// false, Load will fail if any package had an error. -// -// It is an error if no packages were loaded. -// -func (conf *Config) Load() (*Program, error) { - // Create a simple default error handler for parse/type errors. - if conf.TypeChecker.Error == nil { - conf.TypeChecker.Error = func(e error) { fmt.Fprintln(os.Stderr, e) } - } - - // Set default working directory for relative package references. - if conf.Cwd == "" { - var err error - conf.Cwd, err = os.Getwd() - if err != nil { - return nil, err - } - } - - // Install default FindPackage hook using go/build logic. - if conf.FindPackage == nil { - conf.FindPackage = (*build.Context).Import - } - - prog := &Program{ - Fset: conf.fset(), - Imported: make(map[string]*PackageInfo), - importMap: make(map[string]*types.Package), - AllPackages: make(map[*types.Package]*PackageInfo), - } - - imp := importer{ - conf: conf, - prog: prog, - findpkg: make(map[findpkgKey]*findpkgValue), - imported: make(map[string]*importInfo), - start: time.Now(), - graph: make(map[string]map[string]bool), - } - - // -- loading proper (concurrent phase) -------------------------------- - - var errpkgs []string // packages that contained errors - - // Load the initially imported packages and their dependencies, - // in parallel. - // No vendor check on packages imported from the command line. - infos, importErrors := imp.importAll("", conf.Cwd, conf.ImportPkgs, ignoreVendor) - for _, ie := range importErrors { - conf.TypeChecker.Error(ie.err) // failed to create package - errpkgs = append(errpkgs, ie.path) - } - for _, info := range infos { - prog.Imported[info.Pkg.Path()] = info - } - - // Augment the designated initial packages by their tests. - // Dependencies are loaded in parallel. - var xtestPkgs []*build.Package - for importPath, augment := range conf.ImportPkgs { - if !augment { - continue - } - - // No vendor check on packages imported from command line. - bp, err := imp.findPackage(importPath, conf.Cwd, ignoreVendor) - if err != nil { - // Package not found, or can't even parse package declaration. - // Already reported by previous loop; ignore it. - continue - } - - // Needs external test package? - if len(bp.XTestGoFiles) > 0 { - xtestPkgs = append(xtestPkgs, bp) - } - - // Consult the cache using the canonical package path. 
- path := bp.ImportPath - imp.importedMu.Lock() // (unnecessary, we're sequential here) - ii, ok := imp.imported[path] - // Paranoid checks added due to issue #11012. - if !ok { - // Unreachable. - // The previous loop called importAll and thus - // startLoad for each path in ImportPkgs, which - // populates imp.imported[path] with a non-zero value. - panic(fmt.Sprintf("imported[%q] not found", path)) - } - if ii == nil { - // Unreachable. - // The ii values in this loop are the same as in - // the previous loop, which enforced the invariant - // that at least one of ii.err and ii.info is non-nil. - panic(fmt.Sprintf("imported[%q] == nil", path)) - } - if ii.info == nil { - // Unreachable. - // awaitCompletion has the postcondition - // ii.info != nil. - panic(fmt.Sprintf("imported[%q].info = nil", path)) - } - info := ii.info - imp.importedMu.Unlock() - - // Parse the in-package test files. - files, errs := imp.conf.parsePackageFiles(bp, 't') - for _, err := range errs { - info.appendError(err) - } - - // The test files augmenting package P cannot be imported, - // but may import packages that import P, - // so we must disable the cycle check. - imp.addFiles(info, files, false) - } - - createPkg := func(path, dir string, files []*ast.File, errs []error) { - info := imp.newPackageInfo(path, dir) - for _, err := range errs { - info.appendError(err) - } - - // Ad hoc packages are non-importable, - // so no cycle check is needed. - // addFiles loads dependencies in parallel. - imp.addFiles(info, files, false) - prog.Created = append(prog.Created, info) - } - - // Create packages specified by conf.CreatePkgs. - for _, cp := range conf.CreatePkgs { - files, errs := parseFiles(conf.fset(), conf.build(), nil, conf.Cwd, cp.Filenames, conf.ParserMode) - files = append(files, cp.Files...) - - path := cp.Path - if path == "" { - if len(files) > 0 { - path = files[0].Name.Name - } else { - path = "(unnamed)" - } - } - - dir := conf.Cwd - if len(files) > 0 && files[0].Pos().IsValid() { - dir = filepath.Dir(conf.fset().File(files[0].Pos()).Name()) - } - createPkg(path, dir, files, errs) - } - - // Create external test packages. - sort.Sort(byImportPath(xtestPkgs)) - for _, bp := range xtestPkgs { - files, errs := imp.conf.parsePackageFiles(bp, 'x') - createPkg(bp.ImportPath+"_test", bp.Dir, files, errs) - } - - // -- finishing up (sequential) ---------------------------------------- - - if len(prog.Imported)+len(prog.Created) == 0 { - return nil, errors.New("no initial packages were loaded") - } - - // Create infos for indirectly imported packages. - // e.g. incomplete packages without syntax, loaded from export data. - for _, obj := range prog.importMap { - info := prog.AllPackages[obj] - if info == nil { - prog.AllPackages[obj] = &PackageInfo{Pkg: obj, Importable: true} - } else { - // finished - info.checker = nil - info.errorFunc = nil - } - } - - if !conf.AllowErrors { - // Report errors in indirectly imported packages. 
- for _, info := range prog.AllPackages { - if len(info.Errors) > 0 { - errpkgs = append(errpkgs, info.Pkg.Path()) - } - } - if errpkgs != nil { - var more string - if len(errpkgs) > 3 { - more = fmt.Sprintf(" and %d more", len(errpkgs)-3) - errpkgs = errpkgs[:3] - } - return nil, fmt.Errorf("couldn't load packages due to errors: %s%s", - strings.Join(errpkgs, ", "), more) - } - } - - markErrorFreePackages(prog.AllPackages) - - return prog, nil -} - -type byImportPath []*build.Package - -func (b byImportPath) Len() int { return len(b) } -func (b byImportPath) Less(i, j int) bool { return b[i].ImportPath < b[j].ImportPath } -func (b byImportPath) Swap(i, j int) { b[i], b[j] = b[j], b[i] } - -// markErrorFreePackages sets the TransitivelyErrorFree flag on all -// applicable packages. -func markErrorFreePackages(allPackages map[*types.Package]*PackageInfo) { - // Build the transpose of the import graph. - importedBy := make(map[*types.Package]map[*types.Package]bool) - for P := range allPackages { - for _, Q := range P.Imports() { - clients, ok := importedBy[Q] - if !ok { - clients = make(map[*types.Package]bool) - importedBy[Q] = clients - } - clients[P] = true - } - } - - // Find all packages reachable from some error package. - reachable := make(map[*types.Package]bool) - var visit func(*types.Package) - visit = func(p *types.Package) { - if !reachable[p] { - reachable[p] = true - for q := range importedBy[p] { - visit(q) - } - } - } - for _, info := range allPackages { - if len(info.Errors) > 0 { - visit(info.Pkg) - } - } - - // Mark the others as "transitively error-free". - for _, info := range allPackages { - if !reachable[info.Pkg] { - info.TransitivelyErrorFree = true - } - } -} - -// build returns the effective build context. -func (conf *Config) build() *build.Context { - if conf.Build != nil { - return conf.Build - } - return &build.Default -} - -// parsePackageFiles enumerates the files belonging to package path, -// then loads, parses and returns them, plus a list of I/O or parse -// errors that were encountered. -// -// 'which' indicates which files to include: -// 'g': include non-test *.go source files (GoFiles + processed CgoFiles) -// 't': include in-package *_test.go source files (TestGoFiles) -// 'x': include external *_test.go source files. (XTestGoFiles) -// -func (conf *Config) parsePackageFiles(bp *build.Package, which rune) ([]*ast.File, []error) { - if bp.ImportPath == "unsafe" { - return nil, nil - } - var filenames []string - switch which { - case 'g': - filenames = bp.GoFiles - case 't': - filenames = bp.TestGoFiles - case 'x': - filenames = bp.XTestGoFiles - default: - panic(which) - } - - files, errs := parseFiles(conf.fset(), conf.build(), conf.DisplayPath, bp.Dir, filenames, conf.ParserMode) - - // Preprocess CgoFiles and parse the outputs (sequentially). - if which == 'g' && bp.CgoFiles != nil { - cgofiles, err := processCgoFiles(bp, conf.fset(), conf.DisplayPath, conf.ParserMode) - if err != nil { - errs = append(errs, err) - } else { - files = append(files, cgofiles...) - } - } - - return files, errs -} - -// doImport imports the package denoted by path. -// It implements the types.Importer signature. -// -// It returns an error if a package could not be created -// (e.g. go/build or parse error), but type errors are reported via -// the types.Config.Error callback (the first of which is also saved -// in the package's PackageInfo). -// -// Idempotent. 
-// -func (imp *importer) doImport(from *PackageInfo, to string) (*types.Package, error) { - if to == "C" { - // This should be unreachable, but ad hoc packages are - // not currently subject to cgo preprocessing. - // See https://github.com/golang/go/issues/11627. - return nil, fmt.Errorf(`the loader doesn't cgo-process ad hoc packages like %q; see Go issue 11627`, - from.Pkg.Path()) - } - - bp, err := imp.findPackage(to, from.dir, 0) - if err != nil { - return nil, err - } - - // The standard unsafe package is handled specially, - // and has no PackageInfo. - if bp.ImportPath == "unsafe" { - return types.Unsafe, nil - } - - // Look for the package in the cache using its canonical path. - path := bp.ImportPath - imp.importedMu.Lock() - ii := imp.imported[path] - imp.importedMu.Unlock() - if ii == nil { - panic("internal error: unexpected import: " + path) - } - if ii.info != nil { - return ii.info.Pkg, nil - } - - // Import of incomplete package: this indicates a cycle. - fromPath := from.Pkg.Path() - if cycle := imp.findPath(path, fromPath); cycle != nil { - cycle = append([]string{fromPath}, cycle...) - return nil, fmt.Errorf("import cycle: %s", strings.Join(cycle, " -> ")) - } - - panic("internal error: import of incomplete (yet acyclic) package: " + fromPath) -} - -// findPackage locates the package denoted by the importPath in the -// specified directory. -func (imp *importer) findPackage(importPath, fromDir string, mode build.ImportMode) (*build.Package, error) { - // We use a non-blocking duplicate-suppressing cache (gopl.io §9.7) - // to avoid holding the lock around FindPackage. - key := findpkgKey{importPath, fromDir, mode} - imp.findpkgMu.Lock() - v, ok := imp.findpkg[key] - if ok { - // cache hit - imp.findpkgMu.Unlock() - - <-v.ready // wait for entry to become ready - } else { - // Cache miss: this goroutine becomes responsible for - // populating the map entry and broadcasting its readiness. - v = &findpkgValue{ready: make(chan struct{})} - imp.findpkg[key] = v - imp.findpkgMu.Unlock() - - ioLimit <- true - v.bp, v.err = imp.conf.FindPackage(imp.conf.build(), importPath, fromDir, mode) - <-ioLimit - - if _, ok := v.err.(*build.NoGoError); ok { - v.err = nil // empty directory is not an error - } - - close(v.ready) // broadcast ready condition - } - return v.bp, v.err -} - -// importAll loads, parses, and type-checks the specified packages in -// parallel and returns their completed importInfos in unspecified order. -// -// fromPath is the package path of the importing package, if it is -// importable, "" otherwise. It is used for cycle detection. -// -// fromDir is the directory containing the import declaration that -// caused these imports. -// -func (imp *importer) importAll(fromPath, fromDir string, imports map[string]bool, mode build.ImportMode) (infos []*PackageInfo, errors []importError) { - // TODO(adonovan): opt: do the loop in parallel once - // findPackage is non-blocking. - var pending []*importInfo - for importPath := range imports { - bp, err := imp.findPackage(importPath, fromDir, mode) - if err != nil { - errors = append(errors, importError{ - path: importPath, - err: err, - }) - continue - } - pending = append(pending, imp.startLoad(bp)) - } - - if fromPath != "" { - // We're loading a set of imports. - // - // We must record graph edges from the importing package - // to its dependencies, and check for cycles. 
- imp.graphMu.Lock() - deps, ok := imp.graph[fromPath] - if !ok { - deps = make(map[string]bool) - imp.graph[fromPath] = deps - } - for _, ii := range pending { - deps[ii.path] = true - } - imp.graphMu.Unlock() - } - - for _, ii := range pending { - if fromPath != "" { - if cycle := imp.findPath(ii.path, fromPath); cycle != nil { - // Cycle-forming import: we must not await its - // completion since it would deadlock. - // - // We don't record the error in ii since - // the error is really associated with the - // cycle-forming edge, not the package itself. - // (Also it would complicate the - // invariants of importPath completion.) - if trace { - fmt.Fprintf(os.Stderr, "import cycle: %q\n", cycle) - } - continue - } - } - ii.awaitCompletion() - infos = append(infos, ii.info) - } - - return infos, errors -} - -// findPath returns an arbitrary path from 'from' to 'to' in the import -// graph, or nil if there was none. -func (imp *importer) findPath(from, to string) []string { - imp.graphMu.Lock() - defer imp.graphMu.Unlock() - - seen := make(map[string]bool) - var search func(stack []string, importPath string) []string - search = func(stack []string, importPath string) []string { - if !seen[importPath] { - seen[importPath] = true - stack = append(stack, importPath) - if importPath == to { - return stack - } - for x := range imp.graph[importPath] { - if p := search(stack, x); p != nil { - return p - } - } - } - return nil - } - return search(make([]string, 0, 20), from) -} - -// startLoad initiates the loading, parsing and type-checking of the -// specified package and its dependencies, if it has not already begun. -// -// It returns an importInfo, not necessarily in a completed state. The -// caller must call awaitCompletion() before accessing its info field. -// -// startLoad is concurrency-safe and idempotent. -// -func (imp *importer) startLoad(bp *build.Package) *importInfo { - path := bp.ImportPath - imp.importedMu.Lock() - ii, ok := imp.imported[path] - if !ok { - ii = &importInfo{path: path, complete: make(chan struct{})} - imp.imported[path] = ii - go func() { - info := imp.load(bp) - ii.Complete(info) - }() - } - imp.importedMu.Unlock() - - return ii -} - -// load implements package loading by parsing Go source files -// located by go/build. -func (imp *importer) load(bp *build.Package) *PackageInfo { - info := imp.newPackageInfo(bp.ImportPath, bp.Dir) - info.Importable = true - files, errs := imp.conf.parsePackageFiles(bp, 'g') - for _, err := range errs { - info.appendError(err) - } - - imp.addFiles(info, files, true) - - imp.progMu.Lock() - imp.prog.importMap[bp.ImportPath] = info.Pkg - imp.progMu.Unlock() - - return info -} - -// addFiles adds and type-checks the specified files to info, loading -// their dependencies if needed. The order of files determines the -// package initialization order. It may be called multiple times on the -// same package. Errors are appended to the info.Errors field. -// -// cycleCheck determines whether the imports within files create -// dependency edges that should be checked for potential cycles. -// -func (imp *importer) addFiles(info *PackageInfo, files []*ast.File, cycleCheck bool) { - // Ensure the dependencies are loaded, in parallel. - var fromPath string - if cycleCheck { - fromPath = info.Pkg.Path() - } - // TODO(adonovan): opt: make the caller do scanImports. - // Callers with a build.Package can skip it. 
- imp.importAll(fromPath, info.dir, scanImports(files), 0) - - if trace { - fmt.Fprintf(os.Stderr, "%s: start %q (%d)\n", - time.Since(imp.start), info.Pkg.Path(), len(files)) - } - - // Don't call checker.Files on Unsafe, even with zero files, - // because it would mutate the package, which is a global. - if info.Pkg == types.Unsafe { - if len(files) > 0 { - panic(`"unsafe" package contains unexpected files`) - } - } else { - // Ignore the returned (first) error since we - // already collect them all in the PackageInfo. - info.checker.Files(files) - info.Files = append(info.Files, files...) - } - - if imp.conf.AfterTypeCheck != nil { - imp.conf.AfterTypeCheck(info, files) - } - - if trace { - fmt.Fprintf(os.Stderr, "%s: stop %q\n", - time.Since(imp.start), info.Pkg.Path()) - } -} - -func (imp *importer) newPackageInfo(path, dir string) *PackageInfo { - var pkg *types.Package - if path == "unsafe" { - pkg = types.Unsafe - } else { - pkg = types.NewPackage(path, "") - } - info := &PackageInfo{ - Pkg: pkg, - Info: types.Info{ - Types: make(map[ast.Expr]types.TypeAndValue), - Defs: make(map[*ast.Ident]types.Object), - Uses: make(map[*ast.Ident]types.Object), - Implicits: make(map[ast.Node]types.Object), - Scopes: make(map[ast.Node]*types.Scope), - Selections: make(map[*ast.SelectorExpr]*types.Selection), - }, - errorFunc: imp.conf.TypeChecker.Error, - dir: dir, - } - - // Copy the types.Config so we can vary it across PackageInfos. - tc := imp.conf.TypeChecker - tc.IgnoreFuncBodies = false - if f := imp.conf.TypeCheckFuncBodies; f != nil { - tc.IgnoreFuncBodies = !f(path) - } - tc.Importer = closure{imp, info} - tc.Error = info.appendError // appendError wraps the user's Error function - - info.checker = types.NewChecker(&tc, imp.conf.fset(), pkg, &info.Info) - imp.progMu.Lock() - imp.prog.AllPackages[pkg] = info - imp.progMu.Unlock() - return info -} - -type closure struct { - imp *importer - info *PackageInfo -} - -func (c closure) Import(to string) (*types.Package, error) { return c.imp.doImport(c.info, to) } diff --git a/vendor/golang.org/x/tools/go/loader/util.go b/vendor/golang.org/x/tools/go/loader/util.go deleted file mode 100644 index 7f38dd7..0000000 --- a/vendor/golang.org/x/tools/go/loader/util.go +++ /dev/null @@ -1,124 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package loader - -import ( - "go/ast" - "go/build" - "go/parser" - "go/token" - "io" - "os" - "strconv" - "sync" - - "golang.org/x/tools/go/buildutil" -) - -// We use a counting semaphore to limit -// the number of parallel I/O calls per process. -var ioLimit = make(chan bool, 10) - -// parseFiles parses the Go source files within directory dir and -// returns the ASTs of the ones that could be at least partially parsed, -// along with a list of I/O and parse errors encountered. -// -// I/O is done via ctxt, which may specify a virtual file system. -// displayPath is used to transform the filenames attached to the ASTs. 
-// -func parseFiles(fset *token.FileSet, ctxt *build.Context, displayPath func(string) string, dir string, files []string, mode parser.Mode) ([]*ast.File, []error) { - if displayPath == nil { - displayPath = func(path string) string { return path } - } - var wg sync.WaitGroup - n := len(files) - parsed := make([]*ast.File, n) - errors := make([]error, n) - for i, file := range files { - if !buildutil.IsAbsPath(ctxt, file) { - file = buildutil.JoinPath(ctxt, dir, file) - } - wg.Add(1) - go func(i int, file string) { - ioLimit <- true // wait - defer func() { - wg.Done() - <-ioLimit // signal - }() - var rd io.ReadCloser - var err error - if ctxt.OpenFile != nil { - rd, err = ctxt.OpenFile(file) - } else { - rd, err = os.Open(file) - } - if err != nil { - errors[i] = err // open failed - return - } - - // ParseFile may return both an AST and an error. - parsed[i], errors[i] = parser.ParseFile(fset, displayPath(file), rd, mode) - rd.Close() - }(i, file) - } - wg.Wait() - - // Eliminate nils, preserving order. - var o int - for _, f := range parsed { - if f != nil { - parsed[o] = f - o++ - } - } - parsed = parsed[:o] - - o = 0 - for _, err := range errors { - if err != nil { - errors[o] = err - o++ - } - } - errors = errors[:o] - - return parsed, errors -} - -// scanImports returns the set of all import paths from all -// import specs in the specified files. -func scanImports(files []*ast.File) map[string]bool { - imports := make(map[string]bool) - for _, f := range files { - for _, decl := range f.Decls { - if decl, ok := decl.(*ast.GenDecl); ok && decl.Tok == token.IMPORT { - for _, spec := range decl.Specs { - spec := spec.(*ast.ImportSpec) - - // NB: do not assume the program is well-formed! - path, err := strconv.Unquote(spec.Path.Value) - if err != nil { - continue // quietly ignore the error - } - if path == "C" { - continue // skip pseudopackage - } - imports[path] = true - } - } - } - } - return imports -} - -// ---------- Internal helpers ---------- - -// TODO(adonovan): make this a method: func (*token.File) Contains(token.Pos) -func tokenFileContainsPos(f *token.File, pos token.Pos) bool { - p := int(pos) - base := f.Base() - return base <= p && p < base+f.Size() -} diff --git a/vendor/golang.org/x/tools/go/types/typeutil/imports.go b/vendor/golang.org/x/tools/go/types/typeutil/imports.go deleted file mode 100644 index 9c441db..0000000 --- a/vendor/golang.org/x/tools/go/types/typeutil/imports.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package typeutil - -import "go/types" - -// Dependencies returns all dependencies of the specified packages. -// -// Dependent packages appear in topological order: if package P imports -// package Q, Q appears earlier than P in the result. -// The algorithm follows import statements in the order they -// appear in the source code, so the result is a total order. 
-// -func Dependencies(pkgs ...*types.Package) []*types.Package { - var result []*types.Package - seen := make(map[*types.Package]bool) - var visit func(pkgs []*types.Package) - visit = func(pkgs []*types.Package) { - for _, p := range pkgs { - if !seen[p] { - seen[p] = true - visit(p.Imports()) - result = append(result, p) - } - } - } - visit(pkgs) - return result -} diff --git a/vendor/golang.org/x/tools/go/types/typeutil/map.go b/vendor/golang.org/x/tools/go/types/typeutil/map.go deleted file mode 100644 index c7f7545..0000000 --- a/vendor/golang.org/x/tools/go/types/typeutil/map.go +++ /dev/null @@ -1,313 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package typeutil defines various utilities for types, such as Map, -// a mapping from types.Type to interface{} values. -package typeutil // import "golang.org/x/tools/go/types/typeutil" - -import ( - "bytes" - "fmt" - "go/types" - "reflect" -) - -// Map is a hash-table-based mapping from types (types.Type) to -// arbitrary interface{} values. The concrete types that implement -// the Type interface are pointers. Since they are not canonicalized, -// == cannot be used to check for equivalence, and thus we cannot -// simply use a Go map. -// -// Just as with map[K]V, a nil *Map is a valid empty map. -// -// Not thread-safe. -// -type Map struct { - hasher Hasher // shared by many Maps - table map[uint32][]entry // maps hash to bucket; entry.key==nil means unused - length int // number of map entries -} - -// entry is an entry (key/value association) in a hash bucket. -type entry struct { - key types.Type - value interface{} -} - -// SetHasher sets the hasher used by Map. -// -// All Hashers are functionally equivalent but contain internal state -// used to cache the results of hashing previously seen types. -// -// A single Hasher created by MakeHasher() may be shared among many -// Maps. This is recommended if the instances have many keys in -// common, as it will amortize the cost of hash computation. -// -// A Hasher may grow without bound as new types are seen. Even when a -// type is deleted from the map, the Hasher never shrinks, since other -// types in the map may reference the deleted type indirectly. -// -// Hashers are not thread-safe, and read-only operations such as -// Map.Lookup require updates to the hasher, so a full Mutex lock (not a -// read-lock) is require around all Map operations if a shared -// hasher is accessed from multiple threads. -// -// If SetHasher is not called, the Map will create a private hasher at -// the first call to Insert. -// -func (m *Map) SetHasher(hasher Hasher) { - m.hasher = hasher -} - -// Delete removes the entry with the given key, if any. -// It returns true if the entry was found. -// -func (m *Map) Delete(key types.Type) bool { - if m != nil && m.table != nil { - hash := m.hasher.Hash(key) - bucket := m.table[hash] - for i, e := range bucket { - if e.key != nil && types.Identical(key, e.key) { - // We can't compact the bucket as it - // would disturb iterators. - bucket[i] = entry{} - m.length-- - return true - } - } - } - return false -} - -// At returns the map entry for the given key. -// The result is nil if the entry is not present. 
-// -func (m *Map) At(key types.Type) interface{} { - if m != nil && m.table != nil { - for _, e := range m.table[m.hasher.Hash(key)] { - if e.key != nil && types.Identical(key, e.key) { - return e.value - } - } - } - return nil -} - -// Set sets the map entry for key to val, -// and returns the previous entry, if any. -func (m *Map) Set(key types.Type, value interface{}) (prev interface{}) { - if m.table != nil { - hash := m.hasher.Hash(key) - bucket := m.table[hash] - var hole *entry - for i, e := range bucket { - if e.key == nil { - hole = &bucket[i] - } else if types.Identical(key, e.key) { - prev = e.value - bucket[i].value = value - return - } - } - - if hole != nil { - *hole = entry{key, value} // overwrite deleted entry - } else { - m.table[hash] = append(bucket, entry{key, value}) - } - } else { - if m.hasher.memo == nil { - m.hasher = MakeHasher() - } - hash := m.hasher.Hash(key) - m.table = map[uint32][]entry{hash: {entry{key, value}}} - } - - m.length++ - return -} - -// Len returns the number of map entries. -func (m *Map) Len() int { - if m != nil { - return m.length - } - return 0 -} - -// Iterate calls function f on each entry in the map in unspecified order. -// -// If f should mutate the map, Iterate provides the same guarantees as -// Go maps: if f deletes a map entry that Iterate has not yet reached, -// f will not be invoked for it, but if f inserts a map entry that -// Iterate has not yet reached, whether or not f will be invoked for -// it is unspecified. -// -func (m *Map) Iterate(f func(key types.Type, value interface{})) { - if m != nil { - for _, bucket := range m.table { - for _, e := range bucket { - if e.key != nil { - f(e.key, e.value) - } - } - } - } -} - -// Keys returns a new slice containing the set of map keys. -// The order is unspecified. -func (m *Map) Keys() []types.Type { - keys := make([]types.Type, 0, m.Len()) - m.Iterate(func(key types.Type, _ interface{}) { - keys = append(keys, key) - }) - return keys -} - -func (m *Map) toString(values bool) string { - if m == nil { - return "{}" - } - var buf bytes.Buffer - fmt.Fprint(&buf, "{") - sep := "" - m.Iterate(func(key types.Type, value interface{}) { - fmt.Fprint(&buf, sep) - sep = ", " - fmt.Fprint(&buf, key) - if values { - fmt.Fprintf(&buf, ": %q", value) - } - }) - fmt.Fprint(&buf, "}") - return buf.String() -} - -// String returns a string representation of the map's entries. -// Values are printed using fmt.Sprintf("%v", v). -// Order is unspecified. -// -func (m *Map) String() string { - return m.toString(true) -} - -// KeysString returns a string representation of the map's key set. -// Order is unspecified. -// -func (m *Map) KeysString() string { - return m.toString(false) -} - -//////////////////////////////////////////////////////////////////////// -// Hasher - -// A Hasher maps each type to its hash value. -// For efficiency, a hasher uses memoization; thus its memory -// footprint grows monotonically over time. -// Hashers are not thread-safe. -// Hashers have reference semantics. -// Call MakeHasher to create a Hasher. -type Hasher struct { - memo map[types.Type]uint32 -} - -// MakeHasher returns a new Hasher instance. -func MakeHasher() Hasher { - return Hasher{make(map[types.Type]uint32)} -} - -// Hash computes a hash value for the given type t such that -// Identical(t, t') => Hash(t) == Hash(t'). 
-func (h Hasher) Hash(t types.Type) uint32 { - hash, ok := h.memo[t] - if !ok { - hash = h.hashFor(t) - h.memo[t] = hash - } - return hash -} - -// hashString computes the Fowler–Noll–Vo hash of s. -func hashString(s string) uint32 { - var h uint32 - for i := 0; i < len(s); i++ { - h ^= uint32(s[i]) - h *= 16777619 - } - return h -} - -// hashFor computes the hash of t. -func (h Hasher) hashFor(t types.Type) uint32 { - // See Identical for rationale. - switch t := t.(type) { - case *types.Basic: - return uint32(t.Kind()) - - case *types.Array: - return 9043 + 2*uint32(t.Len()) + 3*h.Hash(t.Elem()) - - case *types.Slice: - return 9049 + 2*h.Hash(t.Elem()) - - case *types.Struct: - var hash uint32 = 9059 - for i, n := 0, t.NumFields(); i < n; i++ { - f := t.Field(i) - if f.Anonymous() { - hash += 8861 - } - hash += hashString(t.Tag(i)) - hash += hashString(f.Name()) // (ignore f.Pkg) - hash += h.Hash(f.Type()) - } - return hash - - case *types.Pointer: - return 9067 + 2*h.Hash(t.Elem()) - - case *types.Signature: - var hash uint32 = 9091 - if t.Variadic() { - hash *= 8863 - } - return hash + 3*h.hashTuple(t.Params()) + 5*h.hashTuple(t.Results()) - - case *types.Interface: - var hash uint32 = 9103 - for i, n := 0, t.NumMethods(); i < n; i++ { - // See go/types.identicalMethods for rationale. - // Method order is not significant. - // Ignore m.Pkg(). - m := t.Method(i) - hash += 3*hashString(m.Name()) + 5*h.Hash(m.Type()) - } - return hash - - case *types.Map: - return 9109 + 2*h.Hash(t.Key()) + 3*h.Hash(t.Elem()) - - case *types.Chan: - return 9127 + 2*uint32(t.Dir()) + 3*h.Hash(t.Elem()) - - case *types.Named: - // Not safe with a copying GC; objects may move. - return uint32(reflect.ValueOf(t.Obj()).Pointer()) - - case *types.Tuple: - return h.hashTuple(t) - } - panic(t) -} - -func (h Hasher) hashTuple(tuple *types.Tuple) uint32 { - // See go/types.identicalTypes for rationale. - n := tuple.Len() - var hash uint32 = 9137 + 2*uint32(n) - for i := 0; i < n; i++ { - hash += 3 * h.Hash(tuple.At(i).Type()) - } - return hash -} diff --git a/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go b/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go deleted file mode 100644 index 3208461..0000000 --- a/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This file implements a cache of method sets. - -package typeutil - -import ( - "go/types" - "sync" -) - -// A MethodSetCache records the method set of each type T for which -// MethodSet(T) is called so that repeat queries are fast. -// The zero value is a ready-to-use cache instance. -type MethodSetCache struct { - mu sync.Mutex - named map[*types.Named]struct{ value, pointer *types.MethodSet } // method sets for named N and *N - others map[types.Type]*types.MethodSet // all other types -} - -// MethodSet returns the method set of type T. It is thread-safe. -// -// If cache is nil, this function is equivalent to types.NewMethodSet(T). -// Utility functions can thus expose an optional *MethodSetCache -// parameter to clients that care about performance. 
-// -func (cache *MethodSetCache) MethodSet(T types.Type) *types.MethodSet { - if cache == nil { - return types.NewMethodSet(T) - } - cache.mu.Lock() - defer cache.mu.Unlock() - - switch T := T.(type) { - case *types.Named: - return cache.lookupNamed(T).value - - case *types.Pointer: - if N, ok := T.Elem().(*types.Named); ok { - return cache.lookupNamed(N).pointer - } - } - - // all other types - // (The map uses pointer equivalence, not type identity.) - mset := cache.others[T] - if mset == nil { - mset = types.NewMethodSet(T) - if cache.others == nil { - cache.others = make(map[types.Type]*types.MethodSet) - } - cache.others[T] = mset - } - return mset -} - -func (cache *MethodSetCache) lookupNamed(named *types.Named) struct{ value, pointer *types.MethodSet } { - if cache.named == nil { - cache.named = make(map[*types.Named]struct{ value, pointer *types.MethodSet }) - } - // Avoid recomputing mset(*T) for each distinct Pointer - // instance whose underlying type is a named type. - msets, ok := cache.named[named] - if !ok { - msets.value = types.NewMethodSet(named) - msets.pointer = types.NewMethodSet(types.NewPointer(named)) - cache.named[named] = msets - } - return msets -} diff --git a/vendor/golang.org/x/tools/go/types/typeutil/ui.go b/vendor/golang.org/x/tools/go/types/typeutil/ui.go deleted file mode 100644 index 9849c24..0000000 --- a/vendor/golang.org/x/tools/go/types/typeutil/ui.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package typeutil - -// This file defines utilities for user interfaces that display types. - -import "go/types" - -// IntuitiveMethodSet returns the intuitive method set of a type T, -// which is the set of methods you can call on an addressable value of -// that type. -// -// The result always contains MethodSet(T), and is exactly MethodSet(T) -// for interface types and for pointer-to-concrete types. -// For all other concrete types T, the result additionally -// contains each method belonging to *T if there is no identically -// named method on T itself. -// -// This corresponds to user intuition about method sets; -// this function is intended only for user interfaces. -// -// The order of the result is as for types.MethodSet(T). -// -func IntuitiveMethodSet(T types.Type, msets *MethodSetCache) []*types.Selection { - isPointerToConcrete := func(T types.Type) bool { - ptr, ok := T.(*types.Pointer) - return ok && !types.IsInterface(ptr.Elem()) - } - - var result []*types.Selection - mset := msets.MethodSet(T) - if types.IsInterface(T) || isPointerToConcrete(T) { - for i, n := 0, mset.Len(); i < n; i++ { - result = append(result, mset.At(i)) - } - } else { - // T is some other concrete type. - // Report methods of T and *T, preferring those of T. - pmset := msets.MethodSet(types.NewPointer(T)) - for i, n := 0, pmset.Len(); i < n; i++ { - meth := pmset.At(i) - if m := mset.Lookup(meth.Obj().Pkg(), meth.Obj().Name()); m != nil { - meth = m - } - result = append(result, meth) - } - - } - return result -} diff --git a/vendor/golang.org/x/tools/refactor/importgraph/graph.go b/vendor/golang.org/x/tools/refactor/importgraph/graph.go deleted file mode 100644 index d2d8f09..0000000 --- a/vendor/golang.org/x/tools/refactor/importgraph/graph.go +++ /dev/null @@ -1,167 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package importgraph computes the forward and reverse import -// dependency graphs for all packages in a Go workspace. -package importgraph // import "golang.org/x/tools/refactor/importgraph" - -import ( - "go/build" - "sync" - - "golang.org/x/tools/go/buildutil" -) - -// A Graph is an import dependency graph, either forward or reverse. -// -// The graph maps each node (a package import path) to the set of its -// successors in the graph. For a forward graph, this is the set of -// imported packages (prerequisites); for a reverse graph, it is the set -// of importing packages (clients). -// -// Graph construction inspects all imports in each package's directory, -// including those in _test.go files, so the resulting graph may be cyclic. -type Graph map[string]map[string]bool - -func (g Graph) addEdge(from, to string) { - edges := g[from] - if edges == nil { - edges = make(map[string]bool) - g[from] = edges - } - edges[to] = true -} - -// Search returns all the nodes of the graph reachable from -// any of the specified roots, by following edges forwards. -// Relationally, this is the reflexive transitive closure. -func (g Graph) Search(roots ...string) map[string]bool { - seen := make(map[string]bool) - var visit func(x string) - visit = func(x string) { - if !seen[x] { - seen[x] = true - for y := range g[x] { - visit(y) - } - } - } - for _, root := range roots { - visit(root) - } - return seen -} - -// Build scans the specified Go workspace and builds the forward and -// reverse import dependency graphs for all its packages. -// It also returns a mapping from canonical import paths to errors for packages -// whose loading was not entirely successful. -// A package may appear in the graph and in the errors mapping. -// All package paths are canonical and may contain "/vendor/". -func Build(ctxt *build.Context) (forward, reverse Graph, errors map[string]error) { - type importEdge struct { - from, to string - } - type pathError struct { - path string - err error - } - - ch := make(chan interface{}) - - go func() { - sema := make(chan int, 20) // I/O concurrency limiting semaphore - var wg sync.WaitGroup - buildutil.ForEachPackage(ctxt, func(path string, err error) { - if err != nil { - ch <- pathError{path, err} - return - } - - wg.Add(1) - go func() { - defer wg.Done() - - sema <- 1 - bp, err := ctxt.Import(path, "", 0) - <-sema - - if err != nil { - if _, ok := err.(*build.NoGoError); ok { - // empty directory is not an error - } else { - ch <- pathError{path, err} - } - // Even in error cases, Import usually returns a package. - } - - // absolutize resolves an import path relative - // to the current package bp. - // The absolute form may contain "vendor". - // - // The vendoring feature slows down Build by 3×. - // Here are timings from a 1400 package workspace: - // 1100ms: current code (with vendor check) - // 880ms: with a nonblocking cache around ctxt.IsDir - // 840ms: nonblocking cache with duplicate suppression - // 340ms: original code (no vendor check) - // TODO(adonovan): optimize, somehow. 
- memo := make(map[string]string) - absolutize := func(path string) string { - canon, ok := memo[path] - if !ok { - sema <- 1 - bp2, _ := ctxt.Import(path, bp.Dir, build.FindOnly) - <-sema - - if bp2 != nil { - canon = bp2.ImportPath - } else { - canon = path - } - memo[path] = canon - } - return canon - } - - if bp != nil { - for _, imp := range bp.Imports { - ch <- importEdge{path, absolutize(imp)} - } - for _, imp := range bp.TestImports { - ch <- importEdge{path, absolutize(imp)} - } - for _, imp := range bp.XTestImports { - ch <- importEdge{path, absolutize(imp)} - } - } - - }() - }) - wg.Wait() - close(ch) - }() - - forward = make(Graph) - reverse = make(Graph) - - for e := range ch { - switch e := e.(type) { - case pathError: - if errors == nil { - errors = make(map[string]error) - } - errors[e.path] = e.err - - case importEdge: - if e.to == "C" { - continue // "C" is fake - } - forward.addEdge(e.from, e.to) - reverse.addEdge(e.to, e.from) - } - } - - return forward, reverse, errors -} diff --git a/vendor/golang.org/x/tools/refactor/rename/check.go b/vendor/golang.org/x/tools/refactor/rename/check.go deleted file mode 100644 index 838fc7b..0000000 --- a/vendor/golang.org/x/tools/refactor/rename/check.go +++ /dev/null @@ -1,858 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package rename - -// This file defines the safety checks for each kind of renaming. - -import ( - "fmt" - "go/ast" - "go/token" - "go/types" - - "golang.org/x/tools/go/loader" - "golang.org/x/tools/refactor/satisfy" -) - -// errorf reports an error (e.g. conflict) and prevents file modification. -func (r *renamer) errorf(pos token.Pos, format string, args ...interface{}) { - r.hadConflicts = true - reportError(r.iprog.Fset.Position(pos), fmt.Sprintf(format, args...)) -} - -// check performs safety checks of the renaming of the 'from' object to r.to. -func (r *renamer) check(from types.Object) { - if r.objsToUpdate[from] { - return - } - r.objsToUpdate[from] = true - - // NB: order of conditions is important. - if from_, ok := from.(*types.PkgName); ok { - r.checkInFileBlock(from_) - } else if from_, ok := from.(*types.Label); ok { - r.checkLabel(from_) - } else if isPackageLevel(from) { - r.checkInPackageBlock(from) - } else if v, ok := from.(*types.Var); ok && v.IsField() { - r.checkStructField(v) - } else if f, ok := from.(*types.Func); ok && recv(f) != nil { - r.checkMethod(f) - } else if isLocal(from) { - r.checkInLocalScope(from) - } else { - r.errorf(from.Pos(), "unexpected %s object %q (please report a bug)\n", - objectKind(from), from) - } -} - -// checkInFileBlock performs safety checks for renames of objects in the file block, -// i.e. imported package names. -func (r *renamer) checkInFileBlock(from *types.PkgName) { - // Check import name is not "init". - if r.to == "init" { - r.errorf(from.Pos(), "%q is not a valid imported package name", r.to) - } - - // Check for conflicts between file and package block. - if prev := from.Pkg().Scope().Lookup(r.to); prev != nil { - r.errorf(from.Pos(), "renaming this %s %q to %q would conflict", - objectKind(from), from.Name(), r.to) - r.errorf(prev.Pos(), "\twith this package member %s", - objectKind(prev)) - return // since checkInPackageBlock would report redundant errors - } - - // Check for conflicts in lexical scope. 
- r.checkInLexicalScope(from, r.packages[from.Pkg()]) - - // Finally, modify ImportSpec syntax to add or remove the Name as needed. - info, path, _ := r.iprog.PathEnclosingInterval(from.Pos(), from.Pos()) - if from.Imported().Name() == r.to { - // ImportSpec.Name not needed - path[1].(*ast.ImportSpec).Name = nil - } else { - // ImportSpec.Name needed - if spec := path[1].(*ast.ImportSpec); spec.Name == nil { - spec.Name = &ast.Ident{NamePos: spec.Path.Pos(), Name: r.to} - info.Defs[spec.Name] = from - } - } -} - -// checkInPackageBlock performs safety checks for renames of -// func/var/const/type objects in the package block. -func (r *renamer) checkInPackageBlock(from types.Object) { - // Check that there are no references to the name from another - // package if the renaming would make it unexported. - if ast.IsExported(from.Name()) && !ast.IsExported(r.to) { - for pkg, info := range r.packages { - if pkg == from.Pkg() { - continue - } - if id := someUse(info, from); id != nil && - !r.checkExport(id, pkg, from) { - break - } - } - } - - info := r.packages[from.Pkg()] - - // Check that in the package block, "init" is a function, and never referenced. - if r.to == "init" { - kind := objectKind(from) - if kind == "func" { - // Reject if intra-package references to it exist. - for id, obj := range info.Uses { - if obj == from { - r.errorf(from.Pos(), - "renaming this func %q to %q would make it a package initializer", - from.Name(), r.to) - r.errorf(id.Pos(), "\tbut references to it exist") - break - } - } - } else { - r.errorf(from.Pos(), "you cannot have a %s at package level named %q", - kind, r.to) - } - } - - // Check for conflicts between package block and all file blocks. - for _, f := range info.Files { - fileScope := info.Info.Scopes[f] - b, prev := fileScope.LookupParent(r.to, token.NoPos) - if b == fileScope { - r.errorf(from.Pos(), "renaming this %s %q to %q would conflict", - objectKind(from), from.Name(), r.to) - r.errorf(prev.Pos(), "\twith this %s", - objectKind(prev)) - return // since checkInPackageBlock would report redundant errors - } - } - - // Check for conflicts in lexical scope. - if from.Exported() { - for _, info := range r.packages { - r.checkInLexicalScope(from, info) - } - } else { - r.checkInLexicalScope(from, info) - } -} - -func (r *renamer) checkInLocalScope(from types.Object) { - info := r.packages[from.Pkg()] - - // Is this object an implicit local var for a type switch? - // Each case has its own var, whose position is the decl of y, - // but Ident in that decl does not appear in the Uses map. - // - // switch y := x.(type) { // Defs[Ident(y)] is undefined - // case int: print(y) // Implicits[CaseClause(int)] = Var(y_int) - // case string: print(y) // Implicits[CaseClause(string)] = Var(y_string) - // } - // - var isCaseVar bool - for syntax, obj := range info.Implicits { - if _, ok := syntax.(*ast.CaseClause); ok && obj.Pos() == from.Pos() { - isCaseVar = true - r.check(obj) - } - } - - r.checkInLexicalScope(from, info) - - // Finally, if this was a type switch, change the variable y. - if isCaseVar { - _, path, _ := r.iprog.PathEnclosingInterval(from.Pos(), from.Pos()) - path[0].(*ast.Ident).Name = r.to // path is [Ident AssignStmt TypeSwitchStmt...] - } -} - -// checkInLexicalScope performs safety checks that a renaming does not -// change the lexical reference structure of the specified package. -// -// For objects in lexical scope, there are three kinds of conflicts: -// same-, sub-, and super-block conflicts. 
We will illustrate all three -// using this example: -// -// var x int -// var z int -// -// func f(y int) { -// print(x) -// print(y) -// } -// -// Renaming x to z encounters a SAME-BLOCK CONFLICT, because an object -// with the new name already exists, defined in the same lexical block -// as the old object. -// -// Renaming x to y encounters a SUB-BLOCK CONFLICT, because there exists -// a reference to x from within (what would become) a hole in its scope. -// The definition of y in an (inner) sub-block would cast a shadow in -// the scope of the renamed variable. -// -// Renaming y to x encounters a SUPER-BLOCK CONFLICT. This is the -// converse situation: there is an existing definition of the new name -// (x) in an (enclosing) super-block, and the renaming would create a -// hole in its scope, within which there exist references to it. The -// new name casts a shadow in scope of the existing definition of x in -// the super-block. -// -// Removing the old name (and all references to it) is always safe, and -// requires no checks. -// -func (r *renamer) checkInLexicalScope(from types.Object, info *loader.PackageInfo) { - b := from.Parent() // the block defining the 'from' object - if b != nil { - toBlock, to := b.LookupParent(r.to, from.Parent().End()) - if toBlock == b { - // same-block conflict - r.errorf(from.Pos(), "renaming this %s %q to %q", - objectKind(from), from.Name(), r.to) - r.errorf(to.Pos(), "\tconflicts with %s in same block", - objectKind(to)) - return - } else if toBlock != nil { - // Check for super-block conflict. - // The name r.to is defined in a superblock. - // Is that name referenced from within this block? - forEachLexicalRef(info, to, func(id *ast.Ident, block *types.Scope) bool { - _, obj := lexicalLookup(block, from.Name(), id.Pos()) - if obj == from { - // super-block conflict - r.errorf(from.Pos(), "renaming this %s %q to %q", - objectKind(from), from.Name(), r.to) - r.errorf(id.Pos(), "\twould shadow this reference") - r.errorf(to.Pos(), "\tto the %s declared here", - objectKind(to)) - return false // stop - } - return true - }) - } - } - - // Check for sub-block conflict. - // Is there an intervening definition of r.to between - // the block defining 'from' and some reference to it? - forEachLexicalRef(info, from, func(id *ast.Ident, block *types.Scope) bool { - // Find the block that defines the found reference. - // It may be an ancestor. - fromBlock, _ := lexicalLookup(block, from.Name(), id.Pos()) - - // See what r.to would resolve to in the same scope. - toBlock, to := lexicalLookup(block, r.to, id.Pos()) - if to != nil { - // sub-block conflict - if deeper(toBlock, fromBlock) { - r.errorf(from.Pos(), "renaming this %s %q to %q", - objectKind(from), from.Name(), r.to) - r.errorf(id.Pos(), "\twould cause this reference to become shadowed") - r.errorf(to.Pos(), "\tby this intervening %s definition", - objectKind(to)) - return false // stop - } - } - return true - }) - - // Renaming a type that is used as an embedded field - // requires renaming the field too. e.g. - // type T int // if we rename this to U.. - // var s struct {T} - // print(s.T) // ...this must change too - if _, ok := from.(*types.TypeName); ok { - for id, obj := range info.Uses { - if obj == from { - if field := info.Defs[id]; field != nil { - r.check(field) - } - } - } - } -} - -// lexicalLookup is like (*types.Scope).LookupParent but respects the -// environment visible at pos. It assumes the relative position -// information is correct with each file. 
-func lexicalLookup(block *types.Scope, name string, pos token.Pos) (*types.Scope, types.Object) { - for b := block; b != nil; b = b.Parent() { - obj := b.Lookup(name) - // The scope of a package-level object is the entire package, - // so ignore pos in that case. - // No analogous clause is needed for file-level objects - // since no reference can appear before an import decl. - if obj != nil && (b == obj.Pkg().Scope() || obj.Pos() < pos) { - return b, obj - } - } - return nil, nil -} - -// deeper reports whether block x is lexically deeper than y. -func deeper(x, y *types.Scope) bool { - if x == y || x == nil { - return false - } else if y == nil { - return true - } else { - return deeper(x.Parent(), y.Parent()) - } -} - -// forEachLexicalRef calls fn(id, block) for each identifier id in package -// info that is a reference to obj in lexical scope. block is the -// lexical block enclosing the reference. If fn returns false the -// iteration is terminated and findLexicalRefs returns false. -func forEachLexicalRef(info *loader.PackageInfo, obj types.Object, fn func(id *ast.Ident, block *types.Scope) bool) bool { - ok := true - var stack []ast.Node - - var visit func(n ast.Node) bool - visit = func(n ast.Node) bool { - if n == nil { - stack = stack[:len(stack)-1] // pop - return false - } - if !ok { - return false // bail out - } - - stack = append(stack, n) // push - switch n := n.(type) { - case *ast.Ident: - if info.Uses[n] == obj { - block := enclosingBlock(&info.Info, stack) - if !fn(n, block) { - ok = false - } - } - return visit(nil) // pop stack - - case *ast.SelectorExpr: - // don't visit n.Sel - ast.Inspect(n.X, visit) - return visit(nil) // pop stack, don't descend - - case *ast.CompositeLit: - // Handle recursion ourselves for struct literals - // so we don't visit field identifiers. - tv := info.Types[n] - if _, ok := deref(tv.Type).Underlying().(*types.Struct); ok { - if n.Type != nil { - ast.Inspect(n.Type, visit) - } - for _, elt := range n.Elts { - if kv, ok := elt.(*ast.KeyValueExpr); ok { - ast.Inspect(kv.Value, visit) - } else { - ast.Inspect(elt, visit) - } - } - return visit(nil) // pop stack, don't descend - } - } - return true - } - - for _, f := range info.Files { - ast.Inspect(f, visit) - if len(stack) != 0 { - panic(stack) - } - if !ok { - break - } - } - return ok -} - -// enclosingBlock returns the innermost block enclosing the specified -// AST node, specified in the form of a path from the root of the file, -// [file...n]. -func enclosingBlock(info *types.Info, stack []ast.Node) *types.Scope { - for i := range stack { - n := stack[len(stack)-1-i] - // For some reason, go/types always associates a - // function's scope with its FuncType. - // TODO(adonovan): feature or a bug? - switch f := n.(type) { - case *ast.FuncDecl: - n = f.Type - case *ast.FuncLit: - n = f.Type - } - if b := info.Scopes[n]; b != nil { - return b - } - } - panic("no Scope for *ast.File") -} - -func (r *renamer) checkLabel(label *types.Label) { - // Check there are no identical labels in the function's label block. - // (Label blocks don't nest, so this is easy.) - if prev := label.Parent().Lookup(r.to); prev != nil { - r.errorf(label.Pos(), "renaming this label %q to %q", label.Name(), prev.Name()) - r.errorf(prev.Pos(), "\twould conflict with this one") - } -} - -// checkStructField checks that the field renaming will not cause -// conflicts at its declaration, or ambiguity or changes to any selection. 
-func (r *renamer) checkStructField(from *types.Var) { - // Check that the struct declaration is free of field conflicts, - // and field/method conflicts. - - // go/types offers no easy way to get from a field (or interface - // method) to its declaring struct (or interface), so we must - // ascend the AST. - info, path, _ := r.iprog.PathEnclosingInterval(from.Pos(), from.Pos()) - // path matches this pattern: - // [Ident SelectorExpr? StarExpr? Field FieldList StructType ParenExpr* ... File] - - // Ascend to FieldList. - var i int - for { - if _, ok := path[i].(*ast.FieldList); ok { - break - } - i++ - } - i++ - tStruct := path[i].(*ast.StructType) - i++ - // Ascend past parens (unlikely). - for { - _, ok := path[i].(*ast.ParenExpr) - if !ok { - break - } - i++ - } - if spec, ok := path[i].(*ast.TypeSpec); ok { - // This struct is also a named type. - // We must check for direct (non-promoted) field/field - // and method/field conflicts. - named := info.Defs[spec.Name].Type() - prev, indices, _ := types.LookupFieldOrMethod(named, true, info.Pkg, r.to) - if len(indices) == 1 { - r.errorf(from.Pos(), "renaming this field %q to %q", - from.Name(), r.to) - r.errorf(prev.Pos(), "\twould conflict with this %s", - objectKind(prev)) - return // skip checkSelections to avoid redundant errors - } - } else { - // This struct is not a named type. - // We need only check for direct (non-promoted) field/field conflicts. - T := info.Types[tStruct].Type.Underlying().(*types.Struct) - for i := 0; i < T.NumFields(); i++ { - if prev := T.Field(i); prev.Name() == r.to { - r.errorf(from.Pos(), "renaming this field %q to %q", - from.Name(), r.to) - r.errorf(prev.Pos(), "\twould conflict with this field") - return // skip checkSelections to avoid redundant errors - } - } - } - - // Renaming an anonymous field requires renaming the type too. e.g. - // print(s.T) // if we rename T to U, - // type T int // this and - // var s struct {T} // this must change too. - if from.Anonymous() { - if named, ok := from.Type().(*types.Named); ok { - r.check(named.Obj()) - } else if named, ok := deref(from.Type()).(*types.Named); ok { - r.check(named.Obj()) - } - } - - // Check integrity of existing (field and method) selections. - r.checkSelections(from) -} - -// checkSelection checks that all uses and selections that resolve to -// the specified object would continue to do so after the renaming. -func (r *renamer) checkSelections(from types.Object) { - for pkg, info := range r.packages { - if id := someUse(info, from); id != nil { - if !r.checkExport(id, pkg, from) { - return - } - } - - for syntax, sel := range info.Selections { - // There may be extant selections of only the old - // name or only the new name, so we must check both. - // (If neither, the renaming is sound.) - // - // In both cases, we wish to compare the lengths - // of the implicit field path (Selection.Index) - // to see if the renaming would change it. - // - // If a selection that resolves to 'from', when renamed, - // would yield a path of the same or shorter length, - // this indicates ambiguity or a changed referent, - // analogous to same- or sub-block lexical conflict. - // - // If a selection using the name 'to' would - // yield a path of the same or shorter length, - // this indicates ambiguity or shadowing, - // analogous to same- or super-block lexical conflict. - - // TODO(adonovan): fix: derive from Types[syntax.X].Mode - // TODO(adonovan): test with pointer, value, addressable value. 
- isAddressable := true - - if sel.Obj() == from { - if obj, indices, _ := types.LookupFieldOrMethod(sel.Recv(), isAddressable, from.Pkg(), r.to); obj != nil { - // Renaming this existing selection of - // 'from' may block access to an existing - // type member named 'to'. - delta := len(indices) - len(sel.Index()) - if delta > 0 { - continue // no ambiguity - } - r.selectionConflict(from, delta, syntax, obj) - return - } - - } else if sel.Obj().Name() == r.to { - if obj, indices, _ := types.LookupFieldOrMethod(sel.Recv(), isAddressable, from.Pkg(), from.Name()); obj == from { - // Renaming 'from' may cause this existing - // selection of the name 'to' to change - // its meaning. - delta := len(indices) - len(sel.Index()) - if delta > 0 { - continue // no ambiguity - } - r.selectionConflict(from, -delta, syntax, sel.Obj()) - return - } - } - } - } -} - -func (r *renamer) selectionConflict(from types.Object, delta int, syntax *ast.SelectorExpr, obj types.Object) { - r.errorf(from.Pos(), "renaming this %s %q to %q", - objectKind(from), from.Name(), r.to) - - switch { - case delta < 0: - // analogous to sub-block conflict - r.errorf(syntax.Sel.Pos(), - "\twould change the referent of this selection") - r.errorf(obj.Pos(), "\tof this %s", objectKind(obj)) - case delta == 0: - // analogous to same-block conflict - r.errorf(syntax.Sel.Pos(), - "\twould make this reference ambiguous") - r.errorf(obj.Pos(), "\twith this %s", objectKind(obj)) - case delta > 0: - // analogous to super-block conflict - r.errorf(syntax.Sel.Pos(), - "\twould shadow this selection") - r.errorf(obj.Pos(), "\tof the %s declared here", - objectKind(obj)) - } -} - -// checkMethod performs safety checks for renaming a method. -// There are three hazards: -// - declaration conflicts -// - selection ambiguity/changes -// - entailed renamings of assignable concrete/interface types. -// We reject renamings initiated at concrete methods if it would -// change the assignability relation. For renamings of abstract -// methods, we rename all methods transitively coupled to it via -// assignability. -func (r *renamer) checkMethod(from *types.Func) { - // e.g. error.Error - if from.Pkg() == nil { - r.errorf(from.Pos(), "you cannot rename built-in method %s", from) - return - } - - // ASSIGNABILITY: We reject renamings of concrete methods that - // would break a 'satisfy' constraint; but renamings of abstract - // methods are allowed to proceed, and we rename affected - // concrete and abstract methods as necessary. It is the - // initial method that determines the policy. - - // Check for conflict at point of declaration. - // Check to ensure preservation of assignability requirements. - R := recv(from).Type() - if isInterface(R) { - // Abstract method - - // declaration - prev, _, _ := types.LookupFieldOrMethod(R, false, from.Pkg(), r.to) - if prev != nil { - r.errorf(from.Pos(), "renaming this interface method %q to %q", - from.Name(), r.to) - r.errorf(prev.Pos(), "\twould conflict with this method") - return - } - - // Check all interfaces that embed this one for - // declaration conflicts too. 
- for _, info := range r.packages { - // Start with named interface types (better errors) - for _, obj := range info.Defs { - if obj, ok := obj.(*types.TypeName); ok && isInterface(obj.Type()) { - f, _, _ := types.LookupFieldOrMethod( - obj.Type(), false, from.Pkg(), from.Name()) - if f == nil { - continue - } - t, _, _ := types.LookupFieldOrMethod( - obj.Type(), false, from.Pkg(), r.to) - if t == nil { - continue - } - r.errorf(from.Pos(), "renaming this interface method %q to %q", - from.Name(), r.to) - r.errorf(t.Pos(), "\twould conflict with this method") - r.errorf(obj.Pos(), "\tin named interface type %q", obj.Name()) - } - } - - // Now look at all literal interface types (includes named ones again). - for e, tv := range info.Types { - if e, ok := e.(*ast.InterfaceType); ok { - _ = e - _ = tv.Type.(*types.Interface) - // TODO(adonovan): implement same check as above. - } - } - } - - // assignability - // - // Find the set of concrete or abstract methods directly - // coupled to abstract method 'from' by some - // satisfy.Constraint, and rename them too. - for key := range r.satisfy() { - // key = (lhs, rhs) where lhs is always an interface. - - lsel := r.msets.MethodSet(key.LHS).Lookup(from.Pkg(), from.Name()) - if lsel == nil { - continue - } - rmethods := r.msets.MethodSet(key.RHS) - rsel := rmethods.Lookup(from.Pkg(), from.Name()) - if rsel == nil { - continue - } - - // If both sides have a method of this name, - // and one of them is m, the other must be coupled. - var coupled *types.Func - switch from { - case lsel.Obj(): - coupled = rsel.Obj().(*types.Func) - case rsel.Obj(): - coupled = lsel.Obj().(*types.Func) - default: - continue - } - - // We must treat concrete-to-interface - // constraints like an implicit selection C.f of - // each interface method I.f, and check that the - // renaming leaves the selection unchanged and - // unambiguous. - // - // Fun fact: the implicit selection of C.f - // type I interface{f()} - // type C struct{I} - // func (C) g() - // var _ I = C{} // here - // yields abstract method I.f. This can make error - // messages less than obvious. - // - if !isInterface(key.RHS) { - // The logic below was derived from checkSelections. - - rtosel := rmethods.Lookup(from.Pkg(), r.to) - if rtosel != nil { - rto := rtosel.Obj().(*types.Func) - delta := len(rsel.Index()) - len(rtosel.Index()) - if delta < 0 { - continue // no ambiguity - } - - // TODO(adonovan): record the constraint's position. - keyPos := token.NoPos - - r.errorf(from.Pos(), "renaming this method %q to %q", - from.Name(), r.to) - if delta == 0 { - // analogous to same-block conflict - r.errorf(keyPos, "\twould make the %s method of %s invoked via interface %s ambiguous", - r.to, key.RHS, key.LHS) - r.errorf(rto.Pos(), "\twith (%s).%s", - recv(rto).Type(), r.to) - } else { - // analogous to super-block conflict - r.errorf(keyPos, "\twould change the %s method of %s invoked via interface %s", - r.to, key.RHS, key.LHS) - r.errorf(coupled.Pos(), "\tfrom (%s).%s", - recv(coupled).Type(), r.to) - r.errorf(rto.Pos(), "\tto (%s).%s", - recv(rto).Type(), r.to) - } - return // one error is enough - } - } - - if !r.changeMethods { - // This should be unreachable. - r.errorf(from.Pos(), "internal error: during renaming of abstract method %s", from) - r.errorf(coupled.Pos(), "\tchangedMethods=false, coupled method=%s", coupled) - r.errorf(from.Pos(), "\tPlease file a bug report") - return - } - - // Rename the coupled method to preserve assignability. 
- r.check(coupled) - } - } else { - // Concrete method - - // declaration - prev, indices, _ := types.LookupFieldOrMethod(R, true, from.Pkg(), r.to) - if prev != nil && len(indices) == 1 { - r.errorf(from.Pos(), "renaming this method %q to %q", - from.Name(), r.to) - r.errorf(prev.Pos(), "\twould conflict with this %s", - objectKind(prev)) - return - } - - // assignability - // - // Find the set of abstract methods coupled to concrete - // method 'from' by some satisfy.Constraint, and rename - // them too. - // - // Coupling may be indirect, e.g. I.f <-> C.f via type D. - // - // type I interface {f()} - // type C int - // type (C) f() - // type D struct{C} - // var _ I = D{} - // - for key := range r.satisfy() { - // key = (lhs, rhs) where lhs is always an interface. - if isInterface(key.RHS) { - continue - } - rsel := r.msets.MethodSet(key.RHS).Lookup(from.Pkg(), from.Name()) - if rsel == nil || rsel.Obj() != from { - continue // rhs does not have the method - } - lsel := r.msets.MethodSet(key.LHS).Lookup(from.Pkg(), from.Name()) - if lsel == nil { - continue - } - imeth := lsel.Obj().(*types.Func) - - // imeth is the abstract method (e.g. I.f) - // and key.RHS is the concrete coupling type (e.g. D). - if !r.changeMethods { - r.errorf(from.Pos(), "renaming this method %q to %q", - from.Name(), r.to) - var pos token.Pos - var iface string - - I := recv(imeth).Type() - if named, ok := I.(*types.Named); ok { - pos = named.Obj().Pos() - iface = "interface " + named.Obj().Name() - } else { - pos = from.Pos() - iface = I.String() - } - r.errorf(pos, "\twould make %s no longer assignable to %s", - key.RHS, iface) - r.errorf(imeth.Pos(), "\t(rename %s.%s if you intend to change both types)", - I, from.Name()) - return // one error is enough - } - - // Rename the coupled interface method to preserve assignability. - r.check(imeth) - } - } - - // Check integrity of existing (field and method) selections. - // We skip this if there were errors above, to avoid redundant errors. - r.checkSelections(from) -} - -func (r *renamer) checkExport(id *ast.Ident, pkg *types.Package, from types.Object) bool { - // Reject cross-package references if r.to is unexported. - // (Such references may be qualified identifiers or field/method - // selections.) - if !ast.IsExported(r.to) && pkg != from.Pkg() { - r.errorf(from.Pos(), - "renaming this %s %q to %q would make it unexported", - objectKind(from), from.Name(), r.to) - r.errorf(id.Pos(), "\tbreaking references from packages such as %q", - pkg.Path()) - return false - } - return true -} - -// satisfy returns the set of interface satisfaction constraints. -func (r *renamer) satisfy() map[satisfy.Constraint]bool { - if r.satisfyConstraints == nil { - // Compute on demand: it's expensive. - var f satisfy.Finder - for _, info := range r.packages { - f.Find(&info.Info, info.Files) - } - r.satisfyConstraints = f.Result - } - return r.satisfyConstraints -} - -// -- helpers ---------------------------------------------------------- - -// recv returns the method's receiver. -func recv(meth *types.Func) *types.Var { - return meth.Type().(*types.Signature).Recv() -} - -// someUse returns an arbitrary use of obj within info. 
-func someUse(info *loader.PackageInfo, obj types.Object) *ast.Ident { - for id, o := range info.Uses { - if o == obj { - return id - } - } - return nil -} - -// -- Plundered from golang.org/x/tools/go/ssa ----------------- - -func isInterface(T types.Type) bool { return types.IsInterface(T) } - -func deref(typ types.Type) types.Type { - if p, _ := typ.(*types.Pointer); p != nil { - return p.Elem() - } - return typ -} diff --git a/vendor/golang.org/x/tools/refactor/rename/mvpkg.go b/vendor/golang.org/x/tools/refactor/rename/mvpkg.go deleted file mode 100644 index 638d0cf..0000000 --- a/vendor/golang.org/x/tools/refactor/rename/mvpkg.go +++ /dev/null @@ -1,375 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// licence that can be found in the LICENSE file. - -// This file contains the implementation of the 'gomvpkg' command -// whose main function is in golang.org/x/tools/cmd/gomvpkg. - -package rename - -// TODO(matloob): -// - think about what happens if the package is moving across version control systems. -// - think about windows, which uses "\" as its directory separator. -// - dot imports are not supported. Make sure it's clearly documented. - -import ( - "bytes" - "fmt" - "go/ast" - "go/build" - "go/format" - "go/token" - "log" - "os" - "os/exec" - "path" - "path/filepath" - "regexp" - "runtime" - "strconv" - "strings" - "text/template" - - "golang.org/x/tools/go/buildutil" - "golang.org/x/tools/go/loader" - "golang.org/x/tools/refactor/importgraph" -) - -// Move, given a package path and a destination package path, will try -// to move the given package to the new path. The Move function will -// first check for any conflicts preventing the move, such as a -// package already existing at the destination package path. If the -// move can proceed, it builds an import graph to find all imports of -// the packages whose paths need to be renamed. This includes uses of -// the subpackages of the package to be moved as those packages will -// also need to be moved. It then renames all imports to point to the -// new paths, and then moves the packages to their new paths. -func Move(ctxt *build.Context, from, to, moveTmpl string) error { - srcDir, err := srcDir(ctxt, from) - if err != nil { - return err - } - - // This should be the only place in the program that constructs - // file paths. - // TODO(matloob): test on Microsoft Windows. - fromDir := buildutil.JoinPath(ctxt, srcDir, filepath.FromSlash(from)) - toDir := buildutil.JoinPath(ctxt, srcDir, filepath.FromSlash(to)) - toParent := filepath.Dir(toDir) - if !buildutil.IsDir(ctxt, toParent) { - return fmt.Errorf("parent directory does not exist for path %s", toDir) - } - - // Build the import graph and figure out which packages to update. - _, rev, errors := importgraph.Build(ctxt) - if len(errors) > 0 { - // With a large GOPATH tree, errors are inevitable. - // Report them but proceed. - fmt.Fprintf(os.Stderr, "While scanning Go workspace:\n") - for path, err := range errors { - fmt.Fprintf(os.Stderr, "Package %q: %s.\n", path, err) - } - } - - // Determine the affected packages---the set of packages whose import - // statements need updating. - affectedPackages := map[string]bool{from: true} - destinations := make(map[string]string) // maps old import path to new import path - for pkg := range subpackages(ctxt, srcDir, from) { - for r := range rev[pkg] { - affectedPackages[r] = true - } - // Ensure directories have a trailing separator. 
- dest := strings.Replace(pkg, - filepath.Join(from, ""), - filepath.Join(to, ""), - 1) - destinations[pkg] = filepath.ToSlash(dest) - } - - // Load all the affected packages. - iprog, err := loadProgram(ctxt, affectedPackages) - if err != nil { - return err - } - - // Prepare the move command, if one was supplied. - var cmd string - if moveTmpl != "" { - if cmd, err = moveCmd(moveTmpl, fromDir, toDir); err != nil { - return err - } - } - - m := mover{ - ctxt: ctxt, - rev: rev, - iprog: iprog, - from: from, - to: to, - fromDir: fromDir, - toDir: toDir, - affectedPackages: affectedPackages, - destinations: destinations, - cmd: cmd, - } - - if err := m.checkValid(); err != nil { - return err - } - - m.move() - - return nil -} - -// srcDir returns the absolute path of the srcdir containing pkg. -func srcDir(ctxt *build.Context, pkg string) (string, error) { - for _, srcDir := range ctxt.SrcDirs() { - path := buildutil.JoinPath(ctxt, srcDir, pkg) - if buildutil.IsDir(ctxt, path) { - return srcDir, nil - } - } - return "", fmt.Errorf("src dir not found for package: %s", pkg) -} - -// subpackages returns the set of packages in the given srcDir whose -// import paths start with dir. -func subpackages(ctxt *build.Context, srcDir string, dir string) map[string]bool { - subs := map[string]bool{dir: true} - - // Find all packages under srcDir whose import paths start with dir. - buildutil.ForEachPackage(ctxt, func(pkg string, err error) { - if err != nil { - log.Fatalf("unexpected error in ForEachPackage: %v", err) - } - - // Only process the package or a sub-package - if !(strings.HasPrefix(pkg, dir) && - (len(pkg) == len(dir) || pkg[len(dir)] == '/')) { - return - } - - p, err := ctxt.Import(pkg, "", build.FindOnly) - if err != nil { - log.Fatalf("unexpected: package %s can not be located by build context: %s", pkg, err) - } - if p.SrcRoot == "" { - log.Fatalf("unexpected: could not determine srcDir for package %s: %s", pkg, err) - } - if p.SrcRoot != srcDir { - return - } - - subs[pkg] = true - }) - - return subs -} - -type mover struct { - // iprog contains all packages whose contents need to be updated - // with new package names or import paths. - iprog *loader.Program - ctxt *build.Context - // rev is the reverse import graph. - rev importgraph.Graph - // from and to are the source and destination import - // paths. fromDir and toDir are the source and destination - // absolute paths that package source files will be moved between. - from, to, fromDir, toDir string - // affectedPackages is the set of all packages whose contents need - // to be updated to reflect new package names or import paths. - affectedPackages map[string]bool - // destinations maps each subpackage to be moved to its - // destination path. - destinations map[string]string - // cmd, if not empty, will be executed to move fromDir to toDir. 
- cmd string -} - -func (m *mover) checkValid() error { - const prefix = "invalid move destination" - - match, err := regexp.MatchString("^[_\\pL][_\\pL\\p{Nd}]*$", path.Base(m.to)) - if err != nil { - panic("regexp.MatchString failed") - } - if !match { - return fmt.Errorf("%s: %s; gomvpkg does not support move destinations "+ - "whose base names are not valid go identifiers", prefix, m.to) - } - - if buildutil.FileExists(m.ctxt, m.toDir) { - return fmt.Errorf("%s: %s conflicts with file %s", prefix, m.to, m.toDir) - } - if buildutil.IsDir(m.ctxt, m.toDir) { - return fmt.Errorf("%s: %s conflicts with directory %s", prefix, m.to, m.toDir) - } - - for _, toSubPkg := range m.destinations { - if _, err := m.ctxt.Import(toSubPkg, "", build.FindOnly); err == nil { - return fmt.Errorf("%s: %s; package or subpackage %s already exists", - prefix, m.to, toSubPkg) - } - } - - return nil -} - -// moveCmd produces the version control move command used to move fromDir to toDir by -// executing the given template. -func moveCmd(moveTmpl, fromDir, toDir string) (string, error) { - tmpl, err := template.New("movecmd").Parse(moveTmpl) - if err != nil { - return "", err - } - - var buf bytes.Buffer - err = tmpl.Execute(&buf, struct { - Src string - Dst string - }{fromDir, toDir}) - return buf.String(), err -} - -func (m *mover) move() error { - filesToUpdate := make(map[*ast.File]bool) - - // Change the moved package's "package" declaration to its new base name. - pkg, ok := m.iprog.Imported[m.from] - if !ok { - log.Fatalf("unexpected: package %s is not in import map", m.from) - } - newName := filepath.Base(m.to) - for _, f := range pkg.Files { - // Update all import comments. - for _, cg := range f.Comments { - c := cg.List[0] - if c.Slash >= f.Name.End() && - sameLine(m.iprog.Fset, c.Slash, f.Name.End()) && - (f.Decls == nil || c.Slash < f.Decls[0].Pos()) { - if strings.HasPrefix(c.Text, `// import "`) { - c.Text = `// import "` + m.to + `"` - break - } - if strings.HasPrefix(c.Text, `/* import "`) { - c.Text = `/* import "` + m.to + `" */` - break - } - } - } - f.Name.Name = newName // change package decl - filesToUpdate[f] = true - } - - // Look through the external test packages (m.iprog.Created contains the external test packages). - for _, info := range m.iprog.Created { - // Change the "package" declaration of the external test package. - if info.Pkg.Path() == m.from+"_test" { - for _, f := range info.Files { - f.Name.Name = newName + "_test" // change package decl - filesToUpdate[f] = true - } - } - - // Mark all the loaded external test packages, which import the "from" package, - // as affected packages and update the imports. - for _, imp := range info.Pkg.Imports() { - if imp.Path() == m.from { - m.affectedPackages[info.Pkg.Path()] = true - m.iprog.Imported[info.Pkg.Path()] = info - if err := importName(m.iprog, info, m.from, path.Base(m.from), newName); err != nil { - return err - } - } - } - } - - // Update imports of that package to use the new import name. - // None of the subpackages will change their name---only the from package - // itself will. - for p := range m.rev[m.from] { - if err := importName(m.iprog, m.iprog.Imported[p], m.from, path.Base(m.from), newName); err != nil { - return err - } - } - - // Update import paths for all imports by affected packages. 
- for ap := range m.affectedPackages { - info, ok := m.iprog.Imported[ap] - if !ok { - log.Fatalf("unexpected: package %s is not in import map", ap) - } - for _, f := range info.Files { - for _, imp := range f.Imports { - importPath, _ := strconv.Unquote(imp.Path.Value) - if newPath, ok := m.destinations[importPath]; ok { - imp.Path.Value = strconv.Quote(newPath) - - oldName := path.Base(importPath) - if imp.Name != nil { - oldName = imp.Name.Name - } - - newName := path.Base(newPath) - if imp.Name == nil && oldName != newName { - imp.Name = ast.NewIdent(oldName) - } else if imp.Name == nil || imp.Name.Name == newName { - imp.Name = nil - } - filesToUpdate[f] = true - } - } - } - } - - for f := range filesToUpdate { - var buf bytes.Buffer - if err := format.Node(&buf, m.iprog.Fset, f); err != nil { - log.Printf("failed to pretty-print syntax tree: %v", err) - continue - } - tokenFile := m.iprog.Fset.File(f.Pos()) - writeFile(tokenFile.Name(), buf.Bytes()) - } - - // Move the directories. - // If either the fromDir or toDir are contained under version control it is - // the user's responsibility to provide a custom move command that updates - // version control to reflect the move. - // TODO(matloob): If the parent directory of toDir does not exist, create it. - // For now, it's required that it does exist. - - if m.cmd != "" { - // TODO(matloob): Verify that the windows and plan9 cases are correct. - var cmd *exec.Cmd - switch runtime.GOOS { - case "windows": - cmd = exec.Command("cmd", "/c", m.cmd) - case "plan9": - cmd = exec.Command("rc", "-c", m.cmd) - default: - cmd = exec.Command("sh", "-c", m.cmd) - } - cmd.Stderr = os.Stderr - cmd.Stdout = os.Stdout - if err := cmd.Run(); err != nil { - return fmt.Errorf("version control system's move command failed: %v", err) - } - - return nil - } - - return moveDirectory(m.fromDir, m.toDir) -} - -// sameLine reports whether two positions in the same file are on the same line. -func sameLine(fset *token.FileSet, x, y token.Pos) bool { - return fset.Position(x).Line == fset.Position(y).Line -} - -var moveDirectory = func(from, to string) error { - return os.Rename(from, to) -} diff --git a/vendor/golang.org/x/tools/refactor/rename/rename.go b/vendor/golang.org/x/tools/refactor/rename/rename.go deleted file mode 100644 index b026e78..0000000 --- a/vendor/golang.org/x/tools/refactor/rename/rename.go +++ /dev/null @@ -1,603 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package rename contains the implementation of the 'gorename' command -// whose main function is in golang.org/x/tools/cmd/gorename. -// See the Usage constant for the command documentation. -package rename // import "golang.org/x/tools/refactor/rename" - -import ( - "bytes" - "errors" - "fmt" - "go/ast" - "go/build" - "go/format" - "go/parser" - "go/token" - "go/types" - "io" - "io/ioutil" - "log" - "os" - "os/exec" - "path" - "regexp" - "sort" - "strconv" - "strings" - - "golang.org/x/tools/go/loader" - "golang.org/x/tools/go/types/typeutil" - "golang.org/x/tools/refactor/importgraph" - "golang.org/x/tools/refactor/satisfy" -) - -const Usage = `gorename: precise type-safe renaming of identifiers in Go source code. - -Usage: - - gorename (-from | -offset :#) -to [-force] - -You must specify the object (named entity) to rename using the -offset -or -from flag. Exactly one must be specified. 
- -Flags: - --offset specifies the filename and byte offset of an identifier to rename. - This form is intended for use by text editors. - --from specifies the object to rename using a query notation; - This form is intended for interactive use at the command line. - A legal -from query has one of the following forms: - - "encoding/json".Decoder.Decode method of package-level named type - (*"encoding/json".Decoder).Decode ditto, alternative syntax - "encoding/json".Decoder.buf field of package-level named struct type - "encoding/json".HTMLEscape package member (const, func, var, type) - "encoding/json".Decoder.Decode::x local object x within a method - "encoding/json".HTMLEscape::x local object x within a function - "encoding/json"::x object x anywhere within a package - json.go::x object x within file json.go - - Double-quotes must be escaped when writing a shell command. - Quotes may be omitted for single-segment import paths such as "fmt". - - For methods, the parens and '*' on the receiver type are both - optional. - - It is an error if one of the ::x queries matches multiple - objects. - --to the new name. - --force causes the renaming to proceed even if conflicts were reported. - The resulting program may be ill-formed, or experience a change - in behaviour. - - WARNING: this flag may even cause the renaming tool to crash. - (In due course this bug will be fixed by moving certain - analyses into the type-checker.) - --d display diffs instead of rewriting files - --v enables verbose logging. - -gorename automatically computes the set of packages that might be -affected. For a local renaming, this is just the package specified by --from or -offset, but for a potentially exported name, gorename scans -the workspace ($GOROOT and $GOPATH). - -gorename rejects renamings of concrete methods that would change the -assignability relation between types and interfaces. If the interface -change was intentional, initiate the renaming at the interface method. - -gorename rejects any renaming that would create a conflict at the point -of declaration, or a reference conflict (ambiguity or shadowing), or -anything else that could cause the resulting program not to compile. - - -Examples: - -$ gorename -offset file.go:#123 -to foo - - Rename the object whose identifier is at byte offset 123 within file file.go. - -$ gorename -from '"bytes".Buffer.Len' -to Size - - Rename the "Len" method of the *bytes.Buffer type to "Size". - ----- TODO ---- - -Correctness: -- handle dot imports correctly -- document limitations (reflection, 'implements' algorithm). -- sketch a proof of exhaustiveness. - -Features: -- support running on packages specified as *.go files on the command line -- support running on programs containing errors (loader.Config.AllowErrors) -- allow users to specify a scope other than "global" (to avoid being - stuck by neglected packages in $GOPATH that don't build). -- support renaming the package clause (no object) -- support renaming an import path (no ident or object) - (requires filesystem + SCM updates). -- detect and reject edits to autogenerated files (cgo, protobufs) - and optionally $GOROOT packages. -- report all conflicts, or at least all qualitatively distinct ones. - Sometimes we stop to avoid redundancy, but - it may give a disproportionate sense of safety in -force mode. -- support renaming all instances of a pattern, e.g. - all receiver vars of a given type, - all local variables of a given type, - all PkgNames for a given package. 
-- emit JSON output for other editors and tools. -` - -var ( - // Force enables patching of the source files even if conflicts were reported. - // The resulting program may be ill-formed. - // It may even cause gorename to crash. TODO(adonovan): fix that. - Force bool - - // Diff causes the tool to display diffs instead of rewriting files. - Diff bool - - // DiffCmd specifies the diff command used by the -d feature. - // (The command must accept a -u flag and two filename arguments.) - DiffCmd = "diff" - - // ConflictError is returned by Main when it aborts the renaming due to conflicts. - // (It is distinguished because the interesting errors are the conflicts themselves.) - ConflictError = errors.New("renaming aborted due to conflicts") - - // Verbose enables extra logging. - Verbose bool -) - -var stdout io.Writer = os.Stdout - -type renamer struct { - iprog *loader.Program - objsToUpdate map[types.Object]bool - hadConflicts bool - from, to string - satisfyConstraints map[satisfy.Constraint]bool - packages map[*types.Package]*loader.PackageInfo // subset of iprog.AllPackages to inspect - msets typeutil.MethodSetCache - changeMethods bool -} - -var reportError = func(posn token.Position, message string) { - fmt.Fprintf(os.Stderr, "%s: %s\n", posn, message) -} - -// importName renames imports of fromPath within the package specified by info. -// If fromName is not empty, importName renames only imports as fromName. -// If the renaming would lead to a conflict, the file is left unchanged. -func importName(iprog *loader.Program, info *loader.PackageInfo, fromPath, fromName, to string) error { - if fromName == to { - return nil // no-op (e.g. rename x/foo to y/foo) - } - for _, f := range info.Files { - var from types.Object - for _, imp := range f.Imports { - importPath, _ := strconv.Unquote(imp.Path.Value) - importName := path.Base(importPath) - if imp.Name != nil { - importName = imp.Name.Name - } - if importPath == fromPath && (fromName == "" || importName == fromName) { - from = info.Implicits[imp] - break - } - } - if from == nil { - continue - } - r := renamer{ - iprog: iprog, - objsToUpdate: make(map[types.Object]bool), - to: to, - packages: map[*types.Package]*loader.PackageInfo{info.Pkg: info}, - } - r.check(from) - if r.hadConflicts { - reportError(iprog.Fset.Position(f.Imports[0].Pos()), - "skipping update of this file") - continue // ignore errors; leave the existing name - } - if err := r.update(); err != nil { - return err - } - } - return nil -} - -func Main(ctxt *build.Context, offsetFlag, fromFlag, to string) error { - // -- Parse the -from or -offset specifier ---------------------------- - - if (offsetFlag == "") == (fromFlag == "") { - return fmt.Errorf("exactly one of the -from and -offset flags must be specified") - } - - if !isValidIdentifier(to) { - return fmt.Errorf("-to %q: not a valid identifier", to) - } - - if Diff { - defer func(saved func(string, []byte) error) { writeFile = saved }(writeFile) - writeFile = diff - } - - var spec *spec - var err error - if fromFlag != "" { - spec, err = parseFromFlag(ctxt, fromFlag) - } else { - spec, err = parseOffsetFlag(ctxt, offsetFlag) - } - if err != nil { - return err - } - - if spec.fromName == to { - return fmt.Errorf("the old and new names are the same: %s", to) - } - - // -- Load the program consisting of the initial package ------------- - - iprog, err := loadProgram(ctxt, map[string]bool{spec.pkg: true}) - if err != nil { - return err - } - - fromObjects, err := findFromObjects(iprog, spec) - if err != nil { - 
return err - } - - // -- Load a larger program, for global renamings --------------------- - - if requiresGlobalRename(fromObjects, to) { - // For a local refactoring, we needn't load more - // packages, but if the renaming affects the package's - // API, we we must load all packages that depend on the - // package defining the object, plus their tests. - - if Verbose { - log.Print("Potentially global renaming; scanning workspace...") - } - - // Scan the workspace and build the import graph. - _, rev, errors := importgraph.Build(ctxt) - if len(errors) > 0 { - // With a large GOPATH tree, errors are inevitable. - // Report them but proceed. - fmt.Fprintf(os.Stderr, "While scanning Go workspace:\n") - for path, err := range errors { - fmt.Fprintf(os.Stderr, "Package %q: %s.\n", path, err) - } - } - - // Enumerate the set of potentially affected packages. - affectedPackages := make(map[string]bool) - for _, obj := range fromObjects { - // External test packages are never imported, - // so they will never appear in the graph. - for path := range rev.Search(obj.Pkg().Path()) { - affectedPackages[path] = true - } - } - - // TODO(adonovan): allow the user to specify the scope, - // or -ignore patterns? Computing the scope when we - // don't (yet) support inputs containing errors can make - // the tool rather brittle. - - // Re-load the larger program. - iprog, err = loadProgram(ctxt, affectedPackages) - if err != nil { - return err - } - - fromObjects, err = findFromObjects(iprog, spec) - if err != nil { - return err - } - } - - // -- Do the renaming ------------------------------------------------- - - r := renamer{ - iprog: iprog, - objsToUpdate: make(map[types.Object]bool), - from: spec.fromName, - to: to, - packages: make(map[*types.Package]*loader.PackageInfo), - } - - // A renaming initiated at an interface method indicates the - // intention to rename abstract and concrete methods as needed - // to preserve assignability. - for _, obj := range fromObjects { - if obj, ok := obj.(*types.Func); ok { - recv := obj.Type().(*types.Signature).Recv() - if recv != nil && isInterface(recv.Type().Underlying()) { - r.changeMethods = true - break - } - } - } - - // Only the initially imported packages (iprog.Imported) and - // their external tests (iprog.Created) should be inspected or - // modified, as only they have type-checked functions bodies. - // The rest are just dependencies, needed only for package-level - // type information. - for _, info := range iprog.Imported { - r.packages[info.Pkg] = info - } - for _, info := range iprog.Created { // (tests) - r.packages[info.Pkg] = info - } - - for _, from := range fromObjects { - r.check(from) - } - if r.hadConflicts && !Force { - return ConflictError - } - return r.update() -} - -// loadProgram loads the specified set of packages (plus their tests) -// and all their dependencies, from source, through the specified build -// context. Only packages in pkgs will have their functions bodies typechecked. -func loadProgram(ctxt *build.Context, pkgs map[string]bool) (*loader.Program, error) { - conf := loader.Config{ - Build: ctxt, - ParserMode: parser.ParseComments, - - // TODO(adonovan): enable this. Requires making a lot of code more robust! - AllowErrors: false, - } - // Optimization: don't type-check the bodies of functions in our - // dependencies, since we only need exported package members. 
- conf.TypeCheckFuncBodies = func(p string) bool { - return pkgs[p] || pkgs[strings.TrimSuffix(p, "_test")] - } - - if Verbose { - var list []string - for pkg := range pkgs { - list = append(list, pkg) - } - sort.Strings(list) - for _, pkg := range list { - log.Printf("Loading package: %s", pkg) - } - } - - for pkg := range pkgs { - conf.ImportWithTests(pkg) - } - - // Ideally we would just return conf.Load() here, but go/types - // reports certain "soft" errors that gc does not (Go issue 14596). - // As a workaround, we set AllowErrors=true and then duplicate - // the loader's error checking but allow soft errors. - // It would be nice if the loader API permitted "AllowErrors: soft". - conf.AllowErrors = true - prog, err := conf.Load() - if err != nil { - return nil, err - } - - var errpkgs []string - // Report hard errors in indirectly imported packages. - for _, info := range prog.AllPackages { - if containsHardErrors(info.Errors) { - errpkgs = append(errpkgs, info.Pkg.Path()) - } - } - if errpkgs != nil { - var more string - if len(errpkgs) > 3 { - more = fmt.Sprintf(" and %d more", len(errpkgs)-3) - errpkgs = errpkgs[:3] - } - return nil, fmt.Errorf("couldn't load packages due to errors: %s%s", - strings.Join(errpkgs, ", "), more) - } - return prog, nil -} - -func containsHardErrors(errors []error) bool { - for _, err := range errors { - if err, ok := err.(types.Error); ok && err.Soft { - continue - } - return true - } - return false -} - -// requiresGlobalRename reports whether this renaming could potentially -// affect other packages in the Go workspace. -func requiresGlobalRename(fromObjects []types.Object, to string) bool { - var tfm bool - for _, from := range fromObjects { - if from.Exported() { - return true - } - switch objectKind(from) { - case "type", "field", "method": - tfm = true - } - } - if ast.IsExported(to) && tfm { - // A global renaming may be necessary even if we're - // exporting a previous unexported name, since if it's - // the name of a type, field or method, this could - // change selections in other packages. - // (We include "type" in this list because a type - // used as an embedded struct field entails a field - // renaming.) - return true - } - return false -} - -// update updates the input files. -func (r *renamer) update() error { - // We use token.File, not filename, since a file may appear to - // belong to multiple packages and be parsed more than once. - // token.File captures this distinction; filename does not. - - var nidents int - var filesToUpdate = make(map[*token.File]bool) - docRegexp := regexp.MustCompile(`\b` + r.from + `\b`) - for _, info := range r.packages { - // Mutate the ASTs and note the filenames. - for id, obj := range info.Defs { - if r.objsToUpdate[obj] { - nidents++ - id.Name = r.to - filesToUpdate[r.iprog.Fset.File(id.Pos())] = true - // Perform the rename in doc comments too. - if doc := r.docComment(id); doc != nil { - for _, comment := range doc.List { - comment.Text = docRegexp.ReplaceAllString(comment.Text, r.to) - } - } - } - } - - for id, obj := range info.Uses { - if r.objsToUpdate[obj] { - nidents++ - id.Name = r.to - filesToUpdate[r.iprog.Fset.File(id.Pos())] = true - } - } - } - - // Renaming not supported if cgo files are affected. 
- var generatedFileNames []string - for _, info := range r.packages { - for _, f := range info.Files { - tokenFile := r.iprog.Fset.File(f.Pos()) - if filesToUpdate[tokenFile] && generated(f, tokenFile) { - generatedFileNames = append(generatedFileNames, tokenFile.Name()) - } - } - } - if len(generatedFileNames) > 0 { - return fmt.Errorf("refusing to modify generated file%s containing DO NOT EDIT marker: %v", plural(len(generatedFileNames)), generatedFileNames) - } - - // Write affected files. - var nerrs, npkgs int - for _, info := range r.packages { - first := true - for _, f := range info.Files { - tokenFile := r.iprog.Fset.File(f.Pos()) - if filesToUpdate[tokenFile] { - if first { - npkgs++ - first = false - if Verbose { - log.Printf("Updating package %s", info.Pkg.Path()) - } - } - - filename := tokenFile.Name() - var buf bytes.Buffer - if err := format.Node(&buf, r.iprog.Fset, f); err != nil { - log.Printf("failed to pretty-print syntax tree: %v", err) - nerrs++ - continue - } - if err := writeFile(filename, buf.Bytes()); err != nil { - log.Print(err) - nerrs++ - } - } - } - } - if !Diff { - fmt.Printf("Renamed %d occurrence%s in %d file%s in %d package%s.\n", - nidents, plural(nidents), - len(filesToUpdate), plural(len(filesToUpdate)), - npkgs, plural(npkgs)) - } - if nerrs > 0 { - return fmt.Errorf("failed to rewrite %d file%s", nerrs, plural(nerrs)) - } - return nil -} - -// docComment returns the doc for an identifier. -func (r *renamer) docComment(id *ast.Ident) *ast.CommentGroup { - _, nodes, _ := r.iprog.PathEnclosingInterval(id.Pos(), id.End()) - for _, node := range nodes { - switch decl := node.(type) { - case *ast.FuncDecl: - return decl.Doc - case *ast.Field: - return decl.Doc - case *ast.GenDecl: - return decl.Doc - // For {Type,Value}Spec, if the doc on the spec is absent, - // search for the enclosing GenDecl - case *ast.TypeSpec: - if decl.Doc != nil { - return decl.Doc - } - case *ast.ValueSpec: - if decl.Doc != nil { - return decl.Doc - } - case *ast.Ident: - default: - return nil - } - } - return nil -} - -func plural(n int) string { - if n != 1 { - return "s" - } - return "" -} - -// writeFile is a seam for testing and for the -d flag. -var writeFile = reallyWriteFile - -func reallyWriteFile(filename string, content []byte) error { - return ioutil.WriteFile(filename, content, 0644) -} - -func diff(filename string, content []byte) error { - renamed := fmt.Sprintf("%s.%d.renamed", filename, os.Getpid()) - if err := ioutil.WriteFile(renamed, content, 0644); err != nil { - return err - } - defer os.Remove(renamed) - - diff, err := exec.Command(DiffCmd, "-u", filename, renamed).CombinedOutput() - if len(diff) > 0 { - // diff exits with a non-zero status when the files don't match. - // Ignore that failure as long as we get output. - stdout.Write(diff) - return nil - } - if err != nil { - return fmt.Errorf("computing diff: %v", err) - } - return nil -} diff --git a/vendor/golang.org/x/tools/refactor/rename/spec.go b/vendor/golang.org/x/tools/refactor/rename/spec.go deleted file mode 100644 index 0c4526d..0000000 --- a/vendor/golang.org/x/tools/refactor/rename/spec.go +++ /dev/null @@ -1,593 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package rename - -// This file contains logic related to specifying a renaming: parsing of -// the flags as a form of query, and finding the object(s) it denotes. -// See Usage for flag details. 
- -import ( - "bytes" - "fmt" - "go/ast" - "go/build" - "go/parser" - "go/token" - "go/types" - "log" - "os" - "path/filepath" - "regexp" - "strconv" - "strings" - - "golang.org/x/tools/go/buildutil" - "golang.org/x/tools/go/loader" -) - -// A spec specifies an entity to rename. -// -// It is populated from an -offset flag or -from query; -// see Usage for the allowed -from query forms. -// -type spec struct { - // pkg is the package containing the position - // specified by the -from or -offset flag. - // If filename == "", our search for the 'from' entity - // is restricted to this package. - pkg string - - // The original name of the entity being renamed. - // If the query had a ::from component, this is that; - // otherwise it's the last segment, e.g. - // (encoding/json.Decoder).from - // encoding/json.from - fromName string - - // -- The remaining fields are private to this file. All are optional. -- - - // The query's ::x suffix, if any. - searchFor string - - // e.g. "Decoder" in "(encoding/json.Decoder).fieldOrMethod" - // or "encoding/json.Decoder - pkgMember string - - // e.g. fieldOrMethod in "(encoding/json.Decoder).fieldOrMethod" - typeMember string - - // Restricts the query to this file. - // Implied by -from="file.go::x" and -offset flags. - filename string - - // Byte offset of the 'from' identifier within the file named 'filename'. - // -offset mode only. - offset int -} - -// parseFromFlag interprets the "-from" flag value as a renaming specification. -// See Usage in rename.go for valid formats. -func parseFromFlag(ctxt *build.Context, fromFlag string) (*spec, error) { - var spec spec - var main string // sans "::x" suffix - switch parts := strings.Split(fromFlag, "::"); len(parts) { - case 1: - main = parts[0] - case 2: - main = parts[0] - spec.searchFor = parts[1] - if parts[1] == "" { - // error - } - default: - return nil, fmt.Errorf("-from %q: invalid identifier specification (see -help for formats)", fromFlag) - } - - if strings.HasSuffix(main, ".go") { - // main is "filename.go" - if spec.searchFor == "" { - return nil, fmt.Errorf("-from: filename %q must have a ::name suffix", main) - } - spec.filename = main - if !buildutil.FileExists(ctxt, spec.filename) { - return nil, fmt.Errorf("no such file: %s", spec.filename) - } - - bp, err := buildutil.ContainingPackage(ctxt, wd, spec.filename) - if err != nil { - return nil, err - } - spec.pkg = bp.ImportPath - - } else { - // main is one of: - // "importpath" - // "importpath".member - // (*"importpath".type).fieldormethod (parens and star optional) - if err := parseObjectSpec(&spec, main); err != nil { - return nil, err - } - } - - if spec.searchFor != "" { - spec.fromName = spec.searchFor - } - - cwd, err := os.Getwd() - if err != nil { - return nil, err - } - - // Sanitize the package. - bp, err := ctxt.Import(spec.pkg, cwd, build.FindOnly) - if err != nil { - return nil, fmt.Errorf("can't find package %q", spec.pkg) - } - spec.pkg = bp.ImportPath - - if !isValidIdentifier(spec.fromName) { - return nil, fmt.Errorf("-from: invalid identifier %q", spec.fromName) - } - - if Verbose { - log.Printf("-from spec: %+v", spec) - } - - return &spec, nil -} - -// parseObjectSpec parses main as one of the non-filename forms of -// object specification. -func parseObjectSpec(spec *spec, main string) error { - // Parse main as a Go expression, albeit a strange one. - e, _ := parser.ParseExpr(main) - - if pkg := parseImportPath(e); pkg != "" { - // e.g. 
bytes or "encoding/json": a package - spec.pkg = pkg - if spec.searchFor == "" { - return fmt.Errorf("-from %q: package import path %q must have a ::name suffix", - main, main) - } - return nil - } - - if e, ok := e.(*ast.SelectorExpr); ok { - x := unparen(e.X) - - // Strip off star constructor, if any. - if star, ok := x.(*ast.StarExpr); ok { - x = star.X - } - - if pkg := parseImportPath(x); pkg != "" { - // package member e.g. "encoding/json".HTMLEscape - spec.pkg = pkg // e.g. "encoding/json" - spec.pkgMember = e.Sel.Name // e.g. "HTMLEscape" - spec.fromName = e.Sel.Name - return nil - } - - if x, ok := x.(*ast.SelectorExpr); ok { - // field/method of type e.g. ("encoding/json".Decoder).Decode - y := unparen(x.X) - if pkg := parseImportPath(y); pkg != "" { - spec.pkg = pkg // e.g. "encoding/json" - spec.pkgMember = x.Sel.Name // e.g. "Decoder" - spec.typeMember = e.Sel.Name // e.g. "Decode" - spec.fromName = e.Sel.Name - return nil - } - } - } - - return fmt.Errorf("-from %q: invalid expression", main) -} - -// parseImportPath returns the import path of the package denoted by e. -// Any import path may be represented as a string literal; -// single-segment import paths (e.g. "bytes") may also be represented as -// ast.Ident. parseImportPath returns "" for all other expressions. -func parseImportPath(e ast.Expr) string { - switch e := e.(type) { - case *ast.Ident: - return e.Name // e.g. bytes - - case *ast.BasicLit: - if e.Kind == token.STRING { - pkgname, _ := strconv.Unquote(e.Value) - return pkgname // e.g. "encoding/json" - } - } - return "" -} - -// parseOffsetFlag interprets the "-offset" flag value as a renaming specification. -func parseOffsetFlag(ctxt *build.Context, offsetFlag string) (*spec, error) { - var spec spec - // Validate -offset, e.g. file.go:#123 - parts := strings.Split(offsetFlag, ":#") - if len(parts) != 2 { - return nil, fmt.Errorf("-offset %q: invalid offset specification", offsetFlag) - } - - spec.filename = parts[0] - if !buildutil.FileExists(ctxt, spec.filename) { - return nil, fmt.Errorf("no such file: %s", spec.filename) - } - - bp, err := buildutil.ContainingPackage(ctxt, wd, spec.filename) - if err != nil { - return nil, err - } - spec.pkg = bp.ImportPath - - for _, r := range parts[1] { - if !isDigit(r) { - return nil, fmt.Errorf("-offset %q: non-numeric offset", offsetFlag) - } - } - spec.offset, err = strconv.Atoi(parts[1]) - if err != nil { - return nil, fmt.Errorf("-offset %q: non-numeric offset", offsetFlag) - } - - // Parse the file and check there's an identifier at that offset. - fset := token.NewFileSet() - f, err := buildutil.ParseFile(fset, ctxt, nil, wd, spec.filename, parser.ParseComments) - if err != nil { - return nil, fmt.Errorf("-offset %q: cannot parse file: %s", offsetFlag, err) - } - - id := identAtOffset(fset, f, spec.offset) - if id == nil { - return nil, fmt.Errorf("-offset %q: no identifier at this position", offsetFlag) - } - - spec.fromName = id.Name - - return &spec, nil -} - -var wd = func() string { - wd, err := os.Getwd() - if err != nil { - panic("cannot get working directory: " + err.Error()) - } - return wd -}() - -// For source trees built with 'go build', the -from or -offset -// spec identifies exactly one initial 'from' object to rename , -// but certain proprietary build systems allow a single file to -// appear in multiple packages (e.g. the test package contains a -// copy of its library), so there may be multiple objects for -// the same source entity. 
- -func findFromObjects(iprog *loader.Program, spec *spec) ([]types.Object, error) { - if spec.filename != "" { - return findFromObjectsInFile(iprog, spec) - } - - // Search for objects defined in specified package. - - // TODO(adonovan): the iprog.ImportMap has an entry {"main": ...} - // for main packages, even though that's not an import path. - // Seems like a bug. - // - // pkg := iprog.ImportMap[spec.pkg] - // if pkg == nil { - // return fmt.Errorf("cannot find package %s", spec.pkg) // can't happen? - // } - // info := iprog.AllPackages[pkg] - - // Workaround: lookup by value. - var info *loader.PackageInfo - var pkg *types.Package - for pkg, info = range iprog.AllPackages { - if pkg.Path() == spec.pkg { - break - } - } - if info == nil { - return nil, fmt.Errorf("package %q was not loaded", spec.pkg) - } - - objects, err := findObjects(info, spec) - if err != nil { - return nil, err - } - if len(objects) > 1 { - // ambiguous "*" scope query - return nil, ambiguityError(iprog.Fset, objects) - } - return objects, nil -} - -func findFromObjectsInFile(iprog *loader.Program, spec *spec) ([]types.Object, error) { - var fromObjects []types.Object - for _, info := range iprog.AllPackages { - // restrict to specified filename - // NB: under certain proprietary build systems, a given - // filename may appear in multiple packages. - for _, f := range info.Files { - thisFile := iprog.Fset.File(f.Pos()) - if !sameFile(thisFile.Name(), spec.filename) { - continue - } - // This package contains the query file. - - if spec.offset != 0 { - // We cannot refactor generated files since position information is invalidated. - if generated(f, thisFile) { - return nil, fmt.Errorf("cannot rename identifiers in generated file containing DO NOT EDIT marker: %s", thisFile.Name()) - } - - // Search for a specific ident by file/offset. - id := identAtOffset(iprog.Fset, f, spec.offset) - if id == nil { - // can't happen? - return nil, fmt.Errorf("identifier not found") - } - obj := info.Uses[id] - if obj == nil { - obj = info.Defs[id] - if obj == nil { - // Ident without Object. - - // Package clause? - pos := thisFile.Pos(spec.offset) - _, path, _ := iprog.PathEnclosingInterval(pos, pos) - if len(path) == 2 { // [Ident File] - // TODO(adonovan): support this case. - return nil, fmt.Errorf("cannot rename %q: renaming package clauses is not yet supported", - path[1].(*ast.File).Name.Name) - } - - // Implicit y in "switch y := x.(type) {"? - if obj := typeSwitchVar(&info.Info, path); obj != nil { - return []types.Object{obj}, nil - } - - // Probably a type error. - return nil, fmt.Errorf("cannot find object for %q", id.Name) - } - } - if obj.Pkg() == nil { - return nil, fmt.Errorf("cannot rename predeclared identifiers (%s)", obj) - - } - - fromObjects = append(fromObjects, obj) - } else { - // do a package-wide query - objects, err := findObjects(info, spec) - if err != nil { - return nil, err - } - - // filter results: only objects defined in thisFile - var filtered []types.Object - for _, obj := range objects { - if iprog.Fset.File(obj.Pos()) == thisFile { - filtered = append(filtered, obj) - } - } - if len(filtered) == 0 { - return nil, fmt.Errorf("no object %q declared in file %s", - spec.fromName, spec.filename) - } else if len(filtered) > 1 { - return nil, ambiguityError(iprog.Fset, filtered) - } - fromObjects = append(fromObjects, filtered[0]) - } - break - } - } - if len(fromObjects) == 0 { - // can't happen? 
- return nil, fmt.Errorf("file %s was not part of the loaded program", spec.filename) - } - return fromObjects, nil -} - -func typeSwitchVar(info *types.Info, path []ast.Node) types.Object { - if len(path) > 3 { - // [Ident AssignStmt TypeSwitchStmt...] - if sw, ok := path[2].(*ast.TypeSwitchStmt); ok { - // choose the first case. - if len(sw.Body.List) > 0 { - obj := info.Implicits[sw.Body.List[0].(*ast.CaseClause)] - if obj != nil { - return obj - } - } - } - } - return nil -} - -// On success, findObjects returns the list of objects named -// spec.fromName matching the spec. On success, the result has exactly -// one element unless spec.searchFor!="", in which case it has at least one -// element. -// -func findObjects(info *loader.PackageInfo, spec *spec) ([]types.Object, error) { - if spec.pkgMember == "" { - if spec.searchFor == "" { - panic(spec) - } - objects := searchDefs(&info.Info, spec.searchFor) - if objects == nil { - return nil, fmt.Errorf("no object %q declared in package %q", - spec.searchFor, info.Pkg.Path()) - } - return objects, nil - } - - pkgMember := info.Pkg.Scope().Lookup(spec.pkgMember) - if pkgMember == nil { - return nil, fmt.Errorf("package %q has no member %q", - info.Pkg.Path(), spec.pkgMember) - } - - var searchFunc *types.Func - if spec.typeMember == "" { - // package member - if spec.searchFor == "" { - return []types.Object{pkgMember}, nil - } - - // Search within pkgMember, which must be a function. - searchFunc, _ = pkgMember.(*types.Func) - if searchFunc == nil { - return nil, fmt.Errorf("cannot search for %q within %s %q", - spec.searchFor, objectKind(pkgMember), pkgMember) - } - } else { - // field/method of type - // e.g. (encoding/json.Decoder).Decode - // or ::x within it. - - tName, _ := pkgMember.(*types.TypeName) - if tName == nil { - return nil, fmt.Errorf("%s.%s is a %s, not a type", - info.Pkg.Path(), pkgMember.Name(), objectKind(pkgMember)) - } - - // search within named type. - obj, _, _ := types.LookupFieldOrMethod(tName.Type(), true, info.Pkg, spec.typeMember) - if obj == nil { - return nil, fmt.Errorf("cannot find field or method %q of %s %s.%s", - spec.typeMember, typeKind(tName.Type()), info.Pkg.Path(), tName.Name()) - } - - if spec.searchFor == "" { - // If it is an embedded field, return the type of the field. - if v, ok := obj.(*types.Var); ok && v.Anonymous() { - switch t := v.Type().(type) { - case *types.Pointer: - return []types.Object{t.Elem().(*types.Named).Obj()}, nil - case *types.Named: - return []types.Object{t.Obj()}, nil - } - } - return []types.Object{obj}, nil - } - - searchFunc, _ = obj.(*types.Func) - if searchFunc == nil { - return nil, fmt.Errorf("cannot search for local name %q within %s (%s.%s).%s; need a function", - spec.searchFor, objectKind(obj), info.Pkg.Path(), tName.Name(), - obj.Name()) - } - if isInterface(tName.Type()) { - return nil, fmt.Errorf("cannot search for local name %q within abstract method (%s.%s).%s", - spec.searchFor, info.Pkg.Path(), tName.Name(), searchFunc.Name()) - } - } - - // -- search within function or method -- - - decl := funcDecl(info, searchFunc) - if decl == nil { - return nil, fmt.Errorf("cannot find syntax for %s", searchFunc) // can't happen? - } - - var objects []types.Object - for _, obj := range searchDefs(&info.Info, spec.searchFor) { - // We use positions, not scopes, to determine whether - // the obj is within searchFunc. 
This is clumsy, but the - // alternative, using the types.Scope tree, doesn't - // account for non-lexical objects like fields and - // interface methods. - if decl.Pos() <= obj.Pos() && obj.Pos() < decl.End() && obj != searchFunc { - objects = append(objects, obj) - } - } - if objects == nil { - return nil, fmt.Errorf("no local definition of %q within %s", - spec.searchFor, searchFunc) - } - return objects, nil -} - -func funcDecl(info *loader.PackageInfo, fn *types.Func) *ast.FuncDecl { - for _, f := range info.Files { - for _, d := range f.Decls { - if d, ok := d.(*ast.FuncDecl); ok && info.Defs[d.Name] == fn { - return d - } - } - } - return nil -} - -func searchDefs(info *types.Info, name string) []types.Object { - var objects []types.Object - for id, obj := range info.Defs { - if obj == nil { - // e.g. blank ident. - // TODO(adonovan): but also implicit y in - // switch y := x.(type) - // Needs some thought. - continue - } - if id.Name == name { - objects = append(objects, obj) - } - } - return objects -} - -func identAtOffset(fset *token.FileSet, f *ast.File, offset int) *ast.Ident { - var found *ast.Ident - ast.Inspect(f, func(n ast.Node) bool { - if id, ok := n.(*ast.Ident); ok { - idpos := fset.Position(id.Pos()).Offset - if idpos <= offset && offset < idpos+len(id.Name) { - found = id - } - } - return found == nil // keep traversing only until found - }) - return found -} - -// ambiguityError returns an error describing an ambiguous "*" scope query. -func ambiguityError(fset *token.FileSet, objects []types.Object) error { - var buf bytes.Buffer - for i, obj := range objects { - if i > 0 { - buf.WriteString(", ") - } - posn := fset.Position(obj.Pos()) - fmt.Fprintf(&buf, "%s at %s:%d:%d", - objectKind(obj), filepath.Base(posn.Filename), posn.Line, posn.Column) - } - return fmt.Errorf("ambiguous specifier %s matches %s", - objects[0].Name(), buf.String()) -} - -// Matches cgo generated comment as well as the proposed standard: -// https://golang.org/s/generatedcode -var generatedRx = regexp.MustCompile(`// .*DO NOT EDIT\.?`) - -// generated reports whether ast.File is a generated file. -func generated(f *ast.File, tokenFile *token.File) bool { - - // Iterate over the comments in the file - for _, commentGroup := range f.Comments { - for _, comment := range commentGroup.List { - if matched := generatedRx.MatchString(comment.Text); matched { - // Check if comment is at the beginning of the line in source - if pos := tokenFile.Position(comment.Slash); pos.Column == 1 { - return true - } - } - } - } - return false -} diff --git a/vendor/golang.org/x/tools/refactor/rename/util.go b/vendor/golang.org/x/tools/refactor/rename/util.go deleted file mode 100644 index e8f8d74..0000000 --- a/vendor/golang.org/x/tools/refactor/rename/util.go +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package rename - -import ( - "go/ast" - "go/token" - "go/types" - "os" - "path/filepath" - "reflect" - "runtime" - "strings" - "unicode" - - "golang.org/x/tools/go/ast/astutil" -) - -func objectKind(obj types.Object) string { - switch obj := obj.(type) { - case *types.PkgName: - return "imported package name" - case *types.TypeName: - return "type" - case *types.Var: - if obj.IsField() { - return "field" - } - case *types.Func: - if obj.Type().(*types.Signature).Recv() != nil { - return "method" - } - } - // label, func, var, const - return strings.ToLower(strings.TrimPrefix(reflect.TypeOf(obj).String(), "*types.")) -} - -func typeKind(T types.Type) string { - return strings.ToLower(strings.TrimPrefix(reflect.TypeOf(T.Underlying()).String(), "*types.")) -} - -// NB: for renamings, blank is not considered valid. -func isValidIdentifier(id string) bool { - if id == "" || id == "_" { - return false - } - for i, r := range id { - if !isLetter(r) && (i == 0 || !isDigit(r)) { - return false - } - } - return token.Lookup(id) == token.IDENT -} - -// isLocal reports whether obj is local to some function. -// Precondition: not a struct field or interface method. -func isLocal(obj types.Object) bool { - // [... 5=stmt 4=func 3=file 2=pkg 1=universe] - var depth int - for scope := obj.Parent(); scope != nil; scope = scope.Parent() { - depth++ - } - return depth >= 4 -} - -func isPackageLevel(obj types.Object) bool { - return obj.Pkg().Scope().Lookup(obj.Name()) == obj -} - -// -- Plundered from go/scanner: --------------------------------------- - -func isLetter(ch rune) bool { - return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch) -} - -func isDigit(ch rune) bool { - return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch) -} - -// -- Plundered from golang.org/x/tools/cmd/guru ----------------- - -// sameFile returns true if x and y have the same basename and denote -// the same file. -// -func sameFile(x, y string) bool { - if runtime.GOOS == "windows" { - x = filepath.ToSlash(x) - y = filepath.ToSlash(y) - } - if x == y { - return true - } - if filepath.Base(x) == filepath.Base(y) { // (optimisation) - if xi, err := os.Stat(x); err == nil { - if yi, err := os.Stat(y); err == nil { - return os.SameFile(xi, yi) - } - } - } - return false -} - -func unparen(e ast.Expr) ast.Expr { return astutil.Unparen(e) } diff --git a/vendor/golang.org/x/tools/refactor/satisfy/find.go b/vendor/golang.org/x/tools/refactor/satisfy/find.go deleted file mode 100644 index 9b365b9..0000000 --- a/vendor/golang.org/x/tools/refactor/satisfy/find.go +++ /dev/null @@ -1,705 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package satisfy inspects the type-checked ASTs of Go packages and -// reports the set of discovered type constraints of the form (lhs, rhs -// Type) where lhs is a non-trivial interface, rhs satisfies this -// interface, and this fact is necessary for the package to be -// well-typed. -// -// THIS PACKAGE IS EXPERIMENTAL AND MAY CHANGE AT ANY TIME. -// -// It is provided only for the gorename tool. Ideally this -// functionality will become part of the type-checker in due course, -// since it is computing it anyway, and it is robust for ill-typed -// inputs, which this package is not. 
-// -package satisfy // import "golang.org/x/tools/refactor/satisfy" - -// NOTES: -// -// We don't care about numeric conversions, so we don't descend into -// types or constant expressions. This is unsound because -// constant expressions can contain arbitrary statements, e.g. -// const x = len([1]func(){func() { -// ... -// }}) -// -// TODO(adonovan): make this robust against ill-typed input. -// Or move it into the type-checker. -// -// Assignability conversions are possible in the following places: -// - in assignments y = x, y := x, var y = x. -// - from call argument types to formal parameter types -// - in append and delete calls -// - from return operands to result parameter types -// - in composite literal T{k:v}, from k and v to T's field/element/key type -// - in map[key] from key to the map's key type -// - in comparisons x==y and switch x { case y: }. -// - in explicit conversions T(x) -// - in sends ch <- x, from x to the channel element type -// - in type assertions x.(T) and switch x.(type) { case T: } -// -// The results of this pass provide information equivalent to the -// ssa.MakeInterface and ssa.ChangeInterface instructions. - -import ( - "fmt" - "go/ast" - "go/token" - "go/types" - - "golang.org/x/tools/go/ast/astutil" - "golang.org/x/tools/go/types/typeutil" -) - -// A Constraint records the fact that the RHS type does and must -// satisify the LHS type, which is an interface. -// The names are suggestive of an assignment statement LHS = RHS. -type Constraint struct { - LHS, RHS types.Type -} - -// A Finder inspects the type-checked ASTs of Go packages and -// accumulates the set of type constraints (x, y) such that x is -// assignable to y, y is an interface, and both x and y have methods. -// -// In other words, it returns the subset of the "implements" relation -// that is checked during compilation of a package. Refactoring tools -// will need to preserve at least this part of the relation to ensure -// continued compilation. -// -type Finder struct { - Result map[Constraint]bool - msetcache typeutil.MethodSetCache - - // per-Find state - info *types.Info - sig *types.Signature -} - -// Find inspects a single package, populating Result with its pairs of -// constrained types. -// -// The result is non-canonical and thus may contain duplicates (but this -// tends to preserves names of interface types better). -// -// The package must be free of type errors, and -// info.{Defs,Uses,Selections,Types} must have been populated by the -// type-checker. -// -func (f *Finder) Find(info *types.Info, files []*ast.File) { - if f.Result == nil { - f.Result = make(map[Constraint]bool) - } - - f.info = info - for _, file := range files { - for _, d := range file.Decls { - switch d := d.(type) { - case *ast.GenDecl: - if d.Tok == token.VAR { // ignore consts - for _, spec := range d.Specs { - f.valueSpec(spec.(*ast.ValueSpec)) - } - } - - case *ast.FuncDecl: - if d.Body != nil { - f.sig = f.info.Defs[d.Name].Type().(*types.Signature) - f.stmt(d.Body) - f.sig = nil - } - } - } - } - f.info = nil -} - -var ( - tInvalid = types.Typ[types.Invalid] - tUntypedBool = types.Typ[types.UntypedBool] - tUntypedNil = types.Typ[types.UntypedNil] -) - -// exprN visits an expression in a multi-value context. 
-func (f *Finder) exprN(e ast.Expr) types.Type { - typ := f.info.Types[e].Type.(*types.Tuple) - switch e := e.(type) { - case *ast.ParenExpr: - return f.exprN(e.X) - - case *ast.CallExpr: - // x, err := f(args) - sig := f.expr(e.Fun).Underlying().(*types.Signature) - f.call(sig, e.Args) - - case *ast.IndexExpr: - // y, ok := x[i] - x := f.expr(e.X) - f.assign(f.expr(e.Index), x.Underlying().(*types.Map).Key()) - - case *ast.TypeAssertExpr: - // y, ok := x.(T) - f.typeAssert(f.expr(e.X), typ.At(0).Type()) - - case *ast.UnaryExpr: // must be receive <- - // y, ok := <-x - f.expr(e.X) - - default: - panic(e) - } - return typ -} - -func (f *Finder) call(sig *types.Signature, args []ast.Expr) { - if len(args) == 0 { - return - } - - // Ellipsis call? e.g. f(x, y, z...) - if _, ok := args[len(args)-1].(*ast.Ellipsis); ok { - for i, arg := range args { - // The final arg is a slice, and so is the final param. - f.assign(sig.Params().At(i).Type(), f.expr(arg)) - } - return - } - - var argtypes []types.Type - - // Gather the effective actual parameter types. - if tuple, ok := f.info.Types[args[0]].Type.(*types.Tuple); ok { - // f(g()) call where g has multiple results? - f.expr(args[0]) - // unpack the tuple - for i := 0; i < tuple.Len(); i++ { - argtypes = append(argtypes, tuple.At(i).Type()) - } - } else { - for _, arg := range args { - argtypes = append(argtypes, f.expr(arg)) - } - } - - // Assign the actuals to the formals. - if !sig.Variadic() { - for i, argtype := range argtypes { - f.assign(sig.Params().At(i).Type(), argtype) - } - } else { - // The first n-1 parameters are assigned normally. - nnormals := sig.Params().Len() - 1 - for i, argtype := range argtypes[:nnormals] { - f.assign(sig.Params().At(i).Type(), argtype) - } - // Remaining args are assigned to elements of varargs slice. - tElem := sig.Params().At(nnormals).Type().(*types.Slice).Elem() - for i := nnormals; i < len(argtypes); i++ { - f.assign(tElem, argtypes[i]) - } - } -} - -func (f *Finder) builtin(obj *types.Builtin, sig *types.Signature, args []ast.Expr, T types.Type) types.Type { - switch obj.Name() { - case "make", "new": - // skip the type operand - for _, arg := range args[1:] { - f.expr(arg) - } - - case "append": - s := f.expr(args[0]) - if _, ok := args[len(args)-1].(*ast.Ellipsis); ok && len(args) == 2 { - // append(x, y...) including append([]byte, "foo"...) - f.expr(args[1]) - } else { - // append(x, y, z) - tElem := s.Underlying().(*types.Slice).Elem() - for _, arg := range args[1:] { - f.assign(tElem, f.expr(arg)) - } - } - - case "delete": - m := f.expr(args[0]) - k := f.expr(args[1]) - f.assign(m.Underlying().(*types.Map).Key(), k) - - default: - // ordinary call - f.call(sig, args) - } - - return T -} - -func (f *Finder) extract(tuple types.Type, i int) types.Type { - if tuple, ok := tuple.(*types.Tuple); ok && i < tuple.Len() { - return tuple.At(i).Type() - } - return tInvalid -} - -func (f *Finder) valueSpec(spec *ast.ValueSpec) { - var T types.Type - if spec.Type != nil { - T = f.info.Types[spec.Type].Type - } - switch len(spec.Values) { - case len(spec.Names): // e.g. var x, y = f(), g() - for _, value := range spec.Values { - v := f.expr(value) - if T != nil { - f.assign(T, v) - } - } - - case 1: // e.g. var x, y = f() - tuple := f.exprN(spec.Values[0]) - for i := range spec.Names { - if T != nil { - f.assign(T, f.extract(tuple, i)) - } - } - } -} - -// assign records pairs of distinct types that are related by -// assignability, where the left-hand side is an interface and both -// sides have methods. 
-// -// It should be called for all assignability checks, type assertions, -// explicit conversions and comparisons between two types, unless the -// types are uninteresting (e.g. lhs is a concrete type, or the empty -// interface; rhs has no methods). -// -func (f *Finder) assign(lhs, rhs types.Type) { - if types.Identical(lhs, rhs) { - return - } - if !isInterface(lhs) { - return - } - - if f.msetcache.MethodSet(lhs).Len() == 0 { - return - } - if f.msetcache.MethodSet(rhs).Len() == 0 { - return - } - // record the pair - f.Result[Constraint{lhs, rhs}] = true -} - -// typeAssert must be called for each type assertion x.(T) where x has -// interface type I. -func (f *Finder) typeAssert(I, T types.Type) { - // Type assertions are slightly subtle, because they are allowed - // to be "impossible", e.g. - // - // var x interface{f()} - // _ = x.(interface{f()int}) // legal - // - // (In hindsight, the language spec should probably not have - // allowed this, but it's too late to fix now.) - // - // This means that a type assert from I to T isn't exactly a - // constraint that T is assignable to I, but for a refactoring - // tool it is a conditional constraint that, if T is assignable - // to I before a refactoring, it should remain so after. - - if types.AssignableTo(T, I) { - f.assign(I, T) - } -} - -// compare must be called for each comparison x==y. -func (f *Finder) compare(x, y types.Type) { - if types.AssignableTo(x, y) { - f.assign(y, x) - } else if types.AssignableTo(y, x) { - f.assign(x, y) - } -} - -// expr visits a true expression (not a type or defining ident) -// and returns its type. -func (f *Finder) expr(e ast.Expr) types.Type { - tv := f.info.Types[e] - if tv.Value != nil { - return tv.Type // prune the descent for constants - } - - // tv.Type may be nil for an ast.Ident. - - switch e := e.(type) { - case *ast.BadExpr, *ast.BasicLit: - // no-op - - case *ast.Ident: - // (referring idents only) - if obj, ok := f.info.Uses[e]; ok { - return obj.Type() - } - if e.Name == "_" { // e.g. 
"for _ = range x" - return tInvalid - } - panic("undefined ident: " + e.Name) - - case *ast.Ellipsis: - if e.Elt != nil { - f.expr(e.Elt) - } - - case *ast.FuncLit: - saved := f.sig - f.sig = tv.Type.(*types.Signature) - f.stmt(e.Body) - f.sig = saved - - case *ast.CompositeLit: - switch T := deref(tv.Type).Underlying().(type) { - case *types.Struct: - for i, elem := range e.Elts { - if kv, ok := elem.(*ast.KeyValueExpr); ok { - f.assign(f.info.Uses[kv.Key.(*ast.Ident)].Type(), f.expr(kv.Value)) - } else { - f.assign(T.Field(i).Type(), f.expr(elem)) - } - } - - case *types.Map: - for _, elem := range e.Elts { - elem := elem.(*ast.KeyValueExpr) - f.assign(T.Key(), f.expr(elem.Key)) - f.assign(T.Elem(), f.expr(elem.Value)) - } - - case *types.Array, *types.Slice: - tElem := T.(interface { - Elem() types.Type - }).Elem() - for _, elem := range e.Elts { - if kv, ok := elem.(*ast.KeyValueExpr); ok { - // ignore the key - f.assign(tElem, f.expr(kv.Value)) - } else { - f.assign(tElem, f.expr(elem)) - } - } - - default: - panic("unexpected composite literal type: " + tv.Type.String()) - } - - case *ast.ParenExpr: - f.expr(e.X) - - case *ast.SelectorExpr: - if _, ok := f.info.Selections[e]; ok { - f.expr(e.X) // selection - } else { - return f.info.Uses[e.Sel].Type() // qualified identifier - } - - case *ast.IndexExpr: - x := f.expr(e.X) - i := f.expr(e.Index) - if ux, ok := x.Underlying().(*types.Map); ok { - f.assign(ux.Key(), i) - } - - case *ast.SliceExpr: - f.expr(e.X) - if e.Low != nil { - f.expr(e.Low) - } - if e.High != nil { - f.expr(e.High) - } - if e.Max != nil { - f.expr(e.Max) - } - - case *ast.TypeAssertExpr: - x := f.expr(e.X) - f.typeAssert(x, f.info.Types[e.Type].Type) - - case *ast.CallExpr: - if tvFun := f.info.Types[e.Fun]; tvFun.IsType() { - // conversion - arg0 := f.expr(e.Args[0]) - f.assign(tvFun.Type, arg0) - } else { - // function call - if id, ok := unparen(e.Fun).(*ast.Ident); ok { - if obj, ok := f.info.Uses[id].(*types.Builtin); ok { - sig := f.info.Types[id].Type.(*types.Signature) - return f.builtin(obj, sig, e.Args, tv.Type) - } - } - // ordinary call - f.call(f.expr(e.Fun).Underlying().(*types.Signature), e.Args) - } - - case *ast.StarExpr: - f.expr(e.X) - - case *ast.UnaryExpr: - f.expr(e.X) - - case *ast.BinaryExpr: - x := f.expr(e.X) - y := f.expr(e.Y) - if e.Op == token.EQL || e.Op == token.NEQ { - f.compare(x, y) - } - - case *ast.KeyValueExpr: - f.expr(e.Key) - f.expr(e.Value) - - case *ast.ArrayType, - *ast.StructType, - *ast.FuncType, - *ast.InterfaceType, - *ast.MapType, - *ast.ChanType: - panic(e) - } - - if tv.Type == nil { - panic(fmt.Sprintf("no type for %T", e)) - } - - return tv.Type -} - -func (f *Finder) stmt(s ast.Stmt) { - switch s := s.(type) { - case *ast.BadStmt, - *ast.EmptyStmt, - *ast.BranchStmt: - // no-op - - case *ast.DeclStmt: - d := s.Decl.(*ast.GenDecl) - if d.Tok == token.VAR { // ignore consts - for _, spec := range d.Specs { - f.valueSpec(spec.(*ast.ValueSpec)) - } - } - - case *ast.LabeledStmt: - f.stmt(s.Stmt) - - case *ast.ExprStmt: - f.expr(s.X) - - case *ast.SendStmt: - ch := f.expr(s.Chan) - val := f.expr(s.Value) - f.assign(ch.Underlying().(*types.Chan).Elem(), val) - - case *ast.IncDecStmt: - f.expr(s.X) - - case *ast.AssignStmt: - switch s.Tok { - case token.ASSIGN, token.DEFINE: - // y := x or y = x - var rhsTuple types.Type - if len(s.Lhs) != len(s.Rhs) { - rhsTuple = f.exprN(s.Rhs[0]) - } - for i := range s.Lhs { - var lhs, rhs types.Type - if rhsTuple == nil { - rhs = f.expr(s.Rhs[i]) // 1:1 assignment - } else { - rhs 
= f.extract(rhsTuple, i) // n:1 assignment - } - - if id, ok := s.Lhs[i].(*ast.Ident); ok { - if id.Name != "_" { - if obj, ok := f.info.Defs[id]; ok { - lhs = obj.Type() // definition - } - } - } - if lhs == nil { - lhs = f.expr(s.Lhs[i]) // assignment - } - f.assign(lhs, rhs) - } - - default: - // y op= x - f.expr(s.Lhs[0]) - f.expr(s.Rhs[0]) - } - - case *ast.GoStmt: - f.expr(s.Call) - - case *ast.DeferStmt: - f.expr(s.Call) - - case *ast.ReturnStmt: - formals := f.sig.Results() - switch len(s.Results) { - case formals.Len(): // 1:1 - for i, result := range s.Results { - f.assign(formals.At(i).Type(), f.expr(result)) - } - - case 1: // n:1 - tuple := f.exprN(s.Results[0]) - for i := 0; i < formals.Len(); i++ { - f.assign(formals.At(i).Type(), f.extract(tuple, i)) - } - } - - case *ast.SelectStmt: - f.stmt(s.Body) - - case *ast.BlockStmt: - for _, s := range s.List { - f.stmt(s) - } - - case *ast.IfStmt: - if s.Init != nil { - f.stmt(s.Init) - } - f.expr(s.Cond) - f.stmt(s.Body) - if s.Else != nil { - f.stmt(s.Else) - } - - case *ast.SwitchStmt: - if s.Init != nil { - f.stmt(s.Init) - } - var tag types.Type = tUntypedBool - if s.Tag != nil { - tag = f.expr(s.Tag) - } - for _, cc := range s.Body.List { - cc := cc.(*ast.CaseClause) - for _, cond := range cc.List { - f.compare(tag, f.info.Types[cond].Type) - } - for _, s := range cc.Body { - f.stmt(s) - } - } - - case *ast.TypeSwitchStmt: - if s.Init != nil { - f.stmt(s.Init) - } - var I types.Type - switch ass := s.Assign.(type) { - case *ast.ExprStmt: // x.(type) - I = f.expr(unparen(ass.X).(*ast.TypeAssertExpr).X) - case *ast.AssignStmt: // y := x.(type) - I = f.expr(unparen(ass.Rhs[0]).(*ast.TypeAssertExpr).X) - } - for _, cc := range s.Body.List { - cc := cc.(*ast.CaseClause) - for _, cond := range cc.List { - tCase := f.info.Types[cond].Type - if tCase != tUntypedNil { - f.typeAssert(I, tCase) - } - } - for _, s := range cc.Body { - f.stmt(s) - } - } - - case *ast.CommClause: - if s.Comm != nil { - f.stmt(s.Comm) - } - for _, s := range s.Body { - f.stmt(s) - } - - case *ast.ForStmt: - if s.Init != nil { - f.stmt(s.Init) - } - if s.Cond != nil { - f.expr(s.Cond) - } - if s.Post != nil { - f.stmt(s.Post) - } - f.stmt(s.Body) - - case *ast.RangeStmt: - x := f.expr(s.X) - // No conversions are involved when Tok==DEFINE. - if s.Tok == token.ASSIGN { - if s.Key != nil { - k := f.expr(s.Key) - var xelem types.Type - // keys of array, *array, slice, string aren't interesting - switch ux := x.Underlying().(type) { - case *types.Chan: - xelem = ux.Elem() - case *types.Map: - xelem = ux.Key() - } - if xelem != nil { - f.assign(xelem, k) - } - } - if s.Value != nil { - val := f.expr(s.Value) - var xelem types.Type - // values of strings aren't interesting - switch ux := x.Underlying().(type) { - case *types.Array: - xelem = ux.Elem() - case *types.Chan: - xelem = ux.Elem() - case *types.Map: - xelem = ux.Elem() - case *types.Pointer: // *array - xelem = deref(ux).(*types.Array).Elem() - case *types.Slice: - xelem = ux.Elem() - } - if xelem != nil { - f.assign(xelem, val) - } - } - } - f.stmt(s.Body) - - default: - panic(s) - } -} - -// -- Plundered from golang.org/x/tools/go/ssa ----------------- - -// deref returns a pointer's element type; otherwise it returns typ. 
-func deref(typ types.Type) types.Type { - if p, ok := typ.Underlying().(*types.Pointer); ok { - return p.Elem() - } - return typ -} - -func unparen(e ast.Expr) ast.Expr { return astutil.Unparen(e) } - -func isInterface(T types.Type) bool { return types.IsInterface(T) } diff --git a/vendor/golang.org/x/tools/third_party/moduleloader/LICENSE b/vendor/golang.org/x/tools/third_party/moduleloader/LICENSE deleted file mode 100644 index 1723a22..0000000 --- a/vendor/golang.org/x/tools/third_party/moduleloader/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -Copyright (c) 2013-2016 Guy Bedford, Luke Hoban, Addy Osmani - -Permission is hereby granted, free of charge, to any person -obtaining a copy of this software and associated documentation -files (the "Software"), to deal in the Software without -restriction, including without limitation the rights to use, -copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the -Software is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT -HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/vendor/golang.org/x/tools/third_party/typescript/LICENSE b/vendor/golang.org/x/tools/third_party/typescript/LICENSE deleted file mode 100644 index e7259f8..0000000 --- a/vendor/golang.org/x/tools/third_party/typescript/LICENSE +++ /dev/null @@ -1,55 +0,0 @@ -Apache License - -Version 2.0, January 2004 - -http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - -"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. - -"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. - -"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. - -"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. - -"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. - -"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. 
- -"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). - -"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. - -"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." - -"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. - -4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: - -You must give any other recipients of the Work or Derivative Works a copy of this License; and - -You must cause any modified files to carry prominent notices stating that You changed the files; and - -You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and - -If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS \ No newline at end of file diff --git a/vendor/golang.org/x/tools/third_party/webcomponents/LICENSE b/vendor/golang.org/x/tools/third_party/webcomponents/LICENSE deleted file mode 100644 index e648283..0000000 --- a/vendor/golang.org/x/tools/third_party/webcomponents/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2015 The Polymer Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
\ No newline at end of file diff --git a/vendor/google.golang.org/appengine/LICENSE b/vendor/google.golang.org/appengine/LICENSE deleted file mode 100644 index d645695..0000000 --- a/vendor/google.golang.org/appengine/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. 
Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/google.golang.org/appengine/README.md b/vendor/google.golang.org/appengine/README.md deleted file mode 100644 index b6b11d9..0000000 --- a/vendor/google.golang.org/appengine/README.md +++ /dev/null @@ -1,73 +0,0 @@ -# Go App Engine packages - -[![Build Status](https://travis-ci.org/golang/appengine.svg)](https://travis-ci.org/golang/appengine) - -This repository supports the Go runtime on App Engine, -including both the standard App Engine and the -"App Engine flexible environment" (formerly known as "Managed VMs"). -It provides APIs for interacting with App Engine services. -Its canonical import path is `google.golang.org/appengine`. - -See https://cloud.google.com/appengine/docs/go/ -for more information. - -File issue reports and feature requests on the [Google App Engine issue -tracker](https://code.google.com/p/googleappengine/issues/entry?template=Go%20defect). - -## Directory structure -The top level directory of this repository is the `appengine` package. It -contains the -basic APIs (e.g. `appengine.NewContext`) that apply across APIs. Specific API -packages are in subdirectories (e.g. `datastore`). - -There is an `internal` subdirectory that contains service protocol buffers, -plus packages required for connectivity to make API calls. App Engine apps -should not directly import any package under `internal`. - -## Updating a Go App Engine app - -This section describes how to update an older Go App Engine app to use -these packages. A provided tool, `aefix`, can help automate steps 2 and 3 -(run `go get google.golang.org/appengine/cmd/aefix` to install it), but -read the details below since `aefix` can't perform all the changes. - -### 1. Update YAML files (App Engine flexible environment / Managed VMs only) - -The `app.yaml` file (and YAML files for modules) should have these new lines added: -``` -vm: true -``` -See https://cloud.google.com/appengine/docs/go/modules/#Go_Instance_scaling_and_class for details. - -### 2. Update import paths - -The import paths for App Engine packages are now fully qualified, based at `google.golang.org/appengine`. -You will need to update your code to use import paths starting with that; for instance, -code importing `appengine/datastore` will now need to import `google.golang.org/appengine/datastore`. - -### 3. Update code using deprecated, removed or modified APIs - -Most App Engine services are available with exactly the same API. -A few APIs were cleaned up, and some are not available yet. -This list summarises the differences: - -* `appengine.Context` has been replaced with the `Context` type from `golang.org/x/net/context`. -* Logging methods that were on `appengine.Context` are now functions in `google.golang.org/appengine/log`. -* `appengine.Timeout` has been removed. Use `context.WithTimeout` instead. -* `appengine.Datacenter` now takes a `context.Context` argument. -* `datastore.PropertyLoadSaver` has been simplified to use slices in place of channels. -* `delay.Call` now returns an error. -* `search.FieldLoadSaver` now handles document metadata. 
-* `urlfetch.Transport` no longer has a Deadline field; set a deadline on the - `context.Context` instead. -* `aetest` no longer declares its own Context type, and uses the standard one instead. -* `taskqueue.QueueStats` no longer takes a maxTasks argument. That argument has been - deprecated and unused for a long time. -* `appengine.BackendHostname` and `appengine.BackendInstance` were for the deprecated backends feature. - Use `appengine.ModuleHostname`and `appengine.ModuleName` instead. -* Most of `appengine/file` and parts of `appengine/blobstore` are deprecated. - Use [Google Cloud Storage](https://godoc.org/cloud.google.com/go/storage) if the - feature you require is not present in the new - [blobstore package](https://google.golang.org/appengine/blobstore). -* `appengine/socket` is not required on App Engine flexible environment / Managed VMs. - Use the standard `net` package instead. diff --git a/vendor/google.golang.org/appengine/appengine.go b/vendor/google.golang.org/appengine/appengine.go deleted file mode 100644 index 475cf2e..0000000 --- a/vendor/google.golang.org/appengine/appengine.go +++ /dev/null @@ -1,112 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// Package appengine provides basic functionality for Google App Engine. -// -// For more information on how to write Go apps for Google App Engine, see: -// https://cloud.google.com/appengine/docs/go/ -package appengine // import "google.golang.org/appengine" - -import ( - "net/http" - - "github.com/golang/protobuf/proto" - "golang.org/x/net/context" - - "google.golang.org/appengine/internal" -) - -// The gophers party all night; the rabbits provide the beats. - -// Main is the principal entry point for an app running in App Engine. -// -// On App Engine Flexible it installs a trivial health checker if one isn't -// already registered, and starts listening on port 8080 (overridden by the -// $PORT environment variable). -// -// See https://cloud.google.com/appengine/docs/flexible/custom-runtimes#health_check_requests -// for details on how to do your own health checking. -// -// Main is not yet supported on App Engine Standard. -// -// Main never returns. -// -// Main is designed so that the app's main package looks like this: -// -// package main -// -// import ( -// "google.golang.org/appengine" -// -// _ "myapp/package0" -// _ "myapp/package1" -// ) -// -// func main() { -// appengine.Main() -// } -// -// The "myapp/packageX" packages are expected to register HTTP handlers -// in their init functions. -func Main() { - internal.Main() -} - -// IsDevAppServer reports whether the App Engine app is running in the -// development App Server. -func IsDevAppServer() bool { - return internal.IsDevAppServer() -} - -// NewContext returns a context for an in-flight HTTP request. -// This function is cheap. -func NewContext(req *http.Request) context.Context { - return WithContext(context.Background(), req) -} - -// WithContext returns a copy of the parent context -// and associates it with an in-flight HTTP request. -// This function is cheap. -func WithContext(parent context.Context, req *http.Request) context.Context { - return internal.WithContext(parent, req) -} - -// TODO(dsymonds): Add a Call function here? Otherwise other packages can't access internal.Call. - -// BlobKey is a key for a blobstore blob. 
-// -// Conceptually, this type belongs in the blobstore package, but it lives in -// the appengine package to avoid a circular dependency: blobstore depends on -// datastore, and datastore needs to refer to the BlobKey type. -type BlobKey string - -// GeoPoint represents a location as latitude/longitude in degrees. -type GeoPoint struct { - Lat, Lng float64 -} - -// Valid returns whether a GeoPoint is within [-90, 90] latitude and [-180, 180] longitude. -func (g GeoPoint) Valid() bool { - return -90 <= g.Lat && g.Lat <= 90 && -180 <= g.Lng && g.Lng <= 180 -} - -// APICallFunc defines a function type for handling an API call. -// See WithCallOverride. -type APICallFunc func(ctx context.Context, service, method string, in, out proto.Message) error - -// WithAPICallFunc returns a copy of the parent context -// that will cause API calls to invoke f instead of their normal operation. -// -// This is intended for advanced users only. -func WithAPICallFunc(ctx context.Context, f APICallFunc) context.Context { - return internal.WithCallOverride(ctx, internal.CallOverrideFunc(f)) -} - -// APICall performs an API call. -// -// This is not intended for general use; it is exported for use in conjunction -// with WithAPICallFunc. -func APICall(ctx context.Context, service, method string, in, out proto.Message) error { - return internal.Call(ctx, service, method, in, out) -} diff --git a/vendor/google.golang.org/appengine/appengine_vm.go b/vendor/google.golang.org/appengine/appengine_vm.go deleted file mode 100644 index f4b645a..0000000 --- a/vendor/google.golang.org/appengine/appengine_vm.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// +build !appengine - -package appengine - -import ( - "golang.org/x/net/context" - - "google.golang.org/appengine/internal" -) - -// BackgroundContext returns a context not associated with a request. -// This should only be used when not servicing a request. -// This only works in App Engine "flexible environment". -func BackgroundContext() context.Context { - return internal.BackgroundContext() -} diff --git a/vendor/google.golang.org/appengine/datastore/datastore.go b/vendor/google.golang.org/appengine/datastore/datastore.go deleted file mode 100644 index 9422e41..0000000 --- a/vendor/google.golang.org/appengine/datastore/datastore.go +++ /dev/null @@ -1,406 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package datastore - -import ( - "errors" - "fmt" - "reflect" - - "github.com/golang/protobuf/proto" - "golang.org/x/net/context" - - "google.golang.org/appengine" - "google.golang.org/appengine/internal" - pb "google.golang.org/appengine/internal/datastore" -) - -var ( - // ErrInvalidEntityType is returned when functions like Get or Next are - // passed a dst or src argument of invalid type. - ErrInvalidEntityType = errors.New("datastore: invalid entity type") - // ErrInvalidKey is returned when an invalid key is presented. - ErrInvalidKey = errors.New("datastore: invalid key") - // ErrNoSuchEntity is returned when no entity was found for a given key. 
- ErrNoSuchEntity = errors.New("datastore: no such entity") -) - -// ErrFieldMismatch is returned when a field is to be loaded into a different -// type than the one it was stored from, or when a field is missing or -// unexported in the destination struct. -// StructType is the type of the struct pointed to by the destination argument -// passed to Get or to Iterator.Next. -type ErrFieldMismatch struct { - StructType reflect.Type - FieldName string - Reason string -} - -func (e *ErrFieldMismatch) Error() string { - return fmt.Sprintf("datastore: cannot load field %q into a %q: %s", - e.FieldName, e.StructType, e.Reason) -} - -// protoToKey converts a Reference proto to a *Key. -func protoToKey(r *pb.Reference) (k *Key, err error) { - appID := r.GetApp() - namespace := r.GetNameSpace() - for _, e := range r.Path.Element { - k = &Key{ - kind: e.GetType(), - stringID: e.GetName(), - intID: e.GetId(), - parent: k, - appID: appID, - namespace: namespace, - } - if !k.valid() { - return nil, ErrInvalidKey - } - } - return -} - -// keyToProto converts a *Key to a Reference proto. -func keyToProto(defaultAppID string, k *Key) *pb.Reference { - appID := k.appID - if appID == "" { - appID = defaultAppID - } - n := 0 - for i := k; i != nil; i = i.parent { - n++ - } - e := make([]*pb.Path_Element, n) - for i := k; i != nil; i = i.parent { - n-- - e[n] = &pb.Path_Element{ - Type: &i.kind, - } - // At most one of {Name,Id} should be set. - // Neither will be set for incomplete keys. - if i.stringID != "" { - e[n].Name = &i.stringID - } else if i.intID != 0 { - e[n].Id = &i.intID - } - } - var namespace *string - if k.namespace != "" { - namespace = proto.String(k.namespace) - } - return &pb.Reference{ - App: proto.String(appID), - NameSpace: namespace, - Path: &pb.Path{ - Element: e, - }, - } -} - -// multiKeyToProto is a batch version of keyToProto. -func multiKeyToProto(appID string, key []*Key) []*pb.Reference { - ret := make([]*pb.Reference, len(key)) - for i, k := range key { - ret[i] = keyToProto(appID, k) - } - return ret -} - -// multiValid is a batch version of Key.valid. It returns an error, not a -// []bool. -func multiValid(key []*Key) error { - invalid := false - for _, k := range key { - if !k.valid() { - invalid = true - break - } - } - if !invalid { - return nil - } - err := make(appengine.MultiError, len(key)) - for i, k := range key { - if !k.valid() { - err[i] = ErrInvalidKey - } - } - return err -} - -// It's unfortunate that the two semantically equivalent concepts pb.Reference -// and pb.PropertyValue_ReferenceValue aren't the same type. For example, the -// two have different protobuf field numbers. - -// referenceValueToKey is the same as protoToKey except the input is a -// PropertyValue_ReferenceValue instead of a Reference. -func referenceValueToKey(r *pb.PropertyValue_ReferenceValue) (k *Key, err error) { - appID := r.GetApp() - namespace := r.GetNameSpace() - for _, e := range r.Pathelement { - k = &Key{ - kind: e.GetType(), - stringID: e.GetName(), - intID: e.GetId(), - parent: k, - appID: appID, - namespace: namespace, - } - if !k.valid() { - return nil, ErrInvalidKey - } - } - return -} - -// keyToReferenceValue is the same as keyToProto except the output is a -// PropertyValue_ReferenceValue instead of a Reference. 
-func keyToReferenceValue(defaultAppID string, k *Key) *pb.PropertyValue_ReferenceValue { - ref := keyToProto(defaultAppID, k) - pe := make([]*pb.PropertyValue_ReferenceValue_PathElement, len(ref.Path.Element)) - for i, e := range ref.Path.Element { - pe[i] = &pb.PropertyValue_ReferenceValue_PathElement{ - Type: e.Type, - Id: e.Id, - Name: e.Name, - } - } - return &pb.PropertyValue_ReferenceValue{ - App: ref.App, - NameSpace: ref.NameSpace, - Pathelement: pe, - } -} - -type multiArgType int - -const ( - multiArgTypeInvalid multiArgType = iota - multiArgTypePropertyLoadSaver - multiArgTypeStruct - multiArgTypeStructPtr - multiArgTypeInterface -) - -// checkMultiArg checks that v has type []S, []*S, []I, or []P, for some struct -// type S, for some interface type I, or some non-interface non-pointer type P -// such that P or *P implements PropertyLoadSaver. -// -// It returns what category the slice's elements are, and the reflect.Type -// that represents S, I or P. -// -// As a special case, PropertyList is an invalid type for v. -func checkMultiArg(v reflect.Value) (m multiArgType, elemType reflect.Type) { - if v.Kind() != reflect.Slice { - return multiArgTypeInvalid, nil - } - if v.Type() == typeOfPropertyList { - return multiArgTypeInvalid, nil - } - elemType = v.Type().Elem() - if reflect.PtrTo(elemType).Implements(typeOfPropertyLoadSaver) { - return multiArgTypePropertyLoadSaver, elemType - } - switch elemType.Kind() { - case reflect.Struct: - return multiArgTypeStruct, elemType - case reflect.Interface: - return multiArgTypeInterface, elemType - case reflect.Ptr: - elemType = elemType.Elem() - if elemType.Kind() == reflect.Struct { - return multiArgTypeStructPtr, elemType - } - } - return multiArgTypeInvalid, nil -} - -// Get loads the entity stored for k into dst, which must be a struct pointer -// or implement PropertyLoadSaver. If there is no such entity for the key, Get -// returns ErrNoSuchEntity. -// -// The values of dst's unmatched struct fields are not modified, and matching -// slice-typed fields are not reset before appending to them. In particular, it -// is recommended to pass a pointer to a zero valued struct on each Get call. -// -// ErrFieldMismatch is returned when a field is to be loaded into a different -// type than the one it was stored from, or when a field is missing or -// unexported in the destination struct. ErrFieldMismatch is only returned if -// dst is a struct pointer. -func Get(c context.Context, key *Key, dst interface{}) error { - if dst == nil { // GetMulti catches nil interface; we need to catch nil ptr here - return ErrInvalidEntityType - } - err := GetMulti(c, []*Key{key}, []interface{}{dst}) - if me, ok := err.(appengine.MultiError); ok { - return me[0] - } - return err -} - -// GetMulti is a batch version of Get. -// -// dst must be a []S, []*S, []I or []P, for some struct type S, some interface -// type I, or some non-interface non-pointer type P such that P or *P -// implements PropertyLoadSaver. If an []I, each element must be a valid dst -// for Get: it must be a struct pointer or implement PropertyLoadSaver. -// -// As a special case, PropertyList is an invalid type for dst, even though a -// PropertyList is a slice of structs. It is treated as invalid to avoid being -// mistakenly passed when []PropertyList was intended. 
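The Get and GetMulti doc comments above describe the destination forms they accept, ErrNoSuchEntity for missing keys, and appengine.MultiError for partial batch failures. A minimal usage sketch of those semantics follows; the Task type, the "Task" kind, and the loadTasks/handler names are hypothetical illustrations, while Get, GetMulti, NewKey, ErrNoSuchEntity, MultiError, NewContext and Main are the package identifiers documented in the sources above.

```go
// Illustrative sketch only: exercises the Get/GetMulti semantics described
// in the doc comments above. "Task" is a hypothetical entity kind.
package main

import (
	"fmt"
	"net/http"

	"golang.org/x/net/context"

	"google.golang.org/appengine"
	"google.golang.org/appengine/datastore"
)

// Task is a hypothetical entity type used only for this example.
type Task struct {
	Title string
	Done  bool
}

// loadTasks shows the batch form: dst is a []S slice, and a partial failure
// comes back as an appengine.MultiError with one slot per key.
func loadTasks(ctx context.Context, keys []*datastore.Key) ([]Task, error) {
	dst := make([]Task, len(keys)) // []S destination form accepted by GetMulti
	err := datastore.GetMulti(ctx, keys, dst)
	if me, ok := err.(appengine.MultiError); ok {
		for i, e := range me {
			if e == datastore.ErrNoSuchEntity {
				// Missing entities are reported per element.
				fmt.Printf("key %v has no stored entity\n", keys[i])
			}
		}
	}
	return dst, err
}

// handler shows the single-entity form and the ErrNoSuchEntity case.
func handler(w http.ResponseWriter, r *http.Request) {
	ctx := appengine.NewContext(r)
	k := datastore.NewKey(ctx, "Task", "example", 0, nil)

	var t Task
	switch err := datastore.Get(ctx, k, &t); err {
	case nil:
		fmt.Fprintf(w, "loaded: %+v\n", t)
	case datastore.ErrNoSuchEntity:
		http.Error(w, "no such task", http.StatusNotFound)
	default:
		http.Error(w, err.Error(), http.StatusInternalServerError)
	}
}

func main() {
	http.HandleFunc("/", handler)
	appengine.Main()
}
```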
-func GetMulti(c context.Context, key []*Key, dst interface{}) error { - v := reflect.ValueOf(dst) - multiArgType, _ := checkMultiArg(v) - if multiArgType == multiArgTypeInvalid { - return errors.New("datastore: dst has invalid type") - } - if len(key) != v.Len() { - return errors.New("datastore: key and dst slices have different length") - } - if len(key) == 0 { - return nil - } - if err := multiValid(key); err != nil { - return err - } - req := &pb.GetRequest{ - Key: multiKeyToProto(internal.FullyQualifiedAppID(c), key), - } - res := &pb.GetResponse{} - if err := internal.Call(c, "datastore_v3", "Get", req, res); err != nil { - return err - } - if len(key) != len(res.Entity) { - return errors.New("datastore: internal error: server returned the wrong number of entities") - } - multiErr, any := make(appengine.MultiError, len(key)), false - for i, e := range res.Entity { - if e.Entity == nil { - multiErr[i] = ErrNoSuchEntity - } else { - elem := v.Index(i) - if multiArgType == multiArgTypePropertyLoadSaver || multiArgType == multiArgTypeStruct { - elem = elem.Addr() - } - if multiArgType == multiArgTypeStructPtr && elem.IsNil() { - elem.Set(reflect.New(elem.Type().Elem())) - } - multiErr[i] = loadEntity(elem.Interface(), e.Entity) - } - if multiErr[i] != nil { - any = true - } - } - if any { - return multiErr - } - return nil -} - -// Put saves the entity src into the datastore with key k. src must be a struct -// pointer or implement PropertyLoadSaver; if a struct pointer then any -// unexported fields of that struct will be skipped. If k is an incomplete key, -// the returned key will be a unique key generated by the datastore. -func Put(c context.Context, key *Key, src interface{}) (*Key, error) { - k, err := PutMulti(c, []*Key{key}, []interface{}{src}) - if err != nil { - if me, ok := err.(appengine.MultiError); ok { - return nil, me[0] - } - return nil, err - } - return k[0], nil -} - -// PutMulti is a batch version of Put. -// -// src must satisfy the same conditions as the dst argument to GetMulti. -func PutMulti(c context.Context, key []*Key, src interface{}) ([]*Key, error) { - v := reflect.ValueOf(src) - multiArgType, _ := checkMultiArg(v) - if multiArgType == multiArgTypeInvalid { - return nil, errors.New("datastore: src has invalid type") - } - if len(key) != v.Len() { - return nil, errors.New("datastore: key and src slices have different length") - } - if len(key) == 0 { - return nil, nil - } - appID := internal.FullyQualifiedAppID(c) - if err := multiValid(key); err != nil { - return nil, err - } - req := &pb.PutRequest{} - for i := range key { - elem := v.Index(i) - if multiArgType == multiArgTypePropertyLoadSaver || multiArgType == multiArgTypeStruct { - elem = elem.Addr() - } - sProto, err := saveEntity(appID, key[i], elem.Interface()) - if err != nil { - return nil, err - } - req.Entity = append(req.Entity, sProto) - } - res := &pb.PutResponse{} - if err := internal.Call(c, "datastore_v3", "Put", req, res); err != nil { - return nil, err - } - if len(key) != len(res.Key) { - return nil, errors.New("datastore: internal error: server returned the wrong number of keys") - } - ret := make([]*Key, len(key)) - for i := range ret { - var err error - ret[i], err = protoToKey(res.Key[i]) - if err != nil || ret[i].Incomplete() { - return nil, errors.New("datastore: internal error: server returned an invalid key") - } - } - return ret, nil -} - -// Delete deletes the entity for the given key. 
-func Delete(c context.Context, key *Key) error { - err := DeleteMulti(c, []*Key{key}) - if me, ok := err.(appengine.MultiError); ok { - return me[0] - } - return err -} - -// DeleteMulti is a batch version of Delete. -func DeleteMulti(c context.Context, key []*Key) error { - if len(key) == 0 { - return nil - } - if err := multiValid(key); err != nil { - return err - } - req := &pb.DeleteRequest{ - Key: multiKeyToProto(internal.FullyQualifiedAppID(c), key), - } - res := &pb.DeleteResponse{} - return internal.Call(c, "datastore_v3", "Delete", req, res) -} - -func namespaceMod(m proto.Message, namespace string) { - // pb.Query is the only type that has a name_space field. - // All other namespace support in datastore is in the keys. - switch m := m.(type) { - case *pb.Query: - if m.NameSpace == nil { - m.NameSpace = &namespace - } - } -} - -func init() { - internal.NamespaceMods["datastore_v3"] = namespaceMod - internal.RegisterErrorCodeMap("datastore_v3", pb.Error_ErrorCode_name) - internal.RegisterTimeoutErrorCode("datastore_v3", int32(pb.Error_TIMEOUT)) -} diff --git a/vendor/google.golang.org/appengine/datastore/doc.go b/vendor/google.golang.org/appengine/datastore/doc.go deleted file mode 100644 index 92ffe6d..0000000 --- a/vendor/google.golang.org/appengine/datastore/doc.go +++ /dev/null @@ -1,351 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -/* -Package datastore provides a client for App Engine's datastore service. - - -Basic Operations - -Entities are the unit of storage and are associated with a key. A key -consists of an optional parent key, a string application ID, a string kind -(also known as an entity type), and either a StringID or an IntID. A -StringID is also known as an entity name or key name. - -It is valid to create a key with a zero StringID and a zero IntID; this is -called an incomplete key, and does not refer to any saved entity. Putting an -entity into the datastore under an incomplete key will cause a unique key -to be generated for that entity, with a non-zero IntID. - -An entity's contents are a mapping from case-sensitive field names to values. -Valid value types are: - - signed integers (int, int8, int16, int32 and int64), - - bool, - - string, - - float32 and float64, - - []byte (up to 1 megabyte in length), - - any type whose underlying type is one of the above predeclared types, - - ByteString, - - *Key, - - time.Time (stored with microsecond precision), - - appengine.BlobKey, - - appengine.GeoPoint, - - structs whose fields are all valid value types, - - slices of any of the above. - -Slices of structs are valid, as are structs that contain slices. However, if -one struct contains another, then at most one of those can be repeated. This -disqualifies recursively defined struct types: any struct T that (directly or -indirectly) contains a []T. - -The Get and Put functions load and save an entity's contents. An entity's -contents are typically represented by a struct pointer. 
- -Example code: - - type Entity struct { - Value string - } - - func handle(w http.ResponseWriter, r *http.Request) { - ctx := appengine.NewContext(r) - - k := datastore.NewKey(ctx, "Entity", "stringID", 0, nil) - e := new(Entity) - if err := datastore.Get(ctx, k, e); err != nil { - http.Error(w, err.Error(), 500) - return - } - - old := e.Value - e.Value = r.URL.Path - - if _, err := datastore.Put(ctx, k, e); err != nil { - http.Error(w, err.Error(), 500) - return - } - - w.Header().Set("Content-Type", "text/plain; charset=utf-8") - fmt.Fprintf(w, "old=%q\nnew=%q\n", old, e.Value) - } - -GetMulti, PutMulti and DeleteMulti are batch versions of the Get, Put and -Delete functions. They take a []*Key instead of a *Key, and may return an -appengine.MultiError when encountering partial failure. - - -Properties - -An entity's contents can be represented by a variety of types. These are -typically struct pointers, but can also be any type that implements the -PropertyLoadSaver interface. If using a struct pointer, you do not have to -explicitly implement the PropertyLoadSaver interface; the datastore will -automatically convert via reflection. If a struct pointer does implement that -interface then those methods will be used in preference to the default -behavior for struct pointers. Struct pointers are more strongly typed and are -easier to use; PropertyLoadSavers are more flexible. - -The actual types passed do not have to match between Get and Put calls or even -across different App Engine requests. It is valid to put a *PropertyList and -get that same entity as a *myStruct, or put a *myStruct0 and get a *myStruct1. -Conceptually, any entity is saved as a sequence of properties, and is loaded -into the destination value on a property-by-property basis. When loading into -a struct pointer, an entity that cannot be completely represented (such as a -missing field) will result in an ErrFieldMismatch error but it is up to the -caller whether this error is fatal, recoverable or ignorable. - -By default, for struct pointers, all properties are potentially indexed, and -the property name is the same as the field name (and hence must start with an -upper case letter). Fields may have a `datastore:"name,options"` tag. The tag -name is the property name, which must be one or more valid Go identifiers -joined by ".", but may start with a lower case letter. An empty tag name means -to just use the field name. A "-" tag name means that the datastore will -ignore that field. If options is "noindex" then the field will not be indexed. -If the options is "" then the comma may be omitted. There are no other -recognized options. - -Fields (except for []byte) are indexed by default. Strings longer than 1500 -bytes cannot be indexed; fields used to store long strings should be -tagged with "noindex". Similarly, ByteStrings longer than 1500 bytes cannot be -indexed. - -Example code: - - // A and B are renamed to a and b. - // A, C and J are not indexed. - // D's tag is equivalent to having no tag at all (E). - // I is ignored entirely by the datastore. - // J has tag information for both the datastore and json packages. - type TaggedStruct struct { - A int `datastore:"a,noindex"` - B int `datastore:"b"` - C int `datastore:",noindex"` - D int `datastore:""` - E int - I int `datastore:"-"` - J int `datastore:",noindex" json:"j"` - } - - -Structured Properties - -If the struct pointed to contains other structs, then the nested or embedded -structs are flattened. 
For example, given these definitions: - - type Inner1 struct { - W int32 - X string - } - - type Inner2 struct { - Y float64 - } - - type Inner3 struct { - Z bool - } - - type Outer struct { - A int16 - I []Inner1 - J Inner2 - Inner3 - } - -then an Outer's properties would be equivalent to those of: - - type OuterEquivalent struct { - A int16 - IDotW []int32 `datastore:"I.W"` - IDotX []string `datastore:"I.X"` - JDotY float64 `datastore:"J.Y"` - Z bool - } - -If Outer's embedded Inner3 field was tagged as `datastore:"Foo"` then the -equivalent field would instead be: FooDotZ bool `datastore:"Foo.Z"`. - -If an outer struct is tagged "noindex" then all of its implicit flattened -fields are effectively "noindex". - - -The PropertyLoadSaver Interface - -An entity's contents can also be represented by any type that implements the -PropertyLoadSaver interface. This type may be a struct pointer, but it does -not have to be. The datastore package will call Load when getting the entity's -contents, and Save when putting the entity's contents. -Possible uses include deriving non-stored fields, verifying fields, or indexing -a field only if its value is positive. - -Example code: - - type CustomPropsExample struct { - I, J int - // Sum is not stored, but should always be equal to I + J. - Sum int `datastore:"-"` - } - - func (x *CustomPropsExample) Load(ps []datastore.Property) error { - // Load I and J as usual. - if err := datastore.LoadStruct(x, ps); err != nil { - return err - } - // Derive the Sum field. - x.Sum = x.I + x.J - return nil - } - - func (x *CustomPropsExample) Save() ([]datastore.Property, error) { - // Validate the Sum field. - if x.Sum != x.I + x.J { - return errors.New("CustomPropsExample has inconsistent sum") - } - // Save I and J as usual. The code below is equivalent to calling - // "return datastore.SaveStruct(x)", but is done manually for - // demonstration purposes. - return []datastore.Property{ - { - Name: "I", - Value: int64(x.I), - }, - { - Name: "J", - Value: int64(x.J), - }, - } - } - -The *PropertyList type implements PropertyLoadSaver, and can therefore hold an -arbitrary entity's contents. - - -Queries - -Queries retrieve entities based on their properties or key's ancestry. Running -a query yields an iterator of results: either keys or (key, entity) pairs. -Queries are re-usable and it is safe to call Query.Run from concurrent -goroutines. Iterators are not safe for concurrent use. - -Queries are immutable, and are either created by calling NewQuery, or derived -from an existing query by calling a method like Filter or Order that returns a -new query value. A query is typically constructed by calling NewQuery followed -by a chain of zero or more such methods. These methods are: - - Ancestor and Filter constrain the entities returned by running a query. - - Order affects the order in which they are returned. - - Project constrains the fields returned. - - Distinct de-duplicates projected entities. - - KeysOnly makes the iterator return only keys, not (key, entity) pairs. - - Start, End, Offset and Limit define which sub-sequence of matching entities - to return. Start and End take cursors, Offset and Limit take integers. Start - and Offset affect the first result, End and Limit affect the last result. - If both Start and Offset are set, then the offset is relative to Start. - If both End and Limit are set, then the earliest constraint wins. Limit is - relative to Start+Offset, not relative to End. As a special case, a - negative limit means unlimited. 
- -Example code: - - type Widget struct { - Description string - Price int - } - - func handle(w http.ResponseWriter, r *http.Request) { - ctx := appengine.NewContext(r) - q := datastore.NewQuery("Widget"). - Filter("Price <", 1000). - Order("-Price") - b := new(bytes.Buffer) - for t := q.Run(ctx); ; { - var x Widget - key, err := t.Next(&x) - if err == datastore.Done { - break - } - if err != nil { - serveError(ctx, w, err) - return - } - fmt.Fprintf(b, "Key=%v\nWidget=%#v\n\n", key, x) - } - w.Header().Set("Content-Type", "text/plain; charset=utf-8") - io.Copy(w, b) - } - - -Transactions - -RunInTransaction runs a function in a transaction. - -Example code: - - type Counter struct { - Count int - } - - func inc(ctx context.Context, key *datastore.Key) (int, error) { - var x Counter - if err := datastore.Get(ctx, key, &x); err != nil && err != datastore.ErrNoSuchEntity { - return 0, err - } - x.Count++ - if _, err := datastore.Put(ctx, key, &x); err != nil { - return 0, err - } - return x.Count, nil - } - - func handle(w http.ResponseWriter, r *http.Request) { - ctx := appengine.NewContext(r) - var count int - err := datastore.RunInTransaction(ctx, func(ctx context.Context) error { - var err1 error - count, err1 = inc(ctx, datastore.NewKey(ctx, "Counter", "singleton", 0, nil)) - return err1 - }, nil) - if err != nil { - serveError(ctx, w, err) - return - } - w.Header().Set("Content-Type", "text/plain; charset=utf-8") - fmt.Fprintf(w, "Count=%d", count) - } - - -Metadata - -The datastore package provides access to some of App Engine's datastore -metadata. This metadata includes information about the entity groups, -namespaces, entity kinds, and properties in the datastore, as well as the -property representations for each property. - -Example code: - - func handle(w http.ResponseWriter, r *http.Request) { - // Print all the kinds in the datastore, with all the indexed - // properties (and their representations) for each. - ctx := appengine.NewContext(r) - - kinds, err := datastore.Kinds(ctx) - if err != nil { - serveError(ctx, w, err) - return - } - - w.Header().Set("Content-Type", "text/plain; charset=utf-8") - for _, kind := range kinds { - fmt.Fprintf(w, "%s:\n", kind) - props, err := datastore.KindProperties(ctx, kind) - if err != nil { - fmt.Fprintln(w, "\t(unable to retrieve properties)") - continue - } - for p, rep := range props { - fmt.Fprintf(w, "\t-%s (%s)\n", p, strings.Join(", ", rep)) - } - } - } -*/ -package datastore // import "google.golang.org/appengine/datastore" diff --git a/vendor/google.golang.org/appengine/datastore/key.go b/vendor/google.golang.org/appengine/datastore/key.go deleted file mode 100644 index ac1f002..0000000 --- a/vendor/google.golang.org/appengine/datastore/key.go +++ /dev/null @@ -1,309 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package datastore - -import ( - "bytes" - "encoding/base64" - "encoding/gob" - "errors" - "fmt" - "strconv" - "strings" - - "github.com/golang/protobuf/proto" - "golang.org/x/net/context" - - "google.golang.org/appengine/internal" - pb "google.golang.org/appengine/internal/datastore" -) - -// Key represents the datastore key for a stored entity, and is immutable. -type Key struct { - kind string - stringID string - intID int64 - parent *Key - appID string - namespace string -} - -// Kind returns the key's kind (also known as entity type). 
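The accessor methods that follow pair naturally with NewKey, defined further down in this file. A minimal sketch of building and inspecting a key hierarchy, assuming ctx comes from appengine.NewContext(r) and using purely illustrative "Album" and "Photo" kinds:

    // Parent key with a string ID; child key with an integer ID.
    parent := datastore.NewKey(ctx, "Album", "vacation", 0, nil)
    child := datastore.NewKey(ctx, "Photo", "", 42, parent)

    // child.Kind() == "Photo", child.IntID() == 42, child.StringID() == ""
    // child.Parent().StringID() == "vacation", child.Incomplete() == false

Passing both a zero string ID and a zero integer ID yields an incomplete key, which is how entities typically receive automatically generated integer IDs when they are put.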
-func (k *Key) Kind() string { - return k.kind -} - -// StringID returns the key's string ID (also known as an entity name or key -// name), which may be "". -func (k *Key) StringID() string { - return k.stringID -} - -// IntID returns the key's integer ID, which may be 0. -func (k *Key) IntID() int64 { - return k.intID -} - -// Parent returns the key's parent key, which may be nil. -func (k *Key) Parent() *Key { - return k.parent -} - -// AppID returns the key's application ID. -func (k *Key) AppID() string { - return k.appID -} - -// Namespace returns the key's namespace. -func (k *Key) Namespace() string { - return k.namespace -} - -// Incomplete returns whether the key does not refer to a stored entity. -// In particular, whether the key has a zero StringID and a zero IntID. -func (k *Key) Incomplete() bool { - return k.stringID == "" && k.intID == 0 -} - -// valid returns whether the key is valid. -func (k *Key) valid() bool { - if k == nil { - return false - } - for ; k != nil; k = k.parent { - if k.kind == "" || k.appID == "" { - return false - } - if k.stringID != "" && k.intID != 0 { - return false - } - if k.parent != nil { - if k.parent.Incomplete() { - return false - } - if k.parent.appID != k.appID || k.parent.namespace != k.namespace { - return false - } - } - } - return true -} - -// Equal returns whether two keys are equal. -func (k *Key) Equal(o *Key) bool { - for k != nil && o != nil { - if k.kind != o.kind || k.stringID != o.stringID || k.intID != o.intID || k.appID != o.appID || k.namespace != o.namespace { - return false - } - k, o = k.parent, o.parent - } - return k == o -} - -// root returns the furthest ancestor of a key, which may be itself. -func (k *Key) root() *Key { - for k.parent != nil { - k = k.parent - } - return k -} - -// marshal marshals the key's string representation to the buffer. -func (k *Key) marshal(b *bytes.Buffer) { - if k.parent != nil { - k.parent.marshal(b) - } - b.WriteByte('/') - b.WriteString(k.kind) - b.WriteByte(',') - if k.stringID != "" { - b.WriteString(k.stringID) - } else { - b.WriteString(strconv.FormatInt(k.intID, 10)) - } -} - -// String returns a string representation of the key. 
-func (k *Key) String() string { - if k == nil { - return "" - } - b := bytes.NewBuffer(make([]byte, 0, 512)) - k.marshal(b) - return b.String() -} - -type gobKey struct { - Kind string - StringID string - IntID int64 - Parent *gobKey - AppID string - Namespace string -} - -func keyToGobKey(k *Key) *gobKey { - if k == nil { - return nil - } - return &gobKey{ - Kind: k.kind, - StringID: k.stringID, - IntID: k.intID, - Parent: keyToGobKey(k.parent), - AppID: k.appID, - Namespace: k.namespace, - } -} - -func gobKeyToKey(gk *gobKey) *Key { - if gk == nil { - return nil - } - return &Key{ - kind: gk.Kind, - stringID: gk.StringID, - intID: gk.IntID, - parent: gobKeyToKey(gk.Parent), - appID: gk.AppID, - namespace: gk.Namespace, - } -} - -func (k *Key) GobEncode() ([]byte, error) { - buf := new(bytes.Buffer) - if err := gob.NewEncoder(buf).Encode(keyToGobKey(k)); err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -func (k *Key) GobDecode(buf []byte) error { - gk := new(gobKey) - if err := gob.NewDecoder(bytes.NewBuffer(buf)).Decode(gk); err != nil { - return err - } - *k = *gobKeyToKey(gk) - return nil -} - -func (k *Key) MarshalJSON() ([]byte, error) { - return []byte(`"` + k.Encode() + `"`), nil -} - -func (k *Key) UnmarshalJSON(buf []byte) error { - if len(buf) < 2 || buf[0] != '"' || buf[len(buf)-1] != '"' { - return errors.New("datastore: bad JSON key") - } - k2, err := DecodeKey(string(buf[1 : len(buf)-1])) - if err != nil { - return err - } - *k = *k2 - return nil -} - -// Encode returns an opaque representation of the key -// suitable for use in HTML and URLs. -// This is compatible with the Python and Java runtimes. -func (k *Key) Encode() string { - ref := keyToProto("", k) - - b, err := proto.Marshal(ref) - if err != nil { - panic(err) - } - - // Trailing padding is stripped. - return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=") -} - -// DecodeKey decodes a key from the opaque representation returned by Encode. -func DecodeKey(encoded string) (*Key, error) { - // Re-add padding. - if m := len(encoded) % 4; m != 0 { - encoded += strings.Repeat("=", 4-m) - } - - b, err := base64.URLEncoding.DecodeString(encoded) - if err != nil { - return nil, err - } - - ref := new(pb.Reference) - if err := proto.Unmarshal(b, ref); err != nil { - return nil, err - } - - return protoToKey(ref) -} - -// NewIncompleteKey creates a new incomplete key. -// kind cannot be empty. -func NewIncompleteKey(c context.Context, kind string, parent *Key) *Key { - return NewKey(c, kind, "", 0, parent) -} - -// NewKey creates a new key. -// kind cannot be empty. -// Either one or both of stringID and intID must be zero. If both are zero, -// the key returned is incomplete. -// parent must either be a complete key or nil. -func NewKey(c context.Context, kind, stringID string, intID int64, parent *Key) *Key { - // If there's a parent key, use its namespace. - // Otherwise, use any namespace attached to the context. - var namespace string - if parent != nil { - namespace = parent.namespace - } else { - namespace = internal.NamespaceFromContext(c) - } - - return &Key{ - kind: kind, - stringID: stringID, - intID: intID, - parent: parent, - appID: internal.FullyQualifiedAppID(c), - namespace: namespace, - } -} - -// AllocateIDs returns a range of n integer IDs with the given kind and parent -// combination. kind cannot be empty; parent may be nil. 
The IDs in the range -// returned will not be used by the datastore's automatic ID sequence generator -// and may be used with NewKey without conflict. -// -// The range is inclusive at the low end and exclusive at the high end. In -// other words, valid intIDs x satisfy low <= x && x < high. -// -// If no error is returned, low + n == high. -func AllocateIDs(c context.Context, kind string, parent *Key, n int) (low, high int64, err error) { - if kind == "" { - return 0, 0, errors.New("datastore: AllocateIDs given an empty kind") - } - if n < 0 { - return 0, 0, fmt.Errorf("datastore: AllocateIDs given a negative count: %d", n) - } - if n == 0 { - return 0, 0, nil - } - req := &pb.AllocateIdsRequest{ - ModelKey: keyToProto("", NewIncompleteKey(c, kind, parent)), - Size: proto.Int64(int64(n)), - } - res := &pb.AllocateIdsResponse{} - if err := internal.Call(c, "datastore_v3", "AllocateIds", req, res); err != nil { - return 0, 0, err - } - // The protobuf is inclusive at both ends. Idiomatic Go (e.g. slices, for loops) - // is inclusive at the low end and exclusive at the high end, so we add 1. - low = res.GetStart() - high = res.GetEnd() + 1 - if low+int64(n) != high { - return 0, 0, fmt.Errorf("datastore: internal error: could not allocate %d IDs", n) - } - return low, high, nil -} diff --git a/vendor/google.golang.org/appengine/datastore/load.go b/vendor/google.golang.org/appengine/datastore/load.go deleted file mode 100644 index 3f3c80c..0000000 --- a/vendor/google.golang.org/appengine/datastore/load.go +++ /dev/null @@ -1,334 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package datastore - -import ( - "fmt" - "reflect" - "time" - - "google.golang.org/appengine" - pb "google.golang.org/appengine/internal/datastore" -) - -var ( - typeOfBlobKey = reflect.TypeOf(appengine.BlobKey("")) - typeOfByteSlice = reflect.TypeOf([]byte(nil)) - typeOfByteString = reflect.TypeOf(ByteString(nil)) - typeOfGeoPoint = reflect.TypeOf(appengine.GeoPoint{}) - typeOfTime = reflect.TypeOf(time.Time{}) -) - -// typeMismatchReason returns a string explaining why the property p could not -// be stored in an entity field of type v.Type(). -func typeMismatchReason(p Property, v reflect.Value) string { - entityType := "empty" - switch p.Value.(type) { - case int64: - entityType = "int" - case bool: - entityType = "bool" - case string: - entityType = "string" - case float64: - entityType = "float" - case *Key: - entityType = "*datastore.Key" - case time.Time: - entityType = "time.Time" - case appengine.BlobKey: - entityType = "appengine.BlobKey" - case appengine.GeoPoint: - entityType = "appengine.GeoPoint" - case ByteString: - entityType = "datastore.ByteString" - case []byte: - entityType = "[]byte" - } - return fmt.Sprintf("type mismatch: %s versus %v", entityType, v.Type()) -} - -type propertyLoader struct { - // m holds the number of times a substruct field like "Foo.Bar.Baz" has - // been seen so far. The map is constructed lazily. - m map[string]int -} - -func (l *propertyLoader) load(codec *structCodec, structValue reflect.Value, p Property, requireSlice bool) string { - var v reflect.Value - // Traverse a struct's struct-typed fields. 
- for name := p.Name; ; { - decoder, ok := codec.byName[name] - if !ok { - return "no such struct field" - } - v = structValue.Field(decoder.index) - if !v.IsValid() { - return "no such struct field" - } - if !v.CanSet() { - return "cannot set struct field" - } - - if decoder.substructCodec == nil { - break - } - - if v.Kind() == reflect.Slice { - if l.m == nil { - l.m = make(map[string]int) - } - index := l.m[p.Name] - l.m[p.Name] = index + 1 - for v.Len() <= index { - v.Set(reflect.Append(v, reflect.New(v.Type().Elem()).Elem())) - } - structValue = v.Index(index) - requireSlice = false - } else { - structValue = v - } - // Strip the "I." from "I.X". - name = name[len(codec.byIndex[decoder.index].name):] - codec = decoder.substructCodec - } - - var slice reflect.Value - if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 { - slice = v - v = reflect.New(v.Type().Elem()).Elem() - } else if requireSlice { - return "multiple-valued property requires a slice field type" - } - - // Convert indexValues to a Go value with a meaning derived from the - // destination type. - pValue := p.Value - if iv, ok := pValue.(indexValue); ok { - meaning := pb.Property_NO_MEANING - switch v.Type() { - case typeOfBlobKey: - meaning = pb.Property_BLOBKEY - case typeOfByteSlice: - meaning = pb.Property_BLOB - case typeOfByteString: - meaning = pb.Property_BYTESTRING - case typeOfGeoPoint: - meaning = pb.Property_GEORSS_POINT - case typeOfTime: - meaning = pb.Property_GD_WHEN - } - var err error - pValue, err = propValue(iv.value, meaning) - if err != nil { - return err.Error() - } - } - - switch v.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - x, ok := pValue.(int64) - if !ok && pValue != nil { - return typeMismatchReason(p, v) - } - if v.OverflowInt(x) { - return fmt.Sprintf("value %v overflows struct field of type %v", x, v.Type()) - } - v.SetInt(x) - case reflect.Bool: - x, ok := pValue.(bool) - if !ok && pValue != nil { - return typeMismatchReason(p, v) - } - v.SetBool(x) - case reflect.String: - switch x := pValue.(type) { - case appengine.BlobKey: - v.SetString(string(x)) - case ByteString: - v.SetString(string(x)) - case string: - v.SetString(x) - default: - if pValue != nil { - return typeMismatchReason(p, v) - } - } - case reflect.Float32, reflect.Float64: - x, ok := pValue.(float64) - if !ok && pValue != nil { - return typeMismatchReason(p, v) - } - if v.OverflowFloat(x) { - return fmt.Sprintf("value %v overflows struct field of type %v", x, v.Type()) - } - v.SetFloat(x) - case reflect.Ptr: - x, ok := pValue.(*Key) - if !ok && pValue != nil { - return typeMismatchReason(p, v) - } - if _, ok := v.Interface().(*Key); !ok { - return typeMismatchReason(p, v) - } - v.Set(reflect.ValueOf(x)) - case reflect.Struct: - switch v.Type() { - case typeOfTime: - x, ok := pValue.(time.Time) - if !ok && pValue != nil { - return typeMismatchReason(p, v) - } - v.Set(reflect.ValueOf(x)) - case typeOfGeoPoint: - x, ok := pValue.(appengine.GeoPoint) - if !ok && pValue != nil { - return typeMismatchReason(p, v) - } - v.Set(reflect.ValueOf(x)) - default: - return typeMismatchReason(p, v) - } - case reflect.Slice: - x, ok := pValue.([]byte) - if !ok { - if y, yok := pValue.(ByteString); yok { - x, ok = []byte(y), true - } - } - if !ok && pValue != nil { - return typeMismatchReason(p, v) - } - if v.Type().Elem().Kind() != reflect.Uint8 { - return typeMismatchReason(p, v) - } - v.SetBytes(x) - default: - return typeMismatchReason(p, v) - } - if slice.IsValid() { - 
slice.Set(reflect.Append(slice, v)) - } - return "" -} - -// loadEntity loads an EntityProto into PropertyLoadSaver or struct pointer. -func loadEntity(dst interface{}, src *pb.EntityProto) (err error) { - props, err := protoToProperties(src) - if err != nil { - return err - } - if e, ok := dst.(PropertyLoadSaver); ok { - return e.Load(props) - } - return LoadStruct(dst, props) -} - -func (s structPLS) Load(props []Property) error { - var fieldName, reason string - var l propertyLoader - for _, p := range props { - if errStr := l.load(s.codec, s.v, p, p.Multiple); errStr != "" { - // We don't return early, as we try to load as many properties as possible. - // It is valid to load an entity into a struct that cannot fully represent it. - // That case returns an error, but the caller is free to ignore it. - fieldName, reason = p.Name, errStr - } - } - if reason != "" { - return &ErrFieldMismatch{ - StructType: s.v.Type(), - FieldName: fieldName, - Reason: reason, - } - } - return nil -} - -func protoToProperties(src *pb.EntityProto) ([]Property, error) { - props, rawProps := src.Property, src.RawProperty - out := make([]Property, 0, len(props)+len(rawProps)) - for { - var ( - x *pb.Property - noIndex bool - ) - if len(props) > 0 { - x, props = props[0], props[1:] - } else if len(rawProps) > 0 { - x, rawProps = rawProps[0], rawProps[1:] - noIndex = true - } else { - break - } - - var value interface{} - if x.Meaning != nil && *x.Meaning == pb.Property_INDEX_VALUE { - value = indexValue{x.Value} - } else { - var err error - value, err = propValue(x.Value, x.GetMeaning()) - if err != nil { - return nil, err - } - } - out = append(out, Property{ - Name: x.GetName(), - Value: value, - NoIndex: noIndex, - Multiple: x.GetMultiple(), - }) - } - return out, nil -} - -// propValue returns a Go value that combines the raw PropertyValue with a -// meaning. For example, an Int64Value with GD_WHEN becomes a time.Time. -func propValue(v *pb.PropertyValue, m pb.Property_Meaning) (interface{}, error) { - switch { - case v.Int64Value != nil: - if m == pb.Property_GD_WHEN { - return fromUnixMicro(*v.Int64Value), nil - } else { - return *v.Int64Value, nil - } - case v.BooleanValue != nil: - return *v.BooleanValue, nil - case v.StringValue != nil: - if m == pb.Property_BLOB { - return []byte(*v.StringValue), nil - } else if m == pb.Property_BLOBKEY { - return appengine.BlobKey(*v.StringValue), nil - } else if m == pb.Property_BYTESTRING { - return ByteString(*v.StringValue), nil - } else { - return *v.StringValue, nil - } - case v.DoubleValue != nil: - return *v.DoubleValue, nil - case v.Referencevalue != nil: - key, err := referenceValueToKey(v.Referencevalue) - if err != nil { - return nil, err - } - return key, nil - case v.Pointvalue != nil: - // NOTE: Strangely, latitude maps to X, longitude to Y. - return appengine.GeoPoint{Lat: v.Pointvalue.GetX(), Lng: v.Pointvalue.GetY()}, nil - } - return nil, nil -} - -// indexValue is a Property value that is created when entities are loaded from -// an index, such as from a projection query. -// -// Such Property values do not contain all of the metadata required to be -// faithfully represented as a Go value, and are instead represented as an -// opaque indexValue. Load the properties into a concrete struct type (e.g. by -// passing a struct pointer to Iterator.Next) to reconstruct actual Go values -// of type int, string, time.Time, etc. 
-type indexValue struct { - value *pb.PropertyValue -} diff --git a/vendor/google.golang.org/appengine/datastore/metadata.go b/vendor/google.golang.org/appengine/datastore/metadata.go deleted file mode 100644 index 67995f9..0000000 --- a/vendor/google.golang.org/appengine/datastore/metadata.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package datastore - -import "golang.org/x/net/context" - -// Datastore kinds for the metadata entities. -const ( - namespaceKind = "__namespace__" - kindKind = "__kind__" - propertyKind = "__property__" -) - -// Namespaces returns all the datastore namespaces. -func Namespaces(ctx context.Context) ([]string, error) { - // TODO(djd): Support range queries. - q := NewQuery(namespaceKind).KeysOnly() - keys, err := q.GetAll(ctx, nil) - if err != nil { - return nil, err - } - // The empty namespace key uses a numeric ID (==1), but luckily - // the string ID defaults to "" for numeric IDs anyway. - return keyNames(keys), nil -} - -// Kinds returns the names of all the kinds in the current namespace. -func Kinds(ctx context.Context) ([]string, error) { - // TODO(djd): Support range queries. - q := NewQuery(kindKind).KeysOnly() - keys, err := q.GetAll(ctx, nil) - if err != nil { - return nil, err - } - return keyNames(keys), nil -} - -// keyNames returns a slice of the provided keys' names (string IDs). -func keyNames(keys []*Key) []string { - n := make([]string, 0, len(keys)) - for _, k := range keys { - n = append(n, k.StringID()) - } - return n -} - -// KindProperties returns all the indexed properties for the given kind. -// The properties are returned as a map of property names to a slice of the -// representation types. The representation types for the supported Go property -// types are: -// "INT64": signed integers and time.Time -// "DOUBLE": float32 and float64 -// "BOOLEAN": bool -// "STRING": string, []byte and ByteString -// "POINT": appengine.GeoPoint -// "REFERENCE": *Key -// "USER": (not used in the Go runtime) -func KindProperties(ctx context.Context, kind string) (map[string][]string, error) { - // TODO(djd): Support range queries. - kindKey := NewKey(ctx, kindKind, kind, 0, nil) - q := NewQuery(propertyKind).Ancestor(kindKey) - - propMap := map[string][]string{} - props := []struct { - Repr []string `datastore:property_representation` - }{} - - keys, err := q.GetAll(ctx, &props) - if err != nil { - return nil, err - } - for i, p := range props { - propMap[keys[i].StringID()] = p.Repr - } - return propMap, nil -} diff --git a/vendor/google.golang.org/appengine/datastore/prop.go b/vendor/google.golang.org/appengine/datastore/prop.go deleted file mode 100644 index 1f50ac0..0000000 --- a/vendor/google.golang.org/appengine/datastore/prop.go +++ /dev/null @@ -1,296 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package datastore - -import ( - "fmt" - "reflect" - "strings" - "sync" - "unicode" -) - -// Entities with more than this many indexed properties will not be saved. -const maxIndexedProperties = 20000 - -// []byte fields more than 1 megabyte long will not be loaded or saved. -const maxBlobLen = 1 << 20 - -// Property is a name/value pair plus some metadata. A datastore entity's -// contents are loaded and saved as a sequence of Properties. 
An entity can -// have multiple Properties with the same name, provided that p.Multiple is -// true on all of that entity's Properties with that name. -type Property struct { - // Name is the property name. - Name string - // Value is the property value. The valid types are: - // - int64 - // - bool - // - string - // - float64 - // - ByteString - // - *Key - // - time.Time - // - appengine.BlobKey - // - appengine.GeoPoint - // - []byte (up to 1 megabyte in length) - // This set is smaller than the set of valid struct field types that the - // datastore can load and save. A Property Value cannot be a slice (apart - // from []byte); use multiple Properties instead. Also, a Value's type - // must be explicitly on the list above; it is not sufficient for the - // underlying type to be on that list. For example, a Value of "type - // myInt64 int64" is invalid. Smaller-width integers and floats are also - // invalid. Again, this is more restrictive than the set of valid struct - // field types. - // - // A Value will have an opaque type when loading entities from an index, - // such as via a projection query. Load entities into a struct instead - // of a PropertyLoadSaver when using a projection query. - // - // A Value may also be the nil interface value; this is equivalent to - // Python's None but not directly representable by a Go struct. Loading - // a nil-valued property into a struct will set that field to the zero - // value. - Value interface{} - // NoIndex is whether the datastore cannot index this property. - NoIndex bool - // Multiple is whether the entity can have multiple properties with - // the same name. Even if a particular instance only has one property with - // a certain name, Multiple should be true if a struct would best represent - // it as a field of type []T instead of type T. - Multiple bool -} - -// ByteString is a short byte slice (up to 1500 bytes) that can be indexed. -type ByteString []byte - -// PropertyLoadSaver can be converted from and to a slice of Properties. -type PropertyLoadSaver interface { - Load([]Property) error - Save() ([]Property, error) -} - -// PropertyList converts a []Property to implement PropertyLoadSaver. -type PropertyList []Property - -var ( - typeOfPropertyLoadSaver = reflect.TypeOf((*PropertyLoadSaver)(nil)).Elem() - typeOfPropertyList = reflect.TypeOf(PropertyList(nil)) -) - -// Load loads all of the provided properties into l. -// It does not first reset *l to an empty slice. -func (l *PropertyList) Load(p []Property) error { - *l = append(*l, p...) - return nil -} - -// Save saves all of l's properties as a slice or Properties. -func (l *PropertyList) Save() ([]Property, error) { - return *l, nil -} - -// validPropertyName returns whether name consists of one or more valid Go -// identifiers joined by ".". -func validPropertyName(name string) bool { - if name == "" { - return false - } - for _, s := range strings.Split(name, ".") { - if s == "" { - return false - } - first := true - for _, c := range s { - if first { - first = false - if c != '_' && !unicode.IsLetter(c) { - return false - } - } else { - if c != '_' && !unicode.IsLetter(c) && !unicode.IsDigit(c) { - return false - } - } - } - } - return true -} - -// structTag is the parsed `datastore:"name,options"` tag of a struct field. -// If a field has no tag, or the tag has an empty name, then the structTag's -// name is just the field name. A "-" name means that the datastore ignores -// that field. 
-type structTag struct { - name string - noIndex bool -} - -// structCodec describes how to convert a struct to and from a sequence of -// properties. -type structCodec struct { - // byIndex gives the structTag for the i'th field. - byIndex []structTag - // byName gives the field codec for the structTag with the given name. - byName map[string]fieldCodec - // hasSlice is whether a struct or any of its nested or embedded structs - // has a slice-typed field (other than []byte). - hasSlice bool - // complete is whether the structCodec is complete. An incomplete - // structCodec may be encountered when walking a recursive struct. - complete bool -} - -// fieldCodec is a struct field's index and, if that struct field's type is -// itself a struct, that substruct's structCodec. -type fieldCodec struct { - index int - substructCodec *structCodec -} - -// structCodecs collects the structCodecs that have already been calculated. -var ( - structCodecsMutex sync.Mutex - structCodecs = make(map[reflect.Type]*structCodec) -) - -// getStructCodec returns the structCodec for the given struct type. -func getStructCodec(t reflect.Type) (*structCodec, error) { - structCodecsMutex.Lock() - defer structCodecsMutex.Unlock() - return getStructCodecLocked(t) -} - -// getStructCodecLocked implements getStructCodec. The structCodecsMutex must -// be held when calling this function. -func getStructCodecLocked(t reflect.Type) (ret *structCodec, retErr error) { - c, ok := structCodecs[t] - if ok { - return c, nil - } - c = &structCodec{ - byIndex: make([]structTag, t.NumField()), - byName: make(map[string]fieldCodec), - } - - // Add c to the structCodecs map before we are sure it is good. If t is - // a recursive type, it needs to find the incomplete entry for itself in - // the map. - structCodecs[t] = c - defer func() { - if retErr != nil { - delete(structCodecs, t) - } - }() - - for i := range c.byIndex { - f := t.Field(i) - tags := strings.Split(f.Tag.Get("datastore"), ",") - name := tags[0] - opts := make(map[string]bool) - for _, t := range tags[1:] { - opts[t] = true - } - if name == "" { - if !f.Anonymous { - name = f.Name - } - } else if name == "-" { - c.byIndex[i] = structTag{name: name} - continue - } else if !validPropertyName(name) { - return nil, fmt.Errorf("datastore: struct tag has invalid property name: %q", name) - } - - substructType, fIsSlice := reflect.Type(nil), false - switch f.Type.Kind() { - case reflect.Struct: - substructType = f.Type - case reflect.Slice: - if f.Type.Elem().Kind() == reflect.Struct { - substructType = f.Type.Elem() - } - fIsSlice = f.Type != typeOfByteSlice - c.hasSlice = c.hasSlice || fIsSlice - } - - if substructType != nil && substructType != typeOfTime && substructType != typeOfGeoPoint { - if name != "" { - name = name + "." 
- } - sub, err := getStructCodecLocked(substructType) - if err != nil { - return nil, err - } - if !sub.complete { - return nil, fmt.Errorf("datastore: recursive struct: field %q", f.Name) - } - if fIsSlice && sub.hasSlice { - return nil, fmt.Errorf( - "datastore: flattening nested structs leads to a slice of slices: field %q", f.Name) - } - c.hasSlice = c.hasSlice || sub.hasSlice - for relName := range sub.byName { - absName := name + relName - if _, ok := c.byName[absName]; ok { - return nil, fmt.Errorf("datastore: struct tag has repeated property name: %q", absName) - } - c.byName[absName] = fieldCodec{index: i, substructCodec: sub} - } - } else { - if _, ok := c.byName[name]; ok { - return nil, fmt.Errorf("datastore: struct tag has repeated property name: %q", name) - } - c.byName[name] = fieldCodec{index: i} - } - - c.byIndex[i] = structTag{ - name: name, - noIndex: opts["noindex"], - } - } - c.complete = true - return c, nil -} - -// structPLS adapts a struct to be a PropertyLoadSaver. -type structPLS struct { - v reflect.Value - codec *structCodec -} - -// newStructPLS returns a PropertyLoadSaver for the struct pointer p. -func newStructPLS(p interface{}) (PropertyLoadSaver, error) { - v := reflect.ValueOf(p) - if v.Kind() != reflect.Ptr || v.Elem().Kind() != reflect.Struct { - return nil, ErrInvalidEntityType - } - v = v.Elem() - codec, err := getStructCodec(v.Type()) - if err != nil { - return nil, err - } - return structPLS{v, codec}, nil -} - -// LoadStruct loads the properties from p to dst. -// dst must be a struct pointer. -func LoadStruct(dst interface{}, p []Property) error { - x, err := newStructPLS(dst) - if err != nil { - return err - } - return x.Load(p) -} - -// SaveStruct returns the properties from src as a slice of Properties. -// src must be a struct pointer. -func SaveStruct(src interface{}) ([]Property, error) { - x, err := newStructPLS(src) - if err != nil { - return nil, err - } - return x.Save() -} diff --git a/vendor/google.golang.org/appengine/datastore/query.go b/vendor/google.golang.org/appengine/datastore/query.go deleted file mode 100644 index 3847b0f..0000000 --- a/vendor/google.golang.org/appengine/datastore/query.go +++ /dev/null @@ -1,724 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package datastore - -import ( - "encoding/base64" - "errors" - "fmt" - "math" - "reflect" - "strings" - - "github.com/golang/protobuf/proto" - "golang.org/x/net/context" - - "google.golang.org/appengine/internal" - pb "google.golang.org/appengine/internal/datastore" -) - -type operator int - -const ( - lessThan operator = iota - lessEq - equal - greaterEq - greaterThan -) - -var operatorToProto = map[operator]*pb.Query_Filter_Operator{ - lessThan: pb.Query_Filter_LESS_THAN.Enum(), - lessEq: pb.Query_Filter_LESS_THAN_OR_EQUAL.Enum(), - equal: pb.Query_Filter_EQUAL.Enum(), - greaterEq: pb.Query_Filter_GREATER_THAN_OR_EQUAL.Enum(), - greaterThan: pb.Query_Filter_GREATER_THAN.Enum(), -} - -// filter is a conditional filter on query results. -type filter struct { - FieldName string - Op operator - Value interface{} -} - -type sortDirection int - -const ( - ascending sortDirection = iota - descending -) - -var sortDirectionToProto = map[sortDirection]*pb.Query_Order_Direction{ - ascending: pb.Query_Order_ASCENDING.Enum(), - descending: pb.Query_Order_DESCENDING.Enum(), -} - -// order is a sort order on query results. 
-type order struct { - FieldName string - Direction sortDirection -} - -// NewQuery creates a new Query for a specific entity kind. -// -// An empty kind means to return all entities, including entities created and -// managed by other App Engine features, and is called a kindless query. -// Kindless queries cannot include filters or sort orders on property values. -func NewQuery(kind string) *Query { - return &Query{ - kind: kind, - limit: -1, - } -} - -// Query represents a datastore query. -type Query struct { - kind string - ancestor *Key - filter []filter - order []order - projection []string - - distinct bool - keysOnly bool - eventual bool - limit int32 - offset int32 - start *pb.CompiledCursor - end *pb.CompiledCursor - - err error -} - -func (q *Query) clone() *Query { - x := *q - // Copy the contents of the slice-typed fields to a new backing store. - if len(q.filter) > 0 { - x.filter = make([]filter, len(q.filter)) - copy(x.filter, q.filter) - } - if len(q.order) > 0 { - x.order = make([]order, len(q.order)) - copy(x.order, q.order) - } - return &x -} - -// Ancestor returns a derivative query with an ancestor filter. -// The ancestor should not be nil. -func (q *Query) Ancestor(ancestor *Key) *Query { - q = q.clone() - if ancestor == nil { - q.err = errors.New("datastore: nil query ancestor") - return q - } - q.ancestor = ancestor - return q -} - -// EventualConsistency returns a derivative query that returns eventually -// consistent results. -// It only has an effect on ancestor queries. -func (q *Query) EventualConsistency() *Query { - q = q.clone() - q.eventual = true - return q -} - -// Filter returns a derivative query with a field-based filter. -// The filterStr argument must be a field name followed by optional space, -// followed by an operator, one of ">", "<", ">=", "<=", or "=". -// Fields are compared against the provided value using the operator. -// Multiple filters are AND'ed together. -func (q *Query) Filter(filterStr string, value interface{}) *Query { - q = q.clone() - filterStr = strings.TrimSpace(filterStr) - if len(filterStr) < 1 { - q.err = errors.New("datastore: invalid filter: " + filterStr) - return q - } - f := filter{ - FieldName: strings.TrimRight(filterStr, " ><=!"), - Value: value, - } - switch op := strings.TrimSpace(filterStr[len(f.FieldName):]); op { - case "<=": - f.Op = lessEq - case ">=": - f.Op = greaterEq - case "<": - f.Op = lessThan - case ">": - f.Op = greaterThan - case "=": - f.Op = equal - default: - q.err = fmt.Errorf("datastore: invalid operator %q in filter %q", op, filterStr) - return q - } - q.filter = append(q.filter, f) - return q -} - -// Order returns a derivative query with a field-based sort order. Orders are -// applied in the order they are added. The default order is ascending; to sort -// in descending order prefix the fieldName with a minus sign (-). -func (q *Query) Order(fieldName string) *Query { - q = q.clone() - fieldName = strings.TrimSpace(fieldName) - o := order{ - Direction: ascending, - FieldName: fieldName, - } - if strings.HasPrefix(fieldName, "-") { - o.Direction = descending - o.FieldName = strings.TrimSpace(fieldName[1:]) - } else if strings.HasPrefix(fieldName, "+") { - q.err = fmt.Errorf("datastore: invalid order: %q", fieldName) - return q - } - if len(o.FieldName) == 0 { - q.err = errors.New("datastore: empty order") - return q - } - q.order = append(q.order, o) - return q -} - -// Project returns a derivative query that yields only the given fields. It -// cannot be used with KeysOnly. 
-func (q *Query) Project(fieldNames ...string) *Query { - q = q.clone() - q.projection = append([]string(nil), fieldNames...) - return q -} - -// Distinct returns a derivative query that yields de-duplicated entities with -// respect to the set of projected fields. It is only used for projection -// queries. -func (q *Query) Distinct() *Query { - q = q.clone() - q.distinct = true - return q -} - -// KeysOnly returns a derivative query that yields only keys, not keys and -// entities. It cannot be used with projection queries. -func (q *Query) KeysOnly() *Query { - q = q.clone() - q.keysOnly = true - return q -} - -// Limit returns a derivative query that has a limit on the number of results -// returned. A negative value means unlimited. -func (q *Query) Limit(limit int) *Query { - q = q.clone() - if limit < math.MinInt32 || limit > math.MaxInt32 { - q.err = errors.New("datastore: query limit overflow") - return q - } - q.limit = int32(limit) - return q -} - -// Offset returns a derivative query that has an offset of how many keys to -// skip over before returning results. A negative value is invalid. -func (q *Query) Offset(offset int) *Query { - q = q.clone() - if offset < 0 { - q.err = errors.New("datastore: negative query offset") - return q - } - if offset > math.MaxInt32 { - q.err = errors.New("datastore: query offset overflow") - return q - } - q.offset = int32(offset) - return q -} - -// Start returns a derivative query with the given start point. -func (q *Query) Start(c Cursor) *Query { - q = q.clone() - if c.cc == nil { - q.err = errors.New("datastore: invalid cursor") - return q - } - q.start = c.cc - return q -} - -// End returns a derivative query with the given end point. -func (q *Query) End(c Cursor) *Query { - q = q.clone() - if c.cc == nil { - q.err = errors.New("datastore: invalid cursor") - return q - } - q.end = c.cc - return q -} - -// toProto converts the query to a protocol buffer. 
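Because each of the methods above returns a new derivative *Query, constraints compose by simple chaining and the original query value is left untouched. A minimal sketch, with the "Widget" kind and the Price field as placeholders:

    q := datastore.NewQuery("Widget").
        Filter("Price <", 1000).
        Order("-Price").
        Limit(20).
        Offset(40).
        KeysOnly()

Any argument error (an unknown filter operator, a negative offset, a nil cursor) is recorded inside the derived query and only surfaces when the query is run or counted.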
-func (q *Query) toProto(dst *pb.Query, appID string) error { - if len(q.projection) != 0 && q.keysOnly { - return errors.New("datastore: query cannot both project and be keys-only") - } - dst.Reset() - dst.App = proto.String(appID) - if q.kind != "" { - dst.Kind = proto.String(q.kind) - } - if q.ancestor != nil { - dst.Ancestor = keyToProto(appID, q.ancestor) - if q.eventual { - dst.Strong = proto.Bool(false) - } - } - if q.projection != nil { - dst.PropertyName = q.projection - if q.distinct { - dst.GroupByPropertyName = q.projection - } - } - if q.keysOnly { - dst.KeysOnly = proto.Bool(true) - dst.RequirePerfectPlan = proto.Bool(true) - } - for _, qf := range q.filter { - if qf.FieldName == "" { - return errors.New("datastore: empty query filter field name") - } - p, errStr := valueToProto(appID, qf.FieldName, reflect.ValueOf(qf.Value), false) - if errStr != "" { - return errors.New("datastore: bad query filter value type: " + errStr) - } - xf := &pb.Query_Filter{ - Op: operatorToProto[qf.Op], - Property: []*pb.Property{p}, - } - if xf.Op == nil { - return errors.New("datastore: unknown query filter operator") - } - dst.Filter = append(dst.Filter, xf) - } - for _, qo := range q.order { - if qo.FieldName == "" { - return errors.New("datastore: empty query order field name") - } - xo := &pb.Query_Order{ - Property: proto.String(qo.FieldName), - Direction: sortDirectionToProto[qo.Direction], - } - if xo.Direction == nil { - return errors.New("datastore: unknown query order direction") - } - dst.Order = append(dst.Order, xo) - } - if q.limit >= 0 { - dst.Limit = proto.Int32(q.limit) - } - if q.offset != 0 { - dst.Offset = proto.Int32(q.offset) - } - dst.CompiledCursor = q.start - dst.EndCompiledCursor = q.end - dst.Compile = proto.Bool(true) - return nil -} - -// Count returns the number of results for the query. -// -// The running time and number of API calls made by Count scale linearly with -// the sum of the query's offset and limit. Unless the result count is -// expected to be small, it is best to specify a limit; otherwise Count will -// continue until it finishes counting or the provided context expires. -func (q *Query) Count(c context.Context) (int, error) { - // Check that the query is well-formed. - if q.err != nil { - return 0, q.err - } - - // Run a copy of the query, with keysOnly true (if we're not a projection, - // since the two are incompatible), and an adjusted offset. We also set the - // limit to zero, as we don't want any actual entity data, just the number - // of skipped results. - newQ := q.clone() - newQ.keysOnly = len(newQ.projection) == 0 - newQ.limit = 0 - if q.limit < 0 { - // If the original query was unlimited, set the new query's offset to maximum. - newQ.offset = math.MaxInt32 - } else { - newQ.offset = q.offset + q.limit - if newQ.offset < 0 { - // Do the best we can, in the presence of overflow. - newQ.offset = math.MaxInt32 - } - } - req := &pb.Query{} - if err := newQ.toProto(req, internal.FullyQualifiedAppID(c)); err != nil { - return 0, err - } - res := &pb.QueryResult{} - if err := internal.Call(c, "datastore_v3", "RunQuery", req, res); err != nil { - return 0, err - } - - // n is the count we will return. For example, suppose that our original - // query had an offset of 4 and a limit of 2008: the count will be 2008, - // provided that there are at least 2012 matching entities. However, the - // RPCs will only skip 1000 results at a time. 
The RPC sequence is: - // call RunQuery with (offset, limit) = (2012, 0) // 2012 == newQ.offset - // response has (skippedResults, moreResults) = (1000, true) - // n += 1000 // n == 1000 - // call Next with (offset, limit) = (1012, 0) // 1012 == newQ.offset - n - // response has (skippedResults, moreResults) = (1000, true) - // n += 1000 // n == 2000 - // call Next with (offset, limit) = (12, 0) // 12 == newQ.offset - n - // response has (skippedResults, moreResults) = (12, false) - // n += 12 // n == 2012 - // // exit the loop - // n -= 4 // n == 2008 - var n int32 - for { - // The QueryResult should have no actual entity data, just skipped results. - if len(res.Result) != 0 { - return 0, errors.New("datastore: internal error: Count request returned too much data") - } - n += res.GetSkippedResults() - if !res.GetMoreResults() { - break - } - if err := callNext(c, res, newQ.offset-n, 0); err != nil { - return 0, err - } - } - n -= q.offset - if n < 0 { - // If the offset was greater than the number of matching entities, - // return 0 instead of negative. - n = 0 - } - return int(n), nil -} - -// callNext issues a datastore_v3/Next RPC to advance a cursor, such as that -// returned by a query with more results. -func callNext(c context.Context, res *pb.QueryResult, offset, limit int32) error { - if res.Cursor == nil { - return errors.New("datastore: internal error: server did not return a cursor") - } - req := &pb.NextRequest{ - Cursor: res.Cursor, - } - if limit >= 0 { - req.Count = proto.Int32(limit) - } - if offset != 0 { - req.Offset = proto.Int32(offset) - } - if res.CompiledCursor != nil { - req.Compile = proto.Bool(true) - } - res.Reset() - return internal.Call(c, "datastore_v3", "Next", req, res) -} - -// GetAll runs the query in the given context and returns all keys that match -// that query, as well as appending the values to dst. -// -// dst must have type *[]S or *[]*S or *[]P, for some struct type S or some non- -// interface, non-pointer type P such that P or *P implements PropertyLoadSaver. -// -// As a special case, *PropertyList is an invalid type for dst, even though a -// PropertyList is a slice of structs. It is treated as invalid to avoid being -// mistakenly passed when *[]PropertyList was intended. -// -// The keys returned by GetAll will be in a 1-1 correspondence with the entities -// added to dst. -// -// If q is a ``keys-only'' query, GetAll ignores dst and only returns the keys. -// -// The running time and number of API calls made by GetAll scale linearly with -// with the sum of the query's offset and limit. Unless the result count is -// expected to be small, it is best to specify a limit; otherwise GetAll will -// continue until it finishes collecting results or the provided context -// expires. -func (q *Query) GetAll(c context.Context, dst interface{}) ([]*Key, error) { - var ( - dv reflect.Value - mat multiArgType - elemType reflect.Type - errFieldMismatch error - ) - if !q.keysOnly { - dv = reflect.ValueOf(dst) - if dv.Kind() != reflect.Ptr || dv.IsNil() { - return nil, ErrInvalidEntityType - } - dv = dv.Elem() - mat, elemType = checkMultiArg(dv) - if mat == multiArgTypeInvalid || mat == multiArgTypeInterface { - return nil, ErrInvalidEntityType - } - } - - var keys []*Key - for t := q.Run(c); ; { - k, e, err := t.next() - if err == Done { - break - } - if err != nil { - return keys, err - } - if !q.keysOnly { - ev := reflect.New(elemType) - if elemType.Kind() == reflect.Map { - // This is a special case. 
The zero values of a map type are - // not immediately useful; they have to be make'd. - // - // Funcs and channels are similar, in that a zero value is not useful, - // but even a freshly make'd channel isn't useful: there's no fixed - // channel buffer size that is always going to be large enough, and - // there's no goroutine to drain the other end. Theoretically, these - // types could be supported, for example by sniffing for a constructor - // method or requiring prior registration, but for now it's not a - // frequent enough concern to be worth it. Programmers can work around - // it by explicitly using Iterator.Next instead of the Query.GetAll - // convenience method. - x := reflect.MakeMap(elemType) - ev.Elem().Set(x) - } - if err = loadEntity(ev.Interface(), e); err != nil { - if _, ok := err.(*ErrFieldMismatch); ok { - // We continue loading entities even in the face of field mismatch errors. - // If we encounter any other error, that other error is returned. Otherwise, - // an ErrFieldMismatch is returned. - errFieldMismatch = err - } else { - return keys, err - } - } - if mat != multiArgTypeStructPtr { - ev = ev.Elem() - } - dv.Set(reflect.Append(dv, ev)) - } - keys = append(keys, k) - } - return keys, errFieldMismatch -} - -// Run runs the query in the given context. -func (q *Query) Run(c context.Context) *Iterator { - if q.err != nil { - return &Iterator{err: q.err} - } - t := &Iterator{ - c: c, - limit: q.limit, - q: q, - prevCC: q.start, - } - var req pb.Query - if err := q.toProto(&req, internal.FullyQualifiedAppID(c)); err != nil { - t.err = err - return t - } - if err := internal.Call(c, "datastore_v3", "RunQuery", &req, &t.res); err != nil { - t.err = err - return t - } - offset := q.offset - t.res.GetSkippedResults() - for offset > 0 && t.res.GetMoreResults() { - t.prevCC = t.res.CompiledCursor - if err := callNext(t.c, &t.res, offset, t.limit); err != nil { - t.err = err - break - } - skip := t.res.GetSkippedResults() - if skip < 0 { - t.err = errors.New("datastore: internal error: negative number of skipped_results") - break - } - offset -= skip - } - if offset < 0 { - t.err = errors.New("datastore: internal error: query offset was overshot") - } - return t -} - -// Iterator is the result of running a query. -type Iterator struct { - c context.Context - err error - // res is the result of the most recent RunQuery or Next API call. - res pb.QueryResult - // i is how many elements of res.Result we have iterated over. - i int - // limit is the limit on the number of results this iterator should return. - // A negative value means unlimited. - limit int32 - // q is the original query which yielded this iterator. - q *Query - // prevCC is the compiled cursor that marks the end of the previous batch - // of results. - prevCC *pb.CompiledCursor -} - -// Done is returned when a query iteration has completed. -var Done = errors.New("datastore: query has no more results") - -// Next returns the key of the next result. When there are no more results, -// Done is returned as the error. -// -// If the query is not keys only and dst is non-nil, it also loads the entity -// stored for that key into the struct pointer or PropertyLoadSaver dst, with -// the same semantics and possible errors as for the Get function. 
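For the common case of materializing every result at once, GetAll (above) appends the entities to a slice and returns the matching keys in one-to-one correspondence. A small sketch, assuming a Widget struct type and the same illustrative kind:

    var widgets []Widget
    keys, err := datastore.NewQuery("Widget").
        Order("-Price").
        Limit(10).
        GetAll(ctx, &widgets)
    if err != nil {
        // An *ErrFieldMismatch may be ignorable; other errors usually are not.
    }
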
-func (t *Iterator) Next(dst interface{}) (*Key, error) { - k, e, err := t.next() - if err != nil { - return nil, err - } - if dst != nil && !t.q.keysOnly { - err = loadEntity(dst, e) - } - return k, err -} - -func (t *Iterator) next() (*Key, *pb.EntityProto, error) { - if t.err != nil { - return nil, nil, t.err - } - - // Issue datastore_v3/Next RPCs as necessary. - for t.i == len(t.res.Result) { - if !t.res.GetMoreResults() { - t.err = Done - return nil, nil, t.err - } - t.prevCC = t.res.CompiledCursor - if err := callNext(t.c, &t.res, 0, t.limit); err != nil { - t.err = err - return nil, nil, t.err - } - if t.res.GetSkippedResults() != 0 { - t.err = errors.New("datastore: internal error: iterator has skipped results") - return nil, nil, t.err - } - t.i = 0 - if t.limit >= 0 { - t.limit -= int32(len(t.res.Result)) - if t.limit < 0 { - t.err = errors.New("datastore: internal error: query returned more results than the limit") - return nil, nil, t.err - } - } - } - - // Extract the key from the t.i'th element of t.res.Result. - e := t.res.Result[t.i] - t.i++ - if e.Key == nil { - return nil, nil, errors.New("datastore: internal error: server did not return a key") - } - k, err := protoToKey(e.Key) - if err != nil || k.Incomplete() { - return nil, nil, errors.New("datastore: internal error: server returned an invalid key") - } - return k, e, nil -} - -// Cursor returns a cursor for the iterator's current location. -func (t *Iterator) Cursor() (Cursor, error) { - if t.err != nil && t.err != Done { - return Cursor{}, t.err - } - // If we are at either end of the current batch of results, - // return the compiled cursor at that end. - skipped := t.res.GetSkippedResults() - if t.i == 0 && skipped == 0 { - if t.prevCC == nil { - // A nil pointer (of type *pb.CompiledCursor) means no constraint: - // passing it as the end cursor of a new query means unlimited results - // (glossing over the integer limit parameter for now). - // A non-nil pointer to an empty pb.CompiledCursor means the start: - // passing it as the end cursor of a new query means 0 results. - // If prevCC was nil, then the original query had no start cursor, but - // Iterator.Cursor should return "the start" instead of unlimited. - return Cursor{&zeroCC}, nil - } - return Cursor{t.prevCC}, nil - } - if t.i == len(t.res.Result) { - return Cursor{t.res.CompiledCursor}, nil - } - // Otherwise, re-run the query offset to this iterator's position, starting from - // the most recent compiled cursor. This is done on a best-effort basis, as it - // is racy; if a concurrent process has added or removed entities, then the - // cursor returned may be inconsistent. - q := t.q.clone() - q.start = t.prevCC - q.offset = skipped + int32(t.i) - q.limit = 0 - q.keysOnly = len(q.projection) == 0 - t1 := q.Run(t.c) - _, _, err := t1.next() - if err != Done { - if err == nil { - err = fmt.Errorf("datastore: internal error: zero-limit query did not have zero results") - } - return Cursor{}, err - } - return Cursor{t1.res.CompiledCursor}, nil -} - -var zeroCC pb.CompiledCursor - -// Cursor is an iterator's position. It can be converted to and from an opaque -// string. A cursor can be used from different HTTP requests, but only with a -// query with the same kind, ancestor, filter and order constraints. -type Cursor struct { - cc *pb.CompiledCursor -} - -// String returns a base-64 string representation of a cursor. 
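A cursor taken mid-iteration can be serialized with String, carried across requests (for example in a URL parameter), and turned back into a start point with DecodeCursor and Query.Start. A rough sketch; the query shape is illustrative, and the resumed query must keep the same kind, ancestor, filter and order constraints:

    it := datastore.NewQuery("Widget").Order("Price").Run(ctx)
    var w Widget
    if _, err := it.Next(&w); err != nil && err != datastore.Done {
        // handle err
    }
    cur, _ := it.Cursor()
    encoded := cur.String() // store this between requests

    // In a later request:
    cur2, err := datastore.DecodeCursor(encoded)
    if err != nil {
        // handle err
    }
    resumed := datastore.NewQuery("Widget").Order("Price").Start(cur2)
    // resumed.Run(ctx) picks up where the previous iteration stopped.
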
-func (c Cursor) String() string { - if c.cc == nil { - return "" - } - b, err := proto.Marshal(c.cc) - if err != nil { - // The only way to construct a Cursor with a non-nil cc field is to - // unmarshal from the byte representation. We panic if the unmarshal - // succeeds but the marshaling of the unchanged protobuf value fails. - panic(fmt.Sprintf("datastore: internal error: malformed cursor: %v", err)) - } - return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=") -} - -// Decode decodes a cursor from its base-64 string representation. -func DecodeCursor(s string) (Cursor, error) { - if s == "" { - return Cursor{&zeroCC}, nil - } - if n := len(s) % 4; n != 0 { - s += strings.Repeat("=", 4-n) - } - b, err := base64.URLEncoding.DecodeString(s) - if err != nil { - return Cursor{}, err - } - cc := &pb.CompiledCursor{} - if err := proto.Unmarshal(b, cc); err != nil { - return Cursor{}, err - } - return Cursor{cc}, nil -} diff --git a/vendor/google.golang.org/appengine/datastore/save.go b/vendor/google.golang.org/appengine/datastore/save.go deleted file mode 100644 index b5f9592..0000000 --- a/vendor/google.golang.org/appengine/datastore/save.go +++ /dev/null @@ -1,300 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package datastore - -import ( - "errors" - "fmt" - "math" - "reflect" - "time" - - "github.com/golang/protobuf/proto" - - "google.golang.org/appengine" - pb "google.golang.org/appengine/internal/datastore" -) - -func toUnixMicro(t time.Time) int64 { - // We cannot use t.UnixNano() / 1e3 because we want to handle times more than - // 2^63 nanoseconds (which is about 292 years) away from 1970, and those cannot - // be represented in the numerator of a single int64 divide. - return t.Unix()*1e6 + int64(t.Nanosecond()/1e3) -} - -func fromUnixMicro(t int64) time.Time { - return time.Unix(t/1e6, (t%1e6)*1e3).UTC() -} - -var ( - minTime = time.Unix(int64(math.MinInt64)/1e6, (int64(math.MinInt64)%1e6)*1e3) - maxTime = time.Unix(int64(math.MaxInt64)/1e6, (int64(math.MaxInt64)%1e6)*1e3) -) - -// valueToProto converts a named value to a newly allocated Property. -// The returned error string is empty on success. -func valueToProto(defaultAppID, name string, v reflect.Value, multiple bool) (p *pb.Property, errStr string) { - var ( - pv pb.PropertyValue - unsupported bool - ) - switch v.Kind() { - case reflect.Invalid: - // No-op. - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - pv.Int64Value = proto.Int64(v.Int()) - case reflect.Bool: - pv.BooleanValue = proto.Bool(v.Bool()) - case reflect.String: - pv.StringValue = proto.String(v.String()) - case reflect.Float32, reflect.Float64: - pv.DoubleValue = proto.Float64(v.Float()) - case reflect.Ptr: - if k, ok := v.Interface().(*Key); ok { - if k != nil { - pv.Referencevalue = keyToReferenceValue(defaultAppID, k) - } - } else { - unsupported = true - } - case reflect.Struct: - switch t := v.Interface().(type) { - case time.Time: - if t.Before(minTime) || t.After(maxTime) { - return nil, "time value out of range" - } - pv.Int64Value = proto.Int64(toUnixMicro(t)) - case appengine.GeoPoint: - if !t.Valid() { - return nil, "invalid GeoPoint value" - } - // NOTE: Strangely, latitude maps to X, longitude to Y. 
- pv.Pointvalue = &pb.PropertyValue_PointValue{X: &t.Lat, Y: &t.Lng} - default: - unsupported = true - } - case reflect.Slice: - if b, ok := v.Interface().([]byte); ok { - pv.StringValue = proto.String(string(b)) - } else { - // nvToProto should already catch slice values. - // If we get here, we have a slice of slice values. - unsupported = true - } - default: - unsupported = true - } - if unsupported { - return nil, "unsupported datastore value type: " + v.Type().String() - } - p = &pb.Property{ - Name: proto.String(name), - Value: &pv, - Multiple: proto.Bool(multiple), - } - if v.IsValid() { - switch v.Interface().(type) { - case []byte: - p.Meaning = pb.Property_BLOB.Enum() - case ByteString: - p.Meaning = pb.Property_BYTESTRING.Enum() - case appengine.BlobKey: - p.Meaning = pb.Property_BLOBKEY.Enum() - case time.Time: - p.Meaning = pb.Property_GD_WHEN.Enum() - case appengine.GeoPoint: - p.Meaning = pb.Property_GEORSS_POINT.Enum() - } - } - return p, "" -} - -// saveEntity saves an EntityProto into a PropertyLoadSaver or struct pointer. -func saveEntity(defaultAppID string, key *Key, src interface{}) (*pb.EntityProto, error) { - var err error - var props []Property - if e, ok := src.(PropertyLoadSaver); ok { - props, err = e.Save() - } else { - props, err = SaveStruct(src) - } - if err != nil { - return nil, err - } - return propertiesToProto(defaultAppID, key, props) -} - -func saveStructProperty(props *[]Property, name string, noIndex, multiple bool, v reflect.Value) error { - p := Property{ - Name: name, - NoIndex: noIndex, - Multiple: multiple, - } - switch x := v.Interface().(type) { - case *Key: - p.Value = x - case time.Time: - p.Value = x - case appengine.BlobKey: - p.Value = x - case appengine.GeoPoint: - p.Value = x - case ByteString: - p.Value = x - default: - switch v.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - p.Value = v.Int() - case reflect.Bool: - p.Value = v.Bool() - case reflect.String: - p.Value = v.String() - case reflect.Float32, reflect.Float64: - p.Value = v.Float() - case reflect.Slice: - if v.Type().Elem().Kind() == reflect.Uint8 { - p.NoIndex = true - p.Value = v.Bytes() - } - case reflect.Struct: - if !v.CanAddr() { - return fmt.Errorf("datastore: unsupported struct field: value is unaddressable") - } - sub, err := newStructPLS(v.Addr().Interface()) - if err != nil { - return fmt.Errorf("datastore: unsupported struct field: %v", err) - } - return sub.(structPLS).save(props, name, noIndex, multiple) - } - } - if p.Value == nil { - return fmt.Errorf("datastore: unsupported struct field type: %v", v.Type()) - } - *props = append(*props, p) - return nil -} - -func (s structPLS) Save() ([]Property, error) { - var props []Property - if err := s.save(&props, "", false, false); err != nil { - return nil, err - } - return props, nil -} - -func (s structPLS) save(props *[]Property, prefix string, noIndex, multiple bool) error { - for i, t := range s.codec.byIndex { - if t.name == "-" { - continue - } - name := t.name - if prefix != "" { - name = prefix + name - } - v := s.v.Field(i) - if !v.IsValid() || !v.CanSet() { - continue - } - noIndex1 := noIndex || t.noIndex - // For slice fields that aren't []byte, save each element. - if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 { - for j := 0; j < v.Len(); j++ { - if err := saveStructProperty(props, name, noIndex1, true, v.Index(j)); err != nil { - return err - } - } - continue - } - // Otherwise, save the field itself. 
- if err := saveStructProperty(props, name, noIndex1, multiple, v); err != nil { - return err - } - } - return nil -} - -func propertiesToProto(defaultAppID string, key *Key, props []Property) (*pb.EntityProto, error) { - e := &pb.EntityProto{ - Key: keyToProto(defaultAppID, key), - } - if key.parent == nil { - e.EntityGroup = &pb.Path{} - } else { - e.EntityGroup = keyToProto(defaultAppID, key.root()).Path - } - prevMultiple := make(map[string]bool) - - for _, p := range props { - if pm, ok := prevMultiple[p.Name]; ok { - if !pm || !p.Multiple { - return nil, fmt.Errorf("datastore: multiple Properties with Name %q, but Multiple is false", p.Name) - } - } else { - prevMultiple[p.Name] = p.Multiple - } - - x := &pb.Property{ - Name: proto.String(p.Name), - Value: new(pb.PropertyValue), - Multiple: proto.Bool(p.Multiple), - } - switch v := p.Value.(type) { - case int64: - x.Value.Int64Value = proto.Int64(v) - case bool: - x.Value.BooleanValue = proto.Bool(v) - case string: - x.Value.StringValue = proto.String(v) - if p.NoIndex { - x.Meaning = pb.Property_TEXT.Enum() - } - case float64: - x.Value.DoubleValue = proto.Float64(v) - case *Key: - if v != nil { - x.Value.Referencevalue = keyToReferenceValue(defaultAppID, v) - } - case time.Time: - if v.Before(minTime) || v.After(maxTime) { - return nil, fmt.Errorf("datastore: time value out of range") - } - x.Value.Int64Value = proto.Int64(toUnixMicro(v)) - x.Meaning = pb.Property_GD_WHEN.Enum() - case appengine.BlobKey: - x.Value.StringValue = proto.String(string(v)) - x.Meaning = pb.Property_BLOBKEY.Enum() - case appengine.GeoPoint: - if !v.Valid() { - return nil, fmt.Errorf("datastore: invalid GeoPoint value") - } - // NOTE: Strangely, latitude maps to X, longitude to Y. - x.Value.Pointvalue = &pb.PropertyValue_PointValue{X: &v.Lat, Y: &v.Lng} - x.Meaning = pb.Property_GEORSS_POINT.Enum() - case []byte: - x.Value.StringValue = proto.String(string(v)) - x.Meaning = pb.Property_BLOB.Enum() - if !p.NoIndex { - return nil, fmt.Errorf("datastore: cannot index a []byte valued Property with Name %q", p.Name) - } - case ByteString: - x.Value.StringValue = proto.String(string(v)) - x.Meaning = pb.Property_BYTESTRING.Enum() - default: - if p.Value != nil { - return nil, fmt.Errorf("datastore: invalid Value type for a Property with Name %q", p.Name) - } - } - - if p.NoIndex { - e.RawProperty = append(e.RawProperty, x) - } else { - e.Property = append(e.Property, x) - if len(e.Property) > maxIndexedProperties { - return nil, errors.New("datastore: too many indexed properties") - } - } - } - return e, nil -} diff --git a/vendor/google.golang.org/appengine/datastore/transaction.go b/vendor/google.golang.org/appengine/datastore/transaction.go deleted file mode 100644 index a7f3f2b..0000000 --- a/vendor/google.golang.org/appengine/datastore/transaction.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. 
- -package datastore - -import ( - "errors" - - "golang.org/x/net/context" - - "google.golang.org/appengine/internal" - pb "google.golang.org/appengine/internal/datastore" -) - -func init() { - internal.RegisterTransactionSetter(func(x *pb.Query, t *pb.Transaction) { - x.Transaction = t - }) - internal.RegisterTransactionSetter(func(x *pb.GetRequest, t *pb.Transaction) { - x.Transaction = t - }) - internal.RegisterTransactionSetter(func(x *pb.PutRequest, t *pb.Transaction) { - x.Transaction = t - }) - internal.RegisterTransactionSetter(func(x *pb.DeleteRequest, t *pb.Transaction) { - x.Transaction = t - }) -} - -// ErrConcurrentTransaction is returned when a transaction is rolled back due -// to a conflict with a concurrent transaction. -var ErrConcurrentTransaction = errors.New("datastore: concurrent transaction") - -// RunInTransaction runs f in a transaction. It calls f with a transaction -// context tc that f should use for all App Engine operations. -// -// If f returns nil, RunInTransaction attempts to commit the transaction, -// returning nil if it succeeds. If the commit fails due to a conflicting -// transaction, RunInTransaction retries f, each time with a new transaction -// context. It gives up and returns ErrConcurrentTransaction after three -// failed attempts. The number of attempts can be configured by specifying -// TransactionOptions.Attempts. -// -// If f returns non-nil, then any datastore changes will not be applied and -// RunInTransaction returns that same error. The function f is not retried. -// -// Note that when f returns, the transaction is not yet committed. Calling code -// must be careful not to assume that any of f's changes have been committed -// until RunInTransaction returns nil. -// -// Since f may be called multiple times, f should usually be idempotent. -// datastore.Get is not idempotent when unmarshaling slice fields. -// -// Nested transactions are not supported; c may not be a transaction context. -func RunInTransaction(c context.Context, f func(tc context.Context) error, opts *TransactionOptions) error { - xg := false - if opts != nil { - xg = opts.XG - } - attempts := 3 - if opts != nil && opts.Attempts > 0 { - attempts = opts.Attempts - } - for i := 0; i < attempts; i++ { - if err := internal.RunTransactionOnce(c, f, xg); err != internal.ErrConcurrentTransaction { - return err - } - } - return ErrConcurrentTransaction -} - -// TransactionOptions are the options for running a transaction. -type TransactionOptions struct { - // XG is whether the transaction can cross multiple entity groups. In - // comparison, a single group transaction is one where all datastore keys - // used have the same root key. Note that cross group transactions do not - // have the same behavior as single group transactions. In particular, it - // is much more likely to see partially applied transactions in different - // entity groups, in global queries. - // It is valid to set XG to true even if the transaction is within a - // single entity group. - XG bool - // Attempts controls the number of retries to perform when commits fail - // due to a conflicting transaction. If omitted, it defaults to 3. - Attempts int -} diff --git a/vendor/google.golang.org/appengine/errors.go b/vendor/google.golang.org/appengine/errors.go deleted file mode 100644 index 16d0772..0000000 --- a/vendor/google.golang.org/appengine/errors.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. 
-// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// This file provides error functions for common API failure modes. - -package appengine - -import ( - "fmt" - - "google.golang.org/appengine/internal" -) - -// IsOverQuota reports whether err represents an API call failure -// due to insufficient available quota. -func IsOverQuota(err error) bool { - callErr, ok := err.(*internal.CallError) - return ok && callErr.Code == 4 -} - -// MultiError is returned by batch operations when there are errors with -// particular elements. Errors will be in a one-to-one correspondence with -// the input elements; successful elements will have a nil entry. -type MultiError []error - -func (m MultiError) Error() string { - s, n := "", 0 - for _, e := range m { - if e != nil { - if n == 0 { - s = e.Error() - } - n++ - } - } - switch n { - case 0: - return "(0 errors)" - case 1: - return s - case 2: - return s + " (and 1 other error)" - } - return fmt.Sprintf("%s (and %d other errors)", s, n-1) -} diff --git a/vendor/google.golang.org/appengine/identity.go b/vendor/google.golang.org/appengine/identity.go deleted file mode 100644 index b8dcf8f..0000000 --- a/vendor/google.golang.org/appengine/identity.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package appengine - -import ( - "time" - - "golang.org/x/net/context" - - "google.golang.org/appengine/internal" - pb "google.golang.org/appengine/internal/app_identity" - modpb "google.golang.org/appengine/internal/modules" -) - -// AppID returns the application ID for the current application. -// The string will be a plain application ID (e.g. "appid"), with a -// domain prefix for custom domain deployments (e.g. "example.com:appid"). -func AppID(c context.Context) string { return internal.AppID(c) } - -// DefaultVersionHostname returns the standard hostname of the default version -// of the current application (e.g. "my-app.appspot.com"). This is suitable for -// use in constructing URLs. -func DefaultVersionHostname(c context.Context) string { - return internal.DefaultVersionHostname(c) -} - -// ModuleName returns the module name of the current instance. -func ModuleName(c context.Context) string { - return internal.ModuleName(c) -} - -// ModuleHostname returns a hostname of a module instance. -// If module is the empty string, it refers to the module of the current instance. -// If version is empty, it refers to the version of the current instance if valid, -// or the default version of the module of the current instance. -// If instance is empty, ModuleHostname returns the load-balancing hostname. -func ModuleHostname(c context.Context, module, version, instance string) (string, error) { - req := &modpb.GetHostnameRequest{} - if module != "" { - req.Module = &module - } - if version != "" { - req.Version = &version - } - if instance != "" { - req.Instance = &instance - } - res := &modpb.GetHostnameResponse{} - if err := internal.Call(c, "modules", "GetHostname", req, res); err != nil { - return "", err - } - return *res.Hostname, nil -} - -// VersionID returns the version ID for the current application. -// It will be of the form "X.Y", where X is specified in app.yaml, -// and Y is a number generated when each version of the app is uploaded. -// It does not include a module name. 
-func VersionID(c context.Context) string { return internal.VersionID(c) } - -// InstanceID returns a mostly-unique identifier for this instance. -func InstanceID() string { return internal.InstanceID() } - -// Datacenter returns an identifier for the datacenter that the instance is running in. -func Datacenter(c context.Context) string { return internal.Datacenter(c) } - -// ServerSoftware returns the App Engine release version. -// In production, it looks like "Google App Engine/X.Y.Z". -// In the development appserver, it looks like "Development/X.Y". -func ServerSoftware() string { return internal.ServerSoftware() } - -// RequestID returns a string that uniquely identifies the request. -func RequestID(c context.Context) string { return internal.RequestID(c) } - -// AccessToken generates an OAuth2 access token for the specified scopes on -// behalf of service account of this application. This token will expire after -// the returned time. -func AccessToken(c context.Context, scopes ...string) (token string, expiry time.Time, err error) { - req := &pb.GetAccessTokenRequest{Scope: scopes} - res := &pb.GetAccessTokenResponse{} - - err = internal.Call(c, "app_identity_service", "GetAccessToken", req, res) - if err != nil { - return "", time.Time{}, err - } - return res.GetAccessToken(), time.Unix(res.GetExpirationTime(), 0), nil -} - -// Certificate represents a public certificate for the app. -type Certificate struct { - KeyName string - Data []byte // PEM-encoded X.509 certificate -} - -// PublicCertificates retrieves the public certificates for the app. -// They can be used to verify a signature returned by SignBytes. -func PublicCertificates(c context.Context) ([]Certificate, error) { - req := &pb.GetPublicCertificateForAppRequest{} - res := &pb.GetPublicCertificateForAppResponse{} - if err := internal.Call(c, "app_identity_service", "GetPublicCertificatesForApp", req, res); err != nil { - return nil, err - } - var cs []Certificate - for _, pc := range res.PublicCertificateList { - cs = append(cs, Certificate{ - KeyName: pc.GetKeyName(), - Data: []byte(pc.GetX509CertificatePem()), - }) - } - return cs, nil -} - -// ServiceAccount returns a string representing the service account name, in -// the form of an email address (typically app_id@appspot.gserviceaccount.com). -func ServiceAccount(c context.Context) (string, error) { - req := &pb.GetServiceAccountNameRequest{} - res := &pb.GetServiceAccountNameResponse{} - - err := internal.Call(c, "app_identity_service", "GetServiceAccountName", req, res) - if err != nil { - return "", err - } - return res.GetServiceAccountName(), err -} - -// SignBytes signs bytes using a private key unique to your application. -func SignBytes(c context.Context, bytes []byte) (keyName string, signature []byte, err error) { - req := &pb.SignForAppRequest{BytesToSign: bytes} - res := &pb.SignForAppResponse{} - - if err := internal.Call(c, "app_identity_service", "SignForApp", req, res); err != nil { - return "", nil, err - } - return res.GetKeyName(), res.GetSignatureBytes(), nil -} - -func init() { - internal.RegisterErrorCodeMap("app_identity_service", pb.AppIdentityServiceError_ErrorCode_name) - internal.RegisterErrorCodeMap("modules", modpb.ModulesServiceError_ErrorCode_name) -} diff --git a/vendor/google.golang.org/appengine/internal/api.go b/vendor/google.golang.org/appengine/internal/api.go deleted file mode 100644 index ec5aa59..0000000 --- a/vendor/google.golang.org/appengine/internal/api.go +++ /dev/null @@ -1,646 +0,0 @@ -// Copyright 2011 Google Inc. 
All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// +build !appengine - -package internal - -import ( - "bytes" - "errors" - "fmt" - "io/ioutil" - "log" - "net" - "net/http" - "net/url" - "os" - "runtime" - "strconv" - "strings" - "sync" - "sync/atomic" - "time" - - "github.com/golang/protobuf/proto" - netcontext "golang.org/x/net/context" - - basepb "google.golang.org/appengine/internal/base" - logpb "google.golang.org/appengine/internal/log" - remotepb "google.golang.org/appengine/internal/remote_api" -) - -const ( - apiPath = "/rpc_http" -) - -var ( - // Incoming headers. - ticketHeader = http.CanonicalHeaderKey("X-AppEngine-API-Ticket") - dapperHeader = http.CanonicalHeaderKey("X-Google-DapperTraceInfo") - traceHeader = http.CanonicalHeaderKey("X-Cloud-Trace-Context") - curNamespaceHeader = http.CanonicalHeaderKey("X-AppEngine-Current-Namespace") - userIPHeader = http.CanonicalHeaderKey("X-AppEngine-User-IP") - remoteAddrHeader = http.CanonicalHeaderKey("X-AppEngine-Remote-Addr") - - // Outgoing headers. - apiEndpointHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Endpoint") - apiEndpointHeaderValue = []string{"app-engine-apis"} - apiMethodHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Method") - apiMethodHeaderValue = []string{"/VMRemoteAPI.CallRemoteAPI"} - apiDeadlineHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Deadline") - apiContentType = http.CanonicalHeaderKey("Content-Type") - apiContentTypeValue = []string{"application/octet-stream"} - logFlushHeader = http.CanonicalHeaderKey("X-AppEngine-Log-Flush-Count") - - apiHTTPClient = &http.Client{ - Transport: &http.Transport{ - Proxy: http.ProxyFromEnvironment, - Dial: limitDial, - }, - } -) - -func apiURL() *url.URL { - host, port := "appengine.googleapis.internal", "10001" - if h := os.Getenv("API_HOST"); h != "" { - host = h - } - if p := os.Getenv("API_PORT"); p != "" { - port = p - } - return &url.URL{ - Scheme: "http", - Host: host + ":" + port, - Path: apiPath, - } -} - -func handleHTTP(w http.ResponseWriter, r *http.Request) { - c := &context{ - req: r, - outHeader: w.Header(), - apiURL: apiURL(), - } - stopFlushing := make(chan int) - - ctxs.Lock() - ctxs.m[r] = c - ctxs.Unlock() - defer func() { - ctxs.Lock() - delete(ctxs.m, r) - ctxs.Unlock() - }() - - // Patch up RemoteAddr so it looks reasonable. - if addr := r.Header.Get(userIPHeader); addr != "" { - r.RemoteAddr = addr - } else if addr = r.Header.Get(remoteAddrHeader); addr != "" { - r.RemoteAddr = addr - } else { - // Should not normally reach here, but pick a sensible default anyway. - r.RemoteAddr = "127.0.0.1" - } - // The address in the headers will most likely be of these forms: - // 123.123.123.123 - // 2001:db8::1 - // net/http.Request.RemoteAddr is specified to be in "IP:port" form. - if _, _, err := net.SplitHostPort(r.RemoteAddr); err != nil { - // Assume the remote address is only a host; add a default port. - r.RemoteAddr = net.JoinHostPort(r.RemoteAddr, "80") - } - - // Start goroutine responsible for flushing app logs. - // This is done after adding c to ctx.m (and stopped before removing it) - // because flushing logs requires making an API call. - go c.logFlusher(stopFlushing) - - executeRequestSafely(c, r) - c.outHeader = nil // make sure header changes aren't respected any more - - stopFlushing <- 1 // any logging beyond this point will be dropped - - // Flush any pending logs asynchronously. 
- c.pendingLogs.Lock() - flushes := c.pendingLogs.flushes - if len(c.pendingLogs.lines) > 0 { - flushes++ - } - c.pendingLogs.Unlock() - go c.flushLog(false) - w.Header().Set(logFlushHeader, strconv.Itoa(flushes)) - - // Avoid nil Write call if c.Write is never called. - if c.outCode != 0 { - w.WriteHeader(c.outCode) - } - if c.outBody != nil { - w.Write(c.outBody) - } -} - -func executeRequestSafely(c *context, r *http.Request) { - defer func() { - if x := recover(); x != nil { - logf(c, 4, "%s", renderPanic(x)) // 4 == critical - c.outCode = 500 - } - }() - - http.DefaultServeMux.ServeHTTP(c, r) -} - -func renderPanic(x interface{}) string { - buf := make([]byte, 16<<10) // 16 KB should be plenty - buf = buf[:runtime.Stack(buf, false)] - - // Remove the first few stack frames: - // this func - // the recover closure in the caller - // That will root the stack trace at the site of the panic. - const ( - skipStart = "internal.renderPanic" - skipFrames = 2 - ) - start := bytes.Index(buf, []byte(skipStart)) - p := start - for i := 0; i < skipFrames*2 && p+1 < len(buf); i++ { - p = bytes.IndexByte(buf[p+1:], '\n') + p + 1 - if p < 0 { - break - } - } - if p >= 0 { - // buf[start:p+1] is the block to remove. - // Copy buf[p+1:] over buf[start:] and shrink buf. - copy(buf[start:], buf[p+1:]) - buf = buf[:len(buf)-(p+1-start)] - } - - // Add panic heading. - head := fmt.Sprintf("panic: %v\n\n", x) - if len(head) > len(buf) { - // Extremely unlikely to happen. - return head - } - copy(buf[len(head):], buf) - copy(buf, head) - - return string(buf) -} - -var ctxs = struct { - sync.Mutex - m map[*http.Request]*context - bg *context // background context, lazily initialized - // dec is used by tests to decorate the netcontext.Context returned - // for a given request. This allows tests to add overrides (such as - // WithAppIDOverride) to the context. The map is nil outside tests. - dec map[*http.Request]func(netcontext.Context) netcontext.Context -}{ - m: make(map[*http.Request]*context), -} - -// context represents the context of an in-flight HTTP request. -// It implements the appengine.Context and http.ResponseWriter interfaces. -type context struct { - req *http.Request - - outCode int - outHeader http.Header - outBody []byte - - pendingLogs struct { - sync.Mutex - lines []*logpb.UserAppLogLine - flushes int - } - - apiURL *url.URL -} - -var contextKey = "holds a *context" - -func fromContext(ctx netcontext.Context) *context { - c, _ := ctx.Value(&contextKey).(*context) - return c -} - -func withContext(parent netcontext.Context, c *context) netcontext.Context { - ctx := netcontext.WithValue(parent, &contextKey, c) - if ns := c.req.Header.Get(curNamespaceHeader); ns != "" { - ctx = withNamespace(ctx, ns) - } - return ctx -} - -func toContext(c *context) netcontext.Context { - return withContext(netcontext.Background(), c) -} - -func IncomingHeaders(ctx netcontext.Context) http.Header { - if c := fromContext(ctx); c != nil { - return c.req.Header - } - return nil -} - -func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context { - ctxs.Lock() - c := ctxs.m[req] - d := ctxs.dec[req] - ctxs.Unlock() - - if d != nil { - parent = d(parent) - } - - if c == nil { - // Someone passed in an http.Request that is not in-flight. - // We panic here rather than panicking at a later point - // so that stack traces will be more sensible. 
- log.Panic("appengine: NewContext passed an unknown http.Request") - } - return withContext(parent, c) -} - -func BackgroundContext() netcontext.Context { - ctxs.Lock() - defer ctxs.Unlock() - - if ctxs.bg != nil { - return toContext(ctxs.bg) - } - - // Compute background security ticket. - appID := partitionlessAppID() - escAppID := strings.Replace(strings.Replace(appID, ":", "_", -1), ".", "_", -1) - majVersion := VersionID(nil) - if i := strings.Index(majVersion, "."); i > 0 { - majVersion = majVersion[:i] - } - ticket := fmt.Sprintf("%s/%s.%s.%s", escAppID, ModuleName(nil), majVersion, InstanceID()) - - ctxs.bg = &context{ - req: &http.Request{ - Header: http.Header{ - ticketHeader: []string{ticket}, - }, - }, - apiURL: apiURL(), - } - - // TODO(dsymonds): Wire up the shutdown handler to do a final flush. - go ctxs.bg.logFlusher(make(chan int)) - - return toContext(ctxs.bg) -} - -// RegisterTestRequest registers the HTTP request req for testing, such that -// any API calls are sent to the provided URL. It returns a closure to delete -// the registration. -// It should only be used by aetest package. -func RegisterTestRequest(req *http.Request, apiURL *url.URL, decorate func(netcontext.Context) netcontext.Context) func() { - c := &context{ - req: req, - apiURL: apiURL, - } - ctxs.Lock() - defer ctxs.Unlock() - if _, ok := ctxs.m[req]; ok { - log.Panic("req already associated with context") - } - if _, ok := ctxs.dec[req]; ok { - log.Panic("req already associated with context") - } - if ctxs.dec == nil { - ctxs.dec = make(map[*http.Request]func(netcontext.Context) netcontext.Context) - } - ctxs.m[req] = c - ctxs.dec[req] = decorate - - return func() { - ctxs.Lock() - delete(ctxs.m, req) - delete(ctxs.dec, req) - ctxs.Unlock() - } -} - -var errTimeout = &CallError{ - Detail: "Deadline exceeded", - Code: int32(remotepb.RpcError_CANCELLED), - Timeout: true, -} - -func (c *context) Header() http.Header { return c.outHeader } - -// Copied from $GOROOT/src/pkg/net/http/transfer.go. Some response status -// codes do not permit a response body (nor response entity headers such as -// Content-Length, Content-Type, etc). -func bodyAllowedForStatus(status int) bool { - switch { - case status >= 100 && status <= 199: - return false - case status == 204: - return false - case status == 304: - return false - } - return true -} - -func (c *context) Write(b []byte) (int, error) { - if c.outCode == 0 { - c.WriteHeader(http.StatusOK) - } - if len(b) > 0 && !bodyAllowedForStatus(c.outCode) { - return 0, http.ErrBodyNotAllowed - } - c.outBody = append(c.outBody, b...) 
- return len(b), nil -} - -func (c *context) WriteHeader(code int) { - if c.outCode != 0 { - logf(c, 3, "WriteHeader called multiple times on request.") // error level - return - } - c.outCode = code -} - -func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) { - hreq := &http.Request{ - Method: "POST", - URL: c.apiURL, - Header: http.Header{ - apiEndpointHeader: apiEndpointHeaderValue, - apiMethodHeader: apiMethodHeaderValue, - apiContentType: apiContentTypeValue, - apiDeadlineHeader: []string{strconv.FormatFloat(timeout.Seconds(), 'f', -1, 64)}, - }, - Body: ioutil.NopCloser(bytes.NewReader(body)), - ContentLength: int64(len(body)), - Host: c.apiURL.Host, - } - if info := c.req.Header.Get(dapperHeader); info != "" { - hreq.Header.Set(dapperHeader, info) - } - if info := c.req.Header.Get(traceHeader); info != "" { - hreq.Header.Set(traceHeader, info) - } - - tr := apiHTTPClient.Transport.(*http.Transport) - - var timedOut int32 // atomic; set to 1 if timed out - t := time.AfterFunc(timeout, func() { - atomic.StoreInt32(&timedOut, 1) - tr.CancelRequest(hreq) - }) - defer t.Stop() - defer func() { - // Check if timeout was exceeded. - if atomic.LoadInt32(&timedOut) != 0 { - err = errTimeout - } - }() - - hresp, err := apiHTTPClient.Do(hreq) - if err != nil { - return nil, &CallError{ - Detail: fmt.Sprintf("service bridge HTTP failed: %v", err), - Code: int32(remotepb.RpcError_UNKNOWN), - } - } - defer hresp.Body.Close() - hrespBody, err := ioutil.ReadAll(hresp.Body) - if hresp.StatusCode != 200 { - return nil, &CallError{ - Detail: fmt.Sprintf("service bridge returned HTTP %d (%q)", hresp.StatusCode, hrespBody), - Code: int32(remotepb.RpcError_UNKNOWN), - } - } - if err != nil { - return nil, &CallError{ - Detail: fmt.Sprintf("service bridge response bad: %v", err), - Code: int32(remotepb.RpcError_UNKNOWN), - } - } - return hrespBody, nil -} - -func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error { - if ns := NamespaceFromContext(ctx); ns != "" { - if fn, ok := NamespaceMods[service]; ok { - fn(in, ns) - } - } - - if f, ctx, ok := callOverrideFromContext(ctx); ok { - return f(ctx, service, method, in, out) - } - - // Handle already-done contexts quickly. - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - c := fromContext(ctx) - if c == nil { - // Give a good error message rather than a panic lower down. - return errors.New("not an App Engine context") - } - - // Apply transaction modifications if we're in a transaction. - if t := transactionFromContext(ctx); t != nil { - if t.finished { - return errors.New("transaction context has expired") - } - applyTransaction(in, &t.transaction) - } - - // Default RPC timeout is 60s. 
- timeout := 60 * time.Second - if deadline, ok := ctx.Deadline(); ok { - timeout = deadline.Sub(time.Now()) - } - - data, err := proto.Marshal(in) - if err != nil { - return err - } - - ticket := c.req.Header.Get(ticketHeader) - req := &remotepb.Request{ - ServiceName: &service, - Method: &method, - Request: data, - RequestId: &ticket, - } - hreqBody, err := proto.Marshal(req) - if err != nil { - return err - } - - hrespBody, err := c.post(hreqBody, timeout) - if err != nil { - return err - } - - res := &remotepb.Response{} - if err := proto.Unmarshal(hrespBody, res); err != nil { - return err - } - if res.RpcError != nil { - ce := &CallError{ - Detail: res.RpcError.GetDetail(), - Code: *res.RpcError.Code, - } - switch remotepb.RpcError_ErrorCode(ce.Code) { - case remotepb.RpcError_CANCELLED, remotepb.RpcError_DEADLINE_EXCEEDED: - ce.Timeout = true - } - return ce - } - if res.ApplicationError != nil { - return &APIError{ - Service: *req.ServiceName, - Detail: res.ApplicationError.GetDetail(), - Code: *res.ApplicationError.Code, - } - } - if res.Exception != nil || res.JavaException != nil { - // This shouldn't happen, but let's be defensive. - return &CallError{ - Detail: "service bridge returned exception", - Code: int32(remotepb.RpcError_UNKNOWN), - } - } - return proto.Unmarshal(res.Response, out) -} - -func (c *context) Request() *http.Request { - return c.req -} - -func (c *context) addLogLine(ll *logpb.UserAppLogLine) { - // Truncate long log lines. - // TODO(dsymonds): Check if this is still necessary. - const lim = 8 << 10 - if len(*ll.Message) > lim { - suffix := fmt.Sprintf("...(length %d)", len(*ll.Message)) - ll.Message = proto.String((*ll.Message)[:lim-len(suffix)] + suffix) - } - - c.pendingLogs.Lock() - c.pendingLogs.lines = append(c.pendingLogs.lines, ll) - c.pendingLogs.Unlock() -} - -var logLevelName = map[int64]string{ - 0: "DEBUG", - 1: "INFO", - 2: "WARNING", - 3: "ERROR", - 4: "CRITICAL", -} - -func logf(c *context, level int64, format string, args ...interface{}) { - s := fmt.Sprintf(format, args...) - s = strings.TrimRight(s, "\n") // Remove any trailing newline characters. - c.addLogLine(&logpb.UserAppLogLine{ - TimestampUsec: proto.Int64(time.Now().UnixNano() / 1e3), - Level: &level, - Message: &s, - }) - log.Print(logLevelName[level] + ": " + s) -} - -// flushLog attempts to flush any pending logs to the appserver. -// It should not be called concurrently. -func (c *context) flushLog(force bool) (flushed bool) { - c.pendingLogs.Lock() - // Grab up to 30 MB. We can get away with up to 32 MB, but let's be cautious. - n, rem := 0, 30<<20 - for ; n < len(c.pendingLogs.lines); n++ { - ll := c.pendingLogs.lines[n] - // Each log line will require about 3 bytes of overhead. - nb := proto.Size(ll) + 3 - if nb > rem { - break - } - rem -= nb - } - lines := c.pendingLogs.lines[:n] - c.pendingLogs.lines = c.pendingLogs.lines[n:] - c.pendingLogs.Unlock() - - if len(lines) == 0 && !force { - // Nothing to flush. - return false - } - - rescueLogs := false - defer func() { - if rescueLogs { - c.pendingLogs.Lock() - c.pendingLogs.lines = append(lines, c.pendingLogs.lines...) 
- c.pendingLogs.Unlock() - } - }() - - buf, err := proto.Marshal(&logpb.UserAppLogGroup{ - LogLine: lines, - }) - if err != nil { - log.Printf("internal.flushLog: marshaling UserAppLogGroup: %v", err) - rescueLogs = true - return false - } - - req := &logpb.FlushRequest{ - Logs: buf, - } - res := &basepb.VoidProto{} - c.pendingLogs.Lock() - c.pendingLogs.flushes++ - c.pendingLogs.Unlock() - if err := Call(toContext(c), "logservice", "Flush", req, res); err != nil { - log.Printf("internal.flushLog: Flush RPC: %v", err) - rescueLogs = true - return false - } - return true -} - -const ( - // Log flushing parameters. - flushInterval = 1 * time.Second - forceFlushInterval = 60 * time.Second -) - -func (c *context) logFlusher(stop <-chan int) { - lastFlush := time.Now() - tick := time.NewTicker(flushInterval) - for { - select { - case <-stop: - // Request finished. - tick.Stop() - return - case <-tick.C: - force := time.Now().Sub(lastFlush) > forceFlushInterval - if c.flushLog(force) { - lastFlush = time.Now() - } - } - } -} - -func ContextForTesting(req *http.Request) netcontext.Context { - return toContext(&context{req: req}) -} diff --git a/vendor/google.golang.org/appengine/internal/api_classic.go b/vendor/google.golang.org/appengine/internal/api_classic.go deleted file mode 100644 index 597f66e..0000000 --- a/vendor/google.golang.org/appengine/internal/api_classic.go +++ /dev/null @@ -1,159 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// +build appengine - -package internal - -import ( - "errors" - "fmt" - "net/http" - "time" - - "appengine" - "appengine_internal" - basepb "appengine_internal/base" - - "github.com/golang/protobuf/proto" - netcontext "golang.org/x/net/context" -) - -var contextKey = "holds an appengine.Context" - -func fromContext(ctx netcontext.Context) appengine.Context { - c, _ := ctx.Value(&contextKey).(appengine.Context) - return c -} - -// This is only for classic App Engine adapters. 
-func ClassicContextFromContext(ctx netcontext.Context) appengine.Context { - return fromContext(ctx) -} - -func withContext(parent netcontext.Context, c appengine.Context) netcontext.Context { - ctx := netcontext.WithValue(parent, &contextKey, c) - - s := &basepb.StringProto{} - c.Call("__go__", "GetNamespace", &basepb.VoidProto{}, s, nil) - if ns := s.GetValue(); ns != "" { - ctx = NamespacedContext(ctx, ns) - } - - return ctx -} - -func IncomingHeaders(ctx netcontext.Context) http.Header { - if c := fromContext(ctx); c != nil { - if req, ok := c.Request().(*http.Request); ok { - return req.Header - } - } - return nil -} - -func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context { - c := appengine.NewContext(req) - return withContext(parent, c) -} - -type testingContext struct { - appengine.Context - - req *http.Request -} - -func (t *testingContext) FullyQualifiedAppID() string { return "dev~testcontext" } -func (t *testingContext) Call(service, method string, _, _ appengine_internal.ProtoMessage, _ *appengine_internal.CallOptions) error { - if service == "__go__" && method == "GetNamespace" { - return nil - } - return fmt.Errorf("testingContext: unsupported Call") -} -func (t *testingContext) Request() interface{} { return t.req } - -func ContextForTesting(req *http.Request) netcontext.Context { - return withContext(netcontext.Background(), &testingContext{req: req}) -} - -func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error { - if ns := NamespaceFromContext(ctx); ns != "" { - if fn, ok := NamespaceMods[service]; ok { - fn(in, ns) - } - } - - if f, ctx, ok := callOverrideFromContext(ctx); ok { - return f(ctx, service, method, in, out) - } - - // Handle already-done contexts quickly. - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - c := fromContext(ctx) - if c == nil { - // Give a good error message rather than a panic lower down. - return errors.New("not an App Engine context") - } - - // Apply transaction modifications if we're in a transaction. - if t := transactionFromContext(ctx); t != nil { - if t.finished { - return errors.New("transaction context has expired") - } - applyTransaction(in, &t.transaction) - } - - var opts *appengine_internal.CallOptions - if d, ok := ctx.Deadline(); ok { - opts = &appengine_internal.CallOptions{ - Timeout: d.Sub(time.Now()), - } - } - - err := c.Call(service, method, in, out, opts) - switch v := err.(type) { - case *appengine_internal.APIError: - return &APIError{ - Service: v.Service, - Detail: v.Detail, - Code: v.Code, - } - case *appengine_internal.CallError: - return &CallError{ - Detail: v.Detail, - Code: v.Code, - Timeout: v.Timeout, - } - } - return err -} - -func handleHTTP(w http.ResponseWriter, r *http.Request) { - panic("handleHTTP called; this should be impossible") -} - -func logf(c appengine.Context, level int64, format string, args ...interface{}) { - var fn func(format string, args ...interface{}) - switch level { - case 0: - fn = c.Debugf - case 1: - fn = c.Infof - case 2: - fn = c.Warningf - case 3: - fn = c.Errorf - case 4: - fn = c.Criticalf - default: - // This shouldn't happen. - fn = c.Criticalf - } - fn(format, args...) -} diff --git a/vendor/google.golang.org/appengine/internal/api_common.go b/vendor/google.golang.org/appengine/internal/api_common.go deleted file mode 100644 index 2db33a7..0000000 --- a/vendor/google.golang.org/appengine/internal/api_common.go +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. 
-// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package internal - -import ( - "github.com/golang/protobuf/proto" - netcontext "golang.org/x/net/context" -) - -type CallOverrideFunc func(ctx netcontext.Context, service, method string, in, out proto.Message) error - -var callOverrideKey = "holds []CallOverrideFunc" - -func WithCallOverride(ctx netcontext.Context, f CallOverrideFunc) netcontext.Context { - // We avoid appending to any existing call override - // so we don't risk overwriting a popped stack below. - var cofs []CallOverrideFunc - if uf, ok := ctx.Value(&callOverrideKey).([]CallOverrideFunc); ok { - cofs = append(cofs, uf...) - } - cofs = append(cofs, f) - return netcontext.WithValue(ctx, &callOverrideKey, cofs) -} - -func callOverrideFromContext(ctx netcontext.Context) (CallOverrideFunc, netcontext.Context, bool) { - cofs, _ := ctx.Value(&callOverrideKey).([]CallOverrideFunc) - if len(cofs) == 0 { - return nil, nil, false - } - // We found a list of overrides; grab the last, and reconstitute a - // context that will hide it. - f := cofs[len(cofs)-1] - ctx = netcontext.WithValue(ctx, &callOverrideKey, cofs[:len(cofs)-1]) - return f, ctx, true -} - -type logOverrideFunc func(level int64, format string, args ...interface{}) - -var logOverrideKey = "holds a logOverrideFunc" - -func WithLogOverride(ctx netcontext.Context, f logOverrideFunc) netcontext.Context { - return netcontext.WithValue(ctx, &logOverrideKey, f) -} - -var appIDOverrideKey = "holds a string, being the full app ID" - -func WithAppIDOverride(ctx netcontext.Context, appID string) netcontext.Context { - return netcontext.WithValue(ctx, &appIDOverrideKey, appID) -} - -var namespaceKey = "holds the namespace string" - -func withNamespace(ctx netcontext.Context, ns string) netcontext.Context { - return netcontext.WithValue(ctx, &namespaceKey, ns) -} - -func NamespaceFromContext(ctx netcontext.Context) string { - // If there's no namespace, return the empty string. - ns, _ := ctx.Value(&namespaceKey).(string) - return ns -} - -// FullyQualifiedAppID returns the fully-qualified application ID. -// This may contain a partition prefix (e.g. "s~" for High Replication apps), -// or a domain prefix (e.g. "example.com:"). -func FullyQualifiedAppID(ctx netcontext.Context) string { - if id, ok := ctx.Value(&appIDOverrideKey).(string); ok { - return id - } - return fullyQualifiedAppID(ctx) -} - -func Logf(ctx netcontext.Context, level int64, format string, args ...interface{}) { - if f, ok := ctx.Value(&logOverrideKey).(logOverrideFunc); ok { - f(level, format, args...) - return - } - logf(fromContext(ctx), level, format, args...) -} - -// NamespacedContext wraps a Context to support namespaces. -func NamespacedContext(ctx netcontext.Context, namespace string) netcontext.Context { - return withNamespace(ctx, namespace) -} diff --git a/vendor/google.golang.org/appengine/internal/app_id.go b/vendor/google.golang.org/appengine/internal/app_id.go deleted file mode 100644 index 11df8c0..0000000 --- a/vendor/google.golang.org/appengine/internal/app_id.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. 
- -package internal - -import ( - "strings" -) - -func parseFullAppID(appid string) (partition, domain, displayID string) { - if i := strings.Index(appid, "~"); i != -1 { - partition, appid = appid[:i], appid[i+1:] - } - if i := strings.Index(appid, ":"); i != -1 { - domain, appid = appid[:i], appid[i+1:] - } - return partition, domain, appid -} - -// appID returns "appid" or "domain.com:appid". -func appID(fullAppID string) string { - _, dom, dis := parseFullAppID(fullAppID) - if dom != "" { - return dom + ":" + dis - } - return dis -} diff --git a/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go b/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go deleted file mode 100644 index 87d9701..0000000 --- a/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go +++ /dev/null @@ -1,296 +0,0 @@ -// Code generated by protoc-gen-go. -// source: google.golang.org/appengine/internal/app_identity/app_identity_service.proto -// DO NOT EDIT! - -/* -Package app_identity is a generated protocol buffer package. - -It is generated from these files: - google.golang.org/appengine/internal/app_identity/app_identity_service.proto - -It has these top-level messages: - AppIdentityServiceError - SignForAppRequest - SignForAppResponse - GetPublicCertificateForAppRequest - PublicCertificate - GetPublicCertificateForAppResponse - GetServiceAccountNameRequest - GetServiceAccountNameResponse - GetAccessTokenRequest - GetAccessTokenResponse - GetDefaultGcsBucketNameRequest - GetDefaultGcsBucketNameResponse -*/ -package app_identity - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -type AppIdentityServiceError_ErrorCode int32 - -const ( - AppIdentityServiceError_SUCCESS AppIdentityServiceError_ErrorCode = 0 - AppIdentityServiceError_UNKNOWN_SCOPE AppIdentityServiceError_ErrorCode = 9 - AppIdentityServiceError_BLOB_TOO_LARGE AppIdentityServiceError_ErrorCode = 1000 - AppIdentityServiceError_DEADLINE_EXCEEDED AppIdentityServiceError_ErrorCode = 1001 - AppIdentityServiceError_NOT_A_VALID_APP AppIdentityServiceError_ErrorCode = 1002 - AppIdentityServiceError_UNKNOWN_ERROR AppIdentityServiceError_ErrorCode = 1003 - AppIdentityServiceError_NOT_ALLOWED AppIdentityServiceError_ErrorCode = 1005 - AppIdentityServiceError_NOT_IMPLEMENTED AppIdentityServiceError_ErrorCode = 1006 -) - -var AppIdentityServiceError_ErrorCode_name = map[int32]string{ - 0: "SUCCESS", - 9: "UNKNOWN_SCOPE", - 1000: "BLOB_TOO_LARGE", - 1001: "DEADLINE_EXCEEDED", - 1002: "NOT_A_VALID_APP", - 1003: "UNKNOWN_ERROR", - 1005: "NOT_ALLOWED", - 1006: "NOT_IMPLEMENTED", -} -var AppIdentityServiceError_ErrorCode_value = map[string]int32{ - "SUCCESS": 0, - "UNKNOWN_SCOPE": 9, - "BLOB_TOO_LARGE": 1000, - "DEADLINE_EXCEEDED": 1001, - "NOT_A_VALID_APP": 1002, - "UNKNOWN_ERROR": 1003, - "NOT_ALLOWED": 1005, - "NOT_IMPLEMENTED": 1006, -} - -func (x AppIdentityServiceError_ErrorCode) Enum() *AppIdentityServiceError_ErrorCode { - p := new(AppIdentityServiceError_ErrorCode) - *p = x - return p -} -func (x AppIdentityServiceError_ErrorCode) String() string { - return proto.EnumName(AppIdentityServiceError_ErrorCode_name, int32(x)) -} -func (x *AppIdentityServiceError_ErrorCode) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(AppIdentityServiceError_ErrorCode_value, data, "AppIdentityServiceError_ErrorCode") - if err != nil { - return err - } - *x = AppIdentityServiceError_ErrorCode(value) - return nil -} - -type AppIdentityServiceError struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *AppIdentityServiceError) Reset() { *m = AppIdentityServiceError{} } -func (m *AppIdentityServiceError) String() string { return proto.CompactTextString(m) } -func (*AppIdentityServiceError) ProtoMessage() {} - -type SignForAppRequest struct { - BytesToSign []byte `protobuf:"bytes,1,opt,name=bytes_to_sign" json:"bytes_to_sign,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *SignForAppRequest) Reset() { *m = SignForAppRequest{} } -func (m *SignForAppRequest) String() string { return proto.CompactTextString(m) } -func (*SignForAppRequest) ProtoMessage() {} - -func (m *SignForAppRequest) GetBytesToSign() []byte { - if m != nil { - return m.BytesToSign - } - return nil -} - -type SignForAppResponse struct { - KeyName *string `protobuf:"bytes,1,opt,name=key_name" json:"key_name,omitempty"` - SignatureBytes []byte `protobuf:"bytes,2,opt,name=signature_bytes" json:"signature_bytes,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *SignForAppResponse) Reset() { *m = SignForAppResponse{} } -func (m *SignForAppResponse) String() string { return proto.CompactTextString(m) } -func (*SignForAppResponse) ProtoMessage() {} - -func (m *SignForAppResponse) GetKeyName() string { - if m != nil && m.KeyName != nil { - return *m.KeyName - } - return "" -} - -func (m *SignForAppResponse) GetSignatureBytes() []byte { - if m != nil { - return m.SignatureBytes - } - return nil -} - -type GetPublicCertificateForAppRequest struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetPublicCertificateForAppRequest) Reset() { *m = 
GetPublicCertificateForAppRequest{} } -func (m *GetPublicCertificateForAppRequest) String() string { return proto.CompactTextString(m) } -func (*GetPublicCertificateForAppRequest) ProtoMessage() {} - -type PublicCertificate struct { - KeyName *string `protobuf:"bytes,1,opt,name=key_name" json:"key_name,omitempty"` - X509CertificatePem *string `protobuf:"bytes,2,opt,name=x509_certificate_pem" json:"x509_certificate_pem,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *PublicCertificate) Reset() { *m = PublicCertificate{} } -func (m *PublicCertificate) String() string { return proto.CompactTextString(m) } -func (*PublicCertificate) ProtoMessage() {} - -func (m *PublicCertificate) GetKeyName() string { - if m != nil && m.KeyName != nil { - return *m.KeyName - } - return "" -} - -func (m *PublicCertificate) GetX509CertificatePem() string { - if m != nil && m.X509CertificatePem != nil { - return *m.X509CertificatePem - } - return "" -} - -type GetPublicCertificateForAppResponse struct { - PublicCertificateList []*PublicCertificate `protobuf:"bytes,1,rep,name=public_certificate_list" json:"public_certificate_list,omitempty"` - MaxClientCacheTimeInSecond *int64 `protobuf:"varint,2,opt,name=max_client_cache_time_in_second" json:"max_client_cache_time_in_second,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetPublicCertificateForAppResponse) Reset() { *m = GetPublicCertificateForAppResponse{} } -func (m *GetPublicCertificateForAppResponse) String() string { return proto.CompactTextString(m) } -func (*GetPublicCertificateForAppResponse) ProtoMessage() {} - -func (m *GetPublicCertificateForAppResponse) GetPublicCertificateList() []*PublicCertificate { - if m != nil { - return m.PublicCertificateList - } - return nil -} - -func (m *GetPublicCertificateForAppResponse) GetMaxClientCacheTimeInSecond() int64 { - if m != nil && m.MaxClientCacheTimeInSecond != nil { - return *m.MaxClientCacheTimeInSecond - } - return 0 -} - -type GetServiceAccountNameRequest struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetServiceAccountNameRequest) Reset() { *m = GetServiceAccountNameRequest{} } -func (m *GetServiceAccountNameRequest) String() string { return proto.CompactTextString(m) } -func (*GetServiceAccountNameRequest) ProtoMessage() {} - -type GetServiceAccountNameResponse struct { - ServiceAccountName *string `protobuf:"bytes,1,opt,name=service_account_name" json:"service_account_name,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetServiceAccountNameResponse) Reset() { *m = GetServiceAccountNameResponse{} } -func (m *GetServiceAccountNameResponse) String() string { return proto.CompactTextString(m) } -func (*GetServiceAccountNameResponse) ProtoMessage() {} - -func (m *GetServiceAccountNameResponse) GetServiceAccountName() string { - if m != nil && m.ServiceAccountName != nil { - return *m.ServiceAccountName - } - return "" -} - -type GetAccessTokenRequest struct { - Scope []string `protobuf:"bytes,1,rep,name=scope" json:"scope,omitempty"` - ServiceAccountId *int64 `protobuf:"varint,2,opt,name=service_account_id" json:"service_account_id,omitempty"` - ServiceAccountName *string `protobuf:"bytes,3,opt,name=service_account_name" json:"service_account_name,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetAccessTokenRequest) Reset() { *m = GetAccessTokenRequest{} } -func (m *GetAccessTokenRequest) String() string { return proto.CompactTextString(m) } -func (*GetAccessTokenRequest) ProtoMessage() {} - -func (m *GetAccessTokenRequest) 
GetScope() []string { - if m != nil { - return m.Scope - } - return nil -} - -func (m *GetAccessTokenRequest) GetServiceAccountId() int64 { - if m != nil && m.ServiceAccountId != nil { - return *m.ServiceAccountId - } - return 0 -} - -func (m *GetAccessTokenRequest) GetServiceAccountName() string { - if m != nil && m.ServiceAccountName != nil { - return *m.ServiceAccountName - } - return "" -} - -type GetAccessTokenResponse struct { - AccessToken *string `protobuf:"bytes,1,opt,name=access_token" json:"access_token,omitempty"` - ExpirationTime *int64 `protobuf:"varint,2,opt,name=expiration_time" json:"expiration_time,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetAccessTokenResponse) Reset() { *m = GetAccessTokenResponse{} } -func (m *GetAccessTokenResponse) String() string { return proto.CompactTextString(m) } -func (*GetAccessTokenResponse) ProtoMessage() {} - -func (m *GetAccessTokenResponse) GetAccessToken() string { - if m != nil && m.AccessToken != nil { - return *m.AccessToken - } - return "" -} - -func (m *GetAccessTokenResponse) GetExpirationTime() int64 { - if m != nil && m.ExpirationTime != nil { - return *m.ExpirationTime - } - return 0 -} - -type GetDefaultGcsBucketNameRequest struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetDefaultGcsBucketNameRequest) Reset() { *m = GetDefaultGcsBucketNameRequest{} } -func (m *GetDefaultGcsBucketNameRequest) String() string { return proto.CompactTextString(m) } -func (*GetDefaultGcsBucketNameRequest) ProtoMessage() {} - -type GetDefaultGcsBucketNameResponse struct { - DefaultGcsBucketName *string `protobuf:"bytes,1,opt,name=default_gcs_bucket_name" json:"default_gcs_bucket_name,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetDefaultGcsBucketNameResponse) Reset() { *m = GetDefaultGcsBucketNameResponse{} } -func (m *GetDefaultGcsBucketNameResponse) String() string { return proto.CompactTextString(m) } -func (*GetDefaultGcsBucketNameResponse) ProtoMessage() {} - -func (m *GetDefaultGcsBucketNameResponse) GetDefaultGcsBucketName() string { - if m != nil && m.DefaultGcsBucketName != nil { - return *m.DefaultGcsBucketName - } - return "" -} - -func init() { -} diff --git a/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto b/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto deleted file mode 100644 index 19610ca..0000000 --- a/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto +++ /dev/null @@ -1,64 +0,0 @@ -syntax = "proto2"; -option go_package = "app_identity"; - -package appengine; - -message AppIdentityServiceError { - enum ErrorCode { - SUCCESS = 0; - UNKNOWN_SCOPE = 9; - BLOB_TOO_LARGE = 1000; - DEADLINE_EXCEEDED = 1001; - NOT_A_VALID_APP = 1002; - UNKNOWN_ERROR = 1003; - NOT_ALLOWED = 1005; - NOT_IMPLEMENTED = 1006; - } -} - -message SignForAppRequest { - optional bytes bytes_to_sign = 1; -} - -message SignForAppResponse { - optional string key_name = 1; - optional bytes signature_bytes = 2; -} - -message GetPublicCertificateForAppRequest { -} - -message PublicCertificate { - optional string key_name = 1; - optional string x509_certificate_pem = 2; -} - -message GetPublicCertificateForAppResponse { - repeated PublicCertificate public_certificate_list = 1; - optional int64 max_client_cache_time_in_second = 2; -} - -message GetServiceAccountNameRequest { -} - -message GetServiceAccountNameResponse { - optional string service_account_name = 1; -} - -message GetAccessTokenRequest { - repeated 
string scope = 1; - optional int64 service_account_id = 2; - optional string service_account_name = 3; -} - -message GetAccessTokenResponse { - optional string access_token = 1; - optional int64 expiration_time = 2; -} - -message GetDefaultGcsBucketNameRequest { -} - -message GetDefaultGcsBucketNameResponse { - optional string default_gcs_bucket_name = 1; -} diff --git a/vendor/google.golang.org/appengine/internal/base/api_base.pb.go b/vendor/google.golang.org/appengine/internal/base/api_base.pb.go deleted file mode 100644 index 36a1956..0000000 --- a/vendor/google.golang.org/appengine/internal/base/api_base.pb.go +++ /dev/null @@ -1,133 +0,0 @@ -// Code generated by protoc-gen-go. -// source: google.golang.org/appengine/internal/base/api_base.proto -// DO NOT EDIT! - -/* -Package base is a generated protocol buffer package. - -It is generated from these files: - google.golang.org/appengine/internal/base/api_base.proto - -It has these top-level messages: - StringProto - Integer32Proto - Integer64Proto - BoolProto - DoubleProto - BytesProto - VoidProto -*/ -package base - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -type StringProto struct { - Value *string `protobuf:"bytes,1,req,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *StringProto) Reset() { *m = StringProto{} } -func (m *StringProto) String() string { return proto.CompactTextString(m) } -func (*StringProto) ProtoMessage() {} - -func (m *StringProto) GetValue() string { - if m != nil && m.Value != nil { - return *m.Value - } - return "" -} - -type Integer32Proto struct { - Value *int32 `protobuf:"varint,1,req,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Integer32Proto) Reset() { *m = Integer32Proto{} } -func (m *Integer32Proto) String() string { return proto.CompactTextString(m) } -func (*Integer32Proto) ProtoMessage() {} - -func (m *Integer32Proto) GetValue() int32 { - if m != nil && m.Value != nil { - return *m.Value - } - return 0 -} - -type Integer64Proto struct { - Value *int64 `protobuf:"varint,1,req,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Integer64Proto) Reset() { *m = Integer64Proto{} } -func (m *Integer64Proto) String() string { return proto.CompactTextString(m) } -func (*Integer64Proto) ProtoMessage() {} - -func (m *Integer64Proto) GetValue() int64 { - if m != nil && m.Value != nil { - return *m.Value - } - return 0 -} - -type BoolProto struct { - Value *bool `protobuf:"varint,1,req,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *BoolProto) Reset() { *m = BoolProto{} } -func (m *BoolProto) String() string { return proto.CompactTextString(m) } -func (*BoolProto) ProtoMessage() {} - -func (m *BoolProto) GetValue() bool { - if m != nil && m.Value != nil { - return *m.Value - } - return false -} - -type DoubleProto struct { - Value *float64 `protobuf:"fixed64,1,req,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *DoubleProto) Reset() { *m = DoubleProto{} } -func (m *DoubleProto) String() string { return proto.CompactTextString(m) } -func (*DoubleProto) ProtoMessage() {} - -func (m *DoubleProto) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value - } - return 0 -} - -type BytesProto struct { - Value []byte 
`protobuf:"bytes,1,req,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *BytesProto) Reset() { *m = BytesProto{} } -func (m *BytesProto) String() string { return proto.CompactTextString(m) } -func (*BytesProto) ProtoMessage() {} - -func (m *BytesProto) GetValue() []byte { - if m != nil { - return m.Value - } - return nil -} - -type VoidProto struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *VoidProto) Reset() { *m = VoidProto{} } -func (m *VoidProto) String() string { return proto.CompactTextString(m) } -func (*VoidProto) ProtoMessage() {} diff --git a/vendor/google.golang.org/appengine/internal/base/api_base.proto b/vendor/google.golang.org/appengine/internal/base/api_base.proto deleted file mode 100644 index 56cd7a3..0000000 --- a/vendor/google.golang.org/appengine/internal/base/api_base.proto +++ /dev/null @@ -1,33 +0,0 @@ -// Built-in base types for API calls. Primarily useful as return types. - -syntax = "proto2"; -option go_package = "base"; - -package appengine.base; - -message StringProto { - required string value = 1; -} - -message Integer32Proto { - required int32 value = 1; -} - -message Integer64Proto { - required int64 value = 1; -} - -message BoolProto { - required bool value = 1; -} - -message DoubleProto { - required double value = 1; -} - -message BytesProto { - required bytes value = 1 [ctype=CORD]; -} - -message VoidProto { -} diff --git a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go deleted file mode 100644 index 8613cb7..0000000 --- a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go +++ /dev/null @@ -1,2778 +0,0 @@ -// Code generated by protoc-gen-go. -// source: google.golang.org/appengine/internal/datastore/datastore_v3.proto -// DO NOT EDIT! - -/* -Package datastore is a generated protocol buffer package. - -It is generated from these files: - google.golang.org/appengine/internal/datastore/datastore_v3.proto - -It has these top-level messages: - Action - PropertyValue - Property - Path - Reference - User - EntityProto - CompositeProperty - Index - CompositeIndex - IndexPostfix - IndexPosition - Snapshot - InternalHeader - Transaction - Query - CompiledQuery - CompiledCursor - Cursor - Error - Cost - GetRequest - GetResponse - PutRequest - PutResponse - TouchRequest - TouchResponse - DeleteRequest - DeleteResponse - NextRequest - QueryResult - AllocateIdsRequest - AllocateIdsResponse - CompositeIndices - AddActionsRequest - AddActionsResponse - BeginTransactionRequest - CommitResponse -*/ -package datastore - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -type Property_Meaning int32 - -const ( - Property_NO_MEANING Property_Meaning = 0 - Property_BLOB Property_Meaning = 14 - Property_TEXT Property_Meaning = 15 - Property_BYTESTRING Property_Meaning = 16 - Property_ATOM_CATEGORY Property_Meaning = 1 - Property_ATOM_LINK Property_Meaning = 2 - Property_ATOM_TITLE Property_Meaning = 3 - Property_ATOM_CONTENT Property_Meaning = 4 - Property_ATOM_SUMMARY Property_Meaning = 5 - Property_ATOM_AUTHOR Property_Meaning = 6 - Property_GD_WHEN Property_Meaning = 7 - Property_GD_EMAIL Property_Meaning = 8 - Property_GEORSS_POINT Property_Meaning = 9 - Property_GD_IM Property_Meaning = 10 - Property_GD_PHONENUMBER Property_Meaning = 11 - Property_GD_POSTALADDRESS Property_Meaning = 12 - Property_GD_RATING Property_Meaning = 13 - Property_BLOBKEY Property_Meaning = 17 - Property_ENTITY_PROTO Property_Meaning = 19 - Property_INDEX_VALUE Property_Meaning = 18 -) - -var Property_Meaning_name = map[int32]string{ - 0: "NO_MEANING", - 14: "BLOB", - 15: "TEXT", - 16: "BYTESTRING", - 1: "ATOM_CATEGORY", - 2: "ATOM_LINK", - 3: "ATOM_TITLE", - 4: "ATOM_CONTENT", - 5: "ATOM_SUMMARY", - 6: "ATOM_AUTHOR", - 7: "GD_WHEN", - 8: "GD_EMAIL", - 9: "GEORSS_POINT", - 10: "GD_IM", - 11: "GD_PHONENUMBER", - 12: "GD_POSTALADDRESS", - 13: "GD_RATING", - 17: "BLOBKEY", - 19: "ENTITY_PROTO", - 18: "INDEX_VALUE", -} -var Property_Meaning_value = map[string]int32{ - "NO_MEANING": 0, - "BLOB": 14, - "TEXT": 15, - "BYTESTRING": 16, - "ATOM_CATEGORY": 1, - "ATOM_LINK": 2, - "ATOM_TITLE": 3, - "ATOM_CONTENT": 4, - "ATOM_SUMMARY": 5, - "ATOM_AUTHOR": 6, - "GD_WHEN": 7, - "GD_EMAIL": 8, - "GEORSS_POINT": 9, - "GD_IM": 10, - "GD_PHONENUMBER": 11, - "GD_POSTALADDRESS": 12, - "GD_RATING": 13, - "BLOBKEY": 17, - "ENTITY_PROTO": 19, - "INDEX_VALUE": 18, -} - -func (x Property_Meaning) Enum() *Property_Meaning { - p := new(Property_Meaning) - *p = x - return p -} -func (x Property_Meaning) String() string { - return proto.EnumName(Property_Meaning_name, int32(x)) -} -func (x *Property_Meaning) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Property_Meaning_value, data, "Property_Meaning") - if err != nil { - return err - } - *x = Property_Meaning(value) - return nil -} - -type Property_FtsTokenizationOption int32 - -const ( - Property_HTML Property_FtsTokenizationOption = 1 - Property_ATOM Property_FtsTokenizationOption = 2 -) - -var Property_FtsTokenizationOption_name = map[int32]string{ - 1: "HTML", - 2: "ATOM", -} -var Property_FtsTokenizationOption_value = map[string]int32{ - "HTML": 1, - "ATOM": 2, -} - -func (x Property_FtsTokenizationOption) Enum() *Property_FtsTokenizationOption { - p := new(Property_FtsTokenizationOption) - *p = x - return p -} -func (x Property_FtsTokenizationOption) String() string { - return proto.EnumName(Property_FtsTokenizationOption_name, int32(x)) -} -func (x *Property_FtsTokenizationOption) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Property_FtsTokenizationOption_value, data, "Property_FtsTokenizationOption") - if err != nil { - return err - } - *x = Property_FtsTokenizationOption(value) - return nil -} - -type EntityProto_Kind int32 - -const ( - EntityProto_GD_CONTACT EntityProto_Kind = 1 - EntityProto_GD_EVENT EntityProto_Kind = 2 - EntityProto_GD_MESSAGE EntityProto_Kind = 3 -) - -var EntityProto_Kind_name = map[int32]string{ - 1: "GD_CONTACT", - 2: "GD_EVENT", - 3: "GD_MESSAGE", -} -var EntityProto_Kind_value = map[string]int32{ - 
"GD_CONTACT": 1, - "GD_EVENT": 2, - "GD_MESSAGE": 3, -} - -func (x EntityProto_Kind) Enum() *EntityProto_Kind { - p := new(EntityProto_Kind) - *p = x - return p -} -func (x EntityProto_Kind) String() string { - return proto.EnumName(EntityProto_Kind_name, int32(x)) -} -func (x *EntityProto_Kind) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(EntityProto_Kind_value, data, "EntityProto_Kind") - if err != nil { - return err - } - *x = EntityProto_Kind(value) - return nil -} - -type Index_Property_Direction int32 - -const ( - Index_Property_ASCENDING Index_Property_Direction = 1 - Index_Property_DESCENDING Index_Property_Direction = 2 -) - -var Index_Property_Direction_name = map[int32]string{ - 1: "ASCENDING", - 2: "DESCENDING", -} -var Index_Property_Direction_value = map[string]int32{ - "ASCENDING": 1, - "DESCENDING": 2, -} - -func (x Index_Property_Direction) Enum() *Index_Property_Direction { - p := new(Index_Property_Direction) - *p = x - return p -} -func (x Index_Property_Direction) String() string { - return proto.EnumName(Index_Property_Direction_name, int32(x)) -} -func (x *Index_Property_Direction) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Index_Property_Direction_value, data, "Index_Property_Direction") - if err != nil { - return err - } - *x = Index_Property_Direction(value) - return nil -} - -type CompositeIndex_State int32 - -const ( - CompositeIndex_WRITE_ONLY CompositeIndex_State = 1 - CompositeIndex_READ_WRITE CompositeIndex_State = 2 - CompositeIndex_DELETED CompositeIndex_State = 3 - CompositeIndex_ERROR CompositeIndex_State = 4 -) - -var CompositeIndex_State_name = map[int32]string{ - 1: "WRITE_ONLY", - 2: "READ_WRITE", - 3: "DELETED", - 4: "ERROR", -} -var CompositeIndex_State_value = map[string]int32{ - "WRITE_ONLY": 1, - "READ_WRITE": 2, - "DELETED": 3, - "ERROR": 4, -} - -func (x CompositeIndex_State) Enum() *CompositeIndex_State { - p := new(CompositeIndex_State) - *p = x - return p -} -func (x CompositeIndex_State) String() string { - return proto.EnumName(CompositeIndex_State_name, int32(x)) -} -func (x *CompositeIndex_State) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(CompositeIndex_State_value, data, "CompositeIndex_State") - if err != nil { - return err - } - *x = CompositeIndex_State(value) - return nil -} - -type Snapshot_Status int32 - -const ( - Snapshot_INACTIVE Snapshot_Status = 0 - Snapshot_ACTIVE Snapshot_Status = 1 -) - -var Snapshot_Status_name = map[int32]string{ - 0: "INACTIVE", - 1: "ACTIVE", -} -var Snapshot_Status_value = map[string]int32{ - "INACTIVE": 0, - "ACTIVE": 1, -} - -func (x Snapshot_Status) Enum() *Snapshot_Status { - p := new(Snapshot_Status) - *p = x - return p -} -func (x Snapshot_Status) String() string { - return proto.EnumName(Snapshot_Status_name, int32(x)) -} -func (x *Snapshot_Status) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Snapshot_Status_value, data, "Snapshot_Status") - if err != nil { - return err - } - *x = Snapshot_Status(value) - return nil -} - -type Query_Hint int32 - -const ( - Query_ORDER_FIRST Query_Hint = 1 - Query_ANCESTOR_FIRST Query_Hint = 2 - Query_FILTER_FIRST Query_Hint = 3 -) - -var Query_Hint_name = map[int32]string{ - 1: "ORDER_FIRST", - 2: "ANCESTOR_FIRST", - 3: "FILTER_FIRST", -} -var Query_Hint_value = map[string]int32{ - "ORDER_FIRST": 1, - "ANCESTOR_FIRST": 2, - "FILTER_FIRST": 3, -} - -func (x Query_Hint) Enum() *Query_Hint { - p := new(Query_Hint) - *p = x - return p -} 
-func (x Query_Hint) String() string { - return proto.EnumName(Query_Hint_name, int32(x)) -} -func (x *Query_Hint) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Query_Hint_value, data, "Query_Hint") - if err != nil { - return err - } - *x = Query_Hint(value) - return nil -} - -type Query_Filter_Operator int32 - -const ( - Query_Filter_LESS_THAN Query_Filter_Operator = 1 - Query_Filter_LESS_THAN_OR_EQUAL Query_Filter_Operator = 2 - Query_Filter_GREATER_THAN Query_Filter_Operator = 3 - Query_Filter_GREATER_THAN_OR_EQUAL Query_Filter_Operator = 4 - Query_Filter_EQUAL Query_Filter_Operator = 5 - Query_Filter_IN Query_Filter_Operator = 6 - Query_Filter_EXISTS Query_Filter_Operator = 7 -) - -var Query_Filter_Operator_name = map[int32]string{ - 1: "LESS_THAN", - 2: "LESS_THAN_OR_EQUAL", - 3: "GREATER_THAN", - 4: "GREATER_THAN_OR_EQUAL", - 5: "EQUAL", - 6: "IN", - 7: "EXISTS", -} -var Query_Filter_Operator_value = map[string]int32{ - "LESS_THAN": 1, - "LESS_THAN_OR_EQUAL": 2, - "GREATER_THAN": 3, - "GREATER_THAN_OR_EQUAL": 4, - "EQUAL": 5, - "IN": 6, - "EXISTS": 7, -} - -func (x Query_Filter_Operator) Enum() *Query_Filter_Operator { - p := new(Query_Filter_Operator) - *p = x - return p -} -func (x Query_Filter_Operator) String() string { - return proto.EnumName(Query_Filter_Operator_name, int32(x)) -} -func (x *Query_Filter_Operator) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Query_Filter_Operator_value, data, "Query_Filter_Operator") - if err != nil { - return err - } - *x = Query_Filter_Operator(value) - return nil -} - -type Query_Order_Direction int32 - -const ( - Query_Order_ASCENDING Query_Order_Direction = 1 - Query_Order_DESCENDING Query_Order_Direction = 2 -) - -var Query_Order_Direction_name = map[int32]string{ - 1: "ASCENDING", - 2: "DESCENDING", -} -var Query_Order_Direction_value = map[string]int32{ - "ASCENDING": 1, - "DESCENDING": 2, -} - -func (x Query_Order_Direction) Enum() *Query_Order_Direction { - p := new(Query_Order_Direction) - *p = x - return p -} -func (x Query_Order_Direction) String() string { - return proto.EnumName(Query_Order_Direction_name, int32(x)) -} -func (x *Query_Order_Direction) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Query_Order_Direction_value, data, "Query_Order_Direction") - if err != nil { - return err - } - *x = Query_Order_Direction(value) - return nil -} - -type Error_ErrorCode int32 - -const ( - Error_BAD_REQUEST Error_ErrorCode = 1 - Error_CONCURRENT_TRANSACTION Error_ErrorCode = 2 - Error_INTERNAL_ERROR Error_ErrorCode = 3 - Error_NEED_INDEX Error_ErrorCode = 4 - Error_TIMEOUT Error_ErrorCode = 5 - Error_PERMISSION_DENIED Error_ErrorCode = 6 - Error_BIGTABLE_ERROR Error_ErrorCode = 7 - Error_COMMITTED_BUT_STILL_APPLYING Error_ErrorCode = 8 - Error_CAPABILITY_DISABLED Error_ErrorCode = 9 - Error_TRY_ALTERNATE_BACKEND Error_ErrorCode = 10 - Error_SAFE_TIME_TOO_OLD Error_ErrorCode = 11 -) - -var Error_ErrorCode_name = map[int32]string{ - 1: "BAD_REQUEST", - 2: "CONCURRENT_TRANSACTION", - 3: "INTERNAL_ERROR", - 4: "NEED_INDEX", - 5: "TIMEOUT", - 6: "PERMISSION_DENIED", - 7: "BIGTABLE_ERROR", - 8: "COMMITTED_BUT_STILL_APPLYING", - 9: "CAPABILITY_DISABLED", - 10: "TRY_ALTERNATE_BACKEND", - 11: "SAFE_TIME_TOO_OLD", -} -var Error_ErrorCode_value = map[string]int32{ - "BAD_REQUEST": 1, - "CONCURRENT_TRANSACTION": 2, - "INTERNAL_ERROR": 3, - "NEED_INDEX": 4, - "TIMEOUT": 5, - "PERMISSION_DENIED": 6, - "BIGTABLE_ERROR": 7, - "COMMITTED_BUT_STILL_APPLYING": 8, - 
"CAPABILITY_DISABLED": 9, - "TRY_ALTERNATE_BACKEND": 10, - "SAFE_TIME_TOO_OLD": 11, -} - -func (x Error_ErrorCode) Enum() *Error_ErrorCode { - p := new(Error_ErrorCode) - *p = x - return p -} -func (x Error_ErrorCode) String() string { - return proto.EnumName(Error_ErrorCode_name, int32(x)) -} -func (x *Error_ErrorCode) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Error_ErrorCode_value, data, "Error_ErrorCode") - if err != nil { - return err - } - *x = Error_ErrorCode(value) - return nil -} - -type PutRequest_AutoIdPolicy int32 - -const ( - PutRequest_CURRENT PutRequest_AutoIdPolicy = 0 - PutRequest_SEQUENTIAL PutRequest_AutoIdPolicy = 1 -) - -var PutRequest_AutoIdPolicy_name = map[int32]string{ - 0: "CURRENT", - 1: "SEQUENTIAL", -} -var PutRequest_AutoIdPolicy_value = map[string]int32{ - "CURRENT": 0, - "SEQUENTIAL": 1, -} - -func (x PutRequest_AutoIdPolicy) Enum() *PutRequest_AutoIdPolicy { - p := new(PutRequest_AutoIdPolicy) - *p = x - return p -} -func (x PutRequest_AutoIdPolicy) String() string { - return proto.EnumName(PutRequest_AutoIdPolicy_name, int32(x)) -} -func (x *PutRequest_AutoIdPolicy) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(PutRequest_AutoIdPolicy_value, data, "PutRequest_AutoIdPolicy") - if err != nil { - return err - } - *x = PutRequest_AutoIdPolicy(value) - return nil -} - -type Action struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *Action) Reset() { *m = Action{} } -func (m *Action) String() string { return proto.CompactTextString(m) } -func (*Action) ProtoMessage() {} - -type PropertyValue struct { - Int64Value *int64 `protobuf:"varint,1,opt,name=int64Value" json:"int64Value,omitempty"` - BooleanValue *bool `protobuf:"varint,2,opt,name=booleanValue" json:"booleanValue,omitempty"` - StringValue *string `protobuf:"bytes,3,opt,name=stringValue" json:"stringValue,omitempty"` - DoubleValue *float64 `protobuf:"fixed64,4,opt,name=doubleValue" json:"doubleValue,omitempty"` - Pointvalue *PropertyValue_PointValue `protobuf:"group,5,opt,name=PointValue" json:"pointvalue,omitempty"` - Uservalue *PropertyValue_UserValue `protobuf:"group,8,opt,name=UserValue" json:"uservalue,omitempty"` - Referencevalue *PropertyValue_ReferenceValue `protobuf:"group,12,opt,name=ReferenceValue" json:"referencevalue,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *PropertyValue) Reset() { *m = PropertyValue{} } -func (m *PropertyValue) String() string { return proto.CompactTextString(m) } -func (*PropertyValue) ProtoMessage() {} - -func (m *PropertyValue) GetInt64Value() int64 { - if m != nil && m.Int64Value != nil { - return *m.Int64Value - } - return 0 -} - -func (m *PropertyValue) GetBooleanValue() bool { - if m != nil && m.BooleanValue != nil { - return *m.BooleanValue - } - return false -} - -func (m *PropertyValue) GetStringValue() string { - if m != nil && m.StringValue != nil { - return *m.StringValue - } - return "" -} - -func (m *PropertyValue) GetDoubleValue() float64 { - if m != nil && m.DoubleValue != nil { - return *m.DoubleValue - } - return 0 -} - -func (m *PropertyValue) GetPointvalue() *PropertyValue_PointValue { - if m != nil { - return m.Pointvalue - } - return nil -} - -func (m *PropertyValue) GetUservalue() *PropertyValue_UserValue { - if m != nil { - return m.Uservalue - } - return nil -} - -func (m *PropertyValue) GetReferencevalue() *PropertyValue_ReferenceValue { - if m != nil { - return m.Referencevalue - } - return nil -} - -type PropertyValue_PointValue struct { - X *float64 
`protobuf:"fixed64,6,req,name=x" json:"x,omitempty"` - Y *float64 `protobuf:"fixed64,7,req,name=y" json:"y,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *PropertyValue_PointValue) Reset() { *m = PropertyValue_PointValue{} } -func (m *PropertyValue_PointValue) String() string { return proto.CompactTextString(m) } -func (*PropertyValue_PointValue) ProtoMessage() {} - -func (m *PropertyValue_PointValue) GetX() float64 { - if m != nil && m.X != nil { - return *m.X - } - return 0 -} - -func (m *PropertyValue_PointValue) GetY() float64 { - if m != nil && m.Y != nil { - return *m.Y - } - return 0 -} - -type PropertyValue_UserValue struct { - Email *string `protobuf:"bytes,9,req,name=email" json:"email,omitempty"` - AuthDomain *string `protobuf:"bytes,10,req,name=auth_domain" json:"auth_domain,omitempty"` - Nickname *string `protobuf:"bytes,11,opt,name=nickname" json:"nickname,omitempty"` - FederatedIdentity *string `protobuf:"bytes,21,opt,name=federated_identity" json:"federated_identity,omitempty"` - FederatedProvider *string `protobuf:"bytes,22,opt,name=federated_provider" json:"federated_provider,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *PropertyValue_UserValue) Reset() { *m = PropertyValue_UserValue{} } -func (m *PropertyValue_UserValue) String() string { return proto.CompactTextString(m) } -func (*PropertyValue_UserValue) ProtoMessage() {} - -func (m *PropertyValue_UserValue) GetEmail() string { - if m != nil && m.Email != nil { - return *m.Email - } - return "" -} - -func (m *PropertyValue_UserValue) GetAuthDomain() string { - if m != nil && m.AuthDomain != nil { - return *m.AuthDomain - } - return "" -} - -func (m *PropertyValue_UserValue) GetNickname() string { - if m != nil && m.Nickname != nil { - return *m.Nickname - } - return "" -} - -func (m *PropertyValue_UserValue) GetFederatedIdentity() string { - if m != nil && m.FederatedIdentity != nil { - return *m.FederatedIdentity - } - return "" -} - -func (m *PropertyValue_UserValue) GetFederatedProvider() string { - if m != nil && m.FederatedProvider != nil { - return *m.FederatedProvider - } - return "" -} - -type PropertyValue_ReferenceValue struct { - App *string `protobuf:"bytes,13,req,name=app" json:"app,omitempty"` - NameSpace *string `protobuf:"bytes,20,opt,name=name_space" json:"name_space,omitempty"` - Pathelement []*PropertyValue_ReferenceValue_PathElement `protobuf:"group,14,rep,name=PathElement" json:"pathelement,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *PropertyValue_ReferenceValue) Reset() { *m = PropertyValue_ReferenceValue{} } -func (m *PropertyValue_ReferenceValue) String() string { return proto.CompactTextString(m) } -func (*PropertyValue_ReferenceValue) ProtoMessage() {} - -func (m *PropertyValue_ReferenceValue) GetApp() string { - if m != nil && m.App != nil { - return *m.App - } - return "" -} - -func (m *PropertyValue_ReferenceValue) GetNameSpace() string { - if m != nil && m.NameSpace != nil { - return *m.NameSpace - } - return "" -} - -func (m *PropertyValue_ReferenceValue) GetPathelement() []*PropertyValue_ReferenceValue_PathElement { - if m != nil { - return m.Pathelement - } - return nil -} - -type PropertyValue_ReferenceValue_PathElement struct { - Type *string `protobuf:"bytes,15,req,name=type" json:"type,omitempty"` - Id *int64 `protobuf:"varint,16,opt,name=id" json:"id,omitempty"` - Name *string `protobuf:"bytes,17,opt,name=name" json:"name,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *PropertyValue_ReferenceValue_PathElement) 
Reset() { - *m = PropertyValue_ReferenceValue_PathElement{} -} -func (m *PropertyValue_ReferenceValue_PathElement) String() string { return proto.CompactTextString(m) } -func (*PropertyValue_ReferenceValue_PathElement) ProtoMessage() {} - -func (m *PropertyValue_ReferenceValue_PathElement) GetType() string { - if m != nil && m.Type != nil { - return *m.Type - } - return "" -} - -func (m *PropertyValue_ReferenceValue_PathElement) GetId() int64 { - if m != nil && m.Id != nil { - return *m.Id - } - return 0 -} - -func (m *PropertyValue_ReferenceValue_PathElement) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -type Property struct { - Meaning *Property_Meaning `protobuf:"varint,1,opt,name=meaning,enum=appengine.Property_Meaning,def=0" json:"meaning,omitempty"` - MeaningUri *string `protobuf:"bytes,2,opt,name=meaning_uri" json:"meaning_uri,omitempty"` - Name *string `protobuf:"bytes,3,req,name=name" json:"name,omitempty"` - Value *PropertyValue `protobuf:"bytes,5,req,name=value" json:"value,omitempty"` - Multiple *bool `protobuf:"varint,4,req,name=multiple" json:"multiple,omitempty"` - Searchable *bool `protobuf:"varint,6,opt,name=searchable,def=0" json:"searchable,omitempty"` - FtsTokenizationOption *Property_FtsTokenizationOption `protobuf:"varint,8,opt,name=fts_tokenization_option,enum=appengine.Property_FtsTokenizationOption" json:"fts_tokenization_option,omitempty"` - Locale *string `protobuf:"bytes,9,opt,name=locale,def=en" json:"locale,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Property) Reset() { *m = Property{} } -func (m *Property) String() string { return proto.CompactTextString(m) } -func (*Property) ProtoMessage() {} - -const Default_Property_Meaning Property_Meaning = Property_NO_MEANING -const Default_Property_Searchable bool = false -const Default_Property_Locale string = "en" - -func (m *Property) GetMeaning() Property_Meaning { - if m != nil && m.Meaning != nil { - return *m.Meaning - } - return Default_Property_Meaning -} - -func (m *Property) GetMeaningUri() string { - if m != nil && m.MeaningUri != nil { - return *m.MeaningUri - } - return "" -} - -func (m *Property) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *Property) GetValue() *PropertyValue { - if m != nil { - return m.Value - } - return nil -} - -func (m *Property) GetMultiple() bool { - if m != nil && m.Multiple != nil { - return *m.Multiple - } - return false -} - -func (m *Property) GetSearchable() bool { - if m != nil && m.Searchable != nil { - return *m.Searchable - } - return Default_Property_Searchable -} - -func (m *Property) GetFtsTokenizationOption() Property_FtsTokenizationOption { - if m != nil && m.FtsTokenizationOption != nil { - return *m.FtsTokenizationOption - } - return Property_HTML -} - -func (m *Property) GetLocale() string { - if m != nil && m.Locale != nil { - return *m.Locale - } - return Default_Property_Locale -} - -type Path struct { - Element []*Path_Element `protobuf:"group,1,rep,name=Element" json:"element,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Path) Reset() { *m = Path{} } -func (m *Path) String() string { return proto.CompactTextString(m) } -func (*Path) ProtoMessage() {} - -func (m *Path) GetElement() []*Path_Element { - if m != nil { - return m.Element - } - return nil -} - -type Path_Element struct { - Type *string `protobuf:"bytes,2,req,name=type" json:"type,omitempty"` - Id *int64 `protobuf:"varint,3,opt,name=id" json:"id,omitempty"` - 
Name *string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Path_Element) Reset() { *m = Path_Element{} } -func (m *Path_Element) String() string { return proto.CompactTextString(m) } -func (*Path_Element) ProtoMessage() {} - -func (m *Path_Element) GetType() string { - if m != nil && m.Type != nil { - return *m.Type - } - return "" -} - -func (m *Path_Element) GetId() int64 { - if m != nil && m.Id != nil { - return *m.Id - } - return 0 -} - -func (m *Path_Element) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -type Reference struct { - App *string `protobuf:"bytes,13,req,name=app" json:"app,omitempty"` - NameSpace *string `protobuf:"bytes,20,opt,name=name_space" json:"name_space,omitempty"` - Path *Path `protobuf:"bytes,14,req,name=path" json:"path,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Reference) Reset() { *m = Reference{} } -func (m *Reference) String() string { return proto.CompactTextString(m) } -func (*Reference) ProtoMessage() {} - -func (m *Reference) GetApp() string { - if m != nil && m.App != nil { - return *m.App - } - return "" -} - -func (m *Reference) GetNameSpace() string { - if m != nil && m.NameSpace != nil { - return *m.NameSpace - } - return "" -} - -func (m *Reference) GetPath() *Path { - if m != nil { - return m.Path - } - return nil -} - -type User struct { - Email *string `protobuf:"bytes,1,req,name=email" json:"email,omitempty"` - AuthDomain *string `protobuf:"bytes,2,req,name=auth_domain" json:"auth_domain,omitempty"` - Nickname *string `protobuf:"bytes,3,opt,name=nickname" json:"nickname,omitempty"` - FederatedIdentity *string `protobuf:"bytes,6,opt,name=federated_identity" json:"federated_identity,omitempty"` - FederatedProvider *string `protobuf:"bytes,7,opt,name=federated_provider" json:"federated_provider,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *User) Reset() { *m = User{} } -func (m *User) String() string { return proto.CompactTextString(m) } -func (*User) ProtoMessage() {} - -func (m *User) GetEmail() string { - if m != nil && m.Email != nil { - return *m.Email - } - return "" -} - -func (m *User) GetAuthDomain() string { - if m != nil && m.AuthDomain != nil { - return *m.AuthDomain - } - return "" -} - -func (m *User) GetNickname() string { - if m != nil && m.Nickname != nil { - return *m.Nickname - } - return "" -} - -func (m *User) GetFederatedIdentity() string { - if m != nil && m.FederatedIdentity != nil { - return *m.FederatedIdentity - } - return "" -} - -func (m *User) GetFederatedProvider() string { - if m != nil && m.FederatedProvider != nil { - return *m.FederatedProvider - } - return "" -} - -type EntityProto struct { - Key *Reference `protobuf:"bytes,13,req,name=key" json:"key,omitempty"` - EntityGroup *Path `protobuf:"bytes,16,req,name=entity_group" json:"entity_group,omitempty"` - Owner *User `protobuf:"bytes,17,opt,name=owner" json:"owner,omitempty"` - Kind *EntityProto_Kind `protobuf:"varint,4,opt,name=kind,enum=appengine.EntityProto_Kind" json:"kind,omitempty"` - KindUri *string `protobuf:"bytes,5,opt,name=kind_uri" json:"kind_uri,omitempty"` - Property []*Property `protobuf:"bytes,14,rep,name=property" json:"property,omitempty"` - RawProperty []*Property `protobuf:"bytes,15,rep,name=raw_property" json:"raw_property,omitempty"` - Rank *int32 `protobuf:"varint,18,opt,name=rank" json:"rank,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *EntityProto) Reset() { *m = 
EntityProto{} } -func (m *EntityProto) String() string { return proto.CompactTextString(m) } -func (*EntityProto) ProtoMessage() {} - -func (m *EntityProto) GetKey() *Reference { - if m != nil { - return m.Key - } - return nil -} - -func (m *EntityProto) GetEntityGroup() *Path { - if m != nil { - return m.EntityGroup - } - return nil -} - -func (m *EntityProto) GetOwner() *User { - if m != nil { - return m.Owner - } - return nil -} - -func (m *EntityProto) GetKind() EntityProto_Kind { - if m != nil && m.Kind != nil { - return *m.Kind - } - return EntityProto_GD_CONTACT -} - -func (m *EntityProto) GetKindUri() string { - if m != nil && m.KindUri != nil { - return *m.KindUri - } - return "" -} - -func (m *EntityProto) GetProperty() []*Property { - if m != nil { - return m.Property - } - return nil -} - -func (m *EntityProto) GetRawProperty() []*Property { - if m != nil { - return m.RawProperty - } - return nil -} - -func (m *EntityProto) GetRank() int32 { - if m != nil && m.Rank != nil { - return *m.Rank - } - return 0 -} - -type CompositeProperty struct { - IndexId *int64 `protobuf:"varint,1,req,name=index_id" json:"index_id,omitempty"` - Value []string `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CompositeProperty) Reset() { *m = CompositeProperty{} } -func (m *CompositeProperty) String() string { return proto.CompactTextString(m) } -func (*CompositeProperty) ProtoMessage() {} - -func (m *CompositeProperty) GetIndexId() int64 { - if m != nil && m.IndexId != nil { - return *m.IndexId - } - return 0 -} - -func (m *CompositeProperty) GetValue() []string { - if m != nil { - return m.Value - } - return nil -} - -type Index struct { - EntityType *string `protobuf:"bytes,1,req,name=entity_type" json:"entity_type,omitempty"` - Ancestor *bool `protobuf:"varint,5,req,name=ancestor" json:"ancestor,omitempty"` - Property []*Index_Property `protobuf:"group,2,rep,name=Property" json:"property,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Index) Reset() { *m = Index{} } -func (m *Index) String() string { return proto.CompactTextString(m) } -func (*Index) ProtoMessage() {} - -func (m *Index) GetEntityType() string { - if m != nil && m.EntityType != nil { - return *m.EntityType - } - return "" -} - -func (m *Index) GetAncestor() bool { - if m != nil && m.Ancestor != nil { - return *m.Ancestor - } - return false -} - -func (m *Index) GetProperty() []*Index_Property { - if m != nil { - return m.Property - } - return nil -} - -type Index_Property struct { - Name *string `protobuf:"bytes,3,req,name=name" json:"name,omitempty"` - Direction *Index_Property_Direction `protobuf:"varint,4,opt,name=direction,enum=appengine.Index_Property_Direction,def=1" json:"direction,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Index_Property) Reset() { *m = Index_Property{} } -func (m *Index_Property) String() string { return proto.CompactTextString(m) } -func (*Index_Property) ProtoMessage() {} - -const Default_Index_Property_Direction Index_Property_Direction = Index_Property_ASCENDING - -func (m *Index_Property) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *Index_Property) GetDirection() Index_Property_Direction { - if m != nil && m.Direction != nil { - return *m.Direction - } - return Default_Index_Property_Direction -} - -type CompositeIndex struct { - AppId *string `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"` - Id *int64 `protobuf:"varint,2,req,name=id" 
json:"id,omitempty"` - Definition *Index `protobuf:"bytes,3,req,name=definition" json:"definition,omitempty"` - State *CompositeIndex_State `protobuf:"varint,4,req,name=state,enum=appengine.CompositeIndex_State" json:"state,omitempty"` - OnlyUseIfRequired *bool `protobuf:"varint,6,opt,name=only_use_if_required,def=0" json:"only_use_if_required,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CompositeIndex) Reset() { *m = CompositeIndex{} } -func (m *CompositeIndex) String() string { return proto.CompactTextString(m) } -func (*CompositeIndex) ProtoMessage() {} - -const Default_CompositeIndex_OnlyUseIfRequired bool = false - -func (m *CompositeIndex) GetAppId() string { - if m != nil && m.AppId != nil { - return *m.AppId - } - return "" -} - -func (m *CompositeIndex) GetId() int64 { - if m != nil && m.Id != nil { - return *m.Id - } - return 0 -} - -func (m *CompositeIndex) GetDefinition() *Index { - if m != nil { - return m.Definition - } - return nil -} - -func (m *CompositeIndex) GetState() CompositeIndex_State { - if m != nil && m.State != nil { - return *m.State - } - return CompositeIndex_WRITE_ONLY -} - -func (m *CompositeIndex) GetOnlyUseIfRequired() bool { - if m != nil && m.OnlyUseIfRequired != nil { - return *m.OnlyUseIfRequired - } - return Default_CompositeIndex_OnlyUseIfRequired -} - -type IndexPostfix struct { - IndexValue []*IndexPostfix_IndexValue `protobuf:"bytes,1,rep,name=index_value" json:"index_value,omitempty"` - Key *Reference `protobuf:"bytes,2,opt,name=key" json:"key,omitempty"` - Before *bool `protobuf:"varint,3,opt,name=before,def=1" json:"before,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *IndexPostfix) Reset() { *m = IndexPostfix{} } -func (m *IndexPostfix) String() string { return proto.CompactTextString(m) } -func (*IndexPostfix) ProtoMessage() {} - -const Default_IndexPostfix_Before bool = true - -func (m *IndexPostfix) GetIndexValue() []*IndexPostfix_IndexValue { - if m != nil { - return m.IndexValue - } - return nil -} - -func (m *IndexPostfix) GetKey() *Reference { - if m != nil { - return m.Key - } - return nil -} - -func (m *IndexPostfix) GetBefore() bool { - if m != nil && m.Before != nil { - return *m.Before - } - return Default_IndexPostfix_Before -} - -type IndexPostfix_IndexValue struct { - PropertyName *string `protobuf:"bytes,1,req,name=property_name" json:"property_name,omitempty"` - Value *PropertyValue `protobuf:"bytes,2,req,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *IndexPostfix_IndexValue) Reset() { *m = IndexPostfix_IndexValue{} } -func (m *IndexPostfix_IndexValue) String() string { return proto.CompactTextString(m) } -func (*IndexPostfix_IndexValue) ProtoMessage() {} - -func (m *IndexPostfix_IndexValue) GetPropertyName() string { - if m != nil && m.PropertyName != nil { - return *m.PropertyName - } - return "" -} - -func (m *IndexPostfix_IndexValue) GetValue() *PropertyValue { - if m != nil { - return m.Value - } - return nil -} - -type IndexPosition struct { - Key *string `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"` - Before *bool `protobuf:"varint,2,opt,name=before,def=1" json:"before,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *IndexPosition) Reset() { *m = IndexPosition{} } -func (m *IndexPosition) String() string { return proto.CompactTextString(m) } -func (*IndexPosition) ProtoMessage() {} - -const Default_IndexPosition_Before bool = true - -func (m *IndexPosition) GetKey() string { - if m != nil && m.Key != nil { - return 
*m.Key - } - return "" -} - -func (m *IndexPosition) GetBefore() bool { - if m != nil && m.Before != nil { - return *m.Before - } - return Default_IndexPosition_Before -} - -type Snapshot struct { - Ts *int64 `protobuf:"varint,1,req,name=ts" json:"ts,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Snapshot) Reset() { *m = Snapshot{} } -func (m *Snapshot) String() string { return proto.CompactTextString(m) } -func (*Snapshot) ProtoMessage() {} - -func (m *Snapshot) GetTs() int64 { - if m != nil && m.Ts != nil { - return *m.Ts - } - return 0 -} - -type InternalHeader struct { - Qos *string `protobuf:"bytes,1,opt,name=qos" json:"qos,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *InternalHeader) Reset() { *m = InternalHeader{} } -func (m *InternalHeader) String() string { return proto.CompactTextString(m) } -func (*InternalHeader) ProtoMessage() {} - -func (m *InternalHeader) GetQos() string { - if m != nil && m.Qos != nil { - return *m.Qos - } - return "" -} - -type Transaction struct { - Header *InternalHeader `protobuf:"bytes,4,opt,name=header" json:"header,omitempty"` - Handle *uint64 `protobuf:"fixed64,1,req,name=handle" json:"handle,omitempty"` - App *string `protobuf:"bytes,2,req,name=app" json:"app,omitempty"` - MarkChanges *bool `protobuf:"varint,3,opt,name=mark_changes,def=0" json:"mark_changes,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Transaction) Reset() { *m = Transaction{} } -func (m *Transaction) String() string { return proto.CompactTextString(m) } -func (*Transaction) ProtoMessage() {} - -const Default_Transaction_MarkChanges bool = false - -func (m *Transaction) GetHeader() *InternalHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *Transaction) GetHandle() uint64 { - if m != nil && m.Handle != nil { - return *m.Handle - } - return 0 -} - -func (m *Transaction) GetApp() string { - if m != nil && m.App != nil { - return *m.App - } - return "" -} - -func (m *Transaction) GetMarkChanges() bool { - if m != nil && m.MarkChanges != nil { - return *m.MarkChanges - } - return Default_Transaction_MarkChanges -} - -type Query struct { - Header *InternalHeader `protobuf:"bytes,39,opt,name=header" json:"header,omitempty"` - App *string `protobuf:"bytes,1,req,name=app" json:"app,omitempty"` - NameSpace *string `protobuf:"bytes,29,opt,name=name_space" json:"name_space,omitempty"` - Kind *string `protobuf:"bytes,3,opt,name=kind" json:"kind,omitempty"` - Ancestor *Reference `protobuf:"bytes,17,opt,name=ancestor" json:"ancestor,omitempty"` - Filter []*Query_Filter `protobuf:"group,4,rep,name=Filter" json:"filter,omitempty"` - SearchQuery *string `protobuf:"bytes,8,opt,name=search_query" json:"search_query,omitempty"` - Order []*Query_Order `protobuf:"group,9,rep,name=Order" json:"order,omitempty"` - Hint *Query_Hint `protobuf:"varint,18,opt,name=hint,enum=appengine.Query_Hint" json:"hint,omitempty"` - Count *int32 `protobuf:"varint,23,opt,name=count" json:"count,omitempty"` - Offset *int32 `protobuf:"varint,12,opt,name=offset,def=0" json:"offset,omitempty"` - Limit *int32 `protobuf:"varint,16,opt,name=limit" json:"limit,omitempty"` - CompiledCursor *CompiledCursor `protobuf:"bytes,30,opt,name=compiled_cursor" json:"compiled_cursor,omitempty"` - EndCompiledCursor *CompiledCursor `protobuf:"bytes,31,opt,name=end_compiled_cursor" json:"end_compiled_cursor,omitempty"` - CompositeIndex []*CompositeIndex `protobuf:"bytes,19,rep,name=composite_index" json:"composite_index,omitempty"` - RequirePerfectPlan *bool 
`protobuf:"varint,20,opt,name=require_perfect_plan,def=0" json:"require_perfect_plan,omitempty"` - KeysOnly *bool `protobuf:"varint,21,opt,name=keys_only,def=0" json:"keys_only,omitempty"` - Transaction *Transaction `protobuf:"bytes,22,opt,name=transaction" json:"transaction,omitempty"` - Compile *bool `protobuf:"varint,25,opt,name=compile,def=0" json:"compile,omitempty"` - FailoverMs *int64 `protobuf:"varint,26,opt,name=failover_ms" json:"failover_ms,omitempty"` - Strong *bool `protobuf:"varint,32,opt,name=strong" json:"strong,omitempty"` - PropertyName []string `protobuf:"bytes,33,rep,name=property_name" json:"property_name,omitempty"` - GroupByPropertyName []string `protobuf:"bytes,34,rep,name=group_by_property_name" json:"group_by_property_name,omitempty"` - Distinct *bool `protobuf:"varint,24,opt,name=distinct" json:"distinct,omitempty"` - MinSafeTimeSeconds *int64 `protobuf:"varint,35,opt,name=min_safe_time_seconds" json:"min_safe_time_seconds,omitempty"` - SafeReplicaName []string `protobuf:"bytes,36,rep,name=safe_replica_name" json:"safe_replica_name,omitempty"` - PersistOffset *bool `protobuf:"varint,37,opt,name=persist_offset,def=0" json:"persist_offset,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Query) Reset() { *m = Query{} } -func (m *Query) String() string { return proto.CompactTextString(m) } -func (*Query) ProtoMessage() {} - -const Default_Query_Offset int32 = 0 -const Default_Query_RequirePerfectPlan bool = false -const Default_Query_KeysOnly bool = false -const Default_Query_Compile bool = false -const Default_Query_PersistOffset bool = false - -func (m *Query) GetHeader() *InternalHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *Query) GetApp() string { - if m != nil && m.App != nil { - return *m.App - } - return "" -} - -func (m *Query) GetNameSpace() string { - if m != nil && m.NameSpace != nil { - return *m.NameSpace - } - return "" -} - -func (m *Query) GetKind() string { - if m != nil && m.Kind != nil { - return *m.Kind - } - return "" -} - -func (m *Query) GetAncestor() *Reference { - if m != nil { - return m.Ancestor - } - return nil -} - -func (m *Query) GetFilter() []*Query_Filter { - if m != nil { - return m.Filter - } - return nil -} - -func (m *Query) GetSearchQuery() string { - if m != nil && m.SearchQuery != nil { - return *m.SearchQuery - } - return "" -} - -func (m *Query) GetOrder() []*Query_Order { - if m != nil { - return m.Order - } - return nil -} - -func (m *Query) GetHint() Query_Hint { - if m != nil && m.Hint != nil { - return *m.Hint - } - return Query_ORDER_FIRST -} - -func (m *Query) GetCount() int32 { - if m != nil && m.Count != nil { - return *m.Count - } - return 0 -} - -func (m *Query) GetOffset() int32 { - if m != nil && m.Offset != nil { - return *m.Offset - } - return Default_Query_Offset -} - -func (m *Query) GetLimit() int32 { - if m != nil && m.Limit != nil { - return *m.Limit - } - return 0 -} - -func (m *Query) GetCompiledCursor() *CompiledCursor { - if m != nil { - return m.CompiledCursor - } - return nil -} - -func (m *Query) GetEndCompiledCursor() *CompiledCursor { - if m != nil { - return m.EndCompiledCursor - } - return nil -} - -func (m *Query) GetCompositeIndex() []*CompositeIndex { - if m != nil { - return m.CompositeIndex - } - return nil -} - -func (m *Query) GetRequirePerfectPlan() bool { - if m != nil && m.RequirePerfectPlan != nil { - return *m.RequirePerfectPlan - } - return Default_Query_RequirePerfectPlan -} - -func (m *Query) GetKeysOnly() bool { - if m != nil && 
m.KeysOnly != nil { - return *m.KeysOnly - } - return Default_Query_KeysOnly -} - -func (m *Query) GetTransaction() *Transaction { - if m != nil { - return m.Transaction - } - return nil -} - -func (m *Query) GetCompile() bool { - if m != nil && m.Compile != nil { - return *m.Compile - } - return Default_Query_Compile -} - -func (m *Query) GetFailoverMs() int64 { - if m != nil && m.FailoverMs != nil { - return *m.FailoverMs - } - return 0 -} - -func (m *Query) GetStrong() bool { - if m != nil && m.Strong != nil { - return *m.Strong - } - return false -} - -func (m *Query) GetPropertyName() []string { - if m != nil { - return m.PropertyName - } - return nil -} - -func (m *Query) GetGroupByPropertyName() []string { - if m != nil { - return m.GroupByPropertyName - } - return nil -} - -func (m *Query) GetDistinct() bool { - if m != nil && m.Distinct != nil { - return *m.Distinct - } - return false -} - -func (m *Query) GetMinSafeTimeSeconds() int64 { - if m != nil && m.MinSafeTimeSeconds != nil { - return *m.MinSafeTimeSeconds - } - return 0 -} - -func (m *Query) GetSafeReplicaName() []string { - if m != nil { - return m.SafeReplicaName - } - return nil -} - -func (m *Query) GetPersistOffset() bool { - if m != nil && m.PersistOffset != nil { - return *m.PersistOffset - } - return Default_Query_PersistOffset -} - -type Query_Filter struct { - Op *Query_Filter_Operator `protobuf:"varint,6,req,name=op,enum=appengine.Query_Filter_Operator" json:"op,omitempty"` - Property []*Property `protobuf:"bytes,14,rep,name=property" json:"property,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Query_Filter) Reset() { *m = Query_Filter{} } -func (m *Query_Filter) String() string { return proto.CompactTextString(m) } -func (*Query_Filter) ProtoMessage() {} - -func (m *Query_Filter) GetOp() Query_Filter_Operator { - if m != nil && m.Op != nil { - return *m.Op - } - return Query_Filter_LESS_THAN -} - -func (m *Query_Filter) GetProperty() []*Property { - if m != nil { - return m.Property - } - return nil -} - -type Query_Order struct { - Property *string `protobuf:"bytes,10,req,name=property" json:"property,omitempty"` - Direction *Query_Order_Direction `protobuf:"varint,11,opt,name=direction,enum=appengine.Query_Order_Direction,def=1" json:"direction,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Query_Order) Reset() { *m = Query_Order{} } -func (m *Query_Order) String() string { return proto.CompactTextString(m) } -func (*Query_Order) ProtoMessage() {} - -const Default_Query_Order_Direction Query_Order_Direction = Query_Order_ASCENDING - -func (m *Query_Order) GetProperty() string { - if m != nil && m.Property != nil { - return *m.Property - } - return "" -} - -func (m *Query_Order) GetDirection() Query_Order_Direction { - if m != nil && m.Direction != nil { - return *m.Direction - } - return Default_Query_Order_Direction -} - -type CompiledQuery struct { - Primaryscan *CompiledQuery_PrimaryScan `protobuf:"group,1,req,name=PrimaryScan" json:"primaryscan,omitempty"` - Mergejoinscan []*CompiledQuery_MergeJoinScan `protobuf:"group,7,rep,name=MergeJoinScan" json:"mergejoinscan,omitempty"` - IndexDef *Index `protobuf:"bytes,21,opt,name=index_def" json:"index_def,omitempty"` - Offset *int32 `protobuf:"varint,10,opt,name=offset,def=0" json:"offset,omitempty"` - Limit *int32 `protobuf:"varint,11,opt,name=limit" json:"limit,omitempty"` - KeysOnly *bool `protobuf:"varint,12,req,name=keys_only" json:"keys_only,omitempty"` - PropertyName []string `protobuf:"bytes,24,rep,name=property_name" 
json:"property_name,omitempty"` - DistinctInfixSize *int32 `protobuf:"varint,25,opt,name=distinct_infix_size" json:"distinct_infix_size,omitempty"` - Entityfilter *CompiledQuery_EntityFilter `protobuf:"group,13,opt,name=EntityFilter" json:"entityfilter,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CompiledQuery) Reset() { *m = CompiledQuery{} } -func (m *CompiledQuery) String() string { return proto.CompactTextString(m) } -func (*CompiledQuery) ProtoMessage() {} - -const Default_CompiledQuery_Offset int32 = 0 - -func (m *CompiledQuery) GetPrimaryscan() *CompiledQuery_PrimaryScan { - if m != nil { - return m.Primaryscan - } - return nil -} - -func (m *CompiledQuery) GetMergejoinscan() []*CompiledQuery_MergeJoinScan { - if m != nil { - return m.Mergejoinscan - } - return nil -} - -func (m *CompiledQuery) GetIndexDef() *Index { - if m != nil { - return m.IndexDef - } - return nil -} - -func (m *CompiledQuery) GetOffset() int32 { - if m != nil && m.Offset != nil { - return *m.Offset - } - return Default_CompiledQuery_Offset -} - -func (m *CompiledQuery) GetLimit() int32 { - if m != nil && m.Limit != nil { - return *m.Limit - } - return 0 -} - -func (m *CompiledQuery) GetKeysOnly() bool { - if m != nil && m.KeysOnly != nil { - return *m.KeysOnly - } - return false -} - -func (m *CompiledQuery) GetPropertyName() []string { - if m != nil { - return m.PropertyName - } - return nil -} - -func (m *CompiledQuery) GetDistinctInfixSize() int32 { - if m != nil && m.DistinctInfixSize != nil { - return *m.DistinctInfixSize - } - return 0 -} - -func (m *CompiledQuery) GetEntityfilter() *CompiledQuery_EntityFilter { - if m != nil { - return m.Entityfilter - } - return nil -} - -type CompiledQuery_PrimaryScan struct { - IndexName *string `protobuf:"bytes,2,opt,name=index_name" json:"index_name,omitempty"` - StartKey *string `protobuf:"bytes,3,opt,name=start_key" json:"start_key,omitempty"` - StartInclusive *bool `protobuf:"varint,4,opt,name=start_inclusive" json:"start_inclusive,omitempty"` - EndKey *string `protobuf:"bytes,5,opt,name=end_key" json:"end_key,omitempty"` - EndInclusive *bool `protobuf:"varint,6,opt,name=end_inclusive" json:"end_inclusive,omitempty"` - StartPostfixValue []string `protobuf:"bytes,22,rep,name=start_postfix_value" json:"start_postfix_value,omitempty"` - EndPostfixValue []string `protobuf:"bytes,23,rep,name=end_postfix_value" json:"end_postfix_value,omitempty"` - EndUnappliedLogTimestampUs *int64 `protobuf:"varint,19,opt,name=end_unapplied_log_timestamp_us" json:"end_unapplied_log_timestamp_us,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CompiledQuery_PrimaryScan) Reset() { *m = CompiledQuery_PrimaryScan{} } -func (m *CompiledQuery_PrimaryScan) String() string { return proto.CompactTextString(m) } -func (*CompiledQuery_PrimaryScan) ProtoMessage() {} - -func (m *CompiledQuery_PrimaryScan) GetIndexName() string { - if m != nil && m.IndexName != nil { - return *m.IndexName - } - return "" -} - -func (m *CompiledQuery_PrimaryScan) GetStartKey() string { - if m != nil && m.StartKey != nil { - return *m.StartKey - } - return "" -} - -func (m *CompiledQuery_PrimaryScan) GetStartInclusive() bool { - if m != nil && m.StartInclusive != nil { - return *m.StartInclusive - } - return false -} - -func (m *CompiledQuery_PrimaryScan) GetEndKey() string { - if m != nil && m.EndKey != nil { - return *m.EndKey - } - return "" -} - -func (m *CompiledQuery_PrimaryScan) GetEndInclusive() bool { - if m != nil && m.EndInclusive != nil { - return *m.EndInclusive - } - 
return false -} - -func (m *CompiledQuery_PrimaryScan) GetStartPostfixValue() []string { - if m != nil { - return m.StartPostfixValue - } - return nil -} - -func (m *CompiledQuery_PrimaryScan) GetEndPostfixValue() []string { - if m != nil { - return m.EndPostfixValue - } - return nil -} - -func (m *CompiledQuery_PrimaryScan) GetEndUnappliedLogTimestampUs() int64 { - if m != nil && m.EndUnappliedLogTimestampUs != nil { - return *m.EndUnappliedLogTimestampUs - } - return 0 -} - -type CompiledQuery_MergeJoinScan struct { - IndexName *string `protobuf:"bytes,8,req,name=index_name" json:"index_name,omitempty"` - PrefixValue []string `protobuf:"bytes,9,rep,name=prefix_value" json:"prefix_value,omitempty"` - ValuePrefix *bool `protobuf:"varint,20,opt,name=value_prefix,def=0" json:"value_prefix,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CompiledQuery_MergeJoinScan) Reset() { *m = CompiledQuery_MergeJoinScan{} } -func (m *CompiledQuery_MergeJoinScan) String() string { return proto.CompactTextString(m) } -func (*CompiledQuery_MergeJoinScan) ProtoMessage() {} - -const Default_CompiledQuery_MergeJoinScan_ValuePrefix bool = false - -func (m *CompiledQuery_MergeJoinScan) GetIndexName() string { - if m != nil && m.IndexName != nil { - return *m.IndexName - } - return "" -} - -func (m *CompiledQuery_MergeJoinScan) GetPrefixValue() []string { - if m != nil { - return m.PrefixValue - } - return nil -} - -func (m *CompiledQuery_MergeJoinScan) GetValuePrefix() bool { - if m != nil && m.ValuePrefix != nil { - return *m.ValuePrefix - } - return Default_CompiledQuery_MergeJoinScan_ValuePrefix -} - -type CompiledQuery_EntityFilter struct { - Distinct *bool `protobuf:"varint,14,opt,name=distinct,def=0" json:"distinct,omitempty"` - Kind *string `protobuf:"bytes,17,opt,name=kind" json:"kind,omitempty"` - Ancestor *Reference `protobuf:"bytes,18,opt,name=ancestor" json:"ancestor,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CompiledQuery_EntityFilter) Reset() { *m = CompiledQuery_EntityFilter{} } -func (m *CompiledQuery_EntityFilter) String() string { return proto.CompactTextString(m) } -func (*CompiledQuery_EntityFilter) ProtoMessage() {} - -const Default_CompiledQuery_EntityFilter_Distinct bool = false - -func (m *CompiledQuery_EntityFilter) GetDistinct() bool { - if m != nil && m.Distinct != nil { - return *m.Distinct - } - return Default_CompiledQuery_EntityFilter_Distinct -} - -func (m *CompiledQuery_EntityFilter) GetKind() string { - if m != nil && m.Kind != nil { - return *m.Kind - } - return "" -} - -func (m *CompiledQuery_EntityFilter) GetAncestor() *Reference { - if m != nil { - return m.Ancestor - } - return nil -} - -type CompiledCursor struct { - Position *CompiledCursor_Position `protobuf:"group,2,opt,name=Position" json:"position,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CompiledCursor) Reset() { *m = CompiledCursor{} } -func (m *CompiledCursor) String() string { return proto.CompactTextString(m) } -func (*CompiledCursor) ProtoMessage() {} - -func (m *CompiledCursor) GetPosition() *CompiledCursor_Position { - if m != nil { - return m.Position - } - return nil -} - -type CompiledCursor_Position struct { - StartKey *string `protobuf:"bytes,27,opt,name=start_key" json:"start_key,omitempty"` - Indexvalue []*CompiledCursor_Position_IndexValue `protobuf:"group,29,rep,name=IndexValue" json:"indexvalue,omitempty"` - Key *Reference `protobuf:"bytes,32,opt,name=key" json:"key,omitempty"` - StartInclusive *bool 
`protobuf:"varint,28,opt,name=start_inclusive,def=1" json:"start_inclusive,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CompiledCursor_Position) Reset() { *m = CompiledCursor_Position{} } -func (m *CompiledCursor_Position) String() string { return proto.CompactTextString(m) } -func (*CompiledCursor_Position) ProtoMessage() {} - -const Default_CompiledCursor_Position_StartInclusive bool = true - -func (m *CompiledCursor_Position) GetStartKey() string { - if m != nil && m.StartKey != nil { - return *m.StartKey - } - return "" -} - -func (m *CompiledCursor_Position) GetIndexvalue() []*CompiledCursor_Position_IndexValue { - if m != nil { - return m.Indexvalue - } - return nil -} - -func (m *CompiledCursor_Position) GetKey() *Reference { - if m != nil { - return m.Key - } - return nil -} - -func (m *CompiledCursor_Position) GetStartInclusive() bool { - if m != nil && m.StartInclusive != nil { - return *m.StartInclusive - } - return Default_CompiledCursor_Position_StartInclusive -} - -type CompiledCursor_Position_IndexValue struct { - Property *string `protobuf:"bytes,30,opt,name=property" json:"property,omitempty"` - Value *PropertyValue `protobuf:"bytes,31,req,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CompiledCursor_Position_IndexValue) Reset() { *m = CompiledCursor_Position_IndexValue{} } -func (m *CompiledCursor_Position_IndexValue) String() string { return proto.CompactTextString(m) } -func (*CompiledCursor_Position_IndexValue) ProtoMessage() {} - -func (m *CompiledCursor_Position_IndexValue) GetProperty() string { - if m != nil && m.Property != nil { - return *m.Property - } - return "" -} - -func (m *CompiledCursor_Position_IndexValue) GetValue() *PropertyValue { - if m != nil { - return m.Value - } - return nil -} - -type Cursor struct { - Cursor *uint64 `protobuf:"fixed64,1,req,name=cursor" json:"cursor,omitempty"` - App *string `protobuf:"bytes,2,opt,name=app" json:"app,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Cursor) Reset() { *m = Cursor{} } -func (m *Cursor) String() string { return proto.CompactTextString(m) } -func (*Cursor) ProtoMessage() {} - -func (m *Cursor) GetCursor() uint64 { - if m != nil && m.Cursor != nil { - return *m.Cursor - } - return 0 -} - -func (m *Cursor) GetApp() string { - if m != nil && m.App != nil { - return *m.App - } - return "" -} - -type Error struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *Error) Reset() { *m = Error{} } -func (m *Error) String() string { return proto.CompactTextString(m) } -func (*Error) ProtoMessage() {} - -type Cost struct { - IndexWrites *int32 `protobuf:"varint,1,opt,name=index_writes" json:"index_writes,omitempty"` - IndexWriteBytes *int32 `protobuf:"varint,2,opt,name=index_write_bytes" json:"index_write_bytes,omitempty"` - EntityWrites *int32 `protobuf:"varint,3,opt,name=entity_writes" json:"entity_writes,omitempty"` - EntityWriteBytes *int32 `protobuf:"varint,4,opt,name=entity_write_bytes" json:"entity_write_bytes,omitempty"` - Commitcost *Cost_CommitCost `protobuf:"group,5,opt,name=CommitCost" json:"commitcost,omitempty"` - ApproximateStorageDelta *int32 `protobuf:"varint,8,opt,name=approximate_storage_delta" json:"approximate_storage_delta,omitempty"` - IdSequenceUpdates *int32 `protobuf:"varint,9,opt,name=id_sequence_updates" json:"id_sequence_updates,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Cost) Reset() { *m = Cost{} } -func (m *Cost) String() string { return proto.CompactTextString(m) } -func (*Cost) 
ProtoMessage() {} - -func (m *Cost) GetIndexWrites() int32 { - if m != nil && m.IndexWrites != nil { - return *m.IndexWrites - } - return 0 -} - -func (m *Cost) GetIndexWriteBytes() int32 { - if m != nil && m.IndexWriteBytes != nil { - return *m.IndexWriteBytes - } - return 0 -} - -func (m *Cost) GetEntityWrites() int32 { - if m != nil && m.EntityWrites != nil { - return *m.EntityWrites - } - return 0 -} - -func (m *Cost) GetEntityWriteBytes() int32 { - if m != nil && m.EntityWriteBytes != nil { - return *m.EntityWriteBytes - } - return 0 -} - -func (m *Cost) GetCommitcost() *Cost_CommitCost { - if m != nil { - return m.Commitcost - } - return nil -} - -func (m *Cost) GetApproximateStorageDelta() int32 { - if m != nil && m.ApproximateStorageDelta != nil { - return *m.ApproximateStorageDelta - } - return 0 -} - -func (m *Cost) GetIdSequenceUpdates() int32 { - if m != nil && m.IdSequenceUpdates != nil { - return *m.IdSequenceUpdates - } - return 0 -} - -type Cost_CommitCost struct { - RequestedEntityPuts *int32 `protobuf:"varint,6,opt,name=requested_entity_puts" json:"requested_entity_puts,omitempty"` - RequestedEntityDeletes *int32 `protobuf:"varint,7,opt,name=requested_entity_deletes" json:"requested_entity_deletes,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Cost_CommitCost) Reset() { *m = Cost_CommitCost{} } -func (m *Cost_CommitCost) String() string { return proto.CompactTextString(m) } -func (*Cost_CommitCost) ProtoMessage() {} - -func (m *Cost_CommitCost) GetRequestedEntityPuts() int32 { - if m != nil && m.RequestedEntityPuts != nil { - return *m.RequestedEntityPuts - } - return 0 -} - -func (m *Cost_CommitCost) GetRequestedEntityDeletes() int32 { - if m != nil && m.RequestedEntityDeletes != nil { - return *m.RequestedEntityDeletes - } - return 0 -} - -type GetRequest struct { - Header *InternalHeader `protobuf:"bytes,6,opt,name=header" json:"header,omitempty"` - Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"` - Transaction *Transaction `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"` - FailoverMs *int64 `protobuf:"varint,3,opt,name=failover_ms" json:"failover_ms,omitempty"` - Strong *bool `protobuf:"varint,4,opt,name=strong" json:"strong,omitempty"` - AllowDeferred *bool `protobuf:"varint,5,opt,name=allow_deferred,def=0" json:"allow_deferred,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetRequest) Reset() { *m = GetRequest{} } -func (m *GetRequest) String() string { return proto.CompactTextString(m) } -func (*GetRequest) ProtoMessage() {} - -const Default_GetRequest_AllowDeferred bool = false - -func (m *GetRequest) GetHeader() *InternalHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *GetRequest) GetKey() []*Reference { - if m != nil { - return m.Key - } - return nil -} - -func (m *GetRequest) GetTransaction() *Transaction { - if m != nil { - return m.Transaction - } - return nil -} - -func (m *GetRequest) GetFailoverMs() int64 { - if m != nil && m.FailoverMs != nil { - return *m.FailoverMs - } - return 0 -} - -func (m *GetRequest) GetStrong() bool { - if m != nil && m.Strong != nil { - return *m.Strong - } - return false -} - -func (m *GetRequest) GetAllowDeferred() bool { - if m != nil && m.AllowDeferred != nil { - return *m.AllowDeferred - } - return Default_GetRequest_AllowDeferred -} - -type GetResponse struct { - Entity []*GetResponse_Entity `protobuf:"group,1,rep,name=Entity" json:"entity,omitempty"` - Deferred []*Reference `protobuf:"bytes,5,rep,name=deferred" 
json:"deferred,omitempty"` - InOrder *bool `protobuf:"varint,6,opt,name=in_order,def=1" json:"in_order,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetResponse) Reset() { *m = GetResponse{} } -func (m *GetResponse) String() string { return proto.CompactTextString(m) } -func (*GetResponse) ProtoMessage() {} - -const Default_GetResponse_InOrder bool = true - -func (m *GetResponse) GetEntity() []*GetResponse_Entity { - if m != nil { - return m.Entity - } - return nil -} - -func (m *GetResponse) GetDeferred() []*Reference { - if m != nil { - return m.Deferred - } - return nil -} - -func (m *GetResponse) GetInOrder() bool { - if m != nil && m.InOrder != nil { - return *m.InOrder - } - return Default_GetResponse_InOrder -} - -type GetResponse_Entity struct { - Entity *EntityProto `protobuf:"bytes,2,opt,name=entity" json:"entity,omitempty"` - Key *Reference `protobuf:"bytes,4,opt,name=key" json:"key,omitempty"` - Version *int64 `protobuf:"varint,3,opt,name=version" json:"version,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetResponse_Entity) Reset() { *m = GetResponse_Entity{} } -func (m *GetResponse_Entity) String() string { return proto.CompactTextString(m) } -func (*GetResponse_Entity) ProtoMessage() {} - -func (m *GetResponse_Entity) GetEntity() *EntityProto { - if m != nil { - return m.Entity - } - return nil -} - -func (m *GetResponse_Entity) GetKey() *Reference { - if m != nil { - return m.Key - } - return nil -} - -func (m *GetResponse_Entity) GetVersion() int64 { - if m != nil && m.Version != nil { - return *m.Version - } - return 0 -} - -type PutRequest struct { - Header *InternalHeader `protobuf:"bytes,11,opt,name=header" json:"header,omitempty"` - Entity []*EntityProto `protobuf:"bytes,1,rep,name=entity" json:"entity,omitempty"` - Transaction *Transaction `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"` - CompositeIndex []*CompositeIndex `protobuf:"bytes,3,rep,name=composite_index" json:"composite_index,omitempty"` - Trusted *bool `protobuf:"varint,4,opt,name=trusted,def=0" json:"trusted,omitempty"` - Force *bool `protobuf:"varint,7,opt,name=force,def=0" json:"force,omitempty"` - MarkChanges *bool `protobuf:"varint,8,opt,name=mark_changes,def=0" json:"mark_changes,omitempty"` - Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"` - AutoIdPolicy *PutRequest_AutoIdPolicy `protobuf:"varint,10,opt,name=auto_id_policy,enum=appengine.PutRequest_AutoIdPolicy,def=0" json:"auto_id_policy,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *PutRequest) Reset() { *m = PutRequest{} } -func (m *PutRequest) String() string { return proto.CompactTextString(m) } -func (*PutRequest) ProtoMessage() {} - -const Default_PutRequest_Trusted bool = false -const Default_PutRequest_Force bool = false -const Default_PutRequest_MarkChanges bool = false -const Default_PutRequest_AutoIdPolicy PutRequest_AutoIdPolicy = PutRequest_CURRENT - -func (m *PutRequest) GetHeader() *InternalHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *PutRequest) GetEntity() []*EntityProto { - if m != nil { - return m.Entity - } - return nil -} - -func (m *PutRequest) GetTransaction() *Transaction { - if m != nil { - return m.Transaction - } - return nil -} - -func (m *PutRequest) GetCompositeIndex() []*CompositeIndex { - if m != nil { - return m.CompositeIndex - } - return nil -} - -func (m *PutRequest) GetTrusted() bool { - if m != nil && m.Trusted != nil { - return *m.Trusted - } - return 
Default_PutRequest_Trusted -} - -func (m *PutRequest) GetForce() bool { - if m != nil && m.Force != nil { - return *m.Force - } - return Default_PutRequest_Force -} - -func (m *PutRequest) GetMarkChanges() bool { - if m != nil && m.MarkChanges != nil { - return *m.MarkChanges - } - return Default_PutRequest_MarkChanges -} - -func (m *PutRequest) GetSnapshot() []*Snapshot { - if m != nil { - return m.Snapshot - } - return nil -} - -func (m *PutRequest) GetAutoIdPolicy() PutRequest_AutoIdPolicy { - if m != nil && m.AutoIdPolicy != nil { - return *m.AutoIdPolicy - } - return Default_PutRequest_AutoIdPolicy -} - -type PutResponse struct { - Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"` - Cost *Cost `protobuf:"bytes,2,opt,name=cost" json:"cost,omitempty"` - Version []int64 `protobuf:"varint,3,rep,name=version" json:"version,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *PutResponse) Reset() { *m = PutResponse{} } -func (m *PutResponse) String() string { return proto.CompactTextString(m) } -func (*PutResponse) ProtoMessage() {} - -func (m *PutResponse) GetKey() []*Reference { - if m != nil { - return m.Key - } - return nil -} - -func (m *PutResponse) GetCost() *Cost { - if m != nil { - return m.Cost - } - return nil -} - -func (m *PutResponse) GetVersion() []int64 { - if m != nil { - return m.Version - } - return nil -} - -type TouchRequest struct { - Header *InternalHeader `protobuf:"bytes,10,opt,name=header" json:"header,omitempty"` - Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"` - CompositeIndex []*CompositeIndex `protobuf:"bytes,2,rep,name=composite_index" json:"composite_index,omitempty"` - Force *bool `protobuf:"varint,3,opt,name=force,def=0" json:"force,omitempty"` - Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *TouchRequest) Reset() { *m = TouchRequest{} } -func (m *TouchRequest) String() string { return proto.CompactTextString(m) } -func (*TouchRequest) ProtoMessage() {} - -const Default_TouchRequest_Force bool = false - -func (m *TouchRequest) GetHeader() *InternalHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *TouchRequest) GetKey() []*Reference { - if m != nil { - return m.Key - } - return nil -} - -func (m *TouchRequest) GetCompositeIndex() []*CompositeIndex { - if m != nil { - return m.CompositeIndex - } - return nil -} - -func (m *TouchRequest) GetForce() bool { - if m != nil && m.Force != nil { - return *m.Force - } - return Default_TouchRequest_Force -} - -func (m *TouchRequest) GetSnapshot() []*Snapshot { - if m != nil { - return m.Snapshot - } - return nil -} - -type TouchResponse struct { - Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *TouchResponse) Reset() { *m = TouchResponse{} } -func (m *TouchResponse) String() string { return proto.CompactTextString(m) } -func (*TouchResponse) ProtoMessage() {} - -func (m *TouchResponse) GetCost() *Cost { - if m != nil { - return m.Cost - } - return nil -} - -type DeleteRequest struct { - Header *InternalHeader `protobuf:"bytes,10,opt,name=header" json:"header,omitempty"` - Key []*Reference `protobuf:"bytes,6,rep,name=key" json:"key,omitempty"` - Transaction *Transaction `protobuf:"bytes,5,opt,name=transaction" json:"transaction,omitempty"` - Trusted *bool `protobuf:"varint,4,opt,name=trusted,def=0" json:"trusted,omitempty"` - Force *bool `protobuf:"varint,7,opt,name=force,def=0" 
json:"force,omitempty"` - MarkChanges *bool `protobuf:"varint,8,opt,name=mark_changes,def=0" json:"mark_changes,omitempty"` - Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *DeleteRequest) Reset() { *m = DeleteRequest{} } -func (m *DeleteRequest) String() string { return proto.CompactTextString(m) } -func (*DeleteRequest) ProtoMessage() {} - -const Default_DeleteRequest_Trusted bool = false -const Default_DeleteRequest_Force bool = false -const Default_DeleteRequest_MarkChanges bool = false - -func (m *DeleteRequest) GetHeader() *InternalHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *DeleteRequest) GetKey() []*Reference { - if m != nil { - return m.Key - } - return nil -} - -func (m *DeleteRequest) GetTransaction() *Transaction { - if m != nil { - return m.Transaction - } - return nil -} - -func (m *DeleteRequest) GetTrusted() bool { - if m != nil && m.Trusted != nil { - return *m.Trusted - } - return Default_DeleteRequest_Trusted -} - -func (m *DeleteRequest) GetForce() bool { - if m != nil && m.Force != nil { - return *m.Force - } - return Default_DeleteRequest_Force -} - -func (m *DeleteRequest) GetMarkChanges() bool { - if m != nil && m.MarkChanges != nil { - return *m.MarkChanges - } - return Default_DeleteRequest_MarkChanges -} - -func (m *DeleteRequest) GetSnapshot() []*Snapshot { - if m != nil { - return m.Snapshot - } - return nil -} - -type DeleteResponse struct { - Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"` - Version []int64 `protobuf:"varint,3,rep,name=version" json:"version,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *DeleteResponse) Reset() { *m = DeleteResponse{} } -func (m *DeleteResponse) String() string { return proto.CompactTextString(m) } -func (*DeleteResponse) ProtoMessage() {} - -func (m *DeleteResponse) GetCost() *Cost { - if m != nil { - return m.Cost - } - return nil -} - -func (m *DeleteResponse) GetVersion() []int64 { - if m != nil { - return m.Version - } - return nil -} - -type NextRequest struct { - Header *InternalHeader `protobuf:"bytes,5,opt,name=header" json:"header,omitempty"` - Cursor *Cursor `protobuf:"bytes,1,req,name=cursor" json:"cursor,omitempty"` - Count *int32 `protobuf:"varint,2,opt,name=count" json:"count,omitempty"` - Offset *int32 `protobuf:"varint,4,opt,name=offset,def=0" json:"offset,omitempty"` - Compile *bool `protobuf:"varint,3,opt,name=compile,def=0" json:"compile,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *NextRequest) Reset() { *m = NextRequest{} } -func (m *NextRequest) String() string { return proto.CompactTextString(m) } -func (*NextRequest) ProtoMessage() {} - -const Default_NextRequest_Offset int32 = 0 -const Default_NextRequest_Compile bool = false - -func (m *NextRequest) GetHeader() *InternalHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *NextRequest) GetCursor() *Cursor { - if m != nil { - return m.Cursor - } - return nil -} - -func (m *NextRequest) GetCount() int32 { - if m != nil && m.Count != nil { - return *m.Count - } - return 0 -} - -func (m *NextRequest) GetOffset() int32 { - if m != nil && m.Offset != nil { - return *m.Offset - } - return Default_NextRequest_Offset -} - -func (m *NextRequest) GetCompile() bool { - if m != nil && m.Compile != nil { - return *m.Compile - } - return Default_NextRequest_Compile -} - -type QueryResult struct { - Cursor *Cursor `protobuf:"bytes,1,opt,name=cursor" 
json:"cursor,omitempty"` - Result []*EntityProto `protobuf:"bytes,2,rep,name=result" json:"result,omitempty"` - SkippedResults *int32 `protobuf:"varint,7,opt,name=skipped_results" json:"skipped_results,omitempty"` - MoreResults *bool `protobuf:"varint,3,req,name=more_results" json:"more_results,omitempty"` - KeysOnly *bool `protobuf:"varint,4,opt,name=keys_only" json:"keys_only,omitempty"` - IndexOnly *bool `protobuf:"varint,9,opt,name=index_only" json:"index_only,omitempty"` - SmallOps *bool `protobuf:"varint,10,opt,name=small_ops" json:"small_ops,omitempty"` - CompiledQuery *CompiledQuery `protobuf:"bytes,5,opt,name=compiled_query" json:"compiled_query,omitempty"` - CompiledCursor *CompiledCursor `protobuf:"bytes,6,opt,name=compiled_cursor" json:"compiled_cursor,omitempty"` - Index []*CompositeIndex `protobuf:"bytes,8,rep,name=index" json:"index,omitempty"` - Version []int64 `protobuf:"varint,11,rep,name=version" json:"version,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *QueryResult) Reset() { *m = QueryResult{} } -func (m *QueryResult) String() string { return proto.CompactTextString(m) } -func (*QueryResult) ProtoMessage() {} - -func (m *QueryResult) GetCursor() *Cursor { - if m != nil { - return m.Cursor - } - return nil -} - -func (m *QueryResult) GetResult() []*EntityProto { - if m != nil { - return m.Result - } - return nil -} - -func (m *QueryResult) GetSkippedResults() int32 { - if m != nil && m.SkippedResults != nil { - return *m.SkippedResults - } - return 0 -} - -func (m *QueryResult) GetMoreResults() bool { - if m != nil && m.MoreResults != nil { - return *m.MoreResults - } - return false -} - -func (m *QueryResult) GetKeysOnly() bool { - if m != nil && m.KeysOnly != nil { - return *m.KeysOnly - } - return false -} - -func (m *QueryResult) GetIndexOnly() bool { - if m != nil && m.IndexOnly != nil { - return *m.IndexOnly - } - return false -} - -func (m *QueryResult) GetSmallOps() bool { - if m != nil && m.SmallOps != nil { - return *m.SmallOps - } - return false -} - -func (m *QueryResult) GetCompiledQuery() *CompiledQuery { - if m != nil { - return m.CompiledQuery - } - return nil -} - -func (m *QueryResult) GetCompiledCursor() *CompiledCursor { - if m != nil { - return m.CompiledCursor - } - return nil -} - -func (m *QueryResult) GetIndex() []*CompositeIndex { - if m != nil { - return m.Index - } - return nil -} - -func (m *QueryResult) GetVersion() []int64 { - if m != nil { - return m.Version - } - return nil -} - -type AllocateIdsRequest struct { - Header *InternalHeader `protobuf:"bytes,4,opt,name=header" json:"header,omitempty"` - ModelKey *Reference `protobuf:"bytes,1,opt,name=model_key" json:"model_key,omitempty"` - Size *int64 `protobuf:"varint,2,opt,name=size" json:"size,omitempty"` - Max *int64 `protobuf:"varint,3,opt,name=max" json:"max,omitempty"` - Reserve []*Reference `protobuf:"bytes,5,rep,name=reserve" json:"reserve,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *AllocateIdsRequest) Reset() { *m = AllocateIdsRequest{} } -func (m *AllocateIdsRequest) String() string { return proto.CompactTextString(m) } -func (*AllocateIdsRequest) ProtoMessage() {} - -func (m *AllocateIdsRequest) GetHeader() *InternalHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *AllocateIdsRequest) GetModelKey() *Reference { - if m != nil { - return m.ModelKey - } - return nil -} - -func (m *AllocateIdsRequest) GetSize() int64 { - if m != nil && m.Size != nil { - return *m.Size - } - return 0 -} - -func (m *AllocateIdsRequest) 
GetMax() int64 { - if m != nil && m.Max != nil { - return *m.Max - } - return 0 -} - -func (m *AllocateIdsRequest) GetReserve() []*Reference { - if m != nil { - return m.Reserve - } - return nil -} - -type AllocateIdsResponse struct { - Start *int64 `protobuf:"varint,1,req,name=start" json:"start,omitempty"` - End *int64 `protobuf:"varint,2,req,name=end" json:"end,omitempty"` - Cost *Cost `protobuf:"bytes,3,opt,name=cost" json:"cost,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *AllocateIdsResponse) Reset() { *m = AllocateIdsResponse{} } -func (m *AllocateIdsResponse) String() string { return proto.CompactTextString(m) } -func (*AllocateIdsResponse) ProtoMessage() {} - -func (m *AllocateIdsResponse) GetStart() int64 { - if m != nil && m.Start != nil { - return *m.Start - } - return 0 -} - -func (m *AllocateIdsResponse) GetEnd() int64 { - if m != nil && m.End != nil { - return *m.End - } - return 0 -} - -func (m *AllocateIdsResponse) GetCost() *Cost { - if m != nil { - return m.Cost - } - return nil -} - -type CompositeIndices struct { - Index []*CompositeIndex `protobuf:"bytes,1,rep,name=index" json:"index,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CompositeIndices) Reset() { *m = CompositeIndices{} } -func (m *CompositeIndices) String() string { return proto.CompactTextString(m) } -func (*CompositeIndices) ProtoMessage() {} - -func (m *CompositeIndices) GetIndex() []*CompositeIndex { - if m != nil { - return m.Index - } - return nil -} - -type AddActionsRequest struct { - Header *InternalHeader `protobuf:"bytes,3,opt,name=header" json:"header,omitempty"` - Transaction *Transaction `protobuf:"bytes,1,req,name=transaction" json:"transaction,omitempty"` - Action []*Action `protobuf:"bytes,2,rep,name=action" json:"action,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *AddActionsRequest) Reset() { *m = AddActionsRequest{} } -func (m *AddActionsRequest) String() string { return proto.CompactTextString(m) } -func (*AddActionsRequest) ProtoMessage() {} - -func (m *AddActionsRequest) GetHeader() *InternalHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *AddActionsRequest) GetTransaction() *Transaction { - if m != nil { - return m.Transaction - } - return nil -} - -func (m *AddActionsRequest) GetAction() []*Action { - if m != nil { - return m.Action - } - return nil -} - -type AddActionsResponse struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *AddActionsResponse) Reset() { *m = AddActionsResponse{} } -func (m *AddActionsResponse) String() string { return proto.CompactTextString(m) } -func (*AddActionsResponse) ProtoMessage() {} - -type BeginTransactionRequest struct { - Header *InternalHeader `protobuf:"bytes,3,opt,name=header" json:"header,omitempty"` - App *string `protobuf:"bytes,1,req,name=app" json:"app,omitempty"` - AllowMultipleEg *bool `protobuf:"varint,2,opt,name=allow_multiple_eg,def=0" json:"allow_multiple_eg,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *BeginTransactionRequest) Reset() { *m = BeginTransactionRequest{} } -func (m *BeginTransactionRequest) String() string { return proto.CompactTextString(m) } -func (*BeginTransactionRequest) ProtoMessage() {} - -const Default_BeginTransactionRequest_AllowMultipleEg bool = false - -func (m *BeginTransactionRequest) GetHeader() *InternalHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *BeginTransactionRequest) GetApp() string { - if m != nil && m.App != nil { - return *m.App - } - return "" -} - -func (m 
*BeginTransactionRequest) GetAllowMultipleEg() bool { - if m != nil && m.AllowMultipleEg != nil { - return *m.AllowMultipleEg - } - return Default_BeginTransactionRequest_AllowMultipleEg -} - -type CommitResponse struct { - Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"` - Version []*CommitResponse_Version `protobuf:"group,3,rep,name=Version" json:"version,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CommitResponse) Reset() { *m = CommitResponse{} } -func (m *CommitResponse) String() string { return proto.CompactTextString(m) } -func (*CommitResponse) ProtoMessage() {} - -func (m *CommitResponse) GetCost() *Cost { - if m != nil { - return m.Cost - } - return nil -} - -func (m *CommitResponse) GetVersion() []*CommitResponse_Version { - if m != nil { - return m.Version - } - return nil -} - -type CommitResponse_Version struct { - RootEntityKey *Reference `protobuf:"bytes,4,req,name=root_entity_key" json:"root_entity_key,omitempty"` - Version *int64 `protobuf:"varint,5,req,name=version" json:"version,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CommitResponse_Version) Reset() { *m = CommitResponse_Version{} } -func (m *CommitResponse_Version) String() string { return proto.CompactTextString(m) } -func (*CommitResponse_Version) ProtoMessage() {} - -func (m *CommitResponse_Version) GetRootEntityKey() *Reference { - if m != nil { - return m.RootEntityKey - } - return nil -} - -func (m *CommitResponse_Version) GetVersion() int64 { - if m != nil && m.Version != nil { - return *m.Version - } - return 0 -} - -func init() { -} diff --git a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto deleted file mode 100755 index e76f126..0000000 --- a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto +++ /dev/null @@ -1,541 +0,0 @@ -syntax = "proto2"; -option go_package = "datastore"; - -package appengine; - -message Action{} - -message PropertyValue { - optional int64 int64Value = 1; - optional bool booleanValue = 2; - optional string stringValue = 3; - optional double doubleValue = 4; - - optional group PointValue = 5 { - required double x = 6; - required double y = 7; - } - - optional group UserValue = 8 { - required string email = 9; - required string auth_domain = 10; - optional string nickname = 11; - optional string federated_identity = 21; - optional string federated_provider = 22; - } - - optional group ReferenceValue = 12 { - required string app = 13; - optional string name_space = 20; - repeated group PathElement = 14 { - required string type = 15; - optional int64 id = 16; - optional string name = 17; - } - } -} - -message Property { - enum Meaning { - NO_MEANING = 0; - BLOB = 14; - TEXT = 15; - BYTESTRING = 16; - - ATOM_CATEGORY = 1; - ATOM_LINK = 2; - ATOM_TITLE = 3; - ATOM_CONTENT = 4; - ATOM_SUMMARY = 5; - ATOM_AUTHOR = 6; - - GD_WHEN = 7; - GD_EMAIL = 8; - GEORSS_POINT = 9; - GD_IM = 10; - - GD_PHONENUMBER = 11; - GD_POSTALADDRESS = 12; - - GD_RATING = 13; - - BLOBKEY = 17; - ENTITY_PROTO = 19; - - INDEX_VALUE = 18; - }; - - optional Meaning meaning = 1 [default = NO_MEANING]; - optional string meaning_uri = 2; - - required string name = 3; - - required PropertyValue value = 5; - - required bool multiple = 4; - - optional bool searchable = 6 [default=false]; - - enum FtsTokenizationOption { - HTML = 1; - ATOM = 2; - } - - optional FtsTokenizationOption fts_tokenization_option = 8; - - optional string locale = 9 
[default = "en"]; -} - -message Path { - repeated group Element = 1 { - required string type = 2; - optional int64 id = 3; - optional string name = 4; - } -} - -message Reference { - required string app = 13; - optional string name_space = 20; - required Path path = 14; -} - -message User { - required string email = 1; - required string auth_domain = 2; - optional string nickname = 3; - optional string federated_identity = 6; - optional string federated_provider = 7; -} - -message EntityProto { - required Reference key = 13; - required Path entity_group = 16; - optional User owner = 17; - - enum Kind { - GD_CONTACT = 1; - GD_EVENT = 2; - GD_MESSAGE = 3; - } - optional Kind kind = 4; - optional string kind_uri = 5; - - repeated Property property = 14; - repeated Property raw_property = 15; - - optional int32 rank = 18; -} - -message CompositeProperty { - required int64 index_id = 1; - repeated string value = 2; -} - -message Index { - required string entity_type = 1; - required bool ancestor = 5; - repeated group Property = 2 { - required string name = 3; - enum Direction { - ASCENDING = 1; - DESCENDING = 2; - } - optional Direction direction = 4 [default = ASCENDING]; - } -} - -message CompositeIndex { - required string app_id = 1; - required int64 id = 2; - required Index definition = 3; - - enum State { - WRITE_ONLY = 1; - READ_WRITE = 2; - DELETED = 3; - ERROR = 4; - } - required State state = 4; - - optional bool only_use_if_required = 6 [default = false]; -} - -message IndexPostfix { - message IndexValue { - required string property_name = 1; - required PropertyValue value = 2; - } - - repeated IndexValue index_value = 1; - - optional Reference key = 2; - - optional bool before = 3 [default=true]; -} - -message IndexPosition { - optional string key = 1; - - optional bool before = 2 [default=true]; -} - -message Snapshot { - enum Status { - INACTIVE = 0; - ACTIVE = 1; - } - - required int64 ts = 1; -} - -message InternalHeader { - optional string qos = 1; -} - -message Transaction { - optional InternalHeader header = 4; - required fixed64 handle = 1; - required string app = 2; - optional bool mark_changes = 3 [default = false]; -} - -message Query { - optional InternalHeader header = 39; - - required string app = 1; - optional string name_space = 29; - - optional string kind = 3; - optional Reference ancestor = 17; - - repeated group Filter = 4 { - enum Operator { - LESS_THAN = 1; - LESS_THAN_OR_EQUAL = 2; - GREATER_THAN = 3; - GREATER_THAN_OR_EQUAL = 4; - EQUAL = 5; - IN = 6; - EXISTS = 7; - } - - required Operator op = 6; - repeated Property property = 14; - } - - optional string search_query = 8; - - repeated group Order = 9 { - enum Direction { - ASCENDING = 1; - DESCENDING = 2; - } - - required string property = 10; - optional Direction direction = 11 [default = ASCENDING]; - } - - enum Hint { - ORDER_FIRST = 1; - ANCESTOR_FIRST = 2; - FILTER_FIRST = 3; - } - optional Hint hint = 18; - - optional int32 count = 23; - - optional int32 offset = 12 [default = 0]; - - optional int32 limit = 16; - - optional CompiledCursor compiled_cursor = 30; - optional CompiledCursor end_compiled_cursor = 31; - - repeated CompositeIndex composite_index = 19; - - optional bool require_perfect_plan = 20 [default = false]; - - optional bool keys_only = 21 [default = false]; - - optional Transaction transaction = 22; - - optional bool compile = 25 [default = false]; - - optional int64 failover_ms = 26; - - optional bool strong = 32; - - repeated string property_name = 33; - - repeated string 
group_by_property_name = 34; - - optional bool distinct = 24; - - optional int64 min_safe_time_seconds = 35; - - repeated string safe_replica_name = 36; - - optional bool persist_offset = 37 [default=false]; -} - -message CompiledQuery { - required group PrimaryScan = 1 { - optional string index_name = 2; - - optional string start_key = 3; - optional bool start_inclusive = 4; - optional string end_key = 5; - optional bool end_inclusive = 6; - - repeated string start_postfix_value = 22; - repeated string end_postfix_value = 23; - - optional int64 end_unapplied_log_timestamp_us = 19; - } - - repeated group MergeJoinScan = 7 { - required string index_name = 8; - - repeated string prefix_value = 9; - - optional bool value_prefix = 20 [default=false]; - } - - optional Index index_def = 21; - - optional int32 offset = 10 [default = 0]; - - optional int32 limit = 11; - - required bool keys_only = 12; - - repeated string property_name = 24; - - optional int32 distinct_infix_size = 25; - - optional group EntityFilter = 13 { - optional bool distinct = 14 [default=false]; - - optional string kind = 17; - optional Reference ancestor = 18; - } -} - -message CompiledCursor { - optional group Position = 2 { - optional string start_key = 27; - - repeated group IndexValue = 29 { - optional string property = 30; - required PropertyValue value = 31; - } - - optional Reference key = 32; - - optional bool start_inclusive = 28 [default=true]; - } -} - -message Cursor { - required fixed64 cursor = 1; - - optional string app = 2; -} - -message Error { - enum ErrorCode { - BAD_REQUEST = 1; - CONCURRENT_TRANSACTION = 2; - INTERNAL_ERROR = 3; - NEED_INDEX = 4; - TIMEOUT = 5; - PERMISSION_DENIED = 6; - BIGTABLE_ERROR = 7; - COMMITTED_BUT_STILL_APPLYING = 8; - CAPABILITY_DISABLED = 9; - TRY_ALTERNATE_BACKEND = 10; - SAFE_TIME_TOO_OLD = 11; - } -} - -message Cost { - optional int32 index_writes = 1; - optional int32 index_write_bytes = 2; - optional int32 entity_writes = 3; - optional int32 entity_write_bytes = 4; - optional group CommitCost = 5 { - optional int32 requested_entity_puts = 6; - optional int32 requested_entity_deletes = 7; - }; - optional int32 approximate_storage_delta = 8; - optional int32 id_sequence_updates = 9; -} - -message GetRequest { - optional InternalHeader header = 6; - - repeated Reference key = 1; - optional Transaction transaction = 2; - - optional int64 failover_ms = 3; - - optional bool strong = 4; - - optional bool allow_deferred = 5 [default=false]; -} - -message GetResponse { - repeated group Entity = 1 { - optional EntityProto entity = 2; - optional Reference key = 4; - - optional int64 version = 3; - } - - repeated Reference deferred = 5; - - optional bool in_order = 6 [default=true]; -} - -message PutRequest { - optional InternalHeader header = 11; - - repeated EntityProto entity = 1; - optional Transaction transaction = 2; - repeated CompositeIndex composite_index = 3; - - optional bool trusted = 4 [default = false]; - - optional bool force = 7 [default = false]; - - optional bool mark_changes = 8 [default = false]; - repeated Snapshot snapshot = 9; - - enum AutoIdPolicy { - CURRENT = 0; - SEQUENTIAL = 1; - } - optional AutoIdPolicy auto_id_policy = 10 [default = CURRENT]; -} - -message PutResponse { - repeated Reference key = 1; - optional Cost cost = 2; - repeated int64 version = 3; -} - -message TouchRequest { - optional InternalHeader header = 10; - - repeated Reference key = 1; - repeated CompositeIndex composite_index = 2; - optional bool force = 3 [default = false]; - 
repeated Snapshot snapshot = 9; -} - -message TouchResponse { - optional Cost cost = 1; -} - -message DeleteRequest { - optional InternalHeader header = 10; - - repeated Reference key = 6; - optional Transaction transaction = 5; - - optional bool trusted = 4 [default = false]; - - optional bool force = 7 [default = false]; - - optional bool mark_changes = 8 [default = false]; - repeated Snapshot snapshot = 9; -} - -message DeleteResponse { - optional Cost cost = 1; - repeated int64 version = 3; -} - -message NextRequest { - optional InternalHeader header = 5; - - required Cursor cursor = 1; - optional int32 count = 2; - - optional int32 offset = 4 [default = 0]; - - optional bool compile = 3 [default = false]; -} - -message QueryResult { - optional Cursor cursor = 1; - - repeated EntityProto result = 2; - - optional int32 skipped_results = 7; - - required bool more_results = 3; - - optional bool keys_only = 4; - - optional bool index_only = 9; - - optional bool small_ops = 10; - - optional CompiledQuery compiled_query = 5; - - optional CompiledCursor compiled_cursor = 6; - - repeated CompositeIndex index = 8; - - repeated int64 version = 11; -} - -message AllocateIdsRequest { - optional InternalHeader header = 4; - - optional Reference model_key = 1; - - optional int64 size = 2; - - optional int64 max = 3; - - repeated Reference reserve = 5; -} - -message AllocateIdsResponse { - required int64 start = 1; - required int64 end = 2; - optional Cost cost = 3; -} - -message CompositeIndices { - repeated CompositeIndex index = 1; -} - -message AddActionsRequest { - optional InternalHeader header = 3; - - required Transaction transaction = 1; - repeated Action action = 2; -} - -message AddActionsResponse { -} - -message BeginTransactionRequest { - optional InternalHeader header = 3; - - required string app = 1; - optional bool allow_multiple_eg = 2 [default = false]; -} - -message CommitResponse { - optional Cost cost = 1; - - repeated group Version = 3 { - required Reference root_entity_key = 4; - required int64 version = 5; - } -} diff --git a/vendor/google.golang.org/appengine/internal/identity.go b/vendor/google.golang.org/appengine/internal/identity.go deleted file mode 100644 index d538701..0000000 --- a/vendor/google.golang.org/appengine/internal/identity.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package internal - -import netcontext "golang.org/x/net/context" - -// These functions are implementations of the wrapper functions -// in ../appengine/identity.go. See that file for commentary. - -func AppID(c netcontext.Context) string { - return appID(FullyQualifiedAppID(c)) -} diff --git a/vendor/google.golang.org/appengine/internal/identity_classic.go b/vendor/google.golang.org/appengine/internal/identity_classic.go deleted file mode 100644 index e6b9227..0000000 --- a/vendor/google.golang.org/appengine/internal/identity_classic.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. 
- -// +build appengine - -package internal - -import ( - "appengine" - - netcontext "golang.org/x/net/context" -) - -func DefaultVersionHostname(ctx netcontext.Context) string { - return appengine.DefaultVersionHostname(fromContext(ctx)) -} - -func RequestID(ctx netcontext.Context) string { return appengine.RequestID(fromContext(ctx)) } -func Datacenter(_ netcontext.Context) string { return appengine.Datacenter() } -func ServerSoftware() string { return appengine.ServerSoftware() } -func ModuleName(ctx netcontext.Context) string { return appengine.ModuleName(fromContext(ctx)) } -func VersionID(ctx netcontext.Context) string { return appengine.VersionID(fromContext(ctx)) } -func InstanceID() string { return appengine.InstanceID() } -func IsDevAppServer() bool { return appengine.IsDevAppServer() } - -func fullyQualifiedAppID(ctx netcontext.Context) string { return fromContext(ctx).FullyQualifiedAppID() } diff --git a/vendor/google.golang.org/appengine/internal/identity_vm.go b/vendor/google.golang.org/appengine/internal/identity_vm.go deleted file mode 100644 index ebe68b7..0000000 --- a/vendor/google.golang.org/appengine/internal/identity_vm.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// +build !appengine - -package internal - -import ( - "net/http" - "os" - - netcontext "golang.org/x/net/context" -) - -// These functions are implementations of the wrapper functions -// in ../appengine/identity.go. See that file for commentary. - -const ( - hDefaultVersionHostname = "X-AppEngine-Default-Version-Hostname" - hRequestLogId = "X-AppEngine-Request-Log-Id" - hDatacenter = "X-AppEngine-Datacenter" -) - -func ctxHeaders(ctx netcontext.Context) http.Header { - return fromContext(ctx).Request().Header -} - -func DefaultVersionHostname(ctx netcontext.Context) string { - return ctxHeaders(ctx).Get(hDefaultVersionHostname) -} - -func RequestID(ctx netcontext.Context) string { - return ctxHeaders(ctx).Get(hRequestLogId) -} - -func Datacenter(ctx netcontext.Context) string { - return ctxHeaders(ctx).Get(hDatacenter) -} - -func ServerSoftware() string { - // TODO(dsymonds): Remove fallback when we've verified this. - if s := os.Getenv("SERVER_SOFTWARE"); s != "" { - return s - } - return "Google App Engine/1.x.x" -} - -// TODO(dsymonds): Remove the metadata fetches. - -func ModuleName(_ netcontext.Context) string { - if s := os.Getenv("GAE_MODULE_NAME"); s != "" { - return s - } - return string(mustGetMetadata("instance/attributes/gae_backend_name")) -} - -func VersionID(_ netcontext.Context) string { - if s1, s2 := os.Getenv("GAE_MODULE_VERSION"), os.Getenv("GAE_MINOR_VERSION"); s1 != "" && s2 != "" { - return s1 + "." + s2 - } - return string(mustGetMetadata("instance/attributes/gae_backend_version")) + "." + string(mustGetMetadata("instance/attributes/gae_backend_minor_version")) -} - -func InstanceID() string { - if s := os.Getenv("GAE_MODULE_INSTANCE"); s != "" { - return s - } - return string(mustGetMetadata("instance/attributes/gae_backend_instance")) -} - -func partitionlessAppID() string { - // gae_project has everything except the partition prefix. 
- appID := os.Getenv("GAE_LONG_APP_ID") - if appID == "" { - appID = string(mustGetMetadata("instance/attributes/gae_project")) - } - return appID -} - -func fullyQualifiedAppID(_ netcontext.Context) string { - appID := partitionlessAppID() - - part := os.Getenv("GAE_PARTITION") - if part == "" { - part = string(mustGetMetadata("instance/attributes/gae_partition")) - } - - if part != "" { - appID = part + "~" + appID - } - return appID -} - -func IsDevAppServer() bool { - return os.Getenv("RUN_WITH_DEVAPPSERVER") != "" -} diff --git a/vendor/google.golang.org/appengine/internal/internal.go b/vendor/google.golang.org/appengine/internal/internal.go deleted file mode 100644 index 051ea39..0000000 --- a/vendor/google.golang.org/appengine/internal/internal.go +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// Package internal provides support for package appengine. -// -// Programs should not use this package directly. Its API is not stable. -// Use packages appengine and appengine/* instead. -package internal - -import ( - "fmt" - - "github.com/golang/protobuf/proto" - - remotepb "google.golang.org/appengine/internal/remote_api" -) - -// errorCodeMaps is a map of service name to the error code map for the service. -var errorCodeMaps = make(map[string]map[int32]string) - -// RegisterErrorCodeMap is called from API implementations to register their -// error code map. This should only be called from init functions. -func RegisterErrorCodeMap(service string, m map[int32]string) { - errorCodeMaps[service] = m -} - -type timeoutCodeKey struct { - service string - code int32 -} - -// timeoutCodes is the set of service+code pairs that represent timeouts. -var timeoutCodes = make(map[timeoutCodeKey]bool) - -func RegisterTimeoutErrorCode(service string, code int32) { - timeoutCodes[timeoutCodeKey{service, code}] = true -} - -// APIError is the type returned by appengine.Context's Call method -// when an API call fails in an API-specific way. This may be, for instance, -// a taskqueue API call failing with TaskQueueServiceError::UNKNOWN_QUEUE. -type APIError struct { - Service string - Detail string - Code int32 // API-specific error code -} - -func (e *APIError) Error() string { - if e.Code == 0 { - if e.Detail == "" { - return "APIError " - } - return e.Detail - } - s := fmt.Sprintf("API error %d", e.Code) - if m, ok := errorCodeMaps[e.Service]; ok { - s += " (" + e.Service + ": " + m[e.Code] + ")" - } else { - // Shouldn't happen, but provide a bit more detail if it does. - s = e.Service + " " + s - } - if e.Detail != "" { - s += ": " + e.Detail - } - return s -} - -func (e *APIError) IsTimeout() bool { - return timeoutCodes[timeoutCodeKey{e.Service, e.Code}] -} - -// CallError is the type returned by appengine.Context's Call method when an -// API call fails in a generic way, such as RpcError::CAPABILITY_DISABLED. -type CallError struct { - Detail string - Code int32 - // TODO: Remove this if we get a distinguishable error code. 
- Timeout bool -} - -func (e *CallError) Error() string { - var msg string - switch remotepb.RpcError_ErrorCode(e.Code) { - case remotepb.RpcError_UNKNOWN: - return e.Detail - case remotepb.RpcError_OVER_QUOTA: - msg = "Over quota" - case remotepb.RpcError_CAPABILITY_DISABLED: - msg = "Capability disabled" - case remotepb.RpcError_CANCELLED: - msg = "Canceled" - default: - msg = fmt.Sprintf("Call error %d", e.Code) - } - s := msg + ": " + e.Detail - if e.Timeout { - s += " (timeout)" - } - return s -} - -func (e *CallError) IsTimeout() bool { - return e.Timeout -} - -// NamespaceMods is a map from API service to a function that will mutate an RPC request to attach a namespace. -// The function should be prepared to be called on the same message more than once; it should only modify the -// RPC request the first time. -var NamespaceMods = make(map[string]func(m proto.Message, namespace string)) diff --git a/vendor/google.golang.org/appengine/internal/log/log_service.pb.go b/vendor/google.golang.org/appengine/internal/log/log_service.pb.go deleted file mode 100644 index 20c595b..0000000 --- a/vendor/google.golang.org/appengine/internal/log/log_service.pb.go +++ /dev/null @@ -1,899 +0,0 @@ -// Code generated by protoc-gen-go. -// source: google.golang.org/appengine/internal/log/log_service.proto -// DO NOT EDIT! - -/* -Package log is a generated protocol buffer package. - -It is generated from these files: - google.golang.org/appengine/internal/log/log_service.proto - -It has these top-level messages: - LogServiceError - UserAppLogLine - UserAppLogGroup - FlushRequest - SetStatusRequest - LogOffset - LogLine - RequestLog - LogModuleVersion - LogReadRequest - LogReadResponse - LogUsageRecord - LogUsageRequest - LogUsageResponse -*/ -package log - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -type LogServiceError_ErrorCode int32 - -const ( - LogServiceError_OK LogServiceError_ErrorCode = 0 - LogServiceError_INVALID_REQUEST LogServiceError_ErrorCode = 1 - LogServiceError_STORAGE_ERROR LogServiceError_ErrorCode = 2 -) - -var LogServiceError_ErrorCode_name = map[int32]string{ - 0: "OK", - 1: "INVALID_REQUEST", - 2: "STORAGE_ERROR", -} -var LogServiceError_ErrorCode_value = map[string]int32{ - "OK": 0, - "INVALID_REQUEST": 1, - "STORAGE_ERROR": 2, -} - -func (x LogServiceError_ErrorCode) Enum() *LogServiceError_ErrorCode { - p := new(LogServiceError_ErrorCode) - *p = x - return p -} -func (x LogServiceError_ErrorCode) String() string { - return proto.EnumName(LogServiceError_ErrorCode_name, int32(x)) -} -func (x *LogServiceError_ErrorCode) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(LogServiceError_ErrorCode_value, data, "LogServiceError_ErrorCode") - if err != nil { - return err - } - *x = LogServiceError_ErrorCode(value) - return nil -} - -type LogServiceError struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *LogServiceError) Reset() { *m = LogServiceError{} } -func (m *LogServiceError) String() string { return proto.CompactTextString(m) } -func (*LogServiceError) ProtoMessage() {} - -type UserAppLogLine struct { - TimestampUsec *int64 `protobuf:"varint,1,req,name=timestamp_usec" json:"timestamp_usec,omitempty"` - Level *int64 `protobuf:"varint,2,req,name=level" json:"level,omitempty"` - Message *string `protobuf:"bytes,3,req,name=message" json:"message,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *UserAppLogLine) Reset() { *m = UserAppLogLine{} } -func (m *UserAppLogLine) String() string { return proto.CompactTextString(m) } -func (*UserAppLogLine) ProtoMessage() {} - -func (m *UserAppLogLine) GetTimestampUsec() int64 { - if m != nil && m.TimestampUsec != nil { - return *m.TimestampUsec - } - return 0 -} - -func (m *UserAppLogLine) GetLevel() int64 { - if m != nil && m.Level != nil { - return *m.Level - } - return 0 -} - -func (m *UserAppLogLine) GetMessage() string { - if m != nil && m.Message != nil { - return *m.Message - } - return "" -} - -type UserAppLogGroup struct { - LogLine []*UserAppLogLine `protobuf:"bytes,2,rep,name=log_line" json:"log_line,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *UserAppLogGroup) Reset() { *m = UserAppLogGroup{} } -func (m *UserAppLogGroup) String() string { return proto.CompactTextString(m) } -func (*UserAppLogGroup) ProtoMessage() {} - -func (m *UserAppLogGroup) GetLogLine() []*UserAppLogLine { - if m != nil { - return m.LogLine - } - return nil -} - -type FlushRequest struct { - Logs []byte `protobuf:"bytes,1,opt,name=logs" json:"logs,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *FlushRequest) Reset() { *m = FlushRequest{} } -func (m *FlushRequest) String() string { return proto.CompactTextString(m) } -func (*FlushRequest) ProtoMessage() {} - -func (m *FlushRequest) GetLogs() []byte { - if m != nil { - return m.Logs - } - return nil -} - -type SetStatusRequest struct { - Status *string `protobuf:"bytes,1,req,name=status" json:"status,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *SetStatusRequest) Reset() { *m = SetStatusRequest{} } -func (m *SetStatusRequest) String() string { return proto.CompactTextString(m) } -func (*SetStatusRequest) ProtoMessage() {} - -func (m *SetStatusRequest) GetStatus() string { - if m != nil && m.Status != nil { - return *m.Status - } - 
return "" -} - -type LogOffset struct { - RequestId []byte `protobuf:"bytes,1,opt,name=request_id" json:"request_id,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *LogOffset) Reset() { *m = LogOffset{} } -func (m *LogOffset) String() string { return proto.CompactTextString(m) } -func (*LogOffset) ProtoMessage() {} - -func (m *LogOffset) GetRequestId() []byte { - if m != nil { - return m.RequestId - } - return nil -} - -type LogLine struct { - Time *int64 `protobuf:"varint,1,req,name=time" json:"time,omitempty"` - Level *int32 `protobuf:"varint,2,req,name=level" json:"level,omitempty"` - LogMessage *string `protobuf:"bytes,3,req,name=log_message" json:"log_message,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *LogLine) Reset() { *m = LogLine{} } -func (m *LogLine) String() string { return proto.CompactTextString(m) } -func (*LogLine) ProtoMessage() {} - -func (m *LogLine) GetTime() int64 { - if m != nil && m.Time != nil { - return *m.Time - } - return 0 -} - -func (m *LogLine) GetLevel() int32 { - if m != nil && m.Level != nil { - return *m.Level - } - return 0 -} - -func (m *LogLine) GetLogMessage() string { - if m != nil && m.LogMessage != nil { - return *m.LogMessage - } - return "" -} - -type RequestLog struct { - AppId *string `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"` - ModuleId *string `protobuf:"bytes,37,opt,name=module_id,def=default" json:"module_id,omitempty"` - VersionId *string `protobuf:"bytes,2,req,name=version_id" json:"version_id,omitempty"` - RequestId []byte `protobuf:"bytes,3,req,name=request_id" json:"request_id,omitempty"` - Offset *LogOffset `protobuf:"bytes,35,opt,name=offset" json:"offset,omitempty"` - Ip *string `protobuf:"bytes,4,req,name=ip" json:"ip,omitempty"` - Nickname *string `protobuf:"bytes,5,opt,name=nickname" json:"nickname,omitempty"` - StartTime *int64 `protobuf:"varint,6,req,name=start_time" json:"start_time,omitempty"` - EndTime *int64 `protobuf:"varint,7,req,name=end_time" json:"end_time,omitempty"` - Latency *int64 `protobuf:"varint,8,req,name=latency" json:"latency,omitempty"` - Mcycles *int64 `protobuf:"varint,9,req,name=mcycles" json:"mcycles,omitempty"` - Method *string `protobuf:"bytes,10,req,name=method" json:"method,omitempty"` - Resource *string `protobuf:"bytes,11,req,name=resource" json:"resource,omitempty"` - HttpVersion *string `protobuf:"bytes,12,req,name=http_version" json:"http_version,omitempty"` - Status *int32 `protobuf:"varint,13,req,name=status" json:"status,omitempty"` - ResponseSize *int64 `protobuf:"varint,14,req,name=response_size" json:"response_size,omitempty"` - Referrer *string `protobuf:"bytes,15,opt,name=referrer" json:"referrer,omitempty"` - UserAgent *string `protobuf:"bytes,16,opt,name=user_agent" json:"user_agent,omitempty"` - UrlMapEntry *string `protobuf:"bytes,17,req,name=url_map_entry" json:"url_map_entry,omitempty"` - Combined *string `protobuf:"bytes,18,req,name=combined" json:"combined,omitempty"` - ApiMcycles *int64 `protobuf:"varint,19,opt,name=api_mcycles" json:"api_mcycles,omitempty"` - Host *string `protobuf:"bytes,20,opt,name=host" json:"host,omitempty"` - Cost *float64 `protobuf:"fixed64,21,opt,name=cost" json:"cost,omitempty"` - TaskQueueName *string `protobuf:"bytes,22,opt,name=task_queue_name" json:"task_queue_name,omitempty"` - TaskName *string `protobuf:"bytes,23,opt,name=task_name" json:"task_name,omitempty"` - WasLoadingRequest *bool `protobuf:"varint,24,opt,name=was_loading_request" json:"was_loading_request,omitempty"` - PendingTime *int64 
`protobuf:"varint,25,opt,name=pending_time" json:"pending_time,omitempty"` - ReplicaIndex *int32 `protobuf:"varint,26,opt,name=replica_index,def=-1" json:"replica_index,omitempty"` - Finished *bool `protobuf:"varint,27,opt,name=finished,def=1" json:"finished,omitempty"` - CloneKey []byte `protobuf:"bytes,28,opt,name=clone_key" json:"clone_key,omitempty"` - Line []*LogLine `protobuf:"bytes,29,rep,name=line" json:"line,omitempty"` - LinesIncomplete *bool `protobuf:"varint,36,opt,name=lines_incomplete" json:"lines_incomplete,omitempty"` - AppEngineRelease []byte `protobuf:"bytes,38,opt,name=app_engine_release" json:"app_engine_release,omitempty"` - ExitReason *int32 `protobuf:"varint,30,opt,name=exit_reason" json:"exit_reason,omitempty"` - WasThrottledForTime *bool `protobuf:"varint,31,opt,name=was_throttled_for_time" json:"was_throttled_for_time,omitempty"` - WasThrottledForRequests *bool `protobuf:"varint,32,opt,name=was_throttled_for_requests" json:"was_throttled_for_requests,omitempty"` - ThrottledTime *int64 `protobuf:"varint,33,opt,name=throttled_time" json:"throttled_time,omitempty"` - ServerName []byte `protobuf:"bytes,34,opt,name=server_name" json:"server_name,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *RequestLog) Reset() { *m = RequestLog{} } -func (m *RequestLog) String() string { return proto.CompactTextString(m) } -func (*RequestLog) ProtoMessage() {} - -const Default_RequestLog_ModuleId string = "default" -const Default_RequestLog_ReplicaIndex int32 = -1 -const Default_RequestLog_Finished bool = true - -func (m *RequestLog) GetAppId() string { - if m != nil && m.AppId != nil { - return *m.AppId - } - return "" -} - -func (m *RequestLog) GetModuleId() string { - if m != nil && m.ModuleId != nil { - return *m.ModuleId - } - return Default_RequestLog_ModuleId -} - -func (m *RequestLog) GetVersionId() string { - if m != nil && m.VersionId != nil { - return *m.VersionId - } - return "" -} - -func (m *RequestLog) GetRequestId() []byte { - if m != nil { - return m.RequestId - } - return nil -} - -func (m *RequestLog) GetOffset() *LogOffset { - if m != nil { - return m.Offset - } - return nil -} - -func (m *RequestLog) GetIp() string { - if m != nil && m.Ip != nil { - return *m.Ip - } - return "" -} - -func (m *RequestLog) GetNickname() string { - if m != nil && m.Nickname != nil { - return *m.Nickname - } - return "" -} - -func (m *RequestLog) GetStartTime() int64 { - if m != nil && m.StartTime != nil { - return *m.StartTime - } - return 0 -} - -func (m *RequestLog) GetEndTime() int64 { - if m != nil && m.EndTime != nil { - return *m.EndTime - } - return 0 -} - -func (m *RequestLog) GetLatency() int64 { - if m != nil && m.Latency != nil { - return *m.Latency - } - return 0 -} - -func (m *RequestLog) GetMcycles() int64 { - if m != nil && m.Mcycles != nil { - return *m.Mcycles - } - return 0 -} - -func (m *RequestLog) GetMethod() string { - if m != nil && m.Method != nil { - return *m.Method - } - return "" -} - -func (m *RequestLog) GetResource() string { - if m != nil && m.Resource != nil { - return *m.Resource - } - return "" -} - -func (m *RequestLog) GetHttpVersion() string { - if m != nil && m.HttpVersion != nil { - return *m.HttpVersion - } - return "" -} - -func (m *RequestLog) GetStatus() int32 { - if m != nil && m.Status != nil { - return *m.Status - } - return 0 -} - -func (m *RequestLog) GetResponseSize() int64 { - if m != nil && m.ResponseSize != nil { - return *m.ResponseSize - } - return 0 -} - -func (m *RequestLog) GetReferrer() string { - if m != nil 
&& m.Referrer != nil { - return *m.Referrer - } - return "" -} - -func (m *RequestLog) GetUserAgent() string { - if m != nil && m.UserAgent != nil { - return *m.UserAgent - } - return "" -} - -func (m *RequestLog) GetUrlMapEntry() string { - if m != nil && m.UrlMapEntry != nil { - return *m.UrlMapEntry - } - return "" -} - -func (m *RequestLog) GetCombined() string { - if m != nil && m.Combined != nil { - return *m.Combined - } - return "" -} - -func (m *RequestLog) GetApiMcycles() int64 { - if m != nil && m.ApiMcycles != nil { - return *m.ApiMcycles - } - return 0 -} - -func (m *RequestLog) GetHost() string { - if m != nil && m.Host != nil { - return *m.Host - } - return "" -} - -func (m *RequestLog) GetCost() float64 { - if m != nil && m.Cost != nil { - return *m.Cost - } - return 0 -} - -func (m *RequestLog) GetTaskQueueName() string { - if m != nil && m.TaskQueueName != nil { - return *m.TaskQueueName - } - return "" -} - -func (m *RequestLog) GetTaskName() string { - if m != nil && m.TaskName != nil { - return *m.TaskName - } - return "" -} - -func (m *RequestLog) GetWasLoadingRequest() bool { - if m != nil && m.WasLoadingRequest != nil { - return *m.WasLoadingRequest - } - return false -} - -func (m *RequestLog) GetPendingTime() int64 { - if m != nil && m.PendingTime != nil { - return *m.PendingTime - } - return 0 -} - -func (m *RequestLog) GetReplicaIndex() int32 { - if m != nil && m.ReplicaIndex != nil { - return *m.ReplicaIndex - } - return Default_RequestLog_ReplicaIndex -} - -func (m *RequestLog) GetFinished() bool { - if m != nil && m.Finished != nil { - return *m.Finished - } - return Default_RequestLog_Finished -} - -func (m *RequestLog) GetCloneKey() []byte { - if m != nil { - return m.CloneKey - } - return nil -} - -func (m *RequestLog) GetLine() []*LogLine { - if m != nil { - return m.Line - } - return nil -} - -func (m *RequestLog) GetLinesIncomplete() bool { - if m != nil && m.LinesIncomplete != nil { - return *m.LinesIncomplete - } - return false -} - -func (m *RequestLog) GetAppEngineRelease() []byte { - if m != nil { - return m.AppEngineRelease - } - return nil -} - -func (m *RequestLog) GetExitReason() int32 { - if m != nil && m.ExitReason != nil { - return *m.ExitReason - } - return 0 -} - -func (m *RequestLog) GetWasThrottledForTime() bool { - if m != nil && m.WasThrottledForTime != nil { - return *m.WasThrottledForTime - } - return false -} - -func (m *RequestLog) GetWasThrottledForRequests() bool { - if m != nil && m.WasThrottledForRequests != nil { - return *m.WasThrottledForRequests - } - return false -} - -func (m *RequestLog) GetThrottledTime() int64 { - if m != nil && m.ThrottledTime != nil { - return *m.ThrottledTime - } - return 0 -} - -func (m *RequestLog) GetServerName() []byte { - if m != nil { - return m.ServerName - } - return nil -} - -type LogModuleVersion struct { - ModuleId *string `protobuf:"bytes,1,opt,name=module_id,def=default" json:"module_id,omitempty"` - VersionId *string `protobuf:"bytes,2,opt,name=version_id" json:"version_id,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *LogModuleVersion) Reset() { *m = LogModuleVersion{} } -func (m *LogModuleVersion) String() string { return proto.CompactTextString(m) } -func (*LogModuleVersion) ProtoMessage() {} - -const Default_LogModuleVersion_ModuleId string = "default" - -func (m *LogModuleVersion) GetModuleId() string { - if m != nil && m.ModuleId != nil { - return *m.ModuleId - } - return Default_LogModuleVersion_ModuleId -} - -func (m *LogModuleVersion) GetVersionId() string { - 
if m != nil && m.VersionId != nil { - return *m.VersionId - } - return "" -} - -type LogReadRequest struct { - AppId *string `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"` - VersionId []string `protobuf:"bytes,2,rep,name=version_id" json:"version_id,omitempty"` - ModuleVersion []*LogModuleVersion `protobuf:"bytes,19,rep,name=module_version" json:"module_version,omitempty"` - StartTime *int64 `protobuf:"varint,3,opt,name=start_time" json:"start_time,omitempty"` - EndTime *int64 `protobuf:"varint,4,opt,name=end_time" json:"end_time,omitempty"` - Offset *LogOffset `protobuf:"bytes,5,opt,name=offset" json:"offset,omitempty"` - RequestId [][]byte `protobuf:"bytes,6,rep,name=request_id" json:"request_id,omitempty"` - MinimumLogLevel *int32 `protobuf:"varint,7,opt,name=minimum_log_level" json:"minimum_log_level,omitempty"` - IncludeIncomplete *bool `protobuf:"varint,8,opt,name=include_incomplete" json:"include_incomplete,omitempty"` - Count *int64 `protobuf:"varint,9,opt,name=count" json:"count,omitempty"` - CombinedLogRegex *string `protobuf:"bytes,14,opt,name=combined_log_regex" json:"combined_log_regex,omitempty"` - HostRegex *string `protobuf:"bytes,15,opt,name=host_regex" json:"host_regex,omitempty"` - ReplicaIndex *int32 `protobuf:"varint,16,opt,name=replica_index" json:"replica_index,omitempty"` - IncludeAppLogs *bool `protobuf:"varint,10,opt,name=include_app_logs" json:"include_app_logs,omitempty"` - AppLogsPerRequest *int32 `protobuf:"varint,17,opt,name=app_logs_per_request" json:"app_logs_per_request,omitempty"` - IncludeHost *bool `protobuf:"varint,11,opt,name=include_host" json:"include_host,omitempty"` - IncludeAll *bool `protobuf:"varint,12,opt,name=include_all" json:"include_all,omitempty"` - CacheIterator *bool `protobuf:"varint,13,opt,name=cache_iterator" json:"cache_iterator,omitempty"` - NumShards *int32 `protobuf:"varint,18,opt,name=num_shards" json:"num_shards,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *LogReadRequest) Reset() { *m = LogReadRequest{} } -func (m *LogReadRequest) String() string { return proto.CompactTextString(m) } -func (*LogReadRequest) ProtoMessage() {} - -func (m *LogReadRequest) GetAppId() string { - if m != nil && m.AppId != nil { - return *m.AppId - } - return "" -} - -func (m *LogReadRequest) GetVersionId() []string { - if m != nil { - return m.VersionId - } - return nil -} - -func (m *LogReadRequest) GetModuleVersion() []*LogModuleVersion { - if m != nil { - return m.ModuleVersion - } - return nil -} - -func (m *LogReadRequest) GetStartTime() int64 { - if m != nil && m.StartTime != nil { - return *m.StartTime - } - return 0 -} - -func (m *LogReadRequest) GetEndTime() int64 { - if m != nil && m.EndTime != nil { - return *m.EndTime - } - return 0 -} - -func (m *LogReadRequest) GetOffset() *LogOffset { - if m != nil { - return m.Offset - } - return nil -} - -func (m *LogReadRequest) GetRequestId() [][]byte { - if m != nil { - return m.RequestId - } - return nil -} - -func (m *LogReadRequest) GetMinimumLogLevel() int32 { - if m != nil && m.MinimumLogLevel != nil { - return *m.MinimumLogLevel - } - return 0 -} - -func (m *LogReadRequest) GetIncludeIncomplete() bool { - if m != nil && m.IncludeIncomplete != nil { - return *m.IncludeIncomplete - } - return false -} - -func (m *LogReadRequest) GetCount() int64 { - if m != nil && m.Count != nil { - return *m.Count - } - return 0 -} - -func (m *LogReadRequest) GetCombinedLogRegex() string { - if m != nil && m.CombinedLogRegex != nil { - return *m.CombinedLogRegex - } - return 
"" -} - -func (m *LogReadRequest) GetHostRegex() string { - if m != nil && m.HostRegex != nil { - return *m.HostRegex - } - return "" -} - -func (m *LogReadRequest) GetReplicaIndex() int32 { - if m != nil && m.ReplicaIndex != nil { - return *m.ReplicaIndex - } - return 0 -} - -func (m *LogReadRequest) GetIncludeAppLogs() bool { - if m != nil && m.IncludeAppLogs != nil { - return *m.IncludeAppLogs - } - return false -} - -func (m *LogReadRequest) GetAppLogsPerRequest() int32 { - if m != nil && m.AppLogsPerRequest != nil { - return *m.AppLogsPerRequest - } - return 0 -} - -func (m *LogReadRequest) GetIncludeHost() bool { - if m != nil && m.IncludeHost != nil { - return *m.IncludeHost - } - return false -} - -func (m *LogReadRequest) GetIncludeAll() bool { - if m != nil && m.IncludeAll != nil { - return *m.IncludeAll - } - return false -} - -func (m *LogReadRequest) GetCacheIterator() bool { - if m != nil && m.CacheIterator != nil { - return *m.CacheIterator - } - return false -} - -func (m *LogReadRequest) GetNumShards() int32 { - if m != nil && m.NumShards != nil { - return *m.NumShards - } - return 0 -} - -type LogReadResponse struct { - Log []*RequestLog `protobuf:"bytes,1,rep,name=log" json:"log,omitempty"` - Offset *LogOffset `protobuf:"bytes,2,opt,name=offset" json:"offset,omitempty"` - LastEndTime *int64 `protobuf:"varint,3,opt,name=last_end_time" json:"last_end_time,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *LogReadResponse) Reset() { *m = LogReadResponse{} } -func (m *LogReadResponse) String() string { return proto.CompactTextString(m) } -func (*LogReadResponse) ProtoMessage() {} - -func (m *LogReadResponse) GetLog() []*RequestLog { - if m != nil { - return m.Log - } - return nil -} - -func (m *LogReadResponse) GetOffset() *LogOffset { - if m != nil { - return m.Offset - } - return nil -} - -func (m *LogReadResponse) GetLastEndTime() int64 { - if m != nil && m.LastEndTime != nil { - return *m.LastEndTime - } - return 0 -} - -type LogUsageRecord struct { - VersionId *string `protobuf:"bytes,1,opt,name=version_id" json:"version_id,omitempty"` - StartTime *int32 `protobuf:"varint,2,opt,name=start_time" json:"start_time,omitempty"` - EndTime *int32 `protobuf:"varint,3,opt,name=end_time" json:"end_time,omitempty"` - Count *int64 `protobuf:"varint,4,opt,name=count" json:"count,omitempty"` - TotalSize *int64 `protobuf:"varint,5,opt,name=total_size" json:"total_size,omitempty"` - Records *int32 `protobuf:"varint,6,opt,name=records" json:"records,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *LogUsageRecord) Reset() { *m = LogUsageRecord{} } -func (m *LogUsageRecord) String() string { return proto.CompactTextString(m) } -func (*LogUsageRecord) ProtoMessage() {} - -func (m *LogUsageRecord) GetVersionId() string { - if m != nil && m.VersionId != nil { - return *m.VersionId - } - return "" -} - -func (m *LogUsageRecord) GetStartTime() int32 { - if m != nil && m.StartTime != nil { - return *m.StartTime - } - return 0 -} - -func (m *LogUsageRecord) GetEndTime() int32 { - if m != nil && m.EndTime != nil { - return *m.EndTime - } - return 0 -} - -func (m *LogUsageRecord) GetCount() int64 { - if m != nil && m.Count != nil { - return *m.Count - } - return 0 -} - -func (m *LogUsageRecord) GetTotalSize() int64 { - if m != nil && m.TotalSize != nil { - return *m.TotalSize - } - return 0 -} - -func (m *LogUsageRecord) GetRecords() int32 { - if m != nil && m.Records != nil { - return *m.Records - } - return 0 -} - -type LogUsageRequest struct { - AppId *string 
`protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"` - VersionId []string `protobuf:"bytes,2,rep,name=version_id" json:"version_id,omitempty"` - StartTime *int32 `protobuf:"varint,3,opt,name=start_time" json:"start_time,omitempty"` - EndTime *int32 `protobuf:"varint,4,opt,name=end_time" json:"end_time,omitempty"` - ResolutionHours *uint32 `protobuf:"varint,5,opt,name=resolution_hours,def=1" json:"resolution_hours,omitempty"` - CombineVersions *bool `protobuf:"varint,6,opt,name=combine_versions" json:"combine_versions,omitempty"` - UsageVersion *int32 `protobuf:"varint,7,opt,name=usage_version" json:"usage_version,omitempty"` - VersionsOnly *bool `protobuf:"varint,8,opt,name=versions_only" json:"versions_only,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *LogUsageRequest) Reset() { *m = LogUsageRequest{} } -func (m *LogUsageRequest) String() string { return proto.CompactTextString(m) } -func (*LogUsageRequest) ProtoMessage() {} - -const Default_LogUsageRequest_ResolutionHours uint32 = 1 - -func (m *LogUsageRequest) GetAppId() string { - if m != nil && m.AppId != nil { - return *m.AppId - } - return "" -} - -func (m *LogUsageRequest) GetVersionId() []string { - if m != nil { - return m.VersionId - } - return nil -} - -func (m *LogUsageRequest) GetStartTime() int32 { - if m != nil && m.StartTime != nil { - return *m.StartTime - } - return 0 -} - -func (m *LogUsageRequest) GetEndTime() int32 { - if m != nil && m.EndTime != nil { - return *m.EndTime - } - return 0 -} - -func (m *LogUsageRequest) GetResolutionHours() uint32 { - if m != nil && m.ResolutionHours != nil { - return *m.ResolutionHours - } - return Default_LogUsageRequest_ResolutionHours -} - -func (m *LogUsageRequest) GetCombineVersions() bool { - if m != nil && m.CombineVersions != nil { - return *m.CombineVersions - } - return false -} - -func (m *LogUsageRequest) GetUsageVersion() int32 { - if m != nil && m.UsageVersion != nil { - return *m.UsageVersion - } - return 0 -} - -func (m *LogUsageRequest) GetVersionsOnly() bool { - if m != nil && m.VersionsOnly != nil { - return *m.VersionsOnly - } - return false -} - -type LogUsageResponse struct { - Usage []*LogUsageRecord `protobuf:"bytes,1,rep,name=usage" json:"usage,omitempty"` - Summary *LogUsageRecord `protobuf:"bytes,2,opt,name=summary" json:"summary,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *LogUsageResponse) Reset() { *m = LogUsageResponse{} } -func (m *LogUsageResponse) String() string { return proto.CompactTextString(m) } -func (*LogUsageResponse) ProtoMessage() {} - -func (m *LogUsageResponse) GetUsage() []*LogUsageRecord { - if m != nil { - return m.Usage - } - return nil -} - -func (m *LogUsageResponse) GetSummary() *LogUsageRecord { - if m != nil { - return m.Summary - } - return nil -} - -func init() { -} diff --git a/vendor/google.golang.org/appengine/internal/log/log_service.proto b/vendor/google.golang.org/appengine/internal/log/log_service.proto deleted file mode 100644 index 8981dc4..0000000 --- a/vendor/google.golang.org/appengine/internal/log/log_service.proto +++ /dev/null @@ -1,150 +0,0 @@ -syntax = "proto2"; -option go_package = "log"; - -package appengine; - -message LogServiceError { - enum ErrorCode { - OK = 0; - INVALID_REQUEST = 1; - STORAGE_ERROR = 2; - } -} - -message UserAppLogLine { - required int64 timestamp_usec = 1; - required int64 level = 2; - required string message = 3; -} - -message UserAppLogGroup { - repeated UserAppLogLine log_line = 2; -} - -message FlushRequest { - optional bytes logs = 1; -} 
- -message SetStatusRequest { - required string status = 1; -} - - -message LogOffset { - optional bytes request_id = 1; -} - -message LogLine { - required int64 time = 1; - required int32 level = 2; - required string log_message = 3; -} - -message RequestLog { - required string app_id = 1; - optional string module_id = 37 [default="default"]; - required string version_id = 2; - required bytes request_id = 3; - optional LogOffset offset = 35; - required string ip = 4; - optional string nickname = 5; - required int64 start_time = 6; - required int64 end_time = 7; - required int64 latency = 8; - required int64 mcycles = 9; - required string method = 10; - required string resource = 11; - required string http_version = 12; - required int32 status = 13; - required int64 response_size = 14; - optional string referrer = 15; - optional string user_agent = 16; - required string url_map_entry = 17; - required string combined = 18; - optional int64 api_mcycles = 19; - optional string host = 20; - optional double cost = 21; - - optional string task_queue_name = 22; - optional string task_name = 23; - - optional bool was_loading_request = 24; - optional int64 pending_time = 25; - optional int32 replica_index = 26 [default = -1]; - optional bool finished = 27 [default = true]; - optional bytes clone_key = 28; - - repeated LogLine line = 29; - - optional bool lines_incomplete = 36; - optional bytes app_engine_release = 38; - - optional int32 exit_reason = 30; - optional bool was_throttled_for_time = 31; - optional bool was_throttled_for_requests = 32; - optional int64 throttled_time = 33; - - optional bytes server_name = 34; -} - -message LogModuleVersion { - optional string module_id = 1 [default="default"]; - optional string version_id = 2; -} - -message LogReadRequest { - required string app_id = 1; - repeated string version_id = 2; - repeated LogModuleVersion module_version = 19; - - optional int64 start_time = 3; - optional int64 end_time = 4; - optional LogOffset offset = 5; - repeated bytes request_id = 6; - - optional int32 minimum_log_level = 7; - optional bool include_incomplete = 8; - optional int64 count = 9; - - optional string combined_log_regex = 14; - optional string host_regex = 15; - optional int32 replica_index = 16; - - optional bool include_app_logs = 10; - optional int32 app_logs_per_request = 17; - optional bool include_host = 11; - optional bool include_all = 12; - optional bool cache_iterator = 13; - optional int32 num_shards = 18; -} - -message LogReadResponse { - repeated RequestLog log = 1; - optional LogOffset offset = 2; - optional int64 last_end_time = 3; -} - -message LogUsageRecord { - optional string version_id = 1; - optional int32 start_time = 2; - optional int32 end_time = 3; - optional int64 count = 4; - optional int64 total_size = 5; - optional int32 records = 6; -} - -message LogUsageRequest { - required string app_id = 1; - repeated string version_id = 2; - optional int32 start_time = 3; - optional int32 end_time = 4; - optional uint32 resolution_hours = 5 [default = 1]; - optional bool combine_versions = 6; - optional int32 usage_version = 7; - optional bool versions_only = 8; -} - -message LogUsageResponse { - repeated LogUsageRecord usage = 1; - optional LogUsageRecord summary = 2; -} diff --git a/vendor/google.golang.org/appengine/internal/main.go b/vendor/google.golang.org/appengine/internal/main.go deleted file mode 100644 index 4903616..0000000 --- a/vendor/google.golang.org/appengine/internal/main.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2011 
Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// +build appengine - -package internal - -import ( - "appengine_internal" -) - -func Main() { - appengine_internal.Main() -} diff --git a/vendor/google.golang.org/appengine/internal/main_vm.go b/vendor/google.golang.org/appengine/internal/main_vm.go deleted file mode 100644 index 57331ad..0000000 --- a/vendor/google.golang.org/appengine/internal/main_vm.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// +build !appengine - -package internal - -import ( - "io" - "log" - "net/http" - "net/url" - "os" -) - -func Main() { - installHealthChecker(http.DefaultServeMux) - - port := "8080" - if s := os.Getenv("PORT"); s != "" { - port = s - } - - if err := http.ListenAndServe(":"+port, http.HandlerFunc(handleHTTP)); err != nil { - log.Fatalf("http.ListenAndServe: %v", err) - } -} - -func installHealthChecker(mux *http.ServeMux) { - // If no health check handler has been installed by this point, add a trivial one. - const healthPath = "/_ah/health" - hreq := &http.Request{ - Method: "GET", - URL: &url.URL{ - Path: healthPath, - }, - } - if _, pat := mux.Handler(hreq); pat != healthPath { - mux.HandleFunc(healthPath, func(w http.ResponseWriter, r *http.Request) { - io.WriteString(w, "ok") - }) - } -} diff --git a/vendor/google.golang.org/appengine/internal/memcache/memcache_service.pb.go b/vendor/google.golang.org/appengine/internal/memcache/memcache_service.pb.go deleted file mode 100644 index 252fef8..0000000 --- a/vendor/google.golang.org/appengine/internal/memcache/memcache_service.pb.go +++ /dev/null @@ -1,938 +0,0 @@ -// Code generated by protoc-gen-go. -// source: google.golang.org/appengine/internal/memcache/memcache_service.proto -// DO NOT EDIT! - -/* -Package memcache is a generated protocol buffer package. - -It is generated from these files: - google.golang.org/appengine/internal/memcache/memcache_service.proto - -It has these top-level messages: - MemcacheServiceError - AppOverride - MemcacheGetRequest - MemcacheGetResponse - MemcacheSetRequest - MemcacheSetResponse - MemcacheDeleteRequest - MemcacheDeleteResponse - MemcacheIncrementRequest - MemcacheIncrementResponse - MemcacheBatchIncrementRequest - MemcacheBatchIncrementResponse - MemcacheFlushRequest - MemcacheFlushResponse - MemcacheStatsRequest - MergedNamespaceStats - MemcacheStatsResponse - MemcacheGrabTailRequest - MemcacheGrabTailResponse -*/ -package memcache - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -type MemcacheServiceError_ErrorCode int32 - -const ( - MemcacheServiceError_OK MemcacheServiceError_ErrorCode = 0 - MemcacheServiceError_UNSPECIFIED_ERROR MemcacheServiceError_ErrorCode = 1 - MemcacheServiceError_NAMESPACE_NOT_SET MemcacheServiceError_ErrorCode = 2 - MemcacheServiceError_PERMISSION_DENIED MemcacheServiceError_ErrorCode = 3 - MemcacheServiceError_INVALID_VALUE MemcacheServiceError_ErrorCode = 6 -) - -var MemcacheServiceError_ErrorCode_name = map[int32]string{ - 0: "OK", - 1: "UNSPECIFIED_ERROR", - 2: "NAMESPACE_NOT_SET", - 3: "PERMISSION_DENIED", - 6: "INVALID_VALUE", -} -var MemcacheServiceError_ErrorCode_value = map[string]int32{ - "OK": 0, - "UNSPECIFIED_ERROR": 1, - "NAMESPACE_NOT_SET": 2, - "PERMISSION_DENIED": 3, - "INVALID_VALUE": 6, -} - -func (x MemcacheServiceError_ErrorCode) Enum() *MemcacheServiceError_ErrorCode { - p := new(MemcacheServiceError_ErrorCode) - *p = x - return p -} -func (x MemcacheServiceError_ErrorCode) String() string { - return proto.EnumName(MemcacheServiceError_ErrorCode_name, int32(x)) -} -func (x *MemcacheServiceError_ErrorCode) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(MemcacheServiceError_ErrorCode_value, data, "MemcacheServiceError_ErrorCode") - if err != nil { - return err - } - *x = MemcacheServiceError_ErrorCode(value) - return nil -} - -type MemcacheSetRequest_SetPolicy int32 - -const ( - MemcacheSetRequest_SET MemcacheSetRequest_SetPolicy = 1 - MemcacheSetRequest_ADD MemcacheSetRequest_SetPolicy = 2 - MemcacheSetRequest_REPLACE MemcacheSetRequest_SetPolicy = 3 - MemcacheSetRequest_CAS MemcacheSetRequest_SetPolicy = 4 -) - -var MemcacheSetRequest_SetPolicy_name = map[int32]string{ - 1: "SET", - 2: "ADD", - 3: "REPLACE", - 4: "CAS", -} -var MemcacheSetRequest_SetPolicy_value = map[string]int32{ - "SET": 1, - "ADD": 2, - "REPLACE": 3, - "CAS": 4, -} - -func (x MemcacheSetRequest_SetPolicy) Enum() *MemcacheSetRequest_SetPolicy { - p := new(MemcacheSetRequest_SetPolicy) - *p = x - return p -} -func (x MemcacheSetRequest_SetPolicy) String() string { - return proto.EnumName(MemcacheSetRequest_SetPolicy_name, int32(x)) -} -func (x *MemcacheSetRequest_SetPolicy) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(MemcacheSetRequest_SetPolicy_value, data, "MemcacheSetRequest_SetPolicy") - if err != nil { - return err - } - *x = MemcacheSetRequest_SetPolicy(value) - return nil -} - -type MemcacheSetResponse_SetStatusCode int32 - -const ( - MemcacheSetResponse_STORED MemcacheSetResponse_SetStatusCode = 1 - MemcacheSetResponse_NOT_STORED MemcacheSetResponse_SetStatusCode = 2 - MemcacheSetResponse_ERROR MemcacheSetResponse_SetStatusCode = 3 - MemcacheSetResponse_EXISTS MemcacheSetResponse_SetStatusCode = 4 -) - -var MemcacheSetResponse_SetStatusCode_name = map[int32]string{ - 1: "STORED", - 2: "NOT_STORED", - 3: "ERROR", - 4: "EXISTS", -} -var MemcacheSetResponse_SetStatusCode_value = map[string]int32{ - "STORED": 1, - "NOT_STORED": 2, - "ERROR": 3, - "EXISTS": 4, -} - -func (x MemcacheSetResponse_SetStatusCode) Enum() *MemcacheSetResponse_SetStatusCode { - p := new(MemcacheSetResponse_SetStatusCode) - *p = x - return p -} -func (x MemcacheSetResponse_SetStatusCode) String() string { - return proto.EnumName(MemcacheSetResponse_SetStatusCode_name, int32(x)) -} -func (x *MemcacheSetResponse_SetStatusCode) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(MemcacheSetResponse_SetStatusCode_value, data, 
"MemcacheSetResponse_SetStatusCode") - if err != nil { - return err - } - *x = MemcacheSetResponse_SetStatusCode(value) - return nil -} - -type MemcacheDeleteResponse_DeleteStatusCode int32 - -const ( - MemcacheDeleteResponse_DELETED MemcacheDeleteResponse_DeleteStatusCode = 1 - MemcacheDeleteResponse_NOT_FOUND MemcacheDeleteResponse_DeleteStatusCode = 2 -) - -var MemcacheDeleteResponse_DeleteStatusCode_name = map[int32]string{ - 1: "DELETED", - 2: "NOT_FOUND", -} -var MemcacheDeleteResponse_DeleteStatusCode_value = map[string]int32{ - "DELETED": 1, - "NOT_FOUND": 2, -} - -func (x MemcacheDeleteResponse_DeleteStatusCode) Enum() *MemcacheDeleteResponse_DeleteStatusCode { - p := new(MemcacheDeleteResponse_DeleteStatusCode) - *p = x - return p -} -func (x MemcacheDeleteResponse_DeleteStatusCode) String() string { - return proto.EnumName(MemcacheDeleteResponse_DeleteStatusCode_name, int32(x)) -} -func (x *MemcacheDeleteResponse_DeleteStatusCode) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(MemcacheDeleteResponse_DeleteStatusCode_value, data, "MemcacheDeleteResponse_DeleteStatusCode") - if err != nil { - return err - } - *x = MemcacheDeleteResponse_DeleteStatusCode(value) - return nil -} - -type MemcacheIncrementRequest_Direction int32 - -const ( - MemcacheIncrementRequest_INCREMENT MemcacheIncrementRequest_Direction = 1 - MemcacheIncrementRequest_DECREMENT MemcacheIncrementRequest_Direction = 2 -) - -var MemcacheIncrementRequest_Direction_name = map[int32]string{ - 1: "INCREMENT", - 2: "DECREMENT", -} -var MemcacheIncrementRequest_Direction_value = map[string]int32{ - "INCREMENT": 1, - "DECREMENT": 2, -} - -func (x MemcacheIncrementRequest_Direction) Enum() *MemcacheIncrementRequest_Direction { - p := new(MemcacheIncrementRequest_Direction) - *p = x - return p -} -func (x MemcacheIncrementRequest_Direction) String() string { - return proto.EnumName(MemcacheIncrementRequest_Direction_name, int32(x)) -} -func (x *MemcacheIncrementRequest_Direction) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(MemcacheIncrementRequest_Direction_value, data, "MemcacheIncrementRequest_Direction") - if err != nil { - return err - } - *x = MemcacheIncrementRequest_Direction(value) - return nil -} - -type MemcacheIncrementResponse_IncrementStatusCode int32 - -const ( - MemcacheIncrementResponse_OK MemcacheIncrementResponse_IncrementStatusCode = 1 - MemcacheIncrementResponse_NOT_CHANGED MemcacheIncrementResponse_IncrementStatusCode = 2 - MemcacheIncrementResponse_ERROR MemcacheIncrementResponse_IncrementStatusCode = 3 -) - -var MemcacheIncrementResponse_IncrementStatusCode_name = map[int32]string{ - 1: "OK", - 2: "NOT_CHANGED", - 3: "ERROR", -} -var MemcacheIncrementResponse_IncrementStatusCode_value = map[string]int32{ - "OK": 1, - "NOT_CHANGED": 2, - "ERROR": 3, -} - -func (x MemcacheIncrementResponse_IncrementStatusCode) Enum() *MemcacheIncrementResponse_IncrementStatusCode { - p := new(MemcacheIncrementResponse_IncrementStatusCode) - *p = x - return p -} -func (x MemcacheIncrementResponse_IncrementStatusCode) String() string { - return proto.EnumName(MemcacheIncrementResponse_IncrementStatusCode_name, int32(x)) -} -func (x *MemcacheIncrementResponse_IncrementStatusCode) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(MemcacheIncrementResponse_IncrementStatusCode_value, data, "MemcacheIncrementResponse_IncrementStatusCode") - if err != nil { - return err - } - *x = MemcacheIncrementResponse_IncrementStatusCode(value) - return nil 
-} - -type MemcacheServiceError struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *MemcacheServiceError) Reset() { *m = MemcacheServiceError{} } -func (m *MemcacheServiceError) String() string { return proto.CompactTextString(m) } -func (*MemcacheServiceError) ProtoMessage() {} - -type AppOverride struct { - AppId *string `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"` - NumMemcachegBackends *int32 `protobuf:"varint,2,opt,name=num_memcacheg_backends" json:"num_memcacheg_backends,omitempty"` - IgnoreShardlock *bool `protobuf:"varint,3,opt,name=ignore_shardlock" json:"ignore_shardlock,omitempty"` - MemcachePoolHint *string `protobuf:"bytes,4,opt,name=memcache_pool_hint" json:"memcache_pool_hint,omitempty"` - MemcacheShardingStrategy []byte `protobuf:"bytes,5,opt,name=memcache_sharding_strategy" json:"memcache_sharding_strategy,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *AppOverride) Reset() { *m = AppOverride{} } -func (m *AppOverride) String() string { return proto.CompactTextString(m) } -func (*AppOverride) ProtoMessage() {} - -func (m *AppOverride) GetAppId() string { - if m != nil && m.AppId != nil { - return *m.AppId - } - return "" -} - -func (m *AppOverride) GetNumMemcachegBackends() int32 { - if m != nil && m.NumMemcachegBackends != nil { - return *m.NumMemcachegBackends - } - return 0 -} - -func (m *AppOverride) GetIgnoreShardlock() bool { - if m != nil && m.IgnoreShardlock != nil { - return *m.IgnoreShardlock - } - return false -} - -func (m *AppOverride) GetMemcachePoolHint() string { - if m != nil && m.MemcachePoolHint != nil { - return *m.MemcachePoolHint - } - return "" -} - -func (m *AppOverride) GetMemcacheShardingStrategy() []byte { - if m != nil { - return m.MemcacheShardingStrategy - } - return nil -} - -type MemcacheGetRequest struct { - Key [][]byte `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"` - NameSpace *string `protobuf:"bytes,2,opt,name=name_space,def=" json:"name_space,omitempty"` - ForCas *bool `protobuf:"varint,4,opt,name=for_cas" json:"for_cas,omitempty"` - Override *AppOverride `protobuf:"bytes,5,opt,name=override" json:"override,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MemcacheGetRequest) Reset() { *m = MemcacheGetRequest{} } -func (m *MemcacheGetRequest) String() string { return proto.CompactTextString(m) } -func (*MemcacheGetRequest) ProtoMessage() {} - -func (m *MemcacheGetRequest) GetKey() [][]byte { - if m != nil { - return m.Key - } - return nil -} - -func (m *MemcacheGetRequest) GetNameSpace() string { - if m != nil && m.NameSpace != nil { - return *m.NameSpace - } - return "" -} - -func (m *MemcacheGetRequest) GetForCas() bool { - if m != nil && m.ForCas != nil { - return *m.ForCas - } - return false -} - -func (m *MemcacheGetRequest) GetOverride() *AppOverride { - if m != nil { - return m.Override - } - return nil -} - -type MemcacheGetResponse struct { - Item []*MemcacheGetResponse_Item `protobuf:"group,1,rep,name=Item" json:"item,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MemcacheGetResponse) Reset() { *m = MemcacheGetResponse{} } -func (m *MemcacheGetResponse) String() string { return proto.CompactTextString(m) } -func (*MemcacheGetResponse) ProtoMessage() {} - -func (m *MemcacheGetResponse) GetItem() []*MemcacheGetResponse_Item { - if m != nil { - return m.Item - } - return nil -} - -type MemcacheGetResponse_Item struct { - Key []byte `protobuf:"bytes,2,req,name=key" json:"key,omitempty"` - Value []byte `protobuf:"bytes,3,req,name=value" 
json:"value,omitempty"` - Flags *uint32 `protobuf:"fixed32,4,opt,name=flags" json:"flags,omitempty"` - CasId *uint64 `protobuf:"fixed64,5,opt,name=cas_id" json:"cas_id,omitempty"` - ExpiresInSeconds *int32 `protobuf:"varint,6,opt,name=expires_in_seconds" json:"expires_in_seconds,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MemcacheGetResponse_Item) Reset() { *m = MemcacheGetResponse_Item{} } -func (m *MemcacheGetResponse_Item) String() string { return proto.CompactTextString(m) } -func (*MemcacheGetResponse_Item) ProtoMessage() {} - -func (m *MemcacheGetResponse_Item) GetKey() []byte { - if m != nil { - return m.Key - } - return nil -} - -func (m *MemcacheGetResponse_Item) GetValue() []byte { - if m != nil { - return m.Value - } - return nil -} - -func (m *MemcacheGetResponse_Item) GetFlags() uint32 { - if m != nil && m.Flags != nil { - return *m.Flags - } - return 0 -} - -func (m *MemcacheGetResponse_Item) GetCasId() uint64 { - if m != nil && m.CasId != nil { - return *m.CasId - } - return 0 -} - -func (m *MemcacheGetResponse_Item) GetExpiresInSeconds() int32 { - if m != nil && m.ExpiresInSeconds != nil { - return *m.ExpiresInSeconds - } - return 0 -} - -type MemcacheSetRequest struct { - Item []*MemcacheSetRequest_Item `protobuf:"group,1,rep,name=Item" json:"item,omitempty"` - NameSpace *string `protobuf:"bytes,7,opt,name=name_space,def=" json:"name_space,omitempty"` - Override *AppOverride `protobuf:"bytes,10,opt,name=override" json:"override,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MemcacheSetRequest) Reset() { *m = MemcacheSetRequest{} } -func (m *MemcacheSetRequest) String() string { return proto.CompactTextString(m) } -func (*MemcacheSetRequest) ProtoMessage() {} - -func (m *MemcacheSetRequest) GetItem() []*MemcacheSetRequest_Item { - if m != nil { - return m.Item - } - return nil -} - -func (m *MemcacheSetRequest) GetNameSpace() string { - if m != nil && m.NameSpace != nil { - return *m.NameSpace - } - return "" -} - -func (m *MemcacheSetRequest) GetOverride() *AppOverride { - if m != nil { - return m.Override - } - return nil -} - -type MemcacheSetRequest_Item struct { - Key []byte `protobuf:"bytes,2,req,name=key" json:"key,omitempty"` - Value []byte `protobuf:"bytes,3,req,name=value" json:"value,omitempty"` - Flags *uint32 `protobuf:"fixed32,4,opt,name=flags" json:"flags,omitempty"` - SetPolicy *MemcacheSetRequest_SetPolicy `protobuf:"varint,5,opt,name=set_policy,enum=appengine.MemcacheSetRequest_SetPolicy,def=1" json:"set_policy,omitempty"` - ExpirationTime *uint32 `protobuf:"fixed32,6,opt,name=expiration_time,def=0" json:"expiration_time,omitempty"` - CasId *uint64 `protobuf:"fixed64,8,opt,name=cas_id" json:"cas_id,omitempty"` - ForCas *bool `protobuf:"varint,9,opt,name=for_cas" json:"for_cas,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MemcacheSetRequest_Item) Reset() { *m = MemcacheSetRequest_Item{} } -func (m *MemcacheSetRequest_Item) String() string { return proto.CompactTextString(m) } -func (*MemcacheSetRequest_Item) ProtoMessage() {} - -const Default_MemcacheSetRequest_Item_SetPolicy MemcacheSetRequest_SetPolicy = MemcacheSetRequest_SET -const Default_MemcacheSetRequest_Item_ExpirationTime uint32 = 0 - -func (m *MemcacheSetRequest_Item) GetKey() []byte { - if m != nil { - return m.Key - } - return nil -} - -func (m *MemcacheSetRequest_Item) GetValue() []byte { - if m != nil { - return m.Value - } - return nil -} - -func (m *MemcacheSetRequest_Item) GetFlags() uint32 { - if m != nil && m.Flags != nil { - return 
*m.Flags - } - return 0 -} - -func (m *MemcacheSetRequest_Item) GetSetPolicy() MemcacheSetRequest_SetPolicy { - if m != nil && m.SetPolicy != nil { - return *m.SetPolicy - } - return Default_MemcacheSetRequest_Item_SetPolicy -} - -func (m *MemcacheSetRequest_Item) GetExpirationTime() uint32 { - if m != nil && m.ExpirationTime != nil { - return *m.ExpirationTime - } - return Default_MemcacheSetRequest_Item_ExpirationTime -} - -func (m *MemcacheSetRequest_Item) GetCasId() uint64 { - if m != nil && m.CasId != nil { - return *m.CasId - } - return 0 -} - -func (m *MemcacheSetRequest_Item) GetForCas() bool { - if m != nil && m.ForCas != nil { - return *m.ForCas - } - return false -} - -type MemcacheSetResponse struct { - SetStatus []MemcacheSetResponse_SetStatusCode `protobuf:"varint,1,rep,name=set_status,enum=appengine.MemcacheSetResponse_SetStatusCode" json:"set_status,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MemcacheSetResponse) Reset() { *m = MemcacheSetResponse{} } -func (m *MemcacheSetResponse) String() string { return proto.CompactTextString(m) } -func (*MemcacheSetResponse) ProtoMessage() {} - -func (m *MemcacheSetResponse) GetSetStatus() []MemcacheSetResponse_SetStatusCode { - if m != nil { - return m.SetStatus - } - return nil -} - -type MemcacheDeleteRequest struct { - Item []*MemcacheDeleteRequest_Item `protobuf:"group,1,rep,name=Item" json:"item,omitempty"` - NameSpace *string `protobuf:"bytes,4,opt,name=name_space,def=" json:"name_space,omitempty"` - Override *AppOverride `protobuf:"bytes,5,opt,name=override" json:"override,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MemcacheDeleteRequest) Reset() { *m = MemcacheDeleteRequest{} } -func (m *MemcacheDeleteRequest) String() string { return proto.CompactTextString(m) } -func (*MemcacheDeleteRequest) ProtoMessage() {} - -func (m *MemcacheDeleteRequest) GetItem() []*MemcacheDeleteRequest_Item { - if m != nil { - return m.Item - } - return nil -} - -func (m *MemcacheDeleteRequest) GetNameSpace() string { - if m != nil && m.NameSpace != nil { - return *m.NameSpace - } - return "" -} - -func (m *MemcacheDeleteRequest) GetOverride() *AppOverride { - if m != nil { - return m.Override - } - return nil -} - -type MemcacheDeleteRequest_Item struct { - Key []byte `protobuf:"bytes,2,req,name=key" json:"key,omitempty"` - DeleteTime *uint32 `protobuf:"fixed32,3,opt,name=delete_time,def=0" json:"delete_time,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MemcacheDeleteRequest_Item) Reset() { *m = MemcacheDeleteRequest_Item{} } -func (m *MemcacheDeleteRequest_Item) String() string { return proto.CompactTextString(m) } -func (*MemcacheDeleteRequest_Item) ProtoMessage() {} - -const Default_MemcacheDeleteRequest_Item_DeleteTime uint32 = 0 - -func (m *MemcacheDeleteRequest_Item) GetKey() []byte { - if m != nil { - return m.Key - } - return nil -} - -func (m *MemcacheDeleteRequest_Item) GetDeleteTime() uint32 { - if m != nil && m.DeleteTime != nil { - return *m.DeleteTime - } - return Default_MemcacheDeleteRequest_Item_DeleteTime -} - -type MemcacheDeleteResponse struct { - DeleteStatus []MemcacheDeleteResponse_DeleteStatusCode `protobuf:"varint,1,rep,name=delete_status,enum=appengine.MemcacheDeleteResponse_DeleteStatusCode" json:"delete_status,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MemcacheDeleteResponse) Reset() { *m = MemcacheDeleteResponse{} } -func (m *MemcacheDeleteResponse) String() string { return proto.CompactTextString(m) } -func (*MemcacheDeleteResponse) 
ProtoMessage() {} - -func (m *MemcacheDeleteResponse) GetDeleteStatus() []MemcacheDeleteResponse_DeleteStatusCode { - if m != nil { - return m.DeleteStatus - } - return nil -} - -type MemcacheIncrementRequest struct { - Key []byte `protobuf:"bytes,1,req,name=key" json:"key,omitempty"` - NameSpace *string `protobuf:"bytes,4,opt,name=name_space,def=" json:"name_space,omitempty"` - Delta *uint64 `protobuf:"varint,2,opt,name=delta,def=1" json:"delta,omitempty"` - Direction *MemcacheIncrementRequest_Direction `protobuf:"varint,3,opt,name=direction,enum=appengine.MemcacheIncrementRequest_Direction,def=1" json:"direction,omitempty"` - InitialValue *uint64 `protobuf:"varint,5,opt,name=initial_value" json:"initial_value,omitempty"` - InitialFlags *uint32 `protobuf:"fixed32,6,opt,name=initial_flags" json:"initial_flags,omitempty"` - Override *AppOverride `protobuf:"bytes,7,opt,name=override" json:"override,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MemcacheIncrementRequest) Reset() { *m = MemcacheIncrementRequest{} } -func (m *MemcacheIncrementRequest) String() string { return proto.CompactTextString(m) } -func (*MemcacheIncrementRequest) ProtoMessage() {} - -const Default_MemcacheIncrementRequest_Delta uint64 = 1 -const Default_MemcacheIncrementRequest_Direction MemcacheIncrementRequest_Direction = MemcacheIncrementRequest_INCREMENT - -func (m *MemcacheIncrementRequest) GetKey() []byte { - if m != nil { - return m.Key - } - return nil -} - -func (m *MemcacheIncrementRequest) GetNameSpace() string { - if m != nil && m.NameSpace != nil { - return *m.NameSpace - } - return "" -} - -func (m *MemcacheIncrementRequest) GetDelta() uint64 { - if m != nil && m.Delta != nil { - return *m.Delta - } - return Default_MemcacheIncrementRequest_Delta -} - -func (m *MemcacheIncrementRequest) GetDirection() MemcacheIncrementRequest_Direction { - if m != nil && m.Direction != nil { - return *m.Direction - } - return Default_MemcacheIncrementRequest_Direction -} - -func (m *MemcacheIncrementRequest) GetInitialValue() uint64 { - if m != nil && m.InitialValue != nil { - return *m.InitialValue - } - return 0 -} - -func (m *MemcacheIncrementRequest) GetInitialFlags() uint32 { - if m != nil && m.InitialFlags != nil { - return *m.InitialFlags - } - return 0 -} - -func (m *MemcacheIncrementRequest) GetOverride() *AppOverride { - if m != nil { - return m.Override - } - return nil -} - -type MemcacheIncrementResponse struct { - NewValue *uint64 `protobuf:"varint,1,opt,name=new_value" json:"new_value,omitempty"` - IncrementStatus *MemcacheIncrementResponse_IncrementStatusCode `protobuf:"varint,2,opt,name=increment_status,enum=appengine.MemcacheIncrementResponse_IncrementStatusCode" json:"increment_status,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MemcacheIncrementResponse) Reset() { *m = MemcacheIncrementResponse{} } -func (m *MemcacheIncrementResponse) String() string { return proto.CompactTextString(m) } -func (*MemcacheIncrementResponse) ProtoMessage() {} - -func (m *MemcacheIncrementResponse) GetNewValue() uint64 { - if m != nil && m.NewValue != nil { - return *m.NewValue - } - return 0 -} - -func (m *MemcacheIncrementResponse) GetIncrementStatus() MemcacheIncrementResponse_IncrementStatusCode { - if m != nil && m.IncrementStatus != nil { - return *m.IncrementStatus - } - return MemcacheIncrementResponse_OK -} - -type MemcacheBatchIncrementRequest struct { - NameSpace *string `protobuf:"bytes,1,opt,name=name_space,def=" json:"name_space,omitempty"` - Item []*MemcacheIncrementRequest 
`protobuf:"bytes,2,rep,name=item" json:"item,omitempty"` - Override *AppOverride `protobuf:"bytes,3,opt,name=override" json:"override,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MemcacheBatchIncrementRequest) Reset() { *m = MemcacheBatchIncrementRequest{} } -func (m *MemcacheBatchIncrementRequest) String() string { return proto.CompactTextString(m) } -func (*MemcacheBatchIncrementRequest) ProtoMessage() {} - -func (m *MemcacheBatchIncrementRequest) GetNameSpace() string { - if m != nil && m.NameSpace != nil { - return *m.NameSpace - } - return "" -} - -func (m *MemcacheBatchIncrementRequest) GetItem() []*MemcacheIncrementRequest { - if m != nil { - return m.Item - } - return nil -} - -func (m *MemcacheBatchIncrementRequest) GetOverride() *AppOverride { - if m != nil { - return m.Override - } - return nil -} - -type MemcacheBatchIncrementResponse struct { - Item []*MemcacheIncrementResponse `protobuf:"bytes,1,rep,name=item" json:"item,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MemcacheBatchIncrementResponse) Reset() { *m = MemcacheBatchIncrementResponse{} } -func (m *MemcacheBatchIncrementResponse) String() string { return proto.CompactTextString(m) } -func (*MemcacheBatchIncrementResponse) ProtoMessage() {} - -func (m *MemcacheBatchIncrementResponse) GetItem() []*MemcacheIncrementResponse { - if m != nil { - return m.Item - } - return nil -} - -type MemcacheFlushRequest struct { - Override *AppOverride `protobuf:"bytes,1,opt,name=override" json:"override,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MemcacheFlushRequest) Reset() { *m = MemcacheFlushRequest{} } -func (m *MemcacheFlushRequest) String() string { return proto.CompactTextString(m) } -func (*MemcacheFlushRequest) ProtoMessage() {} - -func (m *MemcacheFlushRequest) GetOverride() *AppOverride { - if m != nil { - return m.Override - } - return nil -} - -type MemcacheFlushResponse struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *MemcacheFlushResponse) Reset() { *m = MemcacheFlushResponse{} } -func (m *MemcacheFlushResponse) String() string { return proto.CompactTextString(m) } -func (*MemcacheFlushResponse) ProtoMessage() {} - -type MemcacheStatsRequest struct { - Override *AppOverride `protobuf:"bytes,1,opt,name=override" json:"override,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MemcacheStatsRequest) Reset() { *m = MemcacheStatsRequest{} } -func (m *MemcacheStatsRequest) String() string { return proto.CompactTextString(m) } -func (*MemcacheStatsRequest) ProtoMessage() {} - -func (m *MemcacheStatsRequest) GetOverride() *AppOverride { - if m != nil { - return m.Override - } - return nil -} - -type MergedNamespaceStats struct { - Hits *uint64 `protobuf:"varint,1,req,name=hits" json:"hits,omitempty"` - Misses *uint64 `protobuf:"varint,2,req,name=misses" json:"misses,omitempty"` - ByteHits *uint64 `protobuf:"varint,3,req,name=byte_hits" json:"byte_hits,omitempty"` - Items *uint64 `protobuf:"varint,4,req,name=items" json:"items,omitempty"` - Bytes *uint64 `protobuf:"varint,5,req,name=bytes" json:"bytes,omitempty"` - OldestItemAge *uint32 `protobuf:"fixed32,6,req,name=oldest_item_age" json:"oldest_item_age,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MergedNamespaceStats) Reset() { *m = MergedNamespaceStats{} } -func (m *MergedNamespaceStats) String() string { return proto.CompactTextString(m) } -func (*MergedNamespaceStats) ProtoMessage() {} - -func (m *MergedNamespaceStats) GetHits() uint64 { - if m != nil && m.Hits != nil { - 
return *m.Hits - } - return 0 -} - -func (m *MergedNamespaceStats) GetMisses() uint64 { - if m != nil && m.Misses != nil { - return *m.Misses - } - return 0 -} - -func (m *MergedNamespaceStats) GetByteHits() uint64 { - if m != nil && m.ByteHits != nil { - return *m.ByteHits - } - return 0 -} - -func (m *MergedNamespaceStats) GetItems() uint64 { - if m != nil && m.Items != nil { - return *m.Items - } - return 0 -} - -func (m *MergedNamespaceStats) GetBytes() uint64 { - if m != nil && m.Bytes != nil { - return *m.Bytes - } - return 0 -} - -func (m *MergedNamespaceStats) GetOldestItemAge() uint32 { - if m != nil && m.OldestItemAge != nil { - return *m.OldestItemAge - } - return 0 -} - -type MemcacheStatsResponse struct { - Stats *MergedNamespaceStats `protobuf:"bytes,1,opt,name=stats" json:"stats,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MemcacheStatsResponse) Reset() { *m = MemcacheStatsResponse{} } -func (m *MemcacheStatsResponse) String() string { return proto.CompactTextString(m) } -func (*MemcacheStatsResponse) ProtoMessage() {} - -func (m *MemcacheStatsResponse) GetStats() *MergedNamespaceStats { - if m != nil { - return m.Stats - } - return nil -} - -type MemcacheGrabTailRequest struct { - ItemCount *int32 `protobuf:"varint,1,req,name=item_count" json:"item_count,omitempty"` - NameSpace *string `protobuf:"bytes,2,opt,name=name_space,def=" json:"name_space,omitempty"` - Override *AppOverride `protobuf:"bytes,3,opt,name=override" json:"override,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MemcacheGrabTailRequest) Reset() { *m = MemcacheGrabTailRequest{} } -func (m *MemcacheGrabTailRequest) String() string { return proto.CompactTextString(m) } -func (*MemcacheGrabTailRequest) ProtoMessage() {} - -func (m *MemcacheGrabTailRequest) GetItemCount() int32 { - if m != nil && m.ItemCount != nil { - return *m.ItemCount - } - return 0 -} - -func (m *MemcacheGrabTailRequest) GetNameSpace() string { - if m != nil && m.NameSpace != nil { - return *m.NameSpace - } - return "" -} - -func (m *MemcacheGrabTailRequest) GetOverride() *AppOverride { - if m != nil { - return m.Override - } - return nil -} - -type MemcacheGrabTailResponse struct { - Item []*MemcacheGrabTailResponse_Item `protobuf:"group,1,rep,name=Item" json:"item,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MemcacheGrabTailResponse) Reset() { *m = MemcacheGrabTailResponse{} } -func (m *MemcacheGrabTailResponse) String() string { return proto.CompactTextString(m) } -func (*MemcacheGrabTailResponse) ProtoMessage() {} - -func (m *MemcacheGrabTailResponse) GetItem() []*MemcacheGrabTailResponse_Item { - if m != nil { - return m.Item - } - return nil -} - -type MemcacheGrabTailResponse_Item struct { - Value []byte `protobuf:"bytes,2,req,name=value" json:"value,omitempty"` - Flags *uint32 `protobuf:"fixed32,3,opt,name=flags" json:"flags,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MemcacheGrabTailResponse_Item) Reset() { *m = MemcacheGrabTailResponse_Item{} } -func (m *MemcacheGrabTailResponse_Item) String() string { return proto.CompactTextString(m) } -func (*MemcacheGrabTailResponse_Item) ProtoMessage() {} - -func (m *MemcacheGrabTailResponse_Item) GetValue() []byte { - if m != nil { - return m.Value - } - return nil -} - -func (m *MemcacheGrabTailResponse_Item) GetFlags() uint32 { - if m != nil && m.Flags != nil { - return *m.Flags - } - return 0 -} - -func init() { -} diff --git a/vendor/google.golang.org/appengine/internal/memcache/memcache_service.proto 
b/vendor/google.golang.org/appengine/internal/memcache/memcache_service.proto deleted file mode 100644 index 5f0edcd..0000000 --- a/vendor/google.golang.org/appengine/internal/memcache/memcache_service.proto +++ /dev/null @@ -1,165 +0,0 @@ -syntax = "proto2"; -option go_package = "memcache"; - -package appengine; - -message MemcacheServiceError { - enum ErrorCode { - OK = 0; - UNSPECIFIED_ERROR = 1; - NAMESPACE_NOT_SET = 2; - PERMISSION_DENIED = 3; - INVALID_VALUE = 6; - } -} - -message AppOverride { - required string app_id = 1; - - optional int32 num_memcacheg_backends = 2 [deprecated=true]; - optional bool ignore_shardlock = 3 [deprecated=true]; - optional string memcache_pool_hint = 4 [deprecated=true]; - optional bytes memcache_sharding_strategy = 5 [deprecated=true]; -} - -message MemcacheGetRequest { - repeated bytes key = 1; - optional string name_space = 2 [default = ""]; - optional bool for_cas = 4; - optional AppOverride override = 5; -} - -message MemcacheGetResponse { - repeated group Item = 1 { - required bytes key = 2; - required bytes value = 3; - optional fixed32 flags = 4; - optional fixed64 cas_id = 5; - optional int32 expires_in_seconds = 6; - } -} - -message MemcacheSetRequest { - enum SetPolicy { - SET = 1; - ADD = 2; - REPLACE = 3; - CAS = 4; - } - repeated group Item = 1 { - required bytes key = 2; - required bytes value = 3; - - optional fixed32 flags = 4; - optional SetPolicy set_policy = 5 [default = SET]; - optional fixed32 expiration_time = 6 [default = 0]; - - optional fixed64 cas_id = 8; - optional bool for_cas = 9; - } - optional string name_space = 7 [default = ""]; - optional AppOverride override = 10; -} - -message MemcacheSetResponse { - enum SetStatusCode { - STORED = 1; - NOT_STORED = 2; - ERROR = 3; - EXISTS = 4; - } - repeated SetStatusCode set_status = 1; -} - -message MemcacheDeleteRequest { - repeated group Item = 1 { - required bytes key = 2; - optional fixed32 delete_time = 3 [default = 0]; - } - optional string name_space = 4 [default = ""]; - optional AppOverride override = 5; -} - -message MemcacheDeleteResponse { - enum DeleteStatusCode { - DELETED = 1; - NOT_FOUND = 2; - } - repeated DeleteStatusCode delete_status = 1; -} - -message MemcacheIncrementRequest { - enum Direction { - INCREMENT = 1; - DECREMENT = 2; - } - required bytes key = 1; - optional string name_space = 4 [default = ""]; - - optional uint64 delta = 2 [default = 1]; - optional Direction direction = 3 [default = INCREMENT]; - - optional uint64 initial_value = 5; - optional fixed32 initial_flags = 6; - optional AppOverride override = 7; -} - -message MemcacheIncrementResponse { - enum IncrementStatusCode { - OK = 1; - NOT_CHANGED = 2; - ERROR = 3; - } - - optional uint64 new_value = 1; - optional IncrementStatusCode increment_status = 2; -} - -message MemcacheBatchIncrementRequest { - optional string name_space = 1 [default = ""]; - repeated MemcacheIncrementRequest item = 2; - optional AppOverride override = 3; -} - -message MemcacheBatchIncrementResponse { - repeated MemcacheIncrementResponse item = 1; -} - -message MemcacheFlushRequest { - optional AppOverride override = 1; -} - -message MemcacheFlushResponse { -} - -message MemcacheStatsRequest { - optional AppOverride override = 1; -} - -message MergedNamespaceStats { - required uint64 hits = 1; - required uint64 misses = 2; - required uint64 byte_hits = 3; - - required uint64 items = 4; - required uint64 bytes = 5; - - required fixed32 oldest_item_age = 6; -} - -message MemcacheStatsResponse { - optional 
MergedNamespaceStats stats = 1; -} - -message MemcacheGrabTailRequest { - required int32 item_count = 1; - optional string name_space = 2 [default = ""]; - optional AppOverride override = 3; -} - -message MemcacheGrabTailResponse { - repeated group Item = 1 { - required bytes value = 2; - optional fixed32 flags = 3; - } -} diff --git a/vendor/google.golang.org/appengine/internal/metadata.go b/vendor/google.golang.org/appengine/internal/metadata.go deleted file mode 100644 index 9cc1f71..0000000 --- a/vendor/google.golang.org/appengine/internal/metadata.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2014 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package internal - -// This file has code for accessing metadata. -// -// References: -// https://cloud.google.com/compute/docs/metadata - -import ( - "fmt" - "io/ioutil" - "log" - "net/http" - "net/url" -) - -const ( - metadataHost = "metadata" - metadataPath = "/computeMetadata/v1/" -) - -var ( - metadataRequestHeaders = http.Header{ - "Metadata-Flavor": []string{"Google"}, - } -) - -// TODO(dsymonds): Do we need to support default values, like Python? -func mustGetMetadata(key string) []byte { - b, err := getMetadata(key) - if err != nil { - log.Fatalf("Metadata fetch failed: %v", err) - } - return b -} - -func getMetadata(key string) ([]byte, error) { - // TODO(dsymonds): May need to use url.Parse to support keys with query args. - req := &http.Request{ - Method: "GET", - URL: &url.URL{ - Scheme: "http", - Host: metadataHost, - Path: metadataPath + key, - }, - Header: metadataRequestHeaders, - Host: metadataHost, - } - resp, err := http.DefaultClient.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - if resp.StatusCode != 200 { - return nil, fmt.Errorf("metadata server returned HTTP %d", resp.StatusCode) - } - return ioutil.ReadAll(resp.Body) -} diff --git a/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go b/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go deleted file mode 100644 index a0145ed..0000000 --- a/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go +++ /dev/null @@ -1,375 +0,0 @@ -// Code generated by protoc-gen-go. -// source: google.golang.org/appengine/internal/modules/modules_service.proto -// DO NOT EDIT! - -/* -Package modules is a generated protocol buffer package. - -It is generated from these files: - google.golang.org/appengine/internal/modules/modules_service.proto - -It has these top-level messages: - ModulesServiceError - GetModulesRequest - GetModulesResponse - GetVersionsRequest - GetVersionsResponse - GetDefaultVersionRequest - GetDefaultVersionResponse - GetNumInstancesRequest - GetNumInstancesResponse - SetNumInstancesRequest - SetNumInstancesResponse - StartModuleRequest - StartModuleResponse - StopModuleRequest - StopModuleResponse - GetHostnameRequest - GetHostnameResponse -*/ -package modules - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -type ModulesServiceError_ErrorCode int32 - -const ( - ModulesServiceError_OK ModulesServiceError_ErrorCode = 0 - ModulesServiceError_INVALID_MODULE ModulesServiceError_ErrorCode = 1 - ModulesServiceError_INVALID_VERSION ModulesServiceError_ErrorCode = 2 - ModulesServiceError_INVALID_INSTANCES ModulesServiceError_ErrorCode = 3 - ModulesServiceError_TRANSIENT_ERROR ModulesServiceError_ErrorCode = 4 - ModulesServiceError_UNEXPECTED_STATE ModulesServiceError_ErrorCode = 5 -) - -var ModulesServiceError_ErrorCode_name = map[int32]string{ - 0: "OK", - 1: "INVALID_MODULE", - 2: "INVALID_VERSION", - 3: "INVALID_INSTANCES", - 4: "TRANSIENT_ERROR", - 5: "UNEXPECTED_STATE", -} -var ModulesServiceError_ErrorCode_value = map[string]int32{ - "OK": 0, - "INVALID_MODULE": 1, - "INVALID_VERSION": 2, - "INVALID_INSTANCES": 3, - "TRANSIENT_ERROR": 4, - "UNEXPECTED_STATE": 5, -} - -func (x ModulesServiceError_ErrorCode) Enum() *ModulesServiceError_ErrorCode { - p := new(ModulesServiceError_ErrorCode) - *p = x - return p -} -func (x ModulesServiceError_ErrorCode) String() string { - return proto.EnumName(ModulesServiceError_ErrorCode_name, int32(x)) -} -func (x *ModulesServiceError_ErrorCode) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(ModulesServiceError_ErrorCode_value, data, "ModulesServiceError_ErrorCode") - if err != nil { - return err - } - *x = ModulesServiceError_ErrorCode(value) - return nil -} - -type ModulesServiceError struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *ModulesServiceError) Reset() { *m = ModulesServiceError{} } -func (m *ModulesServiceError) String() string { return proto.CompactTextString(m) } -func (*ModulesServiceError) ProtoMessage() {} - -type GetModulesRequest struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetModulesRequest) Reset() { *m = GetModulesRequest{} } -func (m *GetModulesRequest) String() string { return proto.CompactTextString(m) } -func (*GetModulesRequest) ProtoMessage() {} - -type GetModulesResponse struct { - Module []string `protobuf:"bytes,1,rep,name=module" json:"module,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetModulesResponse) Reset() { *m = GetModulesResponse{} } -func (m *GetModulesResponse) String() string { return proto.CompactTextString(m) } -func (*GetModulesResponse) ProtoMessage() {} - -func (m *GetModulesResponse) GetModule() []string { - if m != nil { - return m.Module - } - return nil -} - -type GetVersionsRequest struct { - Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetVersionsRequest) Reset() { *m = GetVersionsRequest{} } -func (m *GetVersionsRequest) String() string { return proto.CompactTextString(m) } -func (*GetVersionsRequest) ProtoMessage() {} - -func (m *GetVersionsRequest) GetModule() string { - if m != nil && m.Module != nil { - return *m.Module - } - return "" -} - -type GetVersionsResponse struct { - Version []string `protobuf:"bytes,1,rep,name=version" json:"version,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetVersionsResponse) Reset() { *m = GetVersionsResponse{} } -func (m *GetVersionsResponse) String() string { return proto.CompactTextString(m) } -func (*GetVersionsResponse) ProtoMessage() {} - -func (m *GetVersionsResponse) GetVersion() []string { - if m != nil { - return m.Version - } - return nil -} - -type GetDefaultVersionRequest struct { - Module *string 
`protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetDefaultVersionRequest) Reset() { *m = GetDefaultVersionRequest{} } -func (m *GetDefaultVersionRequest) String() string { return proto.CompactTextString(m) } -func (*GetDefaultVersionRequest) ProtoMessage() {} - -func (m *GetDefaultVersionRequest) GetModule() string { - if m != nil && m.Module != nil { - return *m.Module - } - return "" -} - -type GetDefaultVersionResponse struct { - Version *string `protobuf:"bytes,1,req,name=version" json:"version,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetDefaultVersionResponse) Reset() { *m = GetDefaultVersionResponse{} } -func (m *GetDefaultVersionResponse) String() string { return proto.CompactTextString(m) } -func (*GetDefaultVersionResponse) ProtoMessage() {} - -func (m *GetDefaultVersionResponse) GetVersion() string { - if m != nil && m.Version != nil { - return *m.Version - } - return "" -} - -type GetNumInstancesRequest struct { - Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` - Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetNumInstancesRequest) Reset() { *m = GetNumInstancesRequest{} } -func (m *GetNumInstancesRequest) String() string { return proto.CompactTextString(m) } -func (*GetNumInstancesRequest) ProtoMessage() {} - -func (m *GetNumInstancesRequest) GetModule() string { - if m != nil && m.Module != nil { - return *m.Module - } - return "" -} - -func (m *GetNumInstancesRequest) GetVersion() string { - if m != nil && m.Version != nil { - return *m.Version - } - return "" -} - -type GetNumInstancesResponse struct { - Instances *int64 `protobuf:"varint,1,req,name=instances" json:"instances,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetNumInstancesResponse) Reset() { *m = GetNumInstancesResponse{} } -func (m *GetNumInstancesResponse) String() string { return proto.CompactTextString(m) } -func (*GetNumInstancesResponse) ProtoMessage() {} - -func (m *GetNumInstancesResponse) GetInstances() int64 { - if m != nil && m.Instances != nil { - return *m.Instances - } - return 0 -} - -type SetNumInstancesRequest struct { - Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` - Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` - Instances *int64 `protobuf:"varint,3,req,name=instances" json:"instances,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *SetNumInstancesRequest) Reset() { *m = SetNumInstancesRequest{} } -func (m *SetNumInstancesRequest) String() string { return proto.CompactTextString(m) } -func (*SetNumInstancesRequest) ProtoMessage() {} - -func (m *SetNumInstancesRequest) GetModule() string { - if m != nil && m.Module != nil { - return *m.Module - } - return "" -} - -func (m *SetNumInstancesRequest) GetVersion() string { - if m != nil && m.Version != nil { - return *m.Version - } - return "" -} - -func (m *SetNumInstancesRequest) GetInstances() int64 { - if m != nil && m.Instances != nil { - return *m.Instances - } - return 0 -} - -type SetNumInstancesResponse struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *SetNumInstancesResponse) Reset() { *m = SetNumInstancesResponse{} } -func (m *SetNumInstancesResponse) String() string { return proto.CompactTextString(m) } -func (*SetNumInstancesResponse) ProtoMessage() {} - -type StartModuleRequest struct { - Module *string 
`protobuf:"bytes,1,req,name=module" json:"module,omitempty"` - Version *string `protobuf:"bytes,2,req,name=version" json:"version,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *StartModuleRequest) Reset() { *m = StartModuleRequest{} } -func (m *StartModuleRequest) String() string { return proto.CompactTextString(m) } -func (*StartModuleRequest) ProtoMessage() {} - -func (m *StartModuleRequest) GetModule() string { - if m != nil && m.Module != nil { - return *m.Module - } - return "" -} - -func (m *StartModuleRequest) GetVersion() string { - if m != nil && m.Version != nil { - return *m.Version - } - return "" -} - -type StartModuleResponse struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *StartModuleResponse) Reset() { *m = StartModuleResponse{} } -func (m *StartModuleResponse) String() string { return proto.CompactTextString(m) } -func (*StartModuleResponse) ProtoMessage() {} - -type StopModuleRequest struct { - Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` - Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *StopModuleRequest) Reset() { *m = StopModuleRequest{} } -func (m *StopModuleRequest) String() string { return proto.CompactTextString(m) } -func (*StopModuleRequest) ProtoMessage() {} - -func (m *StopModuleRequest) GetModule() string { - if m != nil && m.Module != nil { - return *m.Module - } - return "" -} - -func (m *StopModuleRequest) GetVersion() string { - if m != nil && m.Version != nil { - return *m.Version - } - return "" -} - -type StopModuleResponse struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *StopModuleResponse) Reset() { *m = StopModuleResponse{} } -func (m *StopModuleResponse) String() string { return proto.CompactTextString(m) } -func (*StopModuleResponse) ProtoMessage() {} - -type GetHostnameRequest struct { - Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` - Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` - Instance *string `protobuf:"bytes,3,opt,name=instance" json:"instance,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetHostnameRequest) Reset() { *m = GetHostnameRequest{} } -func (m *GetHostnameRequest) String() string { return proto.CompactTextString(m) } -func (*GetHostnameRequest) ProtoMessage() {} - -func (m *GetHostnameRequest) GetModule() string { - if m != nil && m.Module != nil { - return *m.Module - } - return "" -} - -func (m *GetHostnameRequest) GetVersion() string { - if m != nil && m.Version != nil { - return *m.Version - } - return "" -} - -func (m *GetHostnameRequest) GetInstance() string { - if m != nil && m.Instance != nil { - return *m.Instance - } - return "" -} - -type GetHostnameResponse struct { - Hostname *string `protobuf:"bytes,1,req,name=hostname" json:"hostname,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetHostnameResponse) Reset() { *m = GetHostnameResponse{} } -func (m *GetHostnameResponse) String() string { return proto.CompactTextString(m) } -func (*GetHostnameResponse) ProtoMessage() {} - -func (m *GetHostnameResponse) GetHostname() string { - if m != nil && m.Hostname != nil { - return *m.Hostname - } - return "" -} - -func init() { -} diff --git a/vendor/google.golang.org/appengine/internal/modules/modules_service.proto b/vendor/google.golang.org/appengine/internal/modules/modules_service.proto deleted file mode 100644 index d29f006..0000000 --- 
a/vendor/google.golang.org/appengine/internal/modules/modules_service.proto +++ /dev/null @@ -1,80 +0,0 @@ -syntax = "proto2"; -option go_package = "modules"; - -package appengine; - -message ModulesServiceError { - enum ErrorCode { - OK = 0; - INVALID_MODULE = 1; - INVALID_VERSION = 2; - INVALID_INSTANCES = 3; - TRANSIENT_ERROR = 4; - UNEXPECTED_STATE = 5; - } -} - -message GetModulesRequest { -} - -message GetModulesResponse { - repeated string module = 1; -} - -message GetVersionsRequest { - optional string module = 1; -} - -message GetVersionsResponse { - repeated string version = 1; -} - -message GetDefaultVersionRequest { - optional string module = 1; -} - -message GetDefaultVersionResponse { - required string version = 1; -} - -message GetNumInstancesRequest { - optional string module = 1; - optional string version = 2; -} - -message GetNumInstancesResponse { - required int64 instances = 1; -} - -message SetNumInstancesRequest { - optional string module = 1; - optional string version = 2; - required int64 instances = 3; -} - -message SetNumInstancesResponse {} - -message StartModuleRequest { - required string module = 1; - required string version = 2; -} - -message StartModuleResponse {} - -message StopModuleRequest { - optional string module = 1; - optional string version = 2; -} - -message StopModuleResponse {} - -message GetHostnameRequest { - optional string module = 1; - optional string version = 2; - optional string instance = 3; -} - -message GetHostnameResponse { - required string hostname = 1; -} - diff --git a/vendor/google.golang.org/appengine/internal/net.go b/vendor/google.golang.org/appengine/internal/net.go deleted file mode 100644 index 3b94cf0..0000000 --- a/vendor/google.golang.org/appengine/internal/net.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2014 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package internal - -// This file implements a network dialer that limits the number of concurrent connections. -// It is only used for API calls. - -import ( - "log" - "net" - "runtime" - "sync" - "time" -) - -var limitSem = make(chan int, 100) // TODO(dsymonds): Use environment variable. - -func limitRelease() { - // non-blocking - select { - case <-limitSem: - default: - // This should not normally happen. - log.Print("appengine: unbalanced limitSem release!") - } -} - -func limitDial(network, addr string) (net.Conn, error) { - limitSem <- 1 - - // Dial with a timeout in case the API host is MIA. - // The connection should normally be very fast. - conn, err := net.DialTimeout(network, addr, 500*time.Millisecond) - if err != nil { - limitRelease() - return nil, err - } - lc := &limitConn{Conn: conn} - runtime.SetFinalizer(lc, (*limitConn).Close) // shouldn't usually be required - return lc, nil -} - -type limitConn struct { - close sync.Once - net.Conn -} - -func (lc *limitConn) Close() error { - defer lc.close.Do(func() { - limitRelease() - runtime.SetFinalizer(lc, nil) - }) - return lc.Conn.Close() -} diff --git a/vendor/google.golang.org/appengine/internal/regen.sh b/vendor/google.golang.org/appengine/internal/regen.sh deleted file mode 100755 index 2fdb546..0000000 --- a/vendor/google.golang.org/appengine/internal/regen.sh +++ /dev/null @@ -1,40 +0,0 @@ -#!/bin/bash -e -# -# This script rebuilds the generated code for the protocol buffers. -# To run this you will need protoc and goprotobuf installed; -# see https://github.com/golang/protobuf for instructions. 
- -PKG=google.golang.org/appengine - -function die() { - echo 1>&2 $* - exit 1 -} - -# Sanity check that the right tools are accessible. -for tool in go protoc protoc-gen-go; do - q=$(which $tool) || die "didn't find $tool" - echo 1>&2 "$tool: $q" -done - -echo -n 1>&2 "finding package dir... " -pkgdir=$(go list -f '{{.Dir}}' $PKG) -echo 1>&2 $pkgdir -base=$(echo $pkgdir | sed "s,/$PKG\$,,") -echo 1>&2 "base: $base" -cd $base - -# Run protoc once per package. -for dir in $(find $PKG/internal -name '*.proto' | xargs dirname | sort | uniq); do - echo 1>&2 "* $dir" - protoc --go_out=. $dir/*.proto -done - -for f in $(find $PKG/internal -name '*.pb.go'); do - # Remove proto.RegisterEnum calls. - # These cause duplicate registration panics when these packages - # are used on classic App Engine. proto.RegisterEnum only affects - # parsing the text format; we don't care about that. - # https://code.google.com/p/googleappengine/issues/detail?id=11670#c17 - sed -i '/proto.RegisterEnum/d' $f -done diff --git a/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go deleted file mode 100644 index 526bd39..0000000 --- a/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go +++ /dev/null @@ -1,231 +0,0 @@ -// Code generated by protoc-gen-go. -// source: google.golang.org/appengine/internal/remote_api/remote_api.proto -// DO NOT EDIT! - -/* -Package remote_api is a generated protocol buffer package. - -It is generated from these files: - google.golang.org/appengine/internal/remote_api/remote_api.proto - -It has these top-level messages: - Request - ApplicationError - RpcError - Response -*/ -package remote_api - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -type RpcError_ErrorCode int32 - -const ( - RpcError_UNKNOWN RpcError_ErrorCode = 0 - RpcError_CALL_NOT_FOUND RpcError_ErrorCode = 1 - RpcError_PARSE_ERROR RpcError_ErrorCode = 2 - RpcError_SECURITY_VIOLATION RpcError_ErrorCode = 3 - RpcError_OVER_QUOTA RpcError_ErrorCode = 4 - RpcError_REQUEST_TOO_LARGE RpcError_ErrorCode = 5 - RpcError_CAPABILITY_DISABLED RpcError_ErrorCode = 6 - RpcError_FEATURE_DISABLED RpcError_ErrorCode = 7 - RpcError_BAD_REQUEST RpcError_ErrorCode = 8 - RpcError_RESPONSE_TOO_LARGE RpcError_ErrorCode = 9 - RpcError_CANCELLED RpcError_ErrorCode = 10 - RpcError_REPLAY_ERROR RpcError_ErrorCode = 11 - RpcError_DEADLINE_EXCEEDED RpcError_ErrorCode = 12 -) - -var RpcError_ErrorCode_name = map[int32]string{ - 0: "UNKNOWN", - 1: "CALL_NOT_FOUND", - 2: "PARSE_ERROR", - 3: "SECURITY_VIOLATION", - 4: "OVER_QUOTA", - 5: "REQUEST_TOO_LARGE", - 6: "CAPABILITY_DISABLED", - 7: "FEATURE_DISABLED", - 8: "BAD_REQUEST", - 9: "RESPONSE_TOO_LARGE", - 10: "CANCELLED", - 11: "REPLAY_ERROR", - 12: "DEADLINE_EXCEEDED", -} -var RpcError_ErrorCode_value = map[string]int32{ - "UNKNOWN": 0, - "CALL_NOT_FOUND": 1, - "PARSE_ERROR": 2, - "SECURITY_VIOLATION": 3, - "OVER_QUOTA": 4, - "REQUEST_TOO_LARGE": 5, - "CAPABILITY_DISABLED": 6, - "FEATURE_DISABLED": 7, - "BAD_REQUEST": 8, - "RESPONSE_TOO_LARGE": 9, - "CANCELLED": 10, - "REPLAY_ERROR": 11, - "DEADLINE_EXCEEDED": 12, -} - -func (x RpcError_ErrorCode) Enum() *RpcError_ErrorCode { - p := new(RpcError_ErrorCode) - *p = x - return p -} -func (x RpcError_ErrorCode) String() string { - return proto.EnumName(RpcError_ErrorCode_name, int32(x)) -} -func (x *RpcError_ErrorCode) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(RpcError_ErrorCode_value, data, "RpcError_ErrorCode") - if err != nil { - return err - } - *x = RpcError_ErrorCode(value) - return nil -} - -type Request struct { - ServiceName *string `protobuf:"bytes,2,req,name=service_name" json:"service_name,omitempty"` - Method *string `protobuf:"bytes,3,req,name=method" json:"method,omitempty"` - Request []byte `protobuf:"bytes,4,req,name=request" json:"request,omitempty"` - RequestId *string `protobuf:"bytes,5,opt,name=request_id" json:"request_id,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Request) Reset() { *m = Request{} } -func (m *Request) String() string { return proto.CompactTextString(m) } -func (*Request) ProtoMessage() {} - -func (m *Request) GetServiceName() string { - if m != nil && m.ServiceName != nil { - return *m.ServiceName - } - return "" -} - -func (m *Request) GetMethod() string { - if m != nil && m.Method != nil { - return *m.Method - } - return "" -} - -func (m *Request) GetRequest() []byte { - if m != nil { - return m.Request - } - return nil -} - -func (m *Request) GetRequestId() string { - if m != nil && m.RequestId != nil { - return *m.RequestId - } - return "" -} - -type ApplicationError struct { - Code *int32 `protobuf:"varint,1,req,name=code" json:"code,omitempty"` - Detail *string `protobuf:"bytes,2,req,name=detail" json:"detail,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ApplicationError) Reset() { *m = ApplicationError{} } -func (m *ApplicationError) String() string { return proto.CompactTextString(m) } -func (*ApplicationError) ProtoMessage() {} - -func (m *ApplicationError) GetCode() int32 { - if m != nil && m.Code != nil { - return *m.Code - } - return 0 -} - -func (m *ApplicationError) GetDetail() string { - if m != nil && m.Detail 
!= nil { - return *m.Detail - } - return "" -} - -type RpcError struct { - Code *int32 `protobuf:"varint,1,req,name=code" json:"code,omitempty"` - Detail *string `protobuf:"bytes,2,opt,name=detail" json:"detail,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *RpcError) Reset() { *m = RpcError{} } -func (m *RpcError) String() string { return proto.CompactTextString(m) } -func (*RpcError) ProtoMessage() {} - -func (m *RpcError) GetCode() int32 { - if m != nil && m.Code != nil { - return *m.Code - } - return 0 -} - -func (m *RpcError) GetDetail() string { - if m != nil && m.Detail != nil { - return *m.Detail - } - return "" -} - -type Response struct { - Response []byte `protobuf:"bytes,1,opt,name=response" json:"response,omitempty"` - Exception []byte `protobuf:"bytes,2,opt,name=exception" json:"exception,omitempty"` - ApplicationError *ApplicationError `protobuf:"bytes,3,opt,name=application_error" json:"application_error,omitempty"` - JavaException []byte `protobuf:"bytes,4,opt,name=java_exception" json:"java_exception,omitempty"` - RpcError *RpcError `protobuf:"bytes,5,opt,name=rpc_error" json:"rpc_error,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Response) Reset() { *m = Response{} } -func (m *Response) String() string { return proto.CompactTextString(m) } -func (*Response) ProtoMessage() {} - -func (m *Response) GetResponse() []byte { - if m != nil { - return m.Response - } - return nil -} - -func (m *Response) GetException() []byte { - if m != nil { - return m.Exception - } - return nil -} - -func (m *Response) GetApplicationError() *ApplicationError { - if m != nil { - return m.ApplicationError - } - return nil -} - -func (m *Response) GetJavaException() []byte { - if m != nil { - return m.JavaException - } - return nil -} - -func (m *Response) GetRpcError() *RpcError { - if m != nil { - return m.RpcError - } - return nil -} - -func init() { -} diff --git a/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto deleted file mode 100644 index f21763a..0000000 --- a/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto +++ /dev/null @@ -1,44 +0,0 @@ -syntax = "proto2"; -option go_package = "remote_api"; - -package remote_api; - -message Request { - required string service_name = 2; - required string method = 3; - required bytes request = 4; - optional string request_id = 5; -} - -message ApplicationError { - required int32 code = 1; - required string detail = 2; -} - -message RpcError { - enum ErrorCode { - UNKNOWN = 0; - CALL_NOT_FOUND = 1; - PARSE_ERROR = 2; - SECURITY_VIOLATION = 3; - OVER_QUOTA = 4; - REQUEST_TOO_LARGE = 5; - CAPABILITY_DISABLED = 6; - FEATURE_DISABLED = 7; - BAD_REQUEST = 8; - RESPONSE_TOO_LARGE = 9; - CANCELLED = 10; - REPLAY_ERROR = 11; - DEADLINE_EXCEEDED = 12; - } - required int32 code = 1; - optional string detail = 2; -} - -message Response { - optional bytes response = 1; - optional bytes exception = 2; - optional ApplicationError application_error = 3; - optional bytes java_exception = 4; - optional RpcError rpc_error = 5; -} diff --git a/vendor/google.golang.org/appengine/internal/transaction.go b/vendor/google.golang.org/appengine/internal/transaction.go deleted file mode 100644 index 28a6d18..0000000 --- a/vendor/google.golang.org/appengine/internal/transaction.go +++ /dev/null @@ -1,107 +0,0 @@ -// Copyright 2014 Google Inc. All rights reserved. 
-// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package internal - -// This file implements hooks for applying datastore transactions. - -import ( - "errors" - "reflect" - - "github.com/golang/protobuf/proto" - netcontext "golang.org/x/net/context" - - basepb "google.golang.org/appengine/internal/base" - pb "google.golang.org/appengine/internal/datastore" -) - -var transactionSetters = make(map[reflect.Type]reflect.Value) - -// RegisterTransactionSetter registers a function that sets transaction information -// in a protocol buffer message. f should be a function with two arguments, -// the first being a protocol buffer type, and the second being *datastore.Transaction. -func RegisterTransactionSetter(f interface{}) { - v := reflect.ValueOf(f) - transactionSetters[v.Type().In(0)] = v -} - -// applyTransaction applies the transaction t to message pb -// by using the relevant setter passed to RegisterTransactionSetter. -func applyTransaction(pb proto.Message, t *pb.Transaction) { - v := reflect.ValueOf(pb) - if f, ok := transactionSetters[v.Type()]; ok { - f.Call([]reflect.Value{v, reflect.ValueOf(t)}) - } -} - -var transactionKey = "used for *Transaction" - -func transactionFromContext(ctx netcontext.Context) *transaction { - t, _ := ctx.Value(&transactionKey).(*transaction) - return t -} - -func withTransaction(ctx netcontext.Context, t *transaction) netcontext.Context { - return netcontext.WithValue(ctx, &transactionKey, t) -} - -type transaction struct { - transaction pb.Transaction - finished bool -} - -var ErrConcurrentTransaction = errors.New("internal: concurrent transaction") - -func RunTransactionOnce(c netcontext.Context, f func(netcontext.Context) error, xg bool) error { - if transactionFromContext(c) != nil { - return errors.New("nested transactions are not supported") - } - - // Begin the transaction. - t := &transaction{} - req := &pb.BeginTransactionRequest{ - App: proto.String(FullyQualifiedAppID(c)), - } - if xg { - req.AllowMultipleEg = proto.Bool(true) - } - if err := Call(c, "datastore_v3", "BeginTransaction", req, &t.transaction); err != nil { - return err - } - - // Call f, rolling back the transaction if f returns a non-nil error, or panics. - // The panic is not recovered. - defer func() { - if t.finished { - return - } - t.finished = true - // Ignore the error return value, since we are already returning a non-nil - // error (or we're panicking). - Call(c, "datastore_v3", "Rollback", &t.transaction, &basepb.VoidProto{}) - }() - if err := f(withTransaction(c, t)); err != nil { - return err - } - t.finished = true - - // Commit the transaction. - res := &pb.CommitResponse{} - err := Call(c, "datastore_v3", "Commit", &t.transaction, res) - if ae, ok := err.(*APIError); ok { - /* TODO: restore this conditional - if appengine.IsDevAppServer() { - */ - // The Python Dev AppServer raises an ApplicationError with error code 2 (which is - // Error.CONCURRENT_TRANSACTION) and message "Concurrency exception.". - if ae.Code == int32(pb.Error_BAD_REQUEST) && ae.Detail == "ApplicationError: 2 Concurrency exception." 
{ - return ErrConcurrentTransaction - } - if ae.Code == int32(pb.Error_CONCURRENT_TRANSACTION) { - return ErrConcurrentTransaction - } - } - return err -} diff --git a/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go deleted file mode 100644 index af463fb..0000000 --- a/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go +++ /dev/null @@ -1,355 +0,0 @@ -// Code generated by protoc-gen-go. -// source: google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto -// DO NOT EDIT! - -/* -Package urlfetch is a generated protocol buffer package. - -It is generated from these files: - google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto - -It has these top-level messages: - URLFetchServiceError - URLFetchRequest - URLFetchResponse -*/ -package urlfetch - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -type URLFetchServiceError_ErrorCode int32 - -const ( - URLFetchServiceError_OK URLFetchServiceError_ErrorCode = 0 - URLFetchServiceError_INVALID_URL URLFetchServiceError_ErrorCode = 1 - URLFetchServiceError_FETCH_ERROR URLFetchServiceError_ErrorCode = 2 - URLFetchServiceError_UNSPECIFIED_ERROR URLFetchServiceError_ErrorCode = 3 - URLFetchServiceError_RESPONSE_TOO_LARGE URLFetchServiceError_ErrorCode = 4 - URLFetchServiceError_DEADLINE_EXCEEDED URLFetchServiceError_ErrorCode = 5 - URLFetchServiceError_SSL_CERTIFICATE_ERROR URLFetchServiceError_ErrorCode = 6 - URLFetchServiceError_DNS_ERROR URLFetchServiceError_ErrorCode = 7 - URLFetchServiceError_CLOSED URLFetchServiceError_ErrorCode = 8 - URLFetchServiceError_INTERNAL_TRANSIENT_ERROR URLFetchServiceError_ErrorCode = 9 - URLFetchServiceError_TOO_MANY_REDIRECTS URLFetchServiceError_ErrorCode = 10 - URLFetchServiceError_MALFORMED_REPLY URLFetchServiceError_ErrorCode = 11 - URLFetchServiceError_CONNECTION_ERROR URLFetchServiceError_ErrorCode = 12 -) - -var URLFetchServiceError_ErrorCode_name = map[int32]string{ - 0: "OK", - 1: "INVALID_URL", - 2: "FETCH_ERROR", - 3: "UNSPECIFIED_ERROR", - 4: "RESPONSE_TOO_LARGE", - 5: "DEADLINE_EXCEEDED", - 6: "SSL_CERTIFICATE_ERROR", - 7: "DNS_ERROR", - 8: "CLOSED", - 9: "INTERNAL_TRANSIENT_ERROR", - 10: "TOO_MANY_REDIRECTS", - 11: "MALFORMED_REPLY", - 12: "CONNECTION_ERROR", -} -var URLFetchServiceError_ErrorCode_value = map[string]int32{ - "OK": 0, - "INVALID_URL": 1, - "FETCH_ERROR": 2, - "UNSPECIFIED_ERROR": 3, - "RESPONSE_TOO_LARGE": 4, - "DEADLINE_EXCEEDED": 5, - "SSL_CERTIFICATE_ERROR": 6, - "DNS_ERROR": 7, - "CLOSED": 8, - "INTERNAL_TRANSIENT_ERROR": 9, - "TOO_MANY_REDIRECTS": 10, - "MALFORMED_REPLY": 11, - "CONNECTION_ERROR": 12, -} - -func (x URLFetchServiceError_ErrorCode) Enum() *URLFetchServiceError_ErrorCode { - p := new(URLFetchServiceError_ErrorCode) - *p = x - return p -} -func (x URLFetchServiceError_ErrorCode) String() string { - return proto.EnumName(URLFetchServiceError_ErrorCode_name, int32(x)) -} -func (x *URLFetchServiceError_ErrorCode) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(URLFetchServiceError_ErrorCode_value, data, "URLFetchServiceError_ErrorCode") - if err != nil { - return err - } - *x = URLFetchServiceError_ErrorCode(value) - return nil -} - -type URLFetchRequest_RequestMethod int32 - -const ( - URLFetchRequest_GET 
URLFetchRequest_RequestMethod = 1 - URLFetchRequest_POST URLFetchRequest_RequestMethod = 2 - URLFetchRequest_HEAD URLFetchRequest_RequestMethod = 3 - URLFetchRequest_PUT URLFetchRequest_RequestMethod = 4 - URLFetchRequest_DELETE URLFetchRequest_RequestMethod = 5 - URLFetchRequest_PATCH URLFetchRequest_RequestMethod = 6 -) - -var URLFetchRequest_RequestMethod_name = map[int32]string{ - 1: "GET", - 2: "POST", - 3: "HEAD", - 4: "PUT", - 5: "DELETE", - 6: "PATCH", -} -var URLFetchRequest_RequestMethod_value = map[string]int32{ - "GET": 1, - "POST": 2, - "HEAD": 3, - "PUT": 4, - "DELETE": 5, - "PATCH": 6, -} - -func (x URLFetchRequest_RequestMethod) Enum() *URLFetchRequest_RequestMethod { - p := new(URLFetchRequest_RequestMethod) - *p = x - return p -} -func (x URLFetchRequest_RequestMethod) String() string { - return proto.EnumName(URLFetchRequest_RequestMethod_name, int32(x)) -} -func (x *URLFetchRequest_RequestMethod) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(URLFetchRequest_RequestMethod_value, data, "URLFetchRequest_RequestMethod") - if err != nil { - return err - } - *x = URLFetchRequest_RequestMethod(value) - return nil -} - -type URLFetchServiceError struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *URLFetchServiceError) Reset() { *m = URLFetchServiceError{} } -func (m *URLFetchServiceError) String() string { return proto.CompactTextString(m) } -func (*URLFetchServiceError) ProtoMessage() {} - -type URLFetchRequest struct { - Method *URLFetchRequest_RequestMethod `protobuf:"varint,1,req,name=Method,enum=appengine.URLFetchRequest_RequestMethod" json:"Method,omitempty"` - Url *string `protobuf:"bytes,2,req,name=Url" json:"Url,omitempty"` - Header []*URLFetchRequest_Header `protobuf:"group,3,rep,name=Header" json:"header,omitempty"` - Payload []byte `protobuf:"bytes,6,opt,name=Payload" json:"Payload,omitempty"` - FollowRedirects *bool `protobuf:"varint,7,opt,name=FollowRedirects,def=1" json:"FollowRedirects,omitempty"` - Deadline *float64 `protobuf:"fixed64,8,opt,name=Deadline" json:"Deadline,omitempty"` - MustValidateServerCertificate *bool `protobuf:"varint,9,opt,name=MustValidateServerCertificate,def=1" json:"MustValidateServerCertificate,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *URLFetchRequest) Reset() { *m = URLFetchRequest{} } -func (m *URLFetchRequest) String() string { return proto.CompactTextString(m) } -func (*URLFetchRequest) ProtoMessage() {} - -const Default_URLFetchRequest_FollowRedirects bool = true -const Default_URLFetchRequest_MustValidateServerCertificate bool = true - -func (m *URLFetchRequest) GetMethod() URLFetchRequest_RequestMethod { - if m != nil && m.Method != nil { - return *m.Method - } - return URLFetchRequest_GET -} - -func (m *URLFetchRequest) GetUrl() string { - if m != nil && m.Url != nil { - return *m.Url - } - return "" -} - -func (m *URLFetchRequest) GetHeader() []*URLFetchRequest_Header { - if m != nil { - return m.Header - } - return nil -} - -func (m *URLFetchRequest) GetPayload() []byte { - if m != nil { - return m.Payload - } - return nil -} - -func (m *URLFetchRequest) GetFollowRedirects() bool { - if m != nil && m.FollowRedirects != nil { - return *m.FollowRedirects - } - return Default_URLFetchRequest_FollowRedirects -} - -func (m *URLFetchRequest) GetDeadline() float64 { - if m != nil && m.Deadline != nil { - return *m.Deadline - } - return 0 -} - -func (m *URLFetchRequest) GetMustValidateServerCertificate() bool { - if m != nil && m.MustValidateServerCertificate != nil { - 
return *m.MustValidateServerCertificate - } - return Default_URLFetchRequest_MustValidateServerCertificate -} - -type URLFetchRequest_Header struct { - Key *string `protobuf:"bytes,4,req,name=Key" json:"Key,omitempty"` - Value *string `protobuf:"bytes,5,req,name=Value" json:"Value,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *URLFetchRequest_Header) Reset() { *m = URLFetchRequest_Header{} } -func (m *URLFetchRequest_Header) String() string { return proto.CompactTextString(m) } -func (*URLFetchRequest_Header) ProtoMessage() {} - -func (m *URLFetchRequest_Header) GetKey() string { - if m != nil && m.Key != nil { - return *m.Key - } - return "" -} - -func (m *URLFetchRequest_Header) GetValue() string { - if m != nil && m.Value != nil { - return *m.Value - } - return "" -} - -type URLFetchResponse struct { - Content []byte `protobuf:"bytes,1,opt,name=Content" json:"Content,omitempty"` - StatusCode *int32 `protobuf:"varint,2,req,name=StatusCode" json:"StatusCode,omitempty"` - Header []*URLFetchResponse_Header `protobuf:"group,3,rep,name=Header" json:"header,omitempty"` - ContentWasTruncated *bool `protobuf:"varint,6,opt,name=ContentWasTruncated,def=0" json:"ContentWasTruncated,omitempty"` - ExternalBytesSent *int64 `protobuf:"varint,7,opt,name=ExternalBytesSent" json:"ExternalBytesSent,omitempty"` - ExternalBytesReceived *int64 `protobuf:"varint,8,opt,name=ExternalBytesReceived" json:"ExternalBytesReceived,omitempty"` - FinalUrl *string `protobuf:"bytes,9,opt,name=FinalUrl" json:"FinalUrl,omitempty"` - ApiCpuMilliseconds *int64 `protobuf:"varint,10,opt,name=ApiCpuMilliseconds,def=0" json:"ApiCpuMilliseconds,omitempty"` - ApiBytesSent *int64 `protobuf:"varint,11,opt,name=ApiBytesSent,def=0" json:"ApiBytesSent,omitempty"` - ApiBytesReceived *int64 `protobuf:"varint,12,opt,name=ApiBytesReceived,def=0" json:"ApiBytesReceived,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *URLFetchResponse) Reset() { *m = URLFetchResponse{} } -func (m *URLFetchResponse) String() string { return proto.CompactTextString(m) } -func (*URLFetchResponse) ProtoMessage() {} - -const Default_URLFetchResponse_ContentWasTruncated bool = false -const Default_URLFetchResponse_ApiCpuMilliseconds int64 = 0 -const Default_URLFetchResponse_ApiBytesSent int64 = 0 -const Default_URLFetchResponse_ApiBytesReceived int64 = 0 - -func (m *URLFetchResponse) GetContent() []byte { - if m != nil { - return m.Content - } - return nil -} - -func (m *URLFetchResponse) GetStatusCode() int32 { - if m != nil && m.StatusCode != nil { - return *m.StatusCode - } - return 0 -} - -func (m *URLFetchResponse) GetHeader() []*URLFetchResponse_Header { - if m != nil { - return m.Header - } - return nil -} - -func (m *URLFetchResponse) GetContentWasTruncated() bool { - if m != nil && m.ContentWasTruncated != nil { - return *m.ContentWasTruncated - } - return Default_URLFetchResponse_ContentWasTruncated -} - -func (m *URLFetchResponse) GetExternalBytesSent() int64 { - if m != nil && m.ExternalBytesSent != nil { - return *m.ExternalBytesSent - } - return 0 -} - -func (m *URLFetchResponse) GetExternalBytesReceived() int64 { - if m != nil && m.ExternalBytesReceived != nil { - return *m.ExternalBytesReceived - } - return 0 -} - -func (m *URLFetchResponse) GetFinalUrl() string { - if m != nil && m.FinalUrl != nil { - return *m.FinalUrl - } - return "" -} - -func (m *URLFetchResponse) GetApiCpuMilliseconds() int64 { - if m != nil && m.ApiCpuMilliseconds != nil { - return *m.ApiCpuMilliseconds - } - return 
Default_URLFetchResponse_ApiCpuMilliseconds -} - -func (m *URLFetchResponse) GetApiBytesSent() int64 { - if m != nil && m.ApiBytesSent != nil { - return *m.ApiBytesSent - } - return Default_URLFetchResponse_ApiBytesSent -} - -func (m *URLFetchResponse) GetApiBytesReceived() int64 { - if m != nil && m.ApiBytesReceived != nil { - return *m.ApiBytesReceived - } - return Default_URLFetchResponse_ApiBytesReceived -} - -type URLFetchResponse_Header struct { - Key *string `protobuf:"bytes,4,req,name=Key" json:"Key,omitempty"` - Value *string `protobuf:"bytes,5,req,name=Value" json:"Value,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *URLFetchResponse_Header) Reset() { *m = URLFetchResponse_Header{} } -func (m *URLFetchResponse_Header) String() string { return proto.CompactTextString(m) } -func (*URLFetchResponse_Header) ProtoMessage() {} - -func (m *URLFetchResponse_Header) GetKey() string { - if m != nil && m.Key != nil { - return *m.Key - } - return "" -} - -func (m *URLFetchResponse_Header) GetValue() string { - if m != nil && m.Value != nil { - return *m.Value - } - return "" -} - -func init() { -} diff --git a/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto deleted file mode 100644 index f695edf..0000000 --- a/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto +++ /dev/null @@ -1,64 +0,0 @@ -syntax = "proto2"; -option go_package = "urlfetch"; - -package appengine; - -message URLFetchServiceError { - enum ErrorCode { - OK = 0; - INVALID_URL = 1; - FETCH_ERROR = 2; - UNSPECIFIED_ERROR = 3; - RESPONSE_TOO_LARGE = 4; - DEADLINE_EXCEEDED = 5; - SSL_CERTIFICATE_ERROR = 6; - DNS_ERROR = 7; - CLOSED = 8; - INTERNAL_TRANSIENT_ERROR = 9; - TOO_MANY_REDIRECTS = 10; - MALFORMED_REPLY = 11; - CONNECTION_ERROR = 12; - } -} - -message URLFetchRequest { - enum RequestMethod { - GET = 1; - POST = 2; - HEAD = 3; - PUT = 4; - DELETE = 5; - PATCH = 6; - } - required RequestMethod Method = 1; - required string Url = 2; - repeated group Header = 3 { - required string Key = 4; - required string Value = 5; - } - optional bytes Payload = 6 [ctype=CORD]; - - optional bool FollowRedirects = 7 [default=true]; - - optional double Deadline = 8; - - optional bool MustValidateServerCertificate = 9 [default=true]; -} - -message URLFetchResponse { - optional bytes Content = 1; - required int32 StatusCode = 2; - repeated group Header = 3 { - required string Key = 4; - required string Value = 5; - } - optional bool ContentWasTruncated = 6 [default=false]; - optional int64 ExternalBytesSent = 7; - optional int64 ExternalBytesReceived = 8; - - optional string FinalUrl = 9; - - optional int64 ApiCpuMilliseconds = 10 [default=0]; - optional int64 ApiBytesSent = 11 [default=0]; - optional int64 ApiBytesReceived = 12 [default=0]; -} diff --git a/vendor/google.golang.org/appengine/internal/user/user_service.pb.go b/vendor/google.golang.org/appengine/internal/user/user_service.pb.go deleted file mode 100644 index 6b52ffc..0000000 --- a/vendor/google.golang.org/appengine/internal/user/user_service.pb.go +++ /dev/null @@ -1,289 +0,0 @@ -// Code generated by protoc-gen-go. -// source: google.golang.org/appengine/internal/user/user_service.proto -// DO NOT EDIT! - -/* -Package user is a generated protocol buffer package. 
- -It is generated from these files: - google.golang.org/appengine/internal/user/user_service.proto - -It has these top-level messages: - UserServiceError - CreateLoginURLRequest - CreateLoginURLResponse - CreateLogoutURLRequest - CreateLogoutURLResponse - GetOAuthUserRequest - GetOAuthUserResponse - CheckOAuthSignatureRequest - CheckOAuthSignatureResponse -*/ -package user - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -type UserServiceError_ErrorCode int32 - -const ( - UserServiceError_OK UserServiceError_ErrorCode = 0 - UserServiceError_REDIRECT_URL_TOO_LONG UserServiceError_ErrorCode = 1 - UserServiceError_NOT_ALLOWED UserServiceError_ErrorCode = 2 - UserServiceError_OAUTH_INVALID_TOKEN UserServiceError_ErrorCode = 3 - UserServiceError_OAUTH_INVALID_REQUEST UserServiceError_ErrorCode = 4 - UserServiceError_OAUTH_ERROR UserServiceError_ErrorCode = 5 -) - -var UserServiceError_ErrorCode_name = map[int32]string{ - 0: "OK", - 1: "REDIRECT_URL_TOO_LONG", - 2: "NOT_ALLOWED", - 3: "OAUTH_INVALID_TOKEN", - 4: "OAUTH_INVALID_REQUEST", - 5: "OAUTH_ERROR", -} -var UserServiceError_ErrorCode_value = map[string]int32{ - "OK": 0, - "REDIRECT_URL_TOO_LONG": 1, - "NOT_ALLOWED": 2, - "OAUTH_INVALID_TOKEN": 3, - "OAUTH_INVALID_REQUEST": 4, - "OAUTH_ERROR": 5, -} - -func (x UserServiceError_ErrorCode) Enum() *UserServiceError_ErrorCode { - p := new(UserServiceError_ErrorCode) - *p = x - return p -} -func (x UserServiceError_ErrorCode) String() string { - return proto.EnumName(UserServiceError_ErrorCode_name, int32(x)) -} -func (x *UserServiceError_ErrorCode) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(UserServiceError_ErrorCode_value, data, "UserServiceError_ErrorCode") - if err != nil { - return err - } - *x = UserServiceError_ErrorCode(value) - return nil -} - -type UserServiceError struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *UserServiceError) Reset() { *m = UserServiceError{} } -func (m *UserServiceError) String() string { return proto.CompactTextString(m) } -func (*UserServiceError) ProtoMessage() {} - -type CreateLoginURLRequest struct { - DestinationUrl *string `protobuf:"bytes,1,req,name=destination_url" json:"destination_url,omitempty"` - AuthDomain *string `protobuf:"bytes,2,opt,name=auth_domain" json:"auth_domain,omitempty"` - FederatedIdentity *string `protobuf:"bytes,3,opt,name=federated_identity,def=" json:"federated_identity,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CreateLoginURLRequest) Reset() { *m = CreateLoginURLRequest{} } -func (m *CreateLoginURLRequest) String() string { return proto.CompactTextString(m) } -func (*CreateLoginURLRequest) ProtoMessage() {} - -func (m *CreateLoginURLRequest) GetDestinationUrl() string { - if m != nil && m.DestinationUrl != nil { - return *m.DestinationUrl - } - return "" -} - -func (m *CreateLoginURLRequest) GetAuthDomain() string { - if m != nil && m.AuthDomain != nil { - return *m.AuthDomain - } - return "" -} - -func (m *CreateLoginURLRequest) GetFederatedIdentity() string { - if m != nil && m.FederatedIdentity != nil { - return *m.FederatedIdentity - } - return "" -} - -type CreateLoginURLResponse struct { - LoginUrl *string `protobuf:"bytes,1,req,name=login_url" json:"login_url,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CreateLoginURLResponse) Reset() { *m = 
CreateLoginURLResponse{} } -func (m *CreateLoginURLResponse) String() string { return proto.CompactTextString(m) } -func (*CreateLoginURLResponse) ProtoMessage() {} - -func (m *CreateLoginURLResponse) GetLoginUrl() string { - if m != nil && m.LoginUrl != nil { - return *m.LoginUrl - } - return "" -} - -type CreateLogoutURLRequest struct { - DestinationUrl *string `protobuf:"bytes,1,req,name=destination_url" json:"destination_url,omitempty"` - AuthDomain *string `protobuf:"bytes,2,opt,name=auth_domain" json:"auth_domain,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CreateLogoutURLRequest) Reset() { *m = CreateLogoutURLRequest{} } -func (m *CreateLogoutURLRequest) String() string { return proto.CompactTextString(m) } -func (*CreateLogoutURLRequest) ProtoMessage() {} - -func (m *CreateLogoutURLRequest) GetDestinationUrl() string { - if m != nil && m.DestinationUrl != nil { - return *m.DestinationUrl - } - return "" -} - -func (m *CreateLogoutURLRequest) GetAuthDomain() string { - if m != nil && m.AuthDomain != nil { - return *m.AuthDomain - } - return "" -} - -type CreateLogoutURLResponse struct { - LogoutUrl *string `protobuf:"bytes,1,req,name=logout_url" json:"logout_url,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CreateLogoutURLResponse) Reset() { *m = CreateLogoutURLResponse{} } -func (m *CreateLogoutURLResponse) String() string { return proto.CompactTextString(m) } -func (*CreateLogoutURLResponse) ProtoMessage() {} - -func (m *CreateLogoutURLResponse) GetLogoutUrl() string { - if m != nil && m.LogoutUrl != nil { - return *m.LogoutUrl - } - return "" -} - -type GetOAuthUserRequest struct { - Scope *string `protobuf:"bytes,1,opt,name=scope" json:"scope,omitempty"` - Scopes []string `protobuf:"bytes,2,rep,name=scopes" json:"scopes,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetOAuthUserRequest) Reset() { *m = GetOAuthUserRequest{} } -func (m *GetOAuthUserRequest) String() string { return proto.CompactTextString(m) } -func (*GetOAuthUserRequest) ProtoMessage() {} - -func (m *GetOAuthUserRequest) GetScope() string { - if m != nil && m.Scope != nil { - return *m.Scope - } - return "" -} - -func (m *GetOAuthUserRequest) GetScopes() []string { - if m != nil { - return m.Scopes - } - return nil -} - -type GetOAuthUserResponse struct { - Email *string `protobuf:"bytes,1,req,name=email" json:"email,omitempty"` - UserId *string `protobuf:"bytes,2,req,name=user_id" json:"user_id,omitempty"` - AuthDomain *string `protobuf:"bytes,3,req,name=auth_domain" json:"auth_domain,omitempty"` - UserOrganization *string `protobuf:"bytes,4,opt,name=user_organization,def=" json:"user_organization,omitempty"` - IsAdmin *bool `protobuf:"varint,5,opt,name=is_admin,def=0" json:"is_admin,omitempty"` - ClientId *string `protobuf:"bytes,6,opt,name=client_id,def=" json:"client_id,omitempty"` - Scopes []string `protobuf:"bytes,7,rep,name=scopes" json:"scopes,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetOAuthUserResponse) Reset() { *m = GetOAuthUserResponse{} } -func (m *GetOAuthUserResponse) String() string { return proto.CompactTextString(m) } -func (*GetOAuthUserResponse) ProtoMessage() {} - -const Default_GetOAuthUserResponse_IsAdmin bool = false - -func (m *GetOAuthUserResponse) GetEmail() string { - if m != nil && m.Email != nil { - return *m.Email - } - return "" -} - -func (m *GetOAuthUserResponse) GetUserId() string { - if m != nil && m.UserId != nil { - return *m.UserId - } - return "" -} - -func (m *GetOAuthUserResponse) 
GetAuthDomain() string { - if m != nil && m.AuthDomain != nil { - return *m.AuthDomain - } - return "" -} - -func (m *GetOAuthUserResponse) GetUserOrganization() string { - if m != nil && m.UserOrganization != nil { - return *m.UserOrganization - } - return "" -} - -func (m *GetOAuthUserResponse) GetIsAdmin() bool { - if m != nil && m.IsAdmin != nil { - return *m.IsAdmin - } - return Default_GetOAuthUserResponse_IsAdmin -} - -func (m *GetOAuthUserResponse) GetClientId() string { - if m != nil && m.ClientId != nil { - return *m.ClientId - } - return "" -} - -func (m *GetOAuthUserResponse) GetScopes() []string { - if m != nil { - return m.Scopes - } - return nil -} - -type CheckOAuthSignatureRequest struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *CheckOAuthSignatureRequest) Reset() { *m = CheckOAuthSignatureRequest{} } -func (m *CheckOAuthSignatureRequest) String() string { return proto.CompactTextString(m) } -func (*CheckOAuthSignatureRequest) ProtoMessage() {} - -type CheckOAuthSignatureResponse struct { - OauthConsumerKey *string `protobuf:"bytes,1,req,name=oauth_consumer_key" json:"oauth_consumer_key,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CheckOAuthSignatureResponse) Reset() { *m = CheckOAuthSignatureResponse{} } -func (m *CheckOAuthSignatureResponse) String() string { return proto.CompactTextString(m) } -func (*CheckOAuthSignatureResponse) ProtoMessage() {} - -func (m *CheckOAuthSignatureResponse) GetOauthConsumerKey() string { - if m != nil && m.OauthConsumerKey != nil { - return *m.OauthConsumerKey - } - return "" -} - -func init() { -} diff --git a/vendor/google.golang.org/appengine/internal/user/user_service.proto b/vendor/google.golang.org/appengine/internal/user/user_service.proto deleted file mode 100644 index f3e9693..0000000 --- a/vendor/google.golang.org/appengine/internal/user/user_service.proto +++ /dev/null @@ -1,58 +0,0 @@ -syntax = "proto2"; -option go_package = "user"; - -package appengine; - -message UserServiceError { - enum ErrorCode { - OK = 0; - REDIRECT_URL_TOO_LONG = 1; - NOT_ALLOWED = 2; - OAUTH_INVALID_TOKEN = 3; - OAUTH_INVALID_REQUEST = 4; - OAUTH_ERROR = 5; - } -} - -message CreateLoginURLRequest { - required string destination_url = 1; - optional string auth_domain = 2; - optional string federated_identity = 3 [default = ""]; -} - -message CreateLoginURLResponse { - required string login_url = 1; -} - -message CreateLogoutURLRequest { - required string destination_url = 1; - optional string auth_domain = 2; -} - -message CreateLogoutURLResponse { - required string logout_url = 1; -} - -message GetOAuthUserRequest { - optional string scope = 1; - - repeated string scopes = 2; -} - -message GetOAuthUserResponse { - required string email = 1; - required string user_id = 2; - required string auth_domain = 3; - optional string user_organization = 4 [default = ""]; - optional bool is_admin = 5 [default = false]; - optional string client_id = 6 [default = ""]; - - repeated string scopes = 7; -} - -message CheckOAuthSignatureRequest { -} - -message CheckOAuthSignatureResponse { - required string oauth_consumer_key = 1; -} diff --git a/vendor/google.golang.org/appengine/log/api.go b/vendor/google.golang.org/appengine/log/api.go deleted file mode 100644 index 24d5860..0000000 --- a/vendor/google.golang.org/appengine/log/api.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. 
- -package log - -// This file implements the logging API. - -import ( - "golang.org/x/net/context" - - "google.golang.org/appengine/internal" -) - -// Debugf formats its arguments according to the format, analogous to fmt.Printf, -// and records the text as a log message at Debug level. The message will be associated -// with the request linked with the provided context. -func Debugf(ctx context.Context, format string, args ...interface{}) { - internal.Logf(ctx, 0, format, args...) -} - -// Infof is like Debugf, but at Info level. -func Infof(ctx context.Context, format string, args ...interface{}) { - internal.Logf(ctx, 1, format, args...) -} - -// Warningf is like Debugf, but at Warning level. -func Warningf(ctx context.Context, format string, args ...interface{}) { - internal.Logf(ctx, 2, format, args...) -} - -// Errorf is like Debugf, but at Error level. -func Errorf(ctx context.Context, format string, args ...interface{}) { - internal.Logf(ctx, 3, format, args...) -} - -// Criticalf is like Debugf, but at Critical level. -func Criticalf(ctx context.Context, format string, args ...interface{}) { - internal.Logf(ctx, 4, format, args...) -} diff --git a/vendor/google.golang.org/appengine/log/log.go b/vendor/google.golang.org/appengine/log/log.go deleted file mode 100644 index b54fe47..0000000 --- a/vendor/google.golang.org/appengine/log/log.go +++ /dev/null @@ -1,323 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -/* -Package log provides the means of querying an application's logs from -within an App Engine application. - -Example: - c := appengine.NewContext(r) - query := &log.Query{ - AppLogs: true, - Versions: []string{"1"}, - } - - for results := query.Run(c); ; { - record, err := results.Next() - if err == log.Done { - log.Infof(c, "Done processing results") - break - } - if err != nil { - log.Errorf(c, "Failed to retrieve next log: %v", err) - break - } - log.Infof(c, "Saw record %v", record) - } -*/ -package log // import "google.golang.org/appengine/log" - -import ( - "errors" - "fmt" - "strings" - "time" - - "github.com/golang/protobuf/proto" - "golang.org/x/net/context" - - "google.golang.org/appengine" - "google.golang.org/appengine/internal" - pb "google.golang.org/appengine/internal/log" -) - -// Query defines a logs query. -type Query struct { - // Start time specifies the earliest log to return (inclusive). - StartTime time.Time - - // End time specifies the latest log to return (exclusive). - EndTime time.Time - - // Offset specifies a position within the log stream to resume reading from, - // and should come from a previously returned Record's field of the same name. - Offset []byte - - // Incomplete controls whether active (incomplete) requests should be included. - Incomplete bool - - // AppLogs indicates if application-level logs should be included. - AppLogs bool - - // ApplyMinLevel indicates if MinLevel should be used to filter results. - ApplyMinLevel bool - - // If ApplyMinLevel is true, only logs for requests with at least one - // application log of MinLevel or higher will be returned. - MinLevel int - - // Versions is the major version IDs whose logs should be retrieved. - // Logs for specific modules can be retrieved by the specifying versions - // in the form "module:version"; the default module is used if no module - // is specified. - Versions []string - - // A list of requests to search for instead of a time-based scan. 
Cannot be - // combined with filtering options such as StartTime, EndTime, Offset, - // Incomplete, ApplyMinLevel, or Versions. - RequestIDs []string -} - -// AppLog represents a single application-level log. -type AppLog struct { - Time time.Time - Level int - Message string -} - -// Record contains all the information for a single web request. -type Record struct { - AppID string - ModuleID string - VersionID string - RequestID []byte - IP string - Nickname string - AppEngineRelease string - - // The time when this request started. - StartTime time.Time - - // The time when this request finished. - EndTime time.Time - - // Opaque cursor into the result stream. - Offset []byte - - // The time required to process the request. - Latency time.Duration - MCycles int64 - Method string - Resource string - HTTPVersion string - Status int32 - - // The size of the request sent back to the client, in bytes. - ResponseSize int64 - Referrer string - UserAgent string - URLMapEntry string - Combined string - Host string - - // The estimated cost of this request, in dollars. - Cost float64 - TaskQueueName string - TaskName string - WasLoadingRequest bool - PendingTime time.Duration - Finished bool - AppLogs []AppLog - - // Mostly-unique identifier for the instance that handled the request if available. - InstanceID string -} - -// Result represents the result of a query. -type Result struct { - logs []*Record - context context.Context - request *pb.LogReadRequest - resultsSeen bool - err error -} - -// Next returns the next log record, -func (qr *Result) Next() (*Record, error) { - if qr.err != nil { - return nil, qr.err - } - if len(qr.logs) > 0 { - lr := qr.logs[0] - qr.logs = qr.logs[1:] - return lr, nil - } - - if qr.request.Offset == nil && qr.resultsSeen { - return nil, Done - } - - if err := qr.run(); err != nil { - // Errors here may be retried, so don't store the error. - return nil, err - } - - return qr.Next() -} - -// Done is returned when a query iteration has completed. -var Done = errors.New("log: query has no more results") - -// protoToAppLogs takes as input an array of pointers to LogLines, the internal -// Protocol Buffer representation of a single application-level log, -// and converts it to an array of AppLogs, the external representation -// of an application-level log. -func protoToAppLogs(logLines []*pb.LogLine) []AppLog { - appLogs := make([]AppLog, len(logLines)) - - for i, line := range logLines { - appLogs[i] = AppLog{ - Time: time.Unix(0, *line.Time*1e3), - Level: int(*line.Level), - Message: *line.LogMessage, - } - } - - return appLogs -} - -// protoToRecord converts a RequestLog, the internal Protocol Buffer -// representation of a single request-level log, to a Record, its -// corresponding external representation. 
-func protoToRecord(rl *pb.RequestLog) *Record { - offset, err := proto.Marshal(rl.Offset) - if err != nil { - offset = nil - } - return &Record{ - AppID: *rl.AppId, - ModuleID: rl.GetModuleId(), - VersionID: *rl.VersionId, - RequestID: rl.RequestId, - Offset: offset, - IP: *rl.Ip, - Nickname: rl.GetNickname(), - AppEngineRelease: string(rl.GetAppEngineRelease()), - StartTime: time.Unix(0, *rl.StartTime*1e3), - EndTime: time.Unix(0, *rl.EndTime*1e3), - Latency: time.Duration(*rl.Latency) * time.Microsecond, - MCycles: *rl.Mcycles, - Method: *rl.Method, - Resource: *rl.Resource, - HTTPVersion: *rl.HttpVersion, - Status: *rl.Status, - ResponseSize: *rl.ResponseSize, - Referrer: rl.GetReferrer(), - UserAgent: rl.GetUserAgent(), - URLMapEntry: *rl.UrlMapEntry, - Combined: *rl.Combined, - Host: rl.GetHost(), - Cost: rl.GetCost(), - TaskQueueName: rl.GetTaskQueueName(), - TaskName: rl.GetTaskName(), - WasLoadingRequest: rl.GetWasLoadingRequest(), - PendingTime: time.Duration(rl.GetPendingTime()) * time.Microsecond, - Finished: rl.GetFinished(), - AppLogs: protoToAppLogs(rl.Line), - InstanceID: string(rl.GetCloneKey()), - } -} - -// Run starts a query for log records, which contain request and application -// level log information. -func (params *Query) Run(c context.Context) *Result { - req, err := makeRequest(params, internal.FullyQualifiedAppID(c), appengine.VersionID(c)) - return &Result{ - context: c, - request: req, - err: err, - } -} - -func makeRequest(params *Query, appID, versionID string) (*pb.LogReadRequest, error) { - req := &pb.LogReadRequest{} - req.AppId = &appID - if !params.StartTime.IsZero() { - req.StartTime = proto.Int64(params.StartTime.UnixNano() / 1e3) - } - if !params.EndTime.IsZero() { - req.EndTime = proto.Int64(params.EndTime.UnixNano() / 1e3) - } - if len(params.Offset) > 0 { - var offset pb.LogOffset - if err := proto.Unmarshal(params.Offset, &offset); err != nil { - return nil, fmt.Errorf("bad Offset: %v", err) - } - req.Offset = &offset - } - if params.Incomplete { - req.IncludeIncomplete = ¶ms.Incomplete - } - if params.AppLogs { - req.IncludeAppLogs = ¶ms.AppLogs - } - if params.ApplyMinLevel { - req.MinimumLogLevel = proto.Int32(int32(params.MinLevel)) - } - if params.Versions == nil { - // If no versions were specified, default to the default module at - // the major version being used by this module. - if i := strings.Index(versionID, "."); i >= 0 { - versionID = versionID[:i] - } - req.VersionId = []string{versionID} - } else { - req.ModuleVersion = make([]*pb.LogModuleVersion, 0, len(params.Versions)) - for _, v := range params.Versions { - var m *string - if i := strings.Index(v, ":"); i >= 0 { - m, v = proto.String(v[:i]), v[i+1:] - } - req.ModuleVersion = append(req.ModuleVersion, &pb.LogModuleVersion{ - ModuleId: m, - VersionId: proto.String(v), - }) - } - } - if params.RequestIDs != nil { - ids := make([][]byte, len(params.RequestIDs)) - for i, v := range params.RequestIDs { - ids[i] = []byte(v) - } - req.RequestId = ids - } - - return req, nil -} - -// run takes the query Result produced by a call to Run and updates it with -// more Records. The updated Result contains a new set of logs as well as an -// offset to where more logs can be found. We also convert the items in the -// response from their internal representations to external versions of the -// same structs. 
-func (r *Result) run() error { - res := &pb.LogReadResponse{} - if err := internal.Call(r.context, "logservice", "Read", r.request, res); err != nil { - return err - } - - r.logs = make([]*Record, len(res.Log)) - r.request.Offset = res.Offset - r.resultsSeen = true - - for i, log := range res.Log { - r.logs[i] = protoToRecord(log) - } - - return nil -} - -func init() { - internal.RegisterErrorCodeMap("logservice", pb.LogServiceError_ErrorCode_name) -} diff --git a/vendor/google.golang.org/appengine/memcache/memcache.go b/vendor/google.golang.org/appengine/memcache/memcache.go deleted file mode 100644 index d8eed4b..0000000 --- a/vendor/google.golang.org/appengine/memcache/memcache.go +++ /dev/null @@ -1,526 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// Package memcache provides a client for App Engine's distributed in-memory -// key-value store for small chunks of arbitrary data. -// -// The fundamental operations get and set items, keyed by a string. -// -// item0, err := memcache.Get(c, "key") -// if err != nil && err != memcache.ErrCacheMiss { -// return err -// } -// if err == nil { -// fmt.Fprintf(w, "memcache hit: Key=%q Val=[% x]\n", item0.Key, item0.Value) -// } else { -// fmt.Fprintf(w, "memcache miss\n") -// } -// -// and -// -// item1 := &memcache.Item{ -// Key: "foo", -// Value: []byte("bar"), -// } -// if err := memcache.Set(c, item1); err != nil { -// return err -// } -package memcache // import "google.golang.org/appengine/memcache" - -import ( - "bytes" - "encoding/gob" - "encoding/json" - "errors" - "time" - - "github.com/golang/protobuf/proto" - "golang.org/x/net/context" - - "google.golang.org/appengine" - "google.golang.org/appengine/internal" - pb "google.golang.org/appengine/internal/memcache" -) - -var ( - // ErrCacheMiss means that an operation failed - // because the item wasn't present. - ErrCacheMiss = errors.New("memcache: cache miss") - // ErrCASConflict means that a CompareAndSwap call failed due to the - // cached value being modified between the Get and the CompareAndSwap. - // If the cached value was simply evicted rather than replaced, - // ErrNotStored will be returned instead. - ErrCASConflict = errors.New("memcache: compare-and-swap conflict") - // ErrNoStats means that no statistics were available. - ErrNoStats = errors.New("memcache: no statistics available") - // ErrNotStored means that a conditional write operation (i.e. Add or - // CompareAndSwap) failed because the condition was not satisfied. - ErrNotStored = errors.New("memcache: item not stored") - // ErrServerError means that a server error occurred. - ErrServerError = errors.New("memcache: server error") -) - -// Item is the unit of memcache gets and sets. -type Item struct { - // Key is the Item's key (250 bytes maximum). - Key string - // Value is the Item's value. - Value []byte - // Object is the Item's value for use with a Codec. - Object interface{} - // Flags are server-opaque flags whose semantics are entirely up to the - // App Engine app. - Flags uint32 - // Expiration is the maximum duration that the item will stay - // in the cache. - // The zero value means the Item has no expiration time. - // Subsecond precision is ignored. - // This is not set when getting items. - Expiration time.Duration - // casID is a client-opaque value used for compare-and-swap operations. - // Zero means that compare-and-swap is not used. 
- casID uint64 -} - -const ( - secondsIn30Years = 60 * 60 * 24 * 365 * 30 // from memcache server code - thirtyYears = time.Duration(secondsIn30Years) * time.Second -) - -// protoToItem converts a protocol buffer item to a Go struct. -func protoToItem(p *pb.MemcacheGetResponse_Item) *Item { - return &Item{ - Key: string(p.Key), - Value: p.Value, - Flags: p.GetFlags(), - casID: p.GetCasId(), - } -} - -// If err is an appengine.MultiError, return its first element. Otherwise, return err. -func singleError(err error) error { - if me, ok := err.(appengine.MultiError); ok { - return me[0] - } - return err -} - -// Get gets the item for the given key. ErrCacheMiss is returned for a memcache -// cache miss. The key must be at most 250 bytes in length. -func Get(c context.Context, key string) (*Item, error) { - m, err := GetMulti(c, []string{key}) - if err != nil { - return nil, err - } - if _, ok := m[key]; !ok { - return nil, ErrCacheMiss - } - return m[key], nil -} - -// GetMulti is a batch version of Get. The returned map from keys to items may -// have fewer elements than the input slice, due to memcache cache misses. -// Each key must be at most 250 bytes in length. -func GetMulti(c context.Context, key []string) (map[string]*Item, error) { - if len(key) == 0 { - return nil, nil - } - keyAsBytes := make([][]byte, len(key)) - for i, k := range key { - keyAsBytes[i] = []byte(k) - } - req := &pb.MemcacheGetRequest{ - Key: keyAsBytes, - ForCas: proto.Bool(true), - } - res := &pb.MemcacheGetResponse{} - if err := internal.Call(c, "memcache", "Get", req, res); err != nil { - return nil, err - } - m := make(map[string]*Item, len(res.Item)) - for _, p := range res.Item { - t := protoToItem(p) - m[t.Key] = t - } - return m, nil -} - -// Delete deletes the item for the given key. -// ErrCacheMiss is returned if the specified item can not be found. -// The key must be at most 250 bytes in length. -func Delete(c context.Context, key string) error { - return singleError(DeleteMulti(c, []string{key})) -} - -// DeleteMulti is a batch version of Delete. -// If any keys cannot be found, an appengine.MultiError is returned. -// Each key must be at most 250 bytes in length. -func DeleteMulti(c context.Context, key []string) error { - if len(key) == 0 { - return nil - } - req := &pb.MemcacheDeleteRequest{ - Item: make([]*pb.MemcacheDeleteRequest_Item, len(key)), - } - for i, k := range key { - req.Item[i] = &pb.MemcacheDeleteRequest_Item{Key: []byte(k)} - } - res := &pb.MemcacheDeleteResponse{} - if err := internal.Call(c, "memcache", "Delete", req, res); err != nil { - return err - } - if len(res.DeleteStatus) != len(key) { - return ErrServerError - } - me, any := make(appengine.MultiError, len(key)), false - for i, s := range res.DeleteStatus { - switch s { - case pb.MemcacheDeleteResponse_DELETED: - // OK - case pb.MemcacheDeleteResponse_NOT_FOUND: - me[i] = ErrCacheMiss - any = true - default: - me[i] = ErrServerError - any = true - } - } - if any { - return me - } - return nil -} - -// Increment atomically increments the decimal value in the given key -// by delta and returns the new value. The value must fit in a uint64. -// Overflow wraps around, and underflow is capped to zero. The -// provided delta may be negative. If the key doesn't exist in -// memcache, the provided initial value is used to atomically -// populate it before the delta is applied. -// The key must be at most 250 bytes in length. 
-func Increment(c context.Context, key string, delta int64, initialValue uint64) (newValue uint64, err error) { - return incr(c, key, delta, &initialValue) -} - -// IncrementExisting works like Increment but assumes that the key -// already exists in memcache and doesn't take an initial value. -// IncrementExisting can save work if calculating the initial value is -// expensive. -// An error is returned if the specified item can not be found. -func IncrementExisting(c context.Context, key string, delta int64) (newValue uint64, err error) { - return incr(c, key, delta, nil) -} - -func incr(c context.Context, key string, delta int64, initialValue *uint64) (newValue uint64, err error) { - req := &pb.MemcacheIncrementRequest{ - Key: []byte(key), - InitialValue: initialValue, - } - if delta >= 0 { - req.Delta = proto.Uint64(uint64(delta)) - } else { - req.Delta = proto.Uint64(uint64(-delta)) - req.Direction = pb.MemcacheIncrementRequest_DECREMENT.Enum() - } - res := &pb.MemcacheIncrementResponse{} - err = internal.Call(c, "memcache", "Increment", req, res) - if err != nil { - return - } - if res.NewValue == nil { - return 0, ErrCacheMiss - } - return *res.NewValue, nil -} - -// set sets the given items using the given conflict resolution policy. -// appengine.MultiError may be returned. -func set(c context.Context, item []*Item, value [][]byte, policy pb.MemcacheSetRequest_SetPolicy) error { - if len(item) == 0 { - return nil - } - req := &pb.MemcacheSetRequest{ - Item: make([]*pb.MemcacheSetRequest_Item, len(item)), - } - for i, t := range item { - p := &pb.MemcacheSetRequest_Item{ - Key: []byte(t.Key), - } - if value == nil { - p.Value = t.Value - } else { - p.Value = value[i] - } - if t.Flags != 0 { - p.Flags = proto.Uint32(t.Flags) - } - if t.Expiration != 0 { - // In the .proto file, MemcacheSetRequest_Item uses a fixed32 (i.e. unsigned) - // for expiration time, while MemcacheGetRequest_Item uses int32 (i.e. signed). - // Throughout this .go file, we use int32. - // Also, in the proto, the expiration value is either a duration (in seconds) - // or an absolute Unix timestamp (in seconds), depending on whether the - // value is less than or greater than or equal to 30 years, respectively. - if t.Expiration < time.Second { - // Because an Expiration of 0 means no expiration, we take - // care here to translate an item with an expiration - // Duration between 0-1 seconds as immediately expiring - // (saying it expired a few seconds ago), rather than - // rounding it down to 0 and making it live forever. 
- p.ExpirationTime = proto.Uint32(uint32(time.Now().Unix()) - 5) - } else if t.Expiration >= thirtyYears { - p.ExpirationTime = proto.Uint32(uint32(time.Now().Unix()) + uint32(t.Expiration/time.Second)) - } else { - p.ExpirationTime = proto.Uint32(uint32(t.Expiration / time.Second)) - } - } - if t.casID != 0 { - p.CasId = proto.Uint64(t.casID) - p.ForCas = proto.Bool(true) - } - p.SetPolicy = policy.Enum() - req.Item[i] = p - } - res := &pb.MemcacheSetResponse{} - if err := internal.Call(c, "memcache", "Set", req, res); err != nil { - return err - } - if len(res.SetStatus) != len(item) { - return ErrServerError - } - me, any := make(appengine.MultiError, len(item)), false - for i, st := range res.SetStatus { - var err error - switch st { - case pb.MemcacheSetResponse_STORED: - // OK - case pb.MemcacheSetResponse_NOT_STORED: - err = ErrNotStored - case pb.MemcacheSetResponse_EXISTS: - err = ErrCASConflict - default: - err = ErrServerError - } - if err != nil { - me[i] = err - any = true - } - } - if any { - return me - } - return nil -} - -// Set writes the given item, unconditionally. -func Set(c context.Context, item *Item) error { - return singleError(set(c, []*Item{item}, nil, pb.MemcacheSetRequest_SET)) -} - -// SetMulti is a batch version of Set. -// appengine.MultiError may be returned. -func SetMulti(c context.Context, item []*Item) error { - return set(c, item, nil, pb.MemcacheSetRequest_SET) -} - -// Add writes the given item, if no value already exists for its key. -// ErrNotStored is returned if that condition is not met. -func Add(c context.Context, item *Item) error { - return singleError(set(c, []*Item{item}, nil, pb.MemcacheSetRequest_ADD)) -} - -// AddMulti is a batch version of Add. -// appengine.MultiError may be returned. -func AddMulti(c context.Context, item []*Item) error { - return set(c, item, nil, pb.MemcacheSetRequest_ADD) -} - -// CompareAndSwap writes the given item that was previously returned by Get, -// if the value was neither modified or evicted between the Get and the -// CompareAndSwap calls. The item's Key should not change between calls but -// all other item fields may differ. -// ErrCASConflict is returned if the value was modified in between the calls. -// ErrNotStored is returned if the value was evicted in between the calls. -func CompareAndSwap(c context.Context, item *Item) error { - return singleError(set(c, []*Item{item}, nil, pb.MemcacheSetRequest_CAS)) -} - -// CompareAndSwapMulti is a batch version of CompareAndSwap. -// appengine.MultiError may be returned. -func CompareAndSwapMulti(c context.Context, item []*Item) error { - return set(c, item, nil, pb.MemcacheSetRequest_CAS) -} - -// Codec represents a symmetric pair of functions that implement a codec. -// Items stored into or retrieved from memcache using a Codec have their -// values marshaled or unmarshaled. -// -// All the methods provided for Codec behave analogously to the package level -// function with same name. -type Codec struct { - Marshal func(interface{}) ([]byte, error) - Unmarshal func([]byte, interface{}) error -} - -// Get gets the item for the given key and decodes the obtained value into v. -// ErrCacheMiss is returned for a memcache cache miss. -// The key must be at most 250 bytes in length. 
-func (cd Codec) Get(c context.Context, key string, v interface{}) (*Item, error) { - i, err := Get(c, key) - if err != nil { - return nil, err - } - if err := cd.Unmarshal(i.Value, v); err != nil { - return nil, err - } - return i, nil -} - -func (cd Codec) set(c context.Context, items []*Item, policy pb.MemcacheSetRequest_SetPolicy) error { - var vs [][]byte - var me appengine.MultiError - for i, item := range items { - v, err := cd.Marshal(item.Object) - if err != nil { - if me == nil { - me = make(appengine.MultiError, len(items)) - } - me[i] = err - continue - } - if me == nil { - vs = append(vs, v) - } - } - if me != nil { - return me - } - - return set(c, items, vs, policy) -} - -// Set writes the given item, unconditionally. -func (cd Codec) Set(c context.Context, item *Item) error { - return singleError(cd.set(c, []*Item{item}, pb.MemcacheSetRequest_SET)) -} - -// SetMulti is a batch version of Set. -// appengine.MultiError may be returned. -func (cd Codec) SetMulti(c context.Context, items []*Item) error { - return cd.set(c, items, pb.MemcacheSetRequest_SET) -} - -// Add writes the given item, if no value already exists for its key. -// ErrNotStored is returned if that condition is not met. -func (cd Codec) Add(c context.Context, item *Item) error { - return singleError(cd.set(c, []*Item{item}, pb.MemcacheSetRequest_ADD)) -} - -// AddMulti is a batch version of Add. -// appengine.MultiError may be returned. -func (cd Codec) AddMulti(c context.Context, items []*Item) error { - return cd.set(c, items, pb.MemcacheSetRequest_ADD) -} - -// CompareAndSwap writes the given item that was previously returned by Get, -// if the value was neither modified or evicted between the Get and the -// CompareAndSwap calls. The item's Key should not change between calls but -// all other item fields may differ. -// ErrCASConflict is returned if the value was modified in between the calls. -// ErrNotStored is returned if the value was evicted in between the calls. -func (cd Codec) CompareAndSwap(c context.Context, item *Item) error { - return singleError(cd.set(c, []*Item{item}, pb.MemcacheSetRequest_CAS)) -} - -// CompareAndSwapMulti is a batch version of CompareAndSwap. -// appengine.MultiError may be returned. -func (cd Codec) CompareAndSwapMulti(c context.Context, items []*Item) error { - return cd.set(c, items, pb.MemcacheSetRequest_CAS) -} - -var ( - // Gob is a Codec that uses the gob package. - Gob = Codec{gobMarshal, gobUnmarshal} - // JSON is a Codec that uses the json package. - JSON = Codec{json.Marshal, json.Unmarshal} -) - -func gobMarshal(v interface{}) ([]byte, error) { - var buf bytes.Buffer - if err := gob.NewEncoder(&buf).Encode(v); err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -func gobUnmarshal(data []byte, v interface{}) error { - return gob.NewDecoder(bytes.NewBuffer(data)).Decode(v) -} - -// Statistics represents a set of statistics about the memcache cache. -// This may include items that have expired but have not yet been removed from the cache. -type Statistics struct { - Hits uint64 // Counter of cache hits - Misses uint64 // Counter of cache misses - ByteHits uint64 // Counter of bytes transferred for gets - - Items uint64 // Items currently in the cache - Bytes uint64 // Size of all items currently in the cache - - Oldest int64 // Age of access of the oldest item, in seconds -} - -// Stats retrieves the current memcache statistics. 
-func Stats(c context.Context) (*Statistics, error) { - req := &pb.MemcacheStatsRequest{} - res := &pb.MemcacheStatsResponse{} - if err := internal.Call(c, "memcache", "Stats", req, res); err != nil { - return nil, err - } - if res.Stats == nil { - return nil, ErrNoStats - } - return &Statistics{ - Hits: *res.Stats.Hits, - Misses: *res.Stats.Misses, - ByteHits: *res.Stats.ByteHits, - Items: *res.Stats.Items, - Bytes: *res.Stats.Bytes, - Oldest: int64(*res.Stats.OldestItemAge), - }, nil -} - -// Flush flushes all items from memcache. -func Flush(c context.Context) error { - req := &pb.MemcacheFlushRequest{} - res := &pb.MemcacheFlushResponse{} - return internal.Call(c, "memcache", "FlushAll", req, res) -} - -func namespaceMod(m proto.Message, namespace string) { - switch m := m.(type) { - case *pb.MemcacheDeleteRequest: - if m.NameSpace == nil { - m.NameSpace = &namespace - } - case *pb.MemcacheGetRequest: - if m.NameSpace == nil { - m.NameSpace = &namespace - } - case *pb.MemcacheIncrementRequest: - if m.NameSpace == nil { - m.NameSpace = &namespace - } - case *pb.MemcacheSetRequest: - if m.NameSpace == nil { - m.NameSpace = &namespace - } - // MemcacheFlushRequest, MemcacheStatsRequest do not apply namespace. - } -} - -func init() { - internal.RegisterErrorCodeMap("memcache", pb.MemcacheServiceError_ErrorCode_name) - internal.NamespaceMods["memcache"] = namespaceMod -} diff --git a/vendor/google.golang.org/appengine/namespace.go b/vendor/google.golang.org/appengine/namespace.go deleted file mode 100644 index 21860ca..0000000 --- a/vendor/google.golang.org/appengine/namespace.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2012 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package appengine - -import ( - "fmt" - "regexp" - - "golang.org/x/net/context" - - "google.golang.org/appengine/internal" -) - -// Namespace returns a replacement context that operates within the given namespace. -func Namespace(c context.Context, namespace string) (context.Context, error) { - if !validNamespace.MatchString(namespace) { - return nil, fmt.Errorf("appengine: namespace %q does not match /%s/", namespace, validNamespace) - } - return internal.NamespacedContext(c, namespace), nil -} - -// validNamespace matches valid namespace names. -var validNamespace = regexp.MustCompile(`^[0-9A-Za-z._-]{0,100}$`) diff --git a/vendor/google.golang.org/appengine/timeout.go b/vendor/google.golang.org/appengine/timeout.go deleted file mode 100644 index 05642a9..0000000 --- a/vendor/google.golang.org/appengine/timeout.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2013 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package appengine - -import "golang.org/x/net/context" - -// IsTimeoutError reports whether err is a timeout error. -func IsTimeoutError(err error) bool { - if err == context.DeadlineExceeded { - return true - } - if t, ok := err.(interface { - IsTimeout() bool - }); ok { - return t.IsTimeout() - } - return false -} diff --git a/vendor/google.golang.org/appengine/urlfetch/urlfetch.go b/vendor/google.golang.org/appengine/urlfetch/urlfetch.go deleted file mode 100644 index 6ffe1e6..0000000 --- a/vendor/google.golang.org/appengine/urlfetch/urlfetch.go +++ /dev/null @@ -1,210 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. 
-// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// Package urlfetch provides an http.RoundTripper implementation -// for fetching URLs via App Engine's urlfetch service. -package urlfetch // import "google.golang.org/appengine/urlfetch" - -import ( - "errors" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "strconv" - "strings" - "time" - - "github.com/golang/protobuf/proto" - "golang.org/x/net/context" - - "google.golang.org/appengine/internal" - pb "google.golang.org/appengine/internal/urlfetch" -) - -// Transport is an implementation of http.RoundTripper for -// App Engine. Users should generally create an http.Client using -// this transport and use the Client rather than using this transport -// directly. -type Transport struct { - Context context.Context - - // Controls whether the application checks the validity of SSL certificates - // over HTTPS connections. A value of false (the default) instructs the - // application to send a request to the server only if the certificate is - // valid and signed by a trusted certificate authority (CA), and also - // includes a hostname that matches the certificate. A value of true - // instructs the application to perform no certificate validation. - AllowInvalidServerCertificate bool -} - -// Verify statically that *Transport implements http.RoundTripper. -var _ http.RoundTripper = (*Transport)(nil) - -// Client returns an *http.Client using a default urlfetch Transport. This -// client will have the default deadline of 5 seconds, and will check the -// validity of SSL certificates. -// -// Any deadline of the provided context will be used for requests through this client; -// if the client does not have a deadline then a 5 second default is used. -func Client(ctx context.Context) *http.Client { - return &http.Client{ - Transport: &Transport{ - Context: ctx, - }, - } -} - -type bodyReader struct { - content []byte - truncated bool - closed bool -} - -// ErrTruncatedBody is the error returned after the final Read() from a -// response's Body if the body has been truncated by App Engine's proxy. -var ErrTruncatedBody = errors.New("urlfetch: truncated body") - -func statusCodeToText(code int) string { - if t := http.StatusText(code); t != "" { - return t - } - return strconv.Itoa(code) -} - -func (br *bodyReader) Read(p []byte) (n int, err error) { - if br.closed { - if br.truncated { - return 0, ErrTruncatedBody - } - return 0, io.EOF - } - n = copy(p, br.content) - if n > 0 { - br.content = br.content[n:] - return - } - if br.truncated { - br.closed = true - return 0, ErrTruncatedBody - } - return 0, io.EOF -} - -func (br *bodyReader) Close() error { - br.closed = true - br.content = nil - return nil -} - -// A map of the URL Fetch-accepted methods that take a request body. -var methodAcceptsRequestBody = map[string]bool{ - "POST": true, - "PUT": true, - "PATCH": true, -} - -// urlString returns a valid string given a URL. This function is necessary because -// the String method of URL doesn't correctly handle URLs with non-empty Opaque values. -// See http://code.google.com/p/go/issues/detail?id=4860. -func urlString(u *url.URL) string { - if u.Opaque == "" || strings.HasPrefix(u.Opaque, "//") { - return u.String() - } - aux := *u - aux.Opaque = "//" + aux.Host + aux.Opaque - return aux.String() -} - -// RoundTrip issues a single HTTP request and returns its response. 
Per the -// http.RoundTripper interface, RoundTrip only returns an error if there -// was an unsupported request or the URL Fetch proxy fails. -// Note that HTTP response codes such as 5xx, 403, 404, etc are not -// errors as far as the transport is concerned and will be returned -// with err set to nil. -func (t *Transport) RoundTrip(req *http.Request) (res *http.Response, err error) { - methNum, ok := pb.URLFetchRequest_RequestMethod_value[req.Method] - if !ok { - return nil, fmt.Errorf("urlfetch: unsupported HTTP method %q", req.Method) - } - - method := pb.URLFetchRequest_RequestMethod(methNum) - - freq := &pb.URLFetchRequest{ - Method: &method, - Url: proto.String(urlString(req.URL)), - FollowRedirects: proto.Bool(false), // http.Client's responsibility - MustValidateServerCertificate: proto.Bool(!t.AllowInvalidServerCertificate), - } - if deadline, ok := t.Context.Deadline(); ok { - freq.Deadline = proto.Float64(deadline.Sub(time.Now()).Seconds()) - } - - for k, vals := range req.Header { - for _, val := range vals { - freq.Header = append(freq.Header, &pb.URLFetchRequest_Header{ - Key: proto.String(k), - Value: proto.String(val), - }) - } - } - if methodAcceptsRequestBody[req.Method] && req.Body != nil { - // Avoid a []byte copy if req.Body has a Bytes method. - switch b := req.Body.(type) { - case interface { - Bytes() []byte - }: - freq.Payload = b.Bytes() - default: - freq.Payload, err = ioutil.ReadAll(req.Body) - if err != nil { - return nil, err - } - } - } - - fres := &pb.URLFetchResponse{} - if err := internal.Call(t.Context, "urlfetch", "Fetch", freq, fres); err != nil { - return nil, err - } - - res = &http.Response{} - res.StatusCode = int(*fres.StatusCode) - res.Status = fmt.Sprintf("%d %s", res.StatusCode, statusCodeToText(res.StatusCode)) - res.Header = make(http.Header) - res.Request = req - - // Faked: - res.ProtoMajor = 1 - res.ProtoMinor = 1 - res.Proto = "HTTP/1.1" - res.Close = true - - for _, h := range fres.Header { - hkey := http.CanonicalHeaderKey(*h.Key) - hval := *h.Value - if hkey == "Content-Length" { - // Will get filled in below for all but HEAD requests. - if req.Method == "HEAD" { - res.ContentLength, _ = strconv.ParseInt(hval, 10, 64) - } - continue - } - res.Header.Add(hkey, hval) - } - - if req.Method != "HEAD" { - res.ContentLength = int64(len(fres.Content)) - } - - truncated := fres.GetContentWasTruncated() - res.Body = &bodyReader{content: fres.Content, truncated: truncated} - return -} - -func init() { - internal.RegisterErrorCodeMap("urlfetch", pb.URLFetchServiceError_ErrorCode_name) - internal.RegisterTimeoutErrorCode("urlfetch", int32(pb.URLFetchServiceError_DEADLINE_EXCEEDED)) -} diff --git a/vendor/google.golang.org/appengine/user/oauth.go b/vendor/google.golang.org/appengine/user/oauth.go deleted file mode 100644 index ffad571..0000000 --- a/vendor/google.golang.org/appengine/user/oauth.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2012 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package user - -import ( - "golang.org/x/net/context" - - "google.golang.org/appengine/internal" - pb "google.golang.org/appengine/internal/user" -) - -// CurrentOAuth returns the user associated with the OAuth consumer making this -// request. If the OAuth consumer did not make a valid OAuth request, or the -// scopes is non-empty and the current user does not have at least one of the -// scopes, this method will return an error. 
-func CurrentOAuth(c context.Context, scopes ...string) (*User, error) { - req := &pb.GetOAuthUserRequest{} - if len(scopes) != 1 || scopes[0] != "" { - // The signature for this function used to be CurrentOAuth(Context, string). - // Ignore the singular "" scope to preserve existing behavior. - req.Scopes = scopes - } - - res := &pb.GetOAuthUserResponse{} - - err := internal.Call(c, "user", "GetOAuthUser", req, res) - if err != nil { - return nil, err - } - return &User{ - Email: *res.Email, - AuthDomain: *res.AuthDomain, - Admin: res.GetIsAdmin(), - ID: *res.UserId, - ClientID: res.GetClientId(), - }, nil -} - -// OAuthConsumerKey returns the OAuth consumer key provided with the current -// request. This method will return an error if the OAuth request was invalid. -func OAuthConsumerKey(c context.Context) (string, error) { - req := &pb.CheckOAuthSignatureRequest{} - res := &pb.CheckOAuthSignatureResponse{} - - err := internal.Call(c, "user", "CheckOAuthSignature", req, res) - if err != nil { - return "", err - } - return *res.OauthConsumerKey, err -} diff --git a/vendor/google.golang.org/appengine/user/user.go b/vendor/google.golang.org/appengine/user/user.go deleted file mode 100644 index eb76f59..0000000 --- a/vendor/google.golang.org/appengine/user/user.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// Package user provides a client for App Engine's user authentication service. -package user // import "google.golang.org/appengine/user" - -import ( - "strings" - - "github.com/golang/protobuf/proto" - "golang.org/x/net/context" - - "google.golang.org/appengine/internal" - pb "google.golang.org/appengine/internal/user" -) - -// User represents a user of the application. -type User struct { - Email string - AuthDomain string - Admin bool - - // ID is the unique permanent ID of the user. - // It is populated if the Email is associated - // with a Google account, or empty otherwise. - ID string - - // ClientID is the ID of the pre-registered client so its identity can be verified. - // See https://developers.google.com/console/help/#generatingoauth2 for more information. - ClientID string - - FederatedIdentity string - FederatedProvider string -} - -// String returns a displayable name for the user. -func (u *User) String() string { - if u.AuthDomain != "" && strings.HasSuffix(u.Email, "@"+u.AuthDomain) { - return u.Email[:len(u.Email)-len("@"+u.AuthDomain)] - } - if u.FederatedIdentity != "" { - return u.FederatedIdentity - } - return u.Email -} - -// LoginURL returns a URL that, when visited, prompts the user to sign in, -// then redirects the user to the URL specified by dest. -func LoginURL(c context.Context, dest string) (string, error) { - return LoginURLFederated(c, dest, "") -} - -// LoginURLFederated is like LoginURL but accepts a user's OpenID identifier. -func LoginURLFederated(c context.Context, dest, identity string) (string, error) { - req := &pb.CreateLoginURLRequest{ - DestinationUrl: proto.String(dest), - } - if identity != "" { - req.FederatedIdentity = proto.String(identity) - } - res := &pb.CreateLoginURLResponse{} - if err := internal.Call(c, "user", "CreateLoginURL", req, res); err != nil { - return "", err - } - return *res.LoginUrl, nil -} - -// LogoutURL returns a URL that, when visited, signs the user out, -// then redirects the user to the URL specified by dest. 
-func LogoutURL(c context.Context, dest string) (string, error) { - req := &pb.CreateLogoutURLRequest{ - DestinationUrl: proto.String(dest), - } - res := &pb.CreateLogoutURLResponse{} - if err := internal.Call(c, "user", "CreateLogoutURL", req, res); err != nil { - return "", err - } - return *res.LogoutUrl, nil -} - -func init() { - internal.RegisterErrorCodeMap("user", pb.UserServiceError_ErrorCode_name) -} diff --git a/vendor/google.golang.org/appengine/user/user_classic.go b/vendor/google.golang.org/appengine/user/user_classic.go deleted file mode 100644 index a747ef3..0000000 --- a/vendor/google.golang.org/appengine/user/user_classic.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// +build appengine - -package user - -import ( - "appengine/user" - - "golang.org/x/net/context" - - "google.golang.org/appengine/internal" -) - -func Current(ctx context.Context) *User { - u := user.Current(internal.ClassicContextFromContext(ctx)) - if u == nil { - return nil - } - // Map appengine/user.User to this package's User type. - return &User{ - Email: u.Email, - AuthDomain: u.AuthDomain, - Admin: u.Admin, - ID: u.ID, - FederatedIdentity: u.FederatedIdentity, - FederatedProvider: u.FederatedProvider, - } -} - -func IsAdmin(ctx context.Context) bool { - return user.IsAdmin(internal.ClassicContextFromContext(ctx)) -} diff --git a/vendor/google.golang.org/appengine/user/user_vm.go b/vendor/google.golang.org/appengine/user/user_vm.go deleted file mode 100644 index 8dc672e..0000000 --- a/vendor/google.golang.org/appengine/user/user_vm.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2014 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// +build !appengine - -package user - -import ( - "golang.org/x/net/context" - - "google.golang.org/appengine/internal" -) - -// Current returns the currently logged-in user, -// or nil if the user is not signed in. -func Current(c context.Context) *User { - h := internal.IncomingHeaders(c) - u := &User{ - Email: h.Get("X-AppEngine-User-Email"), - AuthDomain: h.Get("X-AppEngine-Auth-Domain"), - ID: h.Get("X-AppEngine-User-Id"), - Admin: h.Get("X-AppEngine-User-Is-Admin") == "1", - FederatedIdentity: h.Get("X-AppEngine-Federated-Identity"), - FederatedProvider: h.Get("X-AppEngine-Federated-Provider"), - } - if u.Email == "" && u.FederatedIdentity == "" { - return nil - } - return u -} - -// IsAdmin returns true if the current user is signed in and -// is currently registered as an administrator of the application. -func IsAdmin(c context.Context) bool { - h := internal.IncomingHeaders(c) - return h.Get("X-AppEngine-User-Is-Admin") == "1" -} diff --git a/vendor/gopkg.in/yaml.v2/.travis.yml b/vendor/gopkg.in/yaml.v2/.travis.yml new file mode 100644 index 0000000..9f55693 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/.travis.yml @@ -0,0 +1,12 @@ +language: go + +go: + - 1.4 + - 1.5 + - 1.6 + - 1.7 + - 1.8 + - 1.9 + - tip + +go_import_path: gopkg.in/yaml.v2 diff --git a/vendor/gopkg.in/yaml.v2/NOTICE b/vendor/gopkg.in/yaml.v2/NOTICE new file mode 100644 index 0000000..866d74a --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/NOTICE @@ -0,0 +1,13 @@ +Copyright 2011-2016 Canonical Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/gopkg.in/yaml.v2/README.md b/vendor/gopkg.in/yaml.v2/README.md index 2ed3314..b50c6e8 100644 --- a/vendor/gopkg.in/yaml.v2/README.md +++ b/vendor/gopkg.in/yaml.v2/README.md @@ -48,8 +48,6 @@ The yaml package is licensed under the Apache License 2.0. Please see the LICENS Example ------- -Some more examples can be found in the "examples" folder. - ```Go package main diff --git a/vendor/gopkg.in/yaml.v2/apic.go b/vendor/gopkg.in/yaml.v2/apic.go index 95ec014..1f7e87e 100644 --- a/vendor/gopkg.in/yaml.v2/apic.go +++ b/vendor/gopkg.in/yaml.v2/apic.go @@ -2,7 +2,6 @@ package yaml import ( "io" - "os" ) func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { @@ -48,9 +47,9 @@ func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err return n, nil } -// File read handler. -func yaml_file_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { - return parser.input_file.Read(buffer) +// Reader read handler. +func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + return parser.input_reader.Read(buffer) } // Set a string input. @@ -64,12 +63,12 @@ func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { } // Set a file input. -func yaml_parser_set_input_file(parser *yaml_parser_t, file *os.File) { +func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) { if parser.read_handler != nil { panic("must set the input source only once") } - parser.read_handler = yaml_file_read_handler - parser.input_file = file + parser.read_handler = yaml_reader_read_handler + parser.input_reader = r } // Set the source encoding. @@ -81,14 +80,13 @@ func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { } // Create a new emitter object. -func yaml_emitter_initialize(emitter *yaml_emitter_t) bool { +func yaml_emitter_initialize(emitter *yaml_emitter_t) { *emitter = yaml_emitter_t{ buffer: make([]byte, output_buffer_size), raw_buffer: make([]byte, 0, output_raw_buffer_size), states: make([]yaml_emitter_state_t, 0, initial_stack_size), events: make([]yaml_event_t, 0, initial_queue_size), } - return true } // Destroy an emitter object. @@ -102,9 +100,10 @@ func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { return nil } -// File write handler. -func yaml_file_write_handler(emitter *yaml_emitter_t, buffer []byte) error { - _, err := emitter.output_file.Write(buffer) +// yaml_writer_write_handler uses emitter.output_writer to write the +// emitted text. +func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + _, err := emitter.output_writer.Write(buffer) return err } @@ -118,12 +117,12 @@ func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]by } // Set a file output. 
-func yaml_emitter_set_output_file(emitter *yaml_emitter_t, file io.Writer) { +func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) { if emitter.write_handler != nil { panic("must set the output target only once") } - emitter.write_handler = yaml_file_write_handler - emitter.output_file = file + emitter.write_handler = yaml_writer_write_handler + emitter.output_writer = w } // Set the output encoding. @@ -252,41 +251,41 @@ func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { // // Create STREAM-START. -func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) bool { +func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) { *event = yaml_event_t{ typ: yaml_STREAM_START_EVENT, encoding: encoding, } - return true } // Create STREAM-END. -func yaml_stream_end_event_initialize(event *yaml_event_t) bool { +func yaml_stream_end_event_initialize(event *yaml_event_t) { *event = yaml_event_t{ typ: yaml_STREAM_END_EVENT, } - return true } // Create DOCUMENT-START. -func yaml_document_start_event_initialize(event *yaml_event_t, version_directive *yaml_version_directive_t, - tag_directives []yaml_tag_directive_t, implicit bool) bool { +func yaml_document_start_event_initialize( + event *yaml_event_t, + version_directive *yaml_version_directive_t, + tag_directives []yaml_tag_directive_t, + implicit bool, +) { *event = yaml_event_t{ typ: yaml_DOCUMENT_START_EVENT, version_directive: version_directive, tag_directives: tag_directives, implicit: implicit, } - return true } // Create DOCUMENT-END. -func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) bool { +func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) { *event = yaml_event_t{ typ: yaml_DOCUMENT_END_EVENT, implicit: implicit, } - return true } ///* @@ -348,7 +347,7 @@ func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { } // Create MAPPING-START. -func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) bool { +func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) { *event = yaml_event_t{ typ: yaml_MAPPING_START_EVENT, anchor: anchor, @@ -356,15 +355,13 @@ func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte implicit: implicit, style: yaml_style_t(style), } - return true } // Create MAPPING-END. -func yaml_mapping_end_event_initialize(event *yaml_event_t) bool { +func yaml_mapping_end_event_initialize(event *yaml_event_t) { *event = yaml_event_t{ typ: yaml_MAPPING_END_EVENT, } - return true } // Destroy an event object. @@ -471,7 +468,7 @@ func yaml_event_delete(event *yaml_event_t) { // } context // tag_directive *yaml_tag_directive_t // -// context.error = YAML_NO_ERROR // Eliminate a compliler warning. +// context.error = YAML_NO_ERROR // Eliminate a compiler warning. // // assert(document) // Non-NULL document object is expected. 
// diff --git a/vendor/gopkg.in/yaml.v2/decode.go b/vendor/gopkg.in/yaml.v2/decode.go index e85eb2e..e4e56e2 100644 --- a/vendor/gopkg.in/yaml.v2/decode.go +++ b/vendor/gopkg.in/yaml.v2/decode.go @@ -4,6 +4,7 @@ import ( "encoding" "encoding/base64" "fmt" + "io" "math" "reflect" "strconv" @@ -22,19 +23,22 @@ type node struct { kind int line, column int tag string - value string - implicit bool - children []*node - anchors map[string]*node + // For an alias node, alias holds the resolved alias. + alias *node + value string + implicit bool + children []*node + anchors map[string]*node } // ---------------------------------------------------------------------------- // Parser, produces a node tree out of a libyaml event stream. type parser struct { - parser yaml_parser_t - event yaml_event_t - doc *node + parser yaml_parser_t + event yaml_event_t + doc *node + doneInit bool } func newParser(b []byte) *parser { @@ -42,21 +46,30 @@ func newParser(b []byte) *parser { if !yaml_parser_initialize(&p.parser) { panic("failed to initialize YAML emitter") } - if len(b) == 0 { b = []byte{'\n'} } - yaml_parser_set_input_string(&p.parser, b) + return &p +} - p.skip() - if p.event.typ != yaml_STREAM_START_EVENT { - panic("expected stream start event, got " + strconv.Itoa(int(p.event.typ))) +func newParserFromReader(r io.Reader) *parser { + p := parser{} + if !yaml_parser_initialize(&p.parser) { + panic("failed to initialize YAML emitter") } - p.skip() + yaml_parser_set_input_reader(&p.parser, r) return &p } +func (p *parser) init() { + if p.doneInit { + return + } + p.expect(yaml_STREAM_START_EVENT) + p.doneInit = true +} + func (p *parser) destroy() { if p.event.typ != yaml_NO_EVENT { yaml_event_delete(&p.event) @@ -64,16 +77,35 @@ func (p *parser) destroy() { yaml_parser_delete(&p.parser) } -func (p *parser) skip() { - if p.event.typ != yaml_NO_EVENT { - if p.event.typ == yaml_STREAM_END_EVENT { - failf("attempted to go past the end of stream; corrupted value?") +// expect consumes an event from the event stream and +// checks that it's of the expected type. +func (p *parser) expect(e yaml_event_type_t) { + if p.event.typ == yaml_NO_EVENT { + if !yaml_parser_parse(&p.parser, &p.event) { + p.fail() } - yaml_event_delete(&p.event) + } + if p.event.typ == yaml_STREAM_END_EVENT { + failf("attempted to go past the end of stream; corrupted value?") + } + if p.event.typ != e { + p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ) + p.fail() + } + yaml_event_delete(&p.event) + p.event.typ = yaml_NO_EVENT +} + +// peek peeks at the next event in the event stream, +// puts the results into p.event and returns the event type. +func (p *parser) peek() yaml_event_type_t { + if p.event.typ != yaml_NO_EVENT { + return p.event.typ } if !yaml_parser_parse(&p.parser, &p.event) { p.fail() } + return p.event.typ } func (p *parser) fail() { @@ -81,6 +113,10 @@ func (p *parser) fail() { var line int if p.parser.problem_mark.line != 0 { line = p.parser.problem_mark.line + // Scanner errors don't iterate line before returning error + if p.parser.error == yaml_SCANNER_ERROR { + line++ + } } else if p.parser.context_mark.line != 0 { line = p.parser.context_mark.line } @@ -103,7 +139,8 @@ func (p *parser) anchor(n *node, anchor []byte) { } func (p *parser) parse() *node { - switch p.event.typ { + p.init() + switch p.peek() { case yaml_SCALAR_EVENT: return p.scalar() case yaml_ALIAS_EVENT: @@ -118,7 +155,7 @@ func (p *parser) parse() *node { // Happens when attempting to decode an empty buffer. 
return nil default: - panic("attempted to parse unknown event: " + strconv.Itoa(int(p.event.typ))) + panic("attempted to parse unknown event: " + p.event.typ.String()) } } @@ -134,19 +171,20 @@ func (p *parser) document() *node { n := p.node(documentNode) n.anchors = make(map[string]*node) p.doc = n - p.skip() + p.expect(yaml_DOCUMENT_START_EVENT) n.children = append(n.children, p.parse()) - if p.event.typ != yaml_DOCUMENT_END_EVENT { - panic("expected end of document event but got " + strconv.Itoa(int(p.event.typ))) - } - p.skip() + p.expect(yaml_DOCUMENT_END_EVENT) return n } func (p *parser) alias() *node { n := p.node(aliasNode) n.value = string(p.event.anchor) - p.skip() + n.alias = p.doc.anchors[n.value] + if n.alias == nil { + failf("unknown anchor '%s' referenced", n.value) + } + p.expect(yaml_ALIAS_EVENT) return n } @@ -156,29 +194,29 @@ func (p *parser) scalar() *node { n.tag = string(p.event.tag) n.implicit = p.event.implicit p.anchor(n, p.event.anchor) - p.skip() + p.expect(yaml_SCALAR_EVENT) return n } func (p *parser) sequence() *node { n := p.node(sequenceNode) p.anchor(n, p.event.anchor) - p.skip() - for p.event.typ != yaml_SEQUENCE_END_EVENT { + p.expect(yaml_SEQUENCE_START_EVENT) + for p.peek() != yaml_SEQUENCE_END_EVENT { n.children = append(n.children, p.parse()) } - p.skip() + p.expect(yaml_SEQUENCE_END_EVENT) return n } func (p *parser) mapping() *node { n := p.node(mappingNode) p.anchor(n, p.event.anchor) - p.skip() - for p.event.typ != yaml_MAPPING_END_EVENT { + p.expect(yaml_MAPPING_START_EVENT) + for p.peek() != yaml_MAPPING_END_EVENT { n.children = append(n.children, p.parse(), p.parse()) } - p.skip() + p.expect(yaml_MAPPING_END_EVENT) return n } @@ -187,7 +225,7 @@ func (p *parser) mapping() *node { type decoder struct { doc *node - aliases map[string]bool + aliases map[*node]bool mapType reflect.Type terrors []string strict bool @@ -198,11 +236,13 @@ var ( durationType = reflect.TypeOf(time.Duration(0)) defaultMapType = reflect.TypeOf(map[interface{}]interface{}{}) ifaceType = defaultMapType.Elem() + timeType = reflect.TypeOf(time.Time{}) + ptrTimeType = reflect.TypeOf(&time.Time{}) ) func newDecoder(strict bool) *decoder { d := &decoder{mapType: defaultMapType, strict: strict} - d.aliases = make(map[string]bool) + d.aliases = make(map[*node]bool) return d } @@ -308,16 +348,13 @@ func (d *decoder) document(n *node, out reflect.Value) (good bool) { } func (d *decoder) alias(n *node, out reflect.Value) (good bool) { - an, ok := d.doc.anchors[n.value] - if !ok { - failf("unknown anchor '%s' referenced", n.value) - } - if d.aliases[n.value] { + if d.aliases[n] { + // TODO this could actually be allowed in some circumstances. 
failf("anchor '%s' value contains itself", n.value) } - d.aliases[n.value] = true - good = d.unmarshal(an, out) - delete(d.aliases, n.value) + d.aliases[n] = true + good = d.unmarshal(n.alias, out) + delete(d.aliases, n) return good } @@ -329,7 +366,7 @@ func resetMap(out reflect.Value) { } } -func (d *decoder) scalar(n *node, out reflect.Value) (good bool) { +func (d *decoder) scalar(n *node, out reflect.Value) bool { var tag string var resolved interface{} if n.tag == "" && !n.implicit { @@ -353,9 +390,26 @@ func (d *decoder) scalar(n *node, out reflect.Value) (good bool) { } return true } - if s, ok := resolved.(string); ok && out.CanAddr() { - if u, ok := out.Addr().Interface().(encoding.TextUnmarshaler); ok { - err := u.UnmarshalText([]byte(s)) + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + // We've resolved to exactly the type we want, so use that. + out.Set(resolvedv) + return true + } + // Perhaps we can use the value as a TextUnmarshaler to + // set its value. + if out.CanAddr() { + u, ok := out.Addr().Interface().(encoding.TextUnmarshaler) + if ok { + var text []byte + if tag == yaml_BINARY_TAG { + text = []byte(resolved.(string)) + } else { + // We let any value be unmarshaled into TextUnmarshaler. + // That might be more lax than we'd like, but the + // TextUnmarshaler itself should bowl out any dubious values. + text = []byte(n.value) + } + err := u.UnmarshalText(text) if err != nil { fail(err) } @@ -366,46 +420,54 @@ func (d *decoder) scalar(n *node, out reflect.Value) (good bool) { case reflect.String: if tag == yaml_BINARY_TAG { out.SetString(resolved.(string)) - good = true - } else if resolved != nil { + return true + } + if resolved != nil { out.SetString(n.value) - good = true + return true } case reflect.Interface: if resolved == nil { out.Set(reflect.Zero(out.Type())) + } else if tag == yaml_TIMESTAMP_TAG { + // It looks like a timestamp but for backward compatibility + // reasons we set it as a string, so that code that unmarshals + // timestamp-like values into interface{} will continue to + // see a string and not a time.Time. + // TODO(v3) Drop this. 
+ out.Set(reflect.ValueOf(n.value)) } else { out.Set(reflect.ValueOf(resolved)) } - good = true + return true case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: switch resolved := resolved.(type) { case int: if !out.OverflowInt(int64(resolved)) { out.SetInt(int64(resolved)) - good = true + return true } case int64: if !out.OverflowInt(resolved) { out.SetInt(resolved) - good = true + return true } case uint64: if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { out.SetInt(int64(resolved)) - good = true + return true } case float64: if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { out.SetInt(int64(resolved)) - good = true + return true } case string: if out.Type() == durationType { d, err := time.ParseDuration(resolved) if err == nil { out.SetInt(int64(d)) - good = true + return true } } } @@ -414,44 +476,49 @@ func (d *decoder) scalar(n *node, out reflect.Value) (good bool) { case int: if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { out.SetUint(uint64(resolved)) - good = true + return true } case int64: if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { out.SetUint(uint64(resolved)) - good = true + return true } case uint64: if !out.OverflowUint(uint64(resolved)) { out.SetUint(uint64(resolved)) - good = true + return true } case float64: if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) { out.SetUint(uint64(resolved)) - good = true + return true } } case reflect.Bool: switch resolved := resolved.(type) { case bool: out.SetBool(resolved) - good = true + return true } case reflect.Float32, reflect.Float64: switch resolved := resolved.(type) { case int: out.SetFloat(float64(resolved)) - good = true + return true case int64: out.SetFloat(float64(resolved)) - good = true + return true case uint64: out.SetFloat(float64(resolved)) - good = true + return true case float64: out.SetFloat(resolved) - good = true + return true + } + case reflect.Struct: + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + out.Set(resolvedv) + return true } case reflect.Ptr: if out.Type().Elem() == reflect.TypeOf(resolved) { @@ -459,13 +526,11 @@ func (d *decoder) scalar(n *node, out reflect.Value) (good bool) { elem := reflect.New(out.Type().Elem()) elem.Elem().Set(reflect.ValueOf(resolved)) out.Set(elem) - good = true + return true } } - if !good { - d.terror(n, tag, out) - } - return good + d.terror(n, tag, out) + return false } func settableValueOf(i interface{}) reflect.Value { @@ -482,6 +547,10 @@ func (d *decoder) sequence(n *node, out reflect.Value) (good bool) { switch out.Kind() { case reflect.Slice: out.Set(reflect.MakeSlice(out.Type(), l, l)) + case reflect.Array: + if l != out.Len() { + failf("invalid array: want %d elements but got %d", out.Len(), l) + } case reflect.Interface: // No type hints. Will have to use a generic sequence. 
iface = out @@ -500,7 +569,9 @@ func (d *decoder) sequence(n *node, out reflect.Value) (good bool) { j++ } } - out.Set(out.Slice(0, j)) + if out.Kind() != reflect.Array { + out.Set(out.Slice(0, j)) + } if iface.IsValid() { iface.Set(out) } @@ -561,7 +632,7 @@ func (d *decoder) mapping(n *node, out reflect.Value) (good bool) { } e := reflect.New(et).Elem() if d.unmarshal(n.children[i+1], e) { - out.SetMapIndex(k, e) + d.setMapIndex(n.children[i+1], out, k, e) } } } @@ -569,6 +640,14 @@ func (d *decoder) mapping(n *node, out reflect.Value) (good bool) { return true } +func (d *decoder) setMapIndex(n *node, out, k, v reflect.Value) { + if d.strict && out.MapIndex(k) != zeroValue { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: key %#v already set in map", n.line+1, k.Interface())) + return + } + out.SetMapIndex(k, v) +} + func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) { outt := out.Type() if outt.Elem() != mapItemType { @@ -616,6 +695,10 @@ func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) { elemType = inlineMap.Type().Elem() } + var doneFields []bool + if d.strict { + doneFields = make([]bool, len(sinfo.FieldsList)) + } for i := 0; i < l; i += 2 { ni := n.children[i] if isMerge(ni) { @@ -626,6 +709,13 @@ func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) { continue } if info, ok := sinfo.FieldsMap[name.String()]; ok { + if d.strict { + if doneFields[info.Id] { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.line+1, name.String(), out.Type())) + continue + } + doneFields[info.Id] = true + } var field reflect.Value if info.Inline == nil { field = out.Field(info.Num) @@ -639,9 +729,9 @@ func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) { } value := reflect.New(elemType).Elem() d.unmarshal(n.children[i+1], value) - inlineMap.SetMapIndex(name, value) + d.setMapIndex(n.children[i+1], inlineMap, name, value) } else if d.strict { - d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in struct %s", ni.line+1, name.String(), out.Type())) + d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.line+1, name.String(), out.Type())) } } return true diff --git a/vendor/gopkg.in/yaml.v2/emitterc.go b/vendor/gopkg.in/yaml.v2/emitterc.go index dcaf502..a1c2cc5 100644 --- a/vendor/gopkg.in/yaml.v2/emitterc.go +++ b/vendor/gopkg.in/yaml.v2/emitterc.go @@ -2,6 +2,7 @@ package yaml import ( "bytes" + "fmt" ) // Flush the buffer if needed. @@ -664,7 +665,7 @@ func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, return yaml_emitter_emit_mapping_start(emitter, event) default: return yaml_emitter_set_emitter_error(emitter, - "expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS") + fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ)) } } @@ -842,7 +843,7 @@ func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event return true } -// Write an achor. +// Write an anchor. 
func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool { if emitter.anchor_data.anchor == nil { return true diff --git a/vendor/gopkg.in/yaml.v2/encode.go b/vendor/gopkg.in/yaml.v2/encode.go index 84f8499..0ee738e 100644 --- a/vendor/gopkg.in/yaml.v2/encode.go +++ b/vendor/gopkg.in/yaml.v2/encode.go @@ -3,38 +3,67 @@ package yaml import ( "encoding" "fmt" + "io" "reflect" "regexp" "sort" "strconv" "strings" "time" + "unicode/utf8" ) +// jsonNumber is the interface of the encoding/json.Number datatype. +// Repeating the interface here avoids a dependency on encoding/json, and also +// supports other libraries like jsoniter, which use a similar datatype with +// the same interface. Detecting this interface is useful when dealing with +// structures containing json.Number, which is a string under the hood. The +// encoder should prefer the use of Int64(), Float64() and string(), in that +// order, when encoding this type. +type jsonNumber interface { + Float64() (float64, error) + Int64() (int64, error) + String() string +} + type encoder struct { emitter yaml_emitter_t event yaml_event_t out []byte flow bool + // doneInit holds whether the initial stream_start_event has been + // emitted. + doneInit bool } -func newEncoder() (e *encoder) { - e = &encoder{} - e.must(yaml_emitter_initialize(&e.emitter)) +func newEncoder() *encoder { + e := &encoder{} + yaml_emitter_initialize(&e.emitter) yaml_emitter_set_output_string(&e.emitter, &e.out) yaml_emitter_set_unicode(&e.emitter, true) - e.must(yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)) - e.emit() - e.must(yaml_document_start_event_initialize(&e.event, nil, nil, true)) - e.emit() return e } -func (e *encoder) finish() { - e.must(yaml_document_end_event_initialize(&e.event, true)) +func newEncoderWithWriter(w io.Writer) *encoder { + e := &encoder{} + yaml_emitter_initialize(&e.emitter) + yaml_emitter_set_output_writer(&e.emitter, w) + yaml_emitter_set_unicode(&e.emitter, true) + return e +} + +func (e *encoder) init() { + if e.doneInit { + return + } + yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING) e.emit() + e.doneInit = true +} + +func (e *encoder) finish() { e.emitter.open_ended = false - e.must(yaml_stream_end_event_initialize(&e.event)) + yaml_stream_end_event_initialize(&e.event) e.emit() } @@ -44,9 +73,7 @@ func (e *encoder) destroy() { func (e *encoder) emit() { // This will internally delete the e.event value. 
- if !yaml_emitter_emit(&e.emitter, &e.event) && e.event.typ != yaml_DOCUMENT_END_EVENT && e.event.typ != yaml_STREAM_END_EVENT { - e.must(false) - } + e.must(yaml_emitter_emit(&e.emitter, &e.event)) } func (e *encoder) must(ok bool) { @@ -59,13 +86,43 @@ func (e *encoder) must(ok bool) { } } +func (e *encoder) marshalDoc(tag string, in reflect.Value) { + e.init() + yaml_document_start_event_initialize(&e.event, nil, nil, true) + e.emit() + e.marshal(tag, in) + yaml_document_end_event_initialize(&e.event, true) + e.emit() +} + func (e *encoder) marshal(tag string, in reflect.Value) { - if !in.IsValid() { + if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() { e.nilv() return } iface := in.Interface() - if m, ok := iface.(Marshaler); ok { + switch m := iface.(type) { + case jsonNumber: + integer, err := m.Int64() + if err == nil { + // In this case the json.Number is a valid int64 + in = reflect.ValueOf(integer) + break + } + float, err := m.Float64() + if err == nil { + // In this case the json.Number is a valid float64 + in = reflect.ValueOf(float) + break + } + // fallback case - no number could be obtained + in = reflect.ValueOf(m.String()) + case time.Time, *time.Time: + // Although time.Time implements TextMarshaler, + // we don't want to treat it as a string for YAML + // purposes because YAML has special support for + // timestamps. + case Marshaler: v, err := m.MarshalYAML() if err != nil { fail(err) @@ -75,31 +132,34 @@ func (e *encoder) marshal(tag string, in reflect.Value) { return } in = reflect.ValueOf(v) - } else if m, ok := iface.(encoding.TextMarshaler); ok { + case encoding.TextMarshaler: text, err := m.MarshalText() if err != nil { fail(err) } in = reflect.ValueOf(string(text)) + case nil: + e.nilv() + return } switch in.Kind() { case reflect.Interface: - if in.IsNil() { - e.nilv() - } else { - e.marshal(tag, in.Elem()) - } + e.marshal(tag, in.Elem()) case reflect.Map: e.mapv(tag, in) case reflect.Ptr: - if in.IsNil() { - e.nilv() + if in.Type() == ptrTimeType { + e.timev(tag, in.Elem()) } else { e.marshal(tag, in.Elem()) } case reflect.Struct: - e.structv(tag, in) - case reflect.Slice: + if in.Type() == timeType { + e.timev(tag, in) + } else { + e.structv(tag, in) + } + case reflect.Slice, reflect.Array: if in.Type().Elem() == mapItemType { e.itemsv(tag, in) } else { @@ -191,10 +251,10 @@ func (e *encoder) mappingv(tag string, f func()) { e.flow = false style = yaml_FLOW_MAPPING_STYLE } - e.must(yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) + yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style) e.emit() f() - e.must(yaml_mapping_end_event_initialize(&e.event)) + yaml_mapping_end_event_initialize(&e.event) e.emit() } @@ -240,23 +300,36 @@ var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0 func (e *encoder) stringv(tag string, in reflect.Value) { var style yaml_scalar_style_t s := in.String() - rtag, rs := resolve("", s) - if rtag == yaml_BINARY_TAG { - if tag == "" || tag == yaml_STR_TAG { - tag = rtag - s = rs.(string) - } else if tag == yaml_BINARY_TAG { + canUsePlain := true + switch { + case !utf8.ValidString(s): + if tag == yaml_BINARY_TAG { failf("explicitly tagged !!binary data must be base64-encoded") - } else { + } + if tag != "" { failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag)) } + // It can't be encoded directly as YAML so use a binary tag + // and encode it as base64. 
+ tag = yaml_BINARY_TAG + s = encodeBase64(s) + case tag == "": + // Check to see if it would resolve to a specific + // tag when encoded unquoted. If it doesn't, + // there's no need to quote it. + rtag, _ := resolve("", s) + canUsePlain = rtag == yaml_STR_TAG && !isBase60Float(s) } - if tag == "" && (rtag != yaml_STR_TAG || isBase60Float(s)) { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } else if strings.Contains(s, "\n") { + // Note: it's possible for user code to emit invalid YAML + // if they explicitly specify a tag and a string containing + // text that's incompatible with that tag. + switch { + case strings.Contains(s, "\n"): style = yaml_LITERAL_SCALAR_STYLE - } else { + case canUsePlain: style = yaml_PLAIN_SCALAR_STYLE + default: + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE } e.emitScalar(s, "", tag, style) } @@ -281,9 +354,20 @@ func (e *encoder) uintv(tag string, in reflect.Value) { e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) } +func (e *encoder) timev(tag string, in reflect.Value) { + t := in.Interface().(time.Time) + s := t.Format(time.RFC3339Nano) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + func (e *encoder) floatv(tag string, in reflect.Value) { - // FIXME: Handle 64 bits here. - s := strconv.FormatFloat(float64(in.Float()), 'g', -1, 32) + // Issue #352: When formatting, use the precision of the underlying value + precision := 64 + if in.Kind() == reflect.Float32 { + precision = 32 + } + + s := strconv.FormatFloat(in.Float(), 'g', -1, precision) switch s { case "+Inf": s = ".inf" diff --git a/vendor/gopkg.in/yaml.v2/go.mod b/vendor/gopkg.in/yaml.v2/go.mod new file mode 100644 index 0000000..1934e87 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/go.mod @@ -0,0 +1,5 @@ +module "gopkg.in/yaml.v2" + +require ( + "gopkg.in/check.v1" v0.0.0-20161208181325-20d25e280405 +) diff --git a/vendor/gopkg.in/yaml.v2/readerc.go b/vendor/gopkg.in/yaml.v2/readerc.go index f450791..7c1f5fa 100644 --- a/vendor/gopkg.in/yaml.v2/readerc.go +++ b/vendor/gopkg.in/yaml.v2/readerc.go @@ -93,9 +93,18 @@ func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { panic("read handler must be set") } + // [Go] This function was changed to guarantee the requested length size at EOF. + // The fact we need to do this is pretty awful, but the description above implies + // for that to be the case, and there are tests + // If the EOF flag is set and the raw buffer is empty, do nothing. if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) { - return true + // [Go] ACTUALLY! Read the documentation of this function above. + // This is just broken. To return true, we need to have the + // given length in the buffer. Not doing that means every single + // check that calls this function to make sure the buffer has a + // given length is Go) panicking; or C) accessing invalid memory. + //return true } // Return if the buffer contains enough characters. @@ -389,6 +398,15 @@ func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { break } } + // [Go] Read the documentation of this function above. To return true, + // we need to have the given length in the buffer. Not doing that means + // every single check that calls this function to make sure the buffer + // has a given length is Go) panicking; or C) accessing invalid memory. + // This happens here due to the EOF above breaking early. 
+ for buffer_len < length { + parser.buffer[buffer_len] = 0 + buffer_len++ + } parser.buffer = parser.buffer[:buffer_len] return true } diff --git a/vendor/gopkg.in/yaml.v2/resolve.go b/vendor/gopkg.in/yaml.v2/resolve.go index 232313c..6c151db 100644 --- a/vendor/gopkg.in/yaml.v2/resolve.go +++ b/vendor/gopkg.in/yaml.v2/resolve.go @@ -6,7 +6,7 @@ import ( "regexp" "strconv" "strings" - "unicode/utf8" + "time" ) type resolveMapItem struct { @@ -75,7 +75,7 @@ func longTag(tag string) string { func resolvableTag(tag string) bool { switch tag { - case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG: + case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG, yaml_TIMESTAMP_TAG: return true } return false @@ -92,6 +92,19 @@ func resolve(tag string, in string) (rtag string, out interface{}) { switch tag { case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG: return + case yaml_FLOAT_TAG: + if rtag == yaml_INT_TAG { + switch v := out.(type) { + case int64: + rtag = yaml_FLOAT_TAG + out = float64(v) + return + case int: + rtag = yaml_FLOAT_TAG + out = float64(v) + return + } + } } failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag)) }() @@ -125,6 +138,15 @@ func resolve(tag string, in string) (rtag string, out interface{}) { case 'D', 'S': // Int, float, or timestamp. + // Only try values as a timestamp if the value is unquoted or there's an explicit + // !!timestamp tag. + if tag == "" || tag == yaml_TIMESTAMP_TAG { + t, ok := parseTimestamp(in) + if ok { + return yaml_TIMESTAMP_TAG, t + } + } + plain := strings.Replace(in, "_", "", -1) intv, err := strconv.ParseInt(plain, 0, 64) if err == nil { @@ -158,28 +180,20 @@ func resolve(tag string, in string) (rtag string, out interface{}) { return yaml_INT_TAG, uintv } } else if strings.HasPrefix(plain, "-0b") { - intv, err := strconv.ParseInt(plain[3:], 2, 64) + intv, err := strconv.ParseInt("-" + plain[3:], 2, 64) if err == nil { - if intv == int64(int(intv)) { - return yaml_INT_TAG, -int(intv) + if true || intv == int64(int(intv)) { + return yaml_INT_TAG, int(intv) } else { - return yaml_INT_TAG, -intv + return yaml_INT_TAG, intv } } } - // XXX Handle timestamps here. - default: panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")") } } - if tag == yaml_BINARY_TAG { - return yaml_BINARY_TAG, in - } - if utf8.ValidString(in) { - return yaml_STR_TAG, in - } - return yaml_BINARY_TAG, encodeBase64(in) + return yaml_STR_TAG, in } // encodeBase64 encodes s as base64 that is broken up into multiple lines @@ -206,3 +220,39 @@ func encodeBase64(s string) string { } return string(out[:k]) } + +// This is a subset of the formats allowed by the regular expression +// defined at http://yaml.org/type/timestamp.html. +var allowedTimestampFormats = []string{ + "2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields. + "2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t". + "2006-1-2 15:4:5.999999999", // space separated with no time zone + "2006-1-2", // date only + // Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5" + // from the set of examples. +} + +// parseTimestamp parses s as a timestamp string and +// returns the timestamp and reports whether it succeeded. 
+// Timestamp formats are defined at http://yaml.org/type/timestamp.html +func parseTimestamp(s string) (time.Time, bool) { + // TODO write code to check all the formats supported by + // http://yaml.org/type/timestamp.html instead of using time.Parse. + + // Quick check: all date formats start with YYYY-. + i := 0 + for ; i < len(s); i++ { + if c := s[i]; c < '0' || c > '9' { + break + } + } + if i != 4 || i == len(s) || s[i] != '-' { + return time.Time{}, false + } + for _, format := range allowedTimestampFormats { + if t, err := time.Parse(format, s); err == nil { + return t, true + } + } + return time.Time{}, false +} diff --git a/vendor/gopkg.in/yaml.v2/scannerc.go b/vendor/gopkg.in/yaml.v2/scannerc.go index 0744844..077fd1d 100644 --- a/vendor/gopkg.in/yaml.v2/scannerc.go +++ b/vendor/gopkg.in/yaml.v2/scannerc.go @@ -871,12 +871,6 @@ func yaml_parser_save_simple_key(parser *yaml_parser_t) bool { required := parser.flow_level == 0 && parser.indent == parser.mark.column - // A simple key is required only when it is the first token in the current - // line. Therefore it is always allowed. But we add a check anyway. - if required && !parser.simple_key_allowed { - panic("should not happen") - } - // // If the current position may start a simple key, save it. // @@ -2475,6 +2469,10 @@ func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, si } } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + // Check if we are at the end of the scalar. if single { if parser.buffer[parser.buffer_pos] == '\'' { @@ -2487,10 +2485,6 @@ func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, si } // Consume blank characters. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { if is_blank(parser.buffer, parser.buffer_pos) { // Consume a space or a tab character. @@ -2592,19 +2586,10 @@ func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) b // Consume non-blank characters. for !is_blankz(parser.buffer, parser.buffer_pos) { - // Check for 'x:x' in the flow context. TODO: Fix the test "spec-08-13". - if parser.flow_level > 0 && - parser.buffer[parser.buffer_pos] == ':' && - !is_blankz(parser.buffer, parser.buffer_pos+1) { - yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", - start_mark, "found unexpected ':'") - return false - } - // Check for indicators that may end a plain scalar. if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) || (parser.flow_level > 0 && - (parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == ':' || + (parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || parser.buffer[parser.buffer_pos] == '}')) { @@ -2656,10 +2641,10 @@ func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) b for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { if is_blank(parser.buffer, parser.buffer_pos) { - // Check for tab character that abuse indentation. + // Check for tab characters that abuse indentation. 
if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) { yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", - start_mark, "found a tab character that violate indentation") + start_mark, "found a tab character that violates indentation") return false } diff --git a/vendor/gopkg.in/yaml.v2/sorter.go b/vendor/gopkg.in/yaml.v2/sorter.go index 5958822..4c45e66 100644 --- a/vendor/gopkg.in/yaml.v2/sorter.go +++ b/vendor/gopkg.in/yaml.v2/sorter.go @@ -51,6 +51,15 @@ func (l keyList) Less(i, j int) bool { } var ai, bi int var an, bn int64 + if ar[i] == '0' || br[i] == '0' { + for j := i-1; j >= 0 && unicode.IsDigit(ar[j]); j-- { + if ar[j] != '0' { + an = 1 + bn = 1 + break + } + } + } for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { an = an*10 + int64(ar[ai]-'0') } diff --git a/vendor/gopkg.in/yaml.v2/writerc.go b/vendor/gopkg.in/yaml.v2/writerc.go index 190362f..a2dde60 100644 --- a/vendor/gopkg.in/yaml.v2/writerc.go +++ b/vendor/gopkg.in/yaml.v2/writerc.go @@ -18,72 +18,9 @@ func yaml_emitter_flush(emitter *yaml_emitter_t) bool { return true } - // If the output encoding is UTF-8, we don't need to recode the buffer. - if emitter.encoding == yaml_UTF8_ENCODING { - if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { - return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) - } - emitter.buffer_pos = 0 - return true - } - - // Recode the buffer into the raw buffer. - var low, high int - if emitter.encoding == yaml_UTF16LE_ENCODING { - low, high = 0, 1 - } else { - high, low = 1, 0 - } - - pos := 0 - for pos < emitter.buffer_pos { - // See the "reader.c" code for more details on UTF-8 encoding. Note - // that we assume that the buffer contains a valid UTF-8 sequence. - - // Read the next UTF-8 character. - octet := emitter.buffer[pos] - - var w int - var value rune - switch { - case octet&0x80 == 0x00: - w, value = 1, rune(octet&0x7F) - case octet&0xE0 == 0xC0: - w, value = 2, rune(octet&0x1F) - case octet&0xF0 == 0xE0: - w, value = 3, rune(octet&0x0F) - case octet&0xF8 == 0xF0: - w, value = 4, rune(octet&0x07) - } - for k := 1; k < w; k++ { - octet = emitter.buffer[pos+k] - value = (value << 6) + (rune(octet) & 0x3F) - } - pos += w - - // Write the character. - if value < 0x10000 { - var b [2]byte - b[high] = byte(value >> 8) - b[low] = byte(value & 0xFF) - emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1]) - } else { - // Write the character using a surrogate pair (check "reader.c"). - var b [4]byte - value -= 0x10000 - b[high] = byte(0xD8 + (value >> 18)) - b[low] = byte((value >> 10) & 0xFF) - b[high+2] = byte(0xDC + ((value >> 8) & 0xFF)) - b[low+2] = byte(value & 0xFF) - emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1], b[2], b[3]) - } - } - - // Write the raw buffer. 
- if err := emitter.write_handler(emitter, emitter.raw_buffer); err != nil { + if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) } emitter.buffer_pos = 0 - emitter.raw_buffer = emitter.raw_buffer[:0] return true } diff --git a/vendor/gopkg.in/yaml.v2/yaml.go b/vendor/gopkg.in/yaml.v2/yaml.go index 5e3c2da..de85aa4 100644 --- a/vendor/gopkg.in/yaml.v2/yaml.go +++ b/vendor/gopkg.in/yaml.v2/yaml.go @@ -9,6 +9,7 @@ package yaml import ( "errors" "fmt" + "io" "reflect" "strings" "sync" @@ -81,12 +82,58 @@ func Unmarshal(in []byte, out interface{}) (err error) { } // UnmarshalStrict is like Unmarshal except that any fields that are found -// in the data that do not have corresponding struct members will result in +// in the data that do not have corresponding struct members, or mapping +// keys that are duplicates, will result in // an error. func UnmarshalStrict(in []byte, out interface{}) (err error) { return unmarshal(in, out, true) } +// A Decorder reads and decodes YAML values from an input stream. +type Decoder struct { + strict bool + parser *parser +} + +// NewDecoder returns a new decoder that reads from r. +// +// The decoder introduces its own buffering and may read +// data from r beyond the YAML values requested. +func NewDecoder(r io.Reader) *Decoder { + return &Decoder{ + parser: newParserFromReader(r), + } +} + +// SetStrict sets whether strict decoding behaviour is enabled when +// decoding items in the data (see UnmarshalStrict). By default, decoding is not strict. +func (dec *Decoder) SetStrict(strict bool) { + dec.strict = strict +} + +// Decode reads the next YAML-encoded value from its input +// and stores it in the value pointed to by v. +// +// See the documentation for Unmarshal for details about the +// conversion of YAML into a Go value. +func (dec *Decoder) Decode(v interface{}) (err error) { + d := newDecoder(dec.strict) + defer handleErr(&err) + node := dec.parser.parse() + if node == nil { + return io.EOF + } + out := reflect.ValueOf(v) + if out.Kind() == reflect.Ptr && !out.IsNil() { + out = out.Elem() + } + d.unmarshal(node, out) + if len(d.terrors) > 0 { + return &TypeError{d.terrors} + } + return nil +} + func unmarshal(in []byte, out interface{}, strict bool) (err error) { defer handleErr(&err) d := newDecoder(strict) @@ -110,8 +157,8 @@ func unmarshal(in []byte, out interface{}, strict bool) (err error) { // of the generated document will reflect the structure of the value itself. // Maps and pointers (to struct, string, int, etc) are accepted as the in value. // -// Struct fields are only unmarshalled if they are exported (have an upper case -// first letter), and are unmarshalled using the field name lowercased as the +// Struct fields are only marshalled if they are exported (have an upper case +// first letter), and are marshalled using the field name lowercased as the // default key. Custom keys may be defined via the "yaml" name in the field // tag: the content preceding the first comma is used as the key, and the // following comma-separated options are used to tweak the marshalling process. @@ -125,7 +172,10 @@ func unmarshal(in []byte, out interface{}, strict bool) (err error) { // // omitempty Only include the field if it's not set to the zero // value for the type or to empty slices or maps. -// Does not apply to zero valued structs. 
+// Zero valued structs will be omitted if all their public +// fields are zero, unless they implement an IsZero +// method (see the IsZeroer interface type), in which +// case the field will be included if that method returns true. // // flow Marshal using a flow style (useful for structs, // sequences and maps). @@ -150,12 +200,47 @@ func Marshal(in interface{}) (out []byte, err error) { defer handleErr(&err) e := newEncoder() defer e.destroy() - e.marshal("", reflect.ValueOf(in)) + e.marshalDoc("", reflect.ValueOf(in)) e.finish() out = e.out return } +// An Encoder writes YAML values to an output stream. +type Encoder struct { + encoder *encoder +} + +// NewEncoder returns a new encoder that writes to w. +// The Encoder should be closed after use to flush all data +// to w. +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{ + encoder: newEncoderWithWriter(w), + } +} + +// Encode writes the YAML encoding of v to the stream. +// If multiple items are encoded to the stream, the +// second and subsequent document will be preceded +// with a "---" document separator, but the first will not. +// +// See the documentation for Marshal for details about the conversion of Go +// values to YAML. +func (e *Encoder) Encode(v interface{}) (err error) { + defer handleErr(&err) + e.encoder.marshalDoc("", reflect.ValueOf(v)) + return nil +} + +// Close closes the encoder by writing any remaining data. +// It does not write a stream terminating string "...". +func (e *Encoder) Close() (err error) { + defer handleErr(&err) + e.encoder.finish() + return nil +} + func handleErr(err *error) { if v := recover(); v != nil { if e, ok := v.(yamlError); ok { @@ -211,6 +296,9 @@ type fieldInfo struct { Num int OmitEmpty bool Flow bool + // Id holds the unique field identifier, so we can cheaply + // check for field duplicates without maintaining an extra map. + Id int // Inline holds the field index if the field is part of an inlined struct. Inline []int @@ -290,6 +378,7 @@ func getStructInfo(st reflect.Type) (*structInfo, error) { } else { finfo.Inline = append([]int{i}, finfo.Inline...) } + finfo.Id = len(fieldsList) fieldsMap[finfo.Key] = finfo fieldsList = append(fieldsList, finfo) } @@ -311,11 +400,16 @@ func getStructInfo(st reflect.Type) (*structInfo, error) { return nil, errors.New(msg) } + info.Id = len(fieldsList) fieldsList = append(fieldsList, info) fieldsMap[info.Key] = info } - sinfo = &structInfo{fieldsMap, fieldsList, inlineMap} + sinfo = &structInfo{ + FieldsMap: fieldsMap, + FieldsList: fieldsList, + InlineMap: inlineMap, + } fieldMapMutex.Lock() structMap[st] = sinfo @@ -323,8 +417,23 @@ func getStructInfo(st reflect.Type) (*structInfo, error) { return sinfo, nil } +// IsZeroer is used to check whether an object is zero to +// determine whether it should be omitted when marshaling +// with the omitempty flag. One notable implementation +// is time.Time. 
+type IsZeroer interface { + IsZero() bool +} + func isZero(v reflect.Value) bool { - switch v.Kind() { + kind := v.Kind() + if z, ok := v.Interface().(IsZeroer); ok { + if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() { + return true + } + return z.IsZero() + } + switch kind { case reflect.String: return len(v.String()) == 0 case reflect.Interface, reflect.Ptr: diff --git a/vendor/gopkg.in/yaml.v2/yamlh.go b/vendor/gopkg.in/yaml.v2/yamlh.go index 3caeca0..e25cee5 100644 --- a/vendor/gopkg.in/yaml.v2/yamlh.go +++ b/vendor/gopkg.in/yaml.v2/yamlh.go @@ -1,6 +1,7 @@ package yaml import ( + "fmt" "io" ) @@ -239,6 +240,27 @@ const ( yaml_MAPPING_END_EVENT // A MAPPING-END event. ) +var eventStrings = []string{ + yaml_NO_EVENT: "none", + yaml_STREAM_START_EVENT: "stream start", + yaml_STREAM_END_EVENT: "stream end", + yaml_DOCUMENT_START_EVENT: "document start", + yaml_DOCUMENT_END_EVENT: "document end", + yaml_ALIAS_EVENT: "alias", + yaml_SCALAR_EVENT: "scalar", + yaml_SEQUENCE_START_EVENT: "sequence start", + yaml_SEQUENCE_END_EVENT: "sequence end", + yaml_MAPPING_START_EVENT: "mapping start", + yaml_MAPPING_END_EVENT: "mapping end", +} + +func (e yaml_event_type_t) String() string { + if e < 0 || int(e) >= len(eventStrings) { + return fmt.Sprintf("unknown event %d", e) + } + return eventStrings[e] +} + // The event structure. type yaml_event_t struct { @@ -521,9 +543,9 @@ type yaml_parser_t struct { read_handler yaml_read_handler_t // Read handler. - input_file io.Reader // File input data. - input []byte // String input data. - input_pos int + input_reader io.Reader // File input data. + input []byte // String input data. + input_pos int eof bool // EOF flag @@ -632,7 +654,7 @@ type yaml_emitter_t struct { write_handler yaml_write_handler_t // Write handler. output_buffer *[]byte // String output data. - output_file io.Writer // File output data. + output_writer io.Writer // File output data. buffer []byte // The working buffer. buffer_pos int // The current position of the buffer. diff --git a/vendor/modules.txt b/vendor/modules.txt new file mode 100644 index 0000000..504fdf1 --- /dev/null +++ b/vendor/modules.txt @@ -0,0 +1,16 @@ +# github.com/davecgh/go-spew v1.1.0 +github.com/davecgh/go-spew/spew +# github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e +## explicit +# github.com/pmezard/go-difflib v1.0.0 +github.com/pmezard/go-difflib/difflib +# github.com/stretchr/testify v1.4.0 +## explicit +github.com/stretchr/testify/assert +github.com/stretchr/testify/require +github.com/stretchr/testify/suite +# gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f +## explicit +# gopkg.in/yaml.v2 v2.2.2 +## explicit +gopkg.in/yaml.v2 diff --git a/version b/version index 88c5fb8..347f583 100644 --- a/version +++ b/version @@ -1 +1 @@ -1.4.0 +1.4.1
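The resolve.go hunks above make unquoted scalars that match the allowed timestamp formats resolve to the !!timestamp tag, so they surface as time.Time values when decoding into interface{}. Below is a minimal sketch of that behaviour, assuming the vendored yaml.v2 v2.2.2; the sample document and key names are invented for illustration and are not taken from Jobber or from this patch.

package main

// Minimal sketch (invented example, not part of this patch): with the
// timestamp resolution added in resolve.go, unquoted scalars that match the
// allowed formats decode to time.Time, while quoted scalars stay strings.

import (
	"fmt"
	"log"
	"time"

	"gopkg.in/yaml.v2"
)

func main() {
	doc := []byte("started: 2006-01-02T15:04:05Z\nlabel: \"2006-01-02\"\n")

	var m map[string]interface{}
	if err := yaml.Unmarshal(doc, &m); err != nil {
		log.Fatal(err)
	}

	_, startedIsTime := m["started"].(time.Time) // unquoted: resolved via parseTimestamp
	_, labelIsTime := m["label"].(time.Time)     // quoted: left as a plain string
	fmt.Println(startedIsTime, labelIsTime)      // expected output: true false
}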
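The yaml.go hunks above add the streaming Decoder/Encoder API, duplicate-key detection in strict mode, and IsZeroer-aware omitempty handling. The sketch below shows how those pieces fit together, again assuming the vendored v2.2.2 behaviour (including its time.Time encoding); the Spec type and sample documents are invented for illustration and are not part of Jobber or of this patch.

package main

// Minimal sketch (invented example, not part of this patch): the streaming
// Decoder/Encoder added in yaml.go, strict decoding, and the IsZeroer-aware
// "omitempty" handling.

import (
	"fmt"
	"io"
	"log"
	"os"
	"strings"
	"time"

	"gopkg.in/yaml.v2"
)

// Spec is a made-up type used only to exercise the API.
type Spec struct {
	Name string    `yaml:"name"`
	When time.Time `yaml:"when,omitempty"` // time.Time implements IsZero, so the zero value is omitted
}

func main() {
	// Decode a multi-document stream; Decode returns io.EOF once the input is exhausted.
	dec := yaml.NewDecoder(strings.NewReader("name: first\n---\nname: second\n"))
	dec.SetStrict(true) // also rejects unknown fields and duplicate mapping keys
	for {
		var s Spec
		if err := dec.Decode(&s); err == io.EOF {
			break
		} else if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("decoded: %+v\n", s)
	}

	// Encode several documents to one stream; the second and later documents
	// are preceded by a "---" separator, and Close flushes any buffered output.
	enc := yaml.NewEncoder(os.Stdout)
	if err := enc.Encode(Spec{Name: "zero-time-omitted"}); err != nil {
		log.Fatal(err)
	}
	if err := enc.Encode(Spec{Name: "time-kept", When: time.Date(2020, 1, 2, 0, 0, 0, 0, time.UTC)}); err != nil {
		log.Fatal(err)
	}
	if err := enc.Close(); err != nil {
		log.Fatal(err)
	}
}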