diff --git a/Dockerfile.test b/Dockerfile.test new file mode 100644 index 000000000..498d04834 --- /dev/null +++ b/Dockerfile.test @@ -0,0 +1,5 @@ +FROM scratch + +ADD bin/arangodb_operator_test /usr/bin/ + +ENTRYPOINT [ "/usr/bin/arangodb_operator_test" ] \ No newline at end of file diff --git a/Jenkinsfile.groovy b/Jenkinsfile.groovy new file mode 100644 index 000000000..eebb6c834 --- /dev/null +++ b/Jenkinsfile.groovy @@ -0,0 +1,70 @@ +def notifySlack(String buildStatus = 'STARTED') { + // Build status of null means success. + buildStatus = buildStatus ?: 'SUCCESS' + + def color + + if (buildStatus == 'STARTED') { + color = '#D4DADF' + } else if (buildStatus == 'SUCCESS') { + color = '#BDFFC3' + } else if (buildStatus == 'UNSTABLE') { + color = '#FFFE89' + } else { + color = '#FF9FA1' + } + + def msg = "${buildStatus}: `${env.JOB_NAME}` #${env.BUILD_NUMBER}: ${env.GIT_COMMIT}\n${env.BUILD_URL}" + + slackSend(color: color, channel: '#status-k8s', message: msg) +} + +pipeline { + options { + buildDiscarder(logRotator(daysToKeepStr: '7', numToKeepStr: '10')) + } + agent any + parameters { + string(name: 'KUBECONFIG', defaultValue: '/home/jenkins/.kube/scw-183a3b', description: 'KUBECONFIG controls which k8s cluster is used', ) + string(name: 'TESTNAMESPACE', defaultValue: 'jenkins', description: 'TESTNAMESPACE sets the kubernetes namespace to run tests in (this must be short!!)', ) + } + stages { + stage('Build') { + steps { + timestamps { + withEnv([ + "IMAGETAG=${env.GIT_COMMIT}", + ]) { + sh "make" + } + } + } + } + stage('Test') { + steps { + timestamps { + lock("${params.TESTNAMESPACE}-${env.GIT_COMMIT}") { + withEnv([ + "KUBECONFIG=${params.KUBECONFIG}", + "TESTNAMESPACE=${params.TESTNAMESPACE}-${env.GIT_COMMIT}", + "IMAGETAG=${env.GIT_COMMIT}", + "PUSHIMAGES=1", + ]) { + sh "make run-tests" + } + } + } + } + } + } + + post { + failure { + notifySlack('FAILURE') + } + + success { + notifySlack('SUCCESS') + } + } +} diff --git a/Makefile b/Makefile index 
15b8d4bd1..e946862b8 100644 --- a/Makefile +++ b/Makefile @@ -27,16 +27,31 @@ PULSAR := $(GOBUILDDIR)/bin/pulsar$(shell go env GOEXE) ifndef DOCKERNAMESPACE DOCKERNAMESPACE := arangodb endif -ifndef DOCKERFILE - DOCKERFILE := Dockerfile - #DOCKERFILE := Dockerfile.debug +DOCKERFILE := Dockerfile +DOCKERTESTFILE := Dockerfile.test + +ifdef IMAGETAG + IMAGESUFFIX := ":$(IMAGETAG)" +endif + +ifndef OPERATORIMAGE + OPERATORIMAGE := $(DOCKERNAMESPACE)/arangodb-operator$(IMAGESUFFIX) +endif +ifndef TESTIMAGE + TESTIMAGE := $(DOCKERNAMESPACE)/arangodb-operator-test$(IMAGESUFFIX) endif BINNAME := $(PROJECT) BIN := $(BINDIR)/$(BINNAME) +TESTBINNAME := $(PROJECT)_test +TESTBIN := $(BINDIR)/$(TESTBINNAME) RELEASE := $(GOBUILDDIR)/bin/release GHRELEASE := $(GOBUILDDIR)/bin/github-release +ifndef TESTNAMESPACE + TESTNAMESPACE := arangodb-operator-tests +endif + SOURCES := $(shell find $(SRCDIR) -name '*.go' -not -path './test/*') .PHONY: all clean deps docker update-vendor update-generated verify-generated @@ -77,6 +92,7 @@ update-vendor: k8s.io/client-go/... \ k8s.io/gengo/args \ k8s.io/apiextensions-apiserver \ + github.com/arangodb/go-driver \ github.com/cenkalti/backoff \ github.com/dchest/uniuri \ github.com/dgrijalva/jwt-go \ @@ -122,11 +138,43 @@ $(BIN): $(GOBUILDDIR) $(SOURCES) go build -installsuffix cgo -ldflags "-X main.projectVersion=$(VERSION) -X main.projectBuild=$(COMMIT)" -o /usr/code/bin/$(BINNAME) $(REPOPATH) docker: $(BIN) - docker build -f $(DOCKERFILE) -t arangodb/arangodb-operator . + docker build -f $(DOCKERFILE) -t $(OPERATORIMAGE) . 
+ +# Testing + +$(TESTBIN): $(GOBUILDDIR) $(SOURCES) + @mkdir -p $(BINDIR) + docker run \ + --rm \ + -v $(SRCDIR):/usr/code \ + -e GOPATH=/usr/code/.gobuild \ + -e GOOS=linux \ + -e GOARCH=amd64 \ + -e CGO_ENABLED=0 \ + -w /usr/code/ \ + golang:$(GOVERSION) \ + go test -c -installsuffix cgo -ldflags "-X main.projectVersion=$(VERSION) -X main.projectBuild=$(COMMIT)" -o /usr/code/bin/$(TESTBINNAME) $(REPOPATH)/tests + +docker-test: $(TESTBIN) + docker build --quiet -f $(DOCKERTESTFILE) -t $(TESTIMAGE) . + +run-tests: docker-test +ifdef PUSHIMAGES + docker push $(OPERATORIMAGE) + docker push $(TESTIMAGE) +endif + $(ROOTDIR)/scripts/kube_delete_namespace.sh $(TESTNAMESPACE) + kubectl create namespace $(TESTNAMESPACE) + $(ROOTDIR)/examples/setup-rbac.sh --namespace=$(TESTNAMESPACE) + $(ROOTDIR)/scripts/kube_create_operator.sh $(TESTNAMESPACE) $(OPERATORIMAGE) + kubectl --namespace $(TESTNAMESPACE) run arangodb-operator-test -i --rm --quiet --restart=Never --image=$(TESTIMAGE) --env="TEST_NAMESPACE=$(TESTNAMESPACE)" -- -test.v + kubectl delete namespace $(TESTNAMESPACE) --ignore-not-found --now + +# Release building docker-push: docker ifneq ($(DOCKERNAMESPACE), arangodb) - docker tag arangodb/arangodb-operator $(DOCKERNAMESPACE)/arangodb-operator + docker tag $(OPERATORIMAGE) $(DOCKERNAMESPACE)/arangodb-operator endif docker push $(DOCKERNAMESPACE)/arangodb-operator @@ -161,7 +209,7 @@ minikube-start: minikube start --cpus=4 --memory=6144 delete-operator: - kubectl delete -f examples/deployment.yaml || true + kubectl delete -f examples/deployment.yaml --ignore-not-found redeploy-operator: delete-operator kubectl create -f examples/deployment.yaml diff --git a/deps/github.com/arangodb/go-driver/.envrc b/deps/github.com/arangodb/go-driver/.envrc new file mode 100644 index 000000000..a1a8fa6e2 --- /dev/null +++ b/deps/github.com/arangodb/go-driver/.envrc @@ -0,0 +1,8 @@ +export GOBUILDDIR=$(pwd)/.gobuild +export GOPATH=$GOBUILDDIR:$GOPATH +PATH_add $GOBUILDDIR/bin + +if [ 
! -e ${GOBUILDDIR} ]; then + mkdir -p ${GOBUILDDIR}/src/github.com/arangodb/ + ln -s ../../../.. ${GOBUILDDIR}/src/github.com/arangodb/go-driver +fi \ No newline at end of file diff --git a/deps/github.com/arangodb/go-driver/.gitignore b/deps/github.com/arangodb/go-driver/.gitignore new file mode 100644 index 000000000..3fd61e96c --- /dev/null +++ b/deps/github.com/arangodb/go-driver/.gitignore @@ -0,0 +1 @@ +.gobuild diff --git a/deps/github.com/arangodb/go-driver/.travis.yml b/deps/github.com/arangodb/go-driver/.travis.yml new file mode 100644 index 000000000..5ab1337f6 --- /dev/null +++ b/deps/github.com/arangodb/go-driver/.travis.yml @@ -0,0 +1,14 @@ +sudo: required + +services: + - docker + +language: go + +env: + - TEST_SUITE=run-tests-http + - TEST_SUITE=run-tests-single ARANGODB=arangodb:3.1 + - TEST_SUITE=run-tests-single ARANGODB=arangodb/arangodb:latest + - TEST_SUITE=run-tests-single ARANGODB=arangodb/arangodb-preview:latest + +script: make $TEST_SUITE diff --git a/deps/github.com/arangodb/go-driver/.vscode/settings.json b/deps/github.com/arangodb/go-driver/.vscode/settings.json new file mode 100644 index 000000000..6326420f4 --- /dev/null +++ b/deps/github.com/arangodb/go-driver/.vscode/settings.json @@ -0,0 +1,37 @@ +// Place your settings in this file to overwrite default and user settings. 
+{ + "fileHeaderComment.parameter":{ + "*":{ + "commentprefix": "//", + "company": "ArangoDB GmbH, Cologne, Germany", + "author": "Ewout Prangsma" + } + }, + "fileHeaderComment.template":{ + "*":[ + "${commentprefix} ", + "${commentprefix} DISCLAIMER", + "${commentprefix} ", + "${commentprefix} Copyright ${year} ArangoDB GmbH, Cologne, Germany", + "${commentprefix} ", + "${commentprefix} Licensed under the Apache License, Version 2.0 (the \"License\");", + "${commentprefix} you may not use this file except in compliance with the License.", + "${commentprefix} You may obtain a copy of the License at", + "${commentprefix} ", + "${commentprefix} http://www.apache.org/licenses/LICENSE-2.0", + "${commentprefix} ", + "${commentprefix} Unless required by applicable law or agreed to in writing, software", + "${commentprefix} distributed under the License is distributed on an \"AS IS\" BASIS,", + "${commentprefix} WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", + "${commentprefix} See the License for the specific language governing permissions and", + "${commentprefix} limitations under the License.", + "${commentprefix} ", + "${commentprefix} Copyright holder is ArangoDB GmbH, Cologne, Germany", + "${commentprefix} ", + "${commentprefix} Author ${author}", + "${commentprefix} ", + "" + ] + }, + "go.gopath": "${workspaceRoot}/.gobuild" +} \ No newline at end of file diff --git a/deps/github.com/arangodb/go-driver/CONTRIBUTING.md b/deps/github.com/arangodb/go-driver/CONTRIBUTING.md new file mode 100644 index 000000000..6a0faffc2 --- /dev/null +++ b/deps/github.com/arangodb/go-driver/CONTRIBUTING.md @@ -0,0 +1,63 @@ +Contributing +============ + +We welcome bug fixes and patches from 3rd party contributors. Please +see the [Contributor Agreement](https://www.arangodb.com/community#contribute) +for details. 
+ + Please follow these guidelines if you want to contribute to ArangoDB: + + Reporting Bugs + -------------- + + When reporting bugs, please use our issue tracker on GitHub. Please make sure + to include the version number of ArangoDB and the commit hash of the go-driver in your bug report, along with the + platform you are using (e.g. `Linux OpenSuSE x86_64`). Please also include the + ArangoDB startup mode (daemon, console, supervisor mode), type of connection used + towards ArangoDB plus any special configuration. + This will help us reproduce and find bugs. + + Please also take the time to check there are no similar/identical issues open + yet. + + Contributing features, documentation, tests + ------------------------------------------- + + * Create a new branch in your fork, based on the **master** branch + + * Develop and test your modifications there + + * Commit as you like, but preferably in logical chunks. Use meaningful commit + messages and make sure you do not commit unnecessary files (e.g. object + files). It is normally a good idea to reference the issue number from the + commit message so the issues will get updated automatically with comments. + + * If the modifications change any documented behavior or add new features, + document the changes and provide application tests in the `test` folder. + All documentation should be written in American English (AE). + + * When done, run the complete test suite (`make run-tests`) and make sure all tests pass. + + * When finished, push the changes to your GitHub repository and send a pull + request from your fork to the ArangoDB repository. Please make sure to select + the appropriate branches there. This will most likely be **master**. + + * You must use the Apache License for your changes and have signed our + [CLA](https://www.arangodb.com/documents/cla.pdf). We cannot accept pull requests + from contributors that didn't sign the CLA. + + * Please let us know if you plan to work on a ticket. 
This way we can make sure + redundant work is avoided. + + +Additional Resources +-------------------- + +* [ArangoDB website](https://www.arangodb.com/) + +* [ArangoDB on Twitter](https://twitter.com/arangodb) + +* [General GitHub documentation](https://help.github.com/) + +* [GitHub pull request documentation](https://help.github.com/send-pull-requests/) + diff --git a/deps/github.com/arangodb/go-driver/LICENSE b/deps/github.com/arangodb/go-driver/LICENSE new file mode 100644 index 000000000..b8ff39b5a --- /dev/null +++ b/deps/github.com/arangodb/go-driver/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright 2017 ArangoDB GmbH + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/deps/github.com/arangodb/go-driver/Makefile b/deps/github.com/arangodb/go-driver/Makefile new file mode 100644 index 000000000..d42e0aa3b --- /dev/null +++ b/deps/github.com/arangodb/go-driver/Makefile @@ -0,0 +1,362 @@ +PROJECT := go-driver +SCRIPTDIR := $(shell pwd) +ROOTDIR := $(shell cd $(SCRIPTDIR) && pwd) + +GOBUILDDIR := $(SCRIPTDIR)/.gobuild +GOVERSION := 1.9.2-alpine +TMPDIR := $(GOBUILDDIR) + +ifndef ARANGODB + ARANGODB := arangodb/arangodb:latest +endif +ifndef STARTER + STARTER := arangodb/arangodb-starter:latest +endif + +ifndef TESTOPTIONS + TESTOPTIONS := +endif +ifdef VERBOSE + TESTVERBOSEOPTIONS := -v +endif + +ORGPATH := github.com/arangodb +ORGDIR := $(GOBUILDDIR)/src/$(ORGPATH) +REPONAME := $(PROJECT) +REPODIR := $(ORGDIR)/$(REPONAME) +REPOPATH := $(ORGPATH)/$(REPONAME) + +SOURCES := $(shell find . 
-name '*.go') + +# Test variables + +ifndef TESTCONTAINER + TESTCONTAINER := $(PROJECT)-test +endif +ifndef DBCONTAINER + DBCONTAINER := $(TESTCONTAINER)-db +endif + +ifeq ("$(TEST_AUTH)", "none") + ARANGOENV := -e ARANGO_NO_AUTH=1 + TEST_AUTHENTICATION := + TAGS := + TESTS := $(REPOPATH) $(REPOPATH)/test +else ifeq ("$(TEST_AUTH)", "rootpw") + ARANGOENV := -e ARANGO_ROOT_PASSWORD=rootpw + TEST_AUTHENTICATION := basic:root:rootpw + TAGS := -tags auth + TESTS := $(REPOPATH)/test +else ifeq ("$(TEST_AUTH)", "jwt") + ARANGOENV := -e ARANGO_ROOT_PASSWORD=rootpw + TEST_AUTHENTICATION := jwt:root:rootpw + TAGS := -tags auth + TESTS := $(REPOPATH)/test + JWTSECRET := testing + JWTSECRETFILE := $(TMPDIR)/$(TESTCONTAINER)-jwtsecret + ARANGOVOL := -v "$(JWTSECRETFILE):/jwtsecret" + ARANGOARGS := --server.jwt-secret=/jwtsecret +endif + +ifeq ("$(TEST_MODE)", "single") + TEST_NET := container:$(DBCONTAINER) + TEST_ENDPOINTS := http://localhost:8529 +else + TEST_NET := container:$(TESTCONTAINER)-ns + TEST_ENDPOINTS := http://localhost:7001 + TESTS := $(REPOPATH)/test +ifeq ("$(TEST_AUTH)", "rootpw") + CLUSTERENV := JWTSECRET=testing + TEST_AUTHENTICATION := basic:root: +endif +ifeq ("$(TEST_AUTH)", "jwt") + CLUSTERENV := JWTSECRET=testing + TEST_AUTHENTICATION := jwt:root: +endif +ifeq ("$(TEST_SSL)", "auto") + CLUSTERENV := SSL=auto $(CLUSTERENV) + TEST_ENDPOINTS = https://localhost:7001 +endif +endif + +ifeq ("$(TEST_CONNECTION)", "vst") + TESTS := $(REPOPATH)/test +ifndef TEST_CONTENT_TYPE + TEST_CONTENT_TYPE := vpack +endif +endif + +ifeq ("$(TEST_BENCHMARK)", "true") + TAGS := -bench=. 
-run=notests -cpu=1,2,4 + TESTS := $(REPOPATH)/test +endif + +ifdef TEST_ENDPOINTS_OVERRIDE + TEST_NET := host + TEST_ENDPOINTS := $(TEST_ENDPOINTS_OVERRIDE) +endif + +ifdef ENABLE_VST11 + VST11_SINGLE_TESTS := run-tests-single-vst-1.1 + VST11_RESILIENTSINGLE_TESTS := run-tests-resilientsingle-vst-1.1 + VST11_CLUSTER_TESTS := run-tests-cluster-vst-1.1 +endif + +.PHONY: all build clean run-tests + +all: build + +build: $(GOBUILDDIR) $(SOURCES) + GOPATH=$(GOBUILDDIR) go build -v $(REPOPATH) $(REPOPATH)/http $(REPOPATH)/vst + +clean: + rm -Rf $(GOBUILDDIR) + +$(GOBUILDDIR): + @mkdir -p $(ORGDIR) + @rm -f $(REPODIR) && ln -s ../../../.. $(REPODIR) + GOPATH=$(GOBUILDDIR) go get github.com/arangodb/go-velocypack + +run-tests: run-tests-http run-tests-single run-tests-resilientsingle run-tests-cluster + +# Tests of HTTP package +run-tests-http: $(GOBUILDDIR) + @docker run \ + --rm \ + -v $(ROOTDIR):/usr/code \ + -e GOPATH=/usr/code/.gobuild \ + -w /usr/code/ \ + golang:$(GOVERSION) \ + go test $(TESTOPTIONS) $(REPOPATH)/http + +# Single server tests +run-tests-single: run-tests-single-json run-tests-single-vpack run-tests-single-vst-1.0 $(VST11_SINGLE_TESTS) + +run-tests-single-json: run-tests-single-json-with-auth run-tests-single-json-no-auth + +run-tests-single-vpack: run-tests-single-vpack-with-auth run-tests-single-vpack-no-auth + +run-tests-single-vst-1.0: run-tests-single-vst-1.0-with-auth run-tests-single-vst-1.0-no-auth + +run-tests-single-vst-1.1: run-tests-single-vst-1.1-with-auth run-tests-single-vst-1.1-jwt-auth run-tests-single-vst-1.1-no-auth + +run-tests-single-json-no-auth: + @echo "Single server, HTTP+JSON, no authentication" + @${MAKE} TEST_MODE="single" TEST_AUTH="none" TEST_CONTENT_TYPE="json" __run_tests + +run-tests-single-vpack-no-auth: + @echo "Single server, HTTP+Velocypack, no authentication" + @${MAKE} TEST_MODE="single" TEST_AUTH="none" TEST_CONTENT_TYPE="vpack" __run_tests + +run-tests-single-vst-1.0-no-auth: + @echo "Single server, 
Velocystream 1.0, no authentication" + @${MAKE} TEST_MODE="single" TEST_AUTH="none" TEST_CONNECTION="vst" TEST_CVERSION="1.0" __run_tests + +run-tests-single-vst-1.1-no-auth: + @echo "Single server, Velocystream 1.1, no authentication" + @${MAKE} TEST_MODE="single" TEST_AUTH="none" TEST_CONNECTION="vst" TEST_CVERSION="1.1" __run_tests + +run-tests-single-json-with-auth: + @echo "Single server, HTTP+JSON, with authentication" + @${MAKE} TEST_MODE="single" TEST_AUTH="rootpw" TEST_CONTENT_TYPE="json" __run_tests + +run-tests-single-vpack-with-auth: + @echo "Single server, HTTP+Velocypack, with authentication" + @${MAKE} TEST_MODE="single" TEST_AUTH="rootpw" TEST_CONTENT_TYPE="vpack" __run_tests + +run-tests-single-vst-1.0-with-auth: + @echo "Single server, Velocystream 1.0, with authentication" + @${MAKE} TEST_MODE="single" TEST_AUTH="rootpw" TEST_CONNECTION="vst" TEST_CVERSION="1.0" __run_tests + +run-tests-single-vst-1.1-with-auth: + @echo "Single server, Velocystream 1.1, with authentication" + @${MAKE} TEST_MODE="single" TEST_AUTH="rootpw" TEST_CONNECTION="vst" TEST_CVERSION="1.1" __run_tests + +run-tests-single-vst-1.1-jwt-auth: + @echo "Single server, Velocystream 1.1, JWT authentication" + @${MAKE} TEST_MODE="single" TEST_AUTH="jwt" TEST_CONNECTION="vst" TEST_CVERSION="1.1" __run_tests + +# ResilientSingle server tests +run-tests-resilientsingle: run-tests-resilientsingle-json run-tests-resilientsingle-vpack run-tests-resilientsingle-vst-1.0 $(VST11_RESILIENTSINGLE_TESTS) + +run-tests-resilientsingle-json: run-tests-resilientsingle-json-with-auth run-tests-resilientsingle-json-no-auth + +run-tests-resilientsingle-vpack: run-tests-resilientsingle-vpack-with-auth run-tests-resilientsingle-vpack-no-auth + +run-tests-resilientsingle-vst-1.0: run-tests-resilientsingle-vst-1.0-with-auth run-tests-resilientsingle-vst-1.0-no-auth + +run-tests-resilientsingle-vst-1.1: run-tests-resilientsingle-vst-1.1-with-auth run-tests-resilientsingle-vst-1.1-jwt-auth 
run-tests-resilientsingle-vst-1.1-no-auth + +run-tests-resilientsingle-json-no-auth: + @echo "Resilient Single server, HTTP+JSON, no authentication" + @${MAKE} TEST_MODE="resilientsingle" TEST_AUTH="none" TEST_CONTENT_TYPE="json" __run_tests + +run-tests-resilientsingle-vpack-no-auth: + @echo "Resilient Single server, HTTP+Velocypack, no authentication" + @${MAKE} TEST_MODE="resilientsingle" TEST_AUTH="none" TEST_CONTENT_TYPE="vpack" __run_tests + +run-tests-resilientsingle-vst-1.0-no-auth: + @echo "Resilient Single server, Velocystream 1.0, no authentication" + @${MAKE} TEST_MODE="resilientsingle" TEST_AUTH="none" TEST_CONNECTION="vst" TEST_CVERSION="1.0" __run_tests + +run-tests-resilientsingle-vst-1.1-no-auth: + @echo "Resilient Single server, Velocystream 1.1, no authentication" + @${MAKE} TEST_MODE="resilientsingle" TEST_AUTH="none" TEST_CONNECTION="vst" TEST_CVERSION="1.1" __run_tests + +run-tests-resilientsingle-json-with-auth: + @echo "Resilient Single server, HTTP+JSON, with authentication" + @${MAKE} TEST_MODE="resilientsingle" TEST_AUTH="rootpw" TEST_CONTENT_TYPE="json" __run_tests + +run-tests-resilientsingle-vpack-with-auth: + @echo "Resilient Single server, HTTP+Velocypack, with authentication" + @${MAKE} TEST_MODE="resilientsingle" TEST_AUTH="rootpw" TEST_CONTENT_TYPE="vpack" __run_tests + +run-tests-resilientsingle-vst-1.0-with-auth: + @echo "Resilient Single server, Velocystream 1.0, with authentication" + @${MAKE} TEST_MODE="resilientsingle" TEST_AUTH="rootpw" TEST_CONNECTION="vst" TEST_CVERSION="1.0" __run_tests + +run-tests-resilientsingle-vst-1.1-with-auth: + @echo "Resilient Single server, Velocystream 1.1, with authentication" + @${MAKE} TEST_MODE="resilientsingle" TEST_AUTH="rootpw" TEST_CONNECTION="vst" TEST_CVERSION="1.1" __run_tests + +run-tests-resilientsingle-vst-1.1-jwt-auth: + @echo "Resilient Single server, Velocystream 1.1, JWT authentication" + @${MAKE} TEST_MODE="resilientsingle" TEST_AUTH="jwt" TEST_CONNECTION="vst" 
TEST_CVERSION="1.1" __run_tests + +# Cluster mode tests +run-tests-cluster: run-tests-cluster-json run-tests-cluster-vpack run-tests-cluster-vst-1.0 $(VST11_CLUSTER_TESTS) + +run-tests-cluster-json: run-tests-cluster-json-no-auth run-tests-cluster-json-with-auth run-tests-cluster-json-ssl + +run-tests-cluster-vpack: run-tests-cluster-vpack-no-auth run-tests-cluster-vpack-with-auth run-tests-cluster-vpack-ssl + +run-tests-cluster-vst-1.0: run-tests-cluster-vst-1.0-no-auth run-tests-cluster-vst-1.0-with-auth run-tests-cluster-vst-1.0-ssl + +run-tests-cluster-vst-1.1: run-tests-cluster-vst-1.1-no-auth run-tests-cluster-vst-1.1-with-auth run-tests-cluster-vst-1.1-ssl + +run-tests-cluster-json-no-auth: $(GOBUILDDIR) + @echo "Cluster server, JSON, no authentication" + @${MAKE} TEST_MODE="cluster" TEST_AUTH="none" TEST_CONTENT_TYPE="json" __run_tests + +run-tests-cluster-vpack-no-auth: $(GOBUILDDIR) + @echo "Cluster server, Velocypack, no authentication" + @${MAKE} TEST_MODE="cluster" TEST_AUTH="none" TEST_CONTENT_TYPE="vpack" __run_tests + +run-tests-cluster-vst-1.0-no-auth: $(GOBUILDDIR) + @echo "Cluster server, Velocystream 1.0, no authentication" + @${MAKE} TEST_MODE="cluster" TEST_AUTH="none" TEST_CONNECTION="vst" TEST_CVERSION="1.0" __run_tests + +run-tests-cluster-vst-1.1-no-auth: $(GOBUILDDIR) + @echo "Cluster server, Velocystream 1.1, no authentication" + @${MAKE} TEST_MODE="cluster" TEST_AUTH="none" TEST_CONNECTION="vst" TEST_CVERSION="1.1" __run_tests + +run-tests-cluster-json-with-auth: $(GOBUILDDIR) + @echo "Cluster server, with authentication" + @${MAKE} TEST_MODE="cluster" TEST_AUTH="rootpw" TEST_CONTENT_TYPE="json" __run_tests + +run-tests-cluster-vpack-with-auth: $(GOBUILDDIR) + @echo "Cluster server, Velocypack, with authentication" + @${MAKE} TEST_MODE="cluster" TEST_AUTH="rootpw" TEST_CONTENT_TYPE="vpack" __run_tests + +run-tests-cluster-vst-1.0-with-auth: $(GOBUILDDIR) + @echo "Cluster server, Velocystream 1.0, with authentication" + @${MAKE} 
TEST_MODE="cluster" TEST_AUTH="rootpw" TEST_CONNECTION="vst" TEST_CVERSION="1.0" __run_tests + +run-tests-cluster-vst-1.1-with-auth: $(GOBUILDDIR) + @echo "Cluster server, Velocystream 1.1, with authentication" + @${MAKE} TEST_MODE="cluster" TEST_AUTH="rootpw" TEST_CONNECTION="vst" TEST_CVERSION="1.1" __run_tests + +run-tests-cluster-json-ssl: $(GOBUILDDIR) + @echo "Cluster server, SSL, with authentication" + @${MAKE} TEST_MODE="cluster" TEST_AUTH="rootpw" TEST_SSL="auto" TEST_CONTENT_TYPE="json" __run_tests + +run-tests-cluster-vpack-ssl: $(GOBUILDDIR) + @echo "Cluster server, Velocypack, SSL, with authentication" + @${MAKE} TEST_MODE="cluster" TEST_AUTH="rootpw" TEST_SSL="auto" TEST_CONTENT_TYPE="vpack" __run_tests + +run-tests-cluster-vst-1.0-ssl: $(GOBUILDDIR) + @echo "Cluster server, Velocystream 1.0, SSL, with authentication" + @${MAKE} TEST_MODE="cluster" TEST_AUTH="rootpw" TEST_SSL="auto" TEST_CONNECTION="vst" TEST_CVERSION="1.0" __run_tests + +run-tests-cluster-vst-1.1-ssl: $(GOBUILDDIR) + @echo "Cluster server, Velocystream 1.1, SSL, with authentication" + @${MAKE} TEST_MODE="cluster" TEST_AUTH="rootpw" TEST_SSL="auto" TEST_CONNECTION="vst" TEST_CVERSION="1.1" __run_tests + +# Internal test tasks +__run_tests: $(GOBUILDDIR) __test_prepare __test_go_test __test_cleanup + +__test_go_test: + docker run \ + --name=$(TESTCONTAINER) \ + --net=$(TEST_NET) \ + -v $(ROOTDIR):/usr/code \ + -e GOPATH=/usr/code/.gobuild \ + -e TEST_ENDPOINTS=$(TEST_ENDPOINTS) \ + -e TEST_AUTHENTICATION=$(TEST_AUTHENTICATION) \ + -e TEST_CONNECTION=$(TEST_CONNECTION) \ + -e TEST_CVERSION=$(TEST_CVERSION) \ + -e TEST_CONTENT_TYPE=$(TEST_CONTENT_TYPE) \ + -w /usr/code/ \ + golang:$(GOVERSION) \ + go test $(TAGS) $(TESTOPTIONS) $(TESTVERBOSEOPTIONS) $(TESTS) + +__test_prepare: +ifdef TEST_ENDPOINTS_OVERRIDE + @-docker rm -f -v $(TESTCONTAINER) &> /dev/null +else +ifdef JWTSECRET + echo "$JWTSECRET" > "${JWTSECRETFILE}" +endif +ifeq ("$(TEST_MODE)", "single") + @-docker rm -f -v 
$(DBCONTAINER) $(TESTCONTAINER) &> /dev/null + docker run -d --name $(DBCONTAINER) \ + $(ARANGOENV) $(ARANGOVOL) \ + $(ARANGODB) --log.level requests=debug --log.use-microtime true $(ARANGOARGS) +else + @-docker rm -f -v $(TESTCONTAINER) &> /dev/null + @TESTCONTAINER=$(TESTCONTAINER) ARANGODB=$(ARANGODB) STARTER=$(STARTER) STARTERMODE=$(TEST_MODE) TMPDIR=${GOBUILDDIR} $(CLUSTERENV) $(ROOTDIR)/test/cluster.sh start +endif +endif + +__test_cleanup: + @docker rm -f -v $(TESTCONTAINER) &> /dev/null +ifndef TEST_ENDPOINTS_OVERRIDE +ifeq ("$(TEST_MODE)", "single") + @docker rm -f -v $(DBCONTAINER) &> /dev/null +else + @TESTCONTAINER=$(TESTCONTAINER) ARANGODB=$(ARANGODB) STARTER=$(STARTER) STARTERMODE=$(TEST_MODE) $(ROOTDIR)/test/cluster.sh cleanup +endif +endif + @sleep 3 + + +run-tests-cluster-failover: $(GOBUILDDIR) + # Note that we use 127.0.0.1:7001.. as endpoints, so we force using IPv4 + # This is essential since we only block IPv4 ports in the test. + @echo "Cluster server, failover, no authentication" + @TESTCONTAINER=$(TESTCONTAINER) ARANGODB=$(ARANGODB) $(ROOTDIR)/test/cluster.sh start + GOPATH=$(GOBUILDDIR) go get github.com/coreos/go-iptables/iptables + docker run \ + --rm \ + --net=container:$(TESTCONTAINER)-ns \ + --privileged \ + -v $(ROOTDIR):/usr/code \ + -e GOPATH=/usr/code/.gobuild \ + -e TEST_ENDPOINTS=http://127.0.0.1:7001,http://127.0.0.1:7006,http://127.0.0.1:7011 \ + -e TEST_AUTHENTICATION=basic:root: \ + -w /usr/code/ \ + golang:$(GOVERSION) \ + /bin/sh -c 'apk add -U iptables && go test -run ".*Failover.*" -tags failover $(TESTOPTIONS) $(REPOPATH)/test' + @TESTCONTAINER=$(TESTCONTAINER) ARANGODB=$(ARANGODB) $(ROOTDIR)/test/cluster.sh cleanup + +run-tests-cluster-cleanup: + @TESTCONTAINER=$(TESTCONTAINER) ARANGODB=$(ARANGODB) $(ROOTDIR)/test/cluster.sh cleanup + +# Benchmarks +run-benchmarks-single-json-no-auth: + @echo "Benchmarks: Single server, JSON no authentication" + @${MAKE} TEST_MODE="single" TEST_AUTH="none" TEST_CONTENT_TYPE="json" 
TEST_BENCHMARK="true" __run_tests + +run-benchmarks-single-vpack-no-auth: + @echo "Benchmarks: Single server, Velocypack, no authentication" + @${MAKE} TEST_MODE="single" TEST_AUTH="none" TEST_CONTENT_TYPE="vpack" TEST_BENCHMARK="true" __run_tests diff --git a/deps/github.com/arangodb/go-driver/README.md b/deps/github.com/arangodb/go-driver/README.md new file mode 100644 index 000000000..a25ddad95 --- /dev/null +++ b/deps/github.com/arangodb/go-driver/README.md @@ -0,0 +1,416 @@ +# ArangoDB GO Driver. + +[![Build Status](https://travis-ci.org/arangodb/go-driver.svg?branch=master)](https://travis-ci.org/arangodb/go-driver) +[![GoDoc](https://godoc.org/github.com/arangodb/go-driver?status.svg)](http://godoc.org/github.com/arangodb/go-driver) + +API and implementation is considered stable, more protocols (Velocystream) are being added within the existing API. + +This project contains a Go driver for the [ArangoDB database](https://arangodb.com). + +## Supported versions + +- ArangoDB versions 3.1 and up. + - Single server & cluster setups + - With or without authentication +- Go 1.7 and up. + +## Go dependencies + +- None (Additional error libraries are supported). + +## Getting started + +To use the driver, first fetch the sources into your GOPATH. + +```sh +go get github.com/arangodb/go-driver +``` + +Using the driver, you always need to create a `Client`. +The following example shows how to create a `Client` for a single server +running on localhost. + +```go +import ( + "fmt" + + driver "github.com/arangodb/go-driver" + "github.com/arangodb/go-driver/http" +) + +... + +conn, err := http.NewConnection(http.ConnectionConfig{ + Endpoints: []string{"http://localhost:8529"}, +}) +if err != nil { + // Handle error +} +c, err := driver.NewClient(driver.ClientConfig{ + Connection: conn, +}) +if err != nil { + // Handle error +} +``` + +Once you have a `Client` you can access/create databases on the server, +access/create collections, graphs, documents and so on.
+ +The following example shows how to open an existing collection in an existing database +and create a new document in that collection. + +```go +// Open "examples_books" database +db, err := c.Database(nil, "examples_books") +if err != nil { + // Handle error +} + +// Open "books" collection +col, err := db.Collection(nil, "books") +if err != nil { + // Handle error +} + +// Create document +book := Book{ + Title: "ArangoDB Cookbook", + NoPages: 257, +} +meta, err := col.CreateDocument(nil, book) +if err != nil { + // Handle error +} +fmt.Printf("Created document in collection '%s' in database '%s'\n", col.Name(), db.Name()) +``` + +## API design + +### Concurrency + +All functions of the driver are strictly synchronous. They operate and only return a value (or error) +when they're done. + +If you want to run operations concurrently, use a go routine. All objects in the driver are designed +to be used from multiple concurrent go routines, except `Cursor`. + +All database objects (except `Cursor`) are considered static. After their creation they won't change. +E.g. after creating a `Collection` instance you can remove the collection, but the (Go) instance +will still be there. Calling functions on such a removed collection will of course fail. + +### Structured error handling & wrapping + +All functions of the driver that can fail return an `error` value. If that value is not `nil`, the +function call is considered to be failed. In that case all other return values are set to their `zero` +values. + +All errors are structured using error checking functions named `Is`. +E.g. `IsNotFound(error)` returns true if the given error is of the category "not found". +There can be multiple internal error codes that all map onto the same category. + +All errors returned from any function of the driver (either internal or exposed) wrap errors +using the `WithStack` function. This can be used to provide detailed stack traces in case of an error.
+All error checking functions use the `Cause` function to get the cause of an error instead of the error wrapper. + +Note that `WithStack` and `Cause` are actually variables so you can implement them using your own error +wrapper library. + +If you for example use https://github.com/pkg/errors, you want to initialize the go driver like this: +```go +import ( + driver "github.com/arangodb/go-driver" + "github.com/pkg/errors" +) + +func init() { + driver.WithStack = errors.WithStack + driver.Cause = errors.Cause +} +``` + +### Context aware + +All functions of the driver that involve some kind of long running operation or +support additional options not given as function arguments, have a `context.Context` argument. +This enables you to cancel running requests, pass timeouts/deadlines and pass additional options. + +In all methods that take a `context.Context` argument you can pass `nil` as value. +This is equivalent to passing `context.Background()`. + +Many functions support 1 or more optional (and infrequently used) additional options. +These can be used with a `With` function. +E.g. to force a create document call to wait until the data is synchronized to disk, +use a prepared context like this: +```go +ctx := driver.WithWaitForSync(parentContext) +collection.CreateDocument(ctx, yourDocument) +``` + +### Failover + +The driver supports multiple endpoints to connect to. All requests are in principle +sent to the same endpoint until that endpoint fails to respond. +In that case a new endpoint is chosen and the operation is retried. + +The following example shows how to connect to a cluster of 3 servers.
+ +```go +conn, err := http.NewConnection(http.ConnectionConfig{ + Endpoints: []string{"http://server1:8529", "http://server2:8529", "http://server3:8529"}, +}) +if err != nil { + // Handle error +} +c, err := driver.NewClient(driver.ClientConfig{ + Connection: conn, +}) +if err != nil { + // Handle error +} +``` + +Note that a valid endpoint is a URL to either a standalone server, or a URL to a coordinator +in a cluster. + +### Failover: Exact behavior + +The driver monitors the request being sent to a specific server (endpoint). +As soon as the request has been completely written, failover will no longer happen. +The reason for that is that several operations cannot be (safely) retried. +E.g. when a request to create a document has been sent to a server and a timeout +occurs, the driver has no way of knowing if the server did or did not create +the document in the database. + +If the driver detects that a request has been completely written, but still gets +an error (other than an error response from Arango itself), it will wrap the +error in a `ResponseError`. The client can test for such an error using `IsResponseError`. + +If a client received a `ResponseError`, it can do one of the following: +- Retry the operation and be prepared for some kind of duplicate record / unique constraint violation. +- Perform a test operation to see if the "failed" operation did succeed after all. +- Simply consider the operation failed. This is risky, since it can still be the case that the operation did succeed. + +### Failover: Timeouts + +To control the timeout of any function in the driver, you must pass it a context +configured with `context.WithTimeout` (or `context.WithDeadline`). + +In the case of multiple endpoints, the actual timeout used for requests will be shorter than +the timeout given in the context. +The driver will divide the timeout by the number of endpoints with a maximum of 3.
+This ensures that the driver can try up to 3 different endpoints (in case of failover) without +being canceled due to the timeout given by the client. +E.g. +- With 1 endpoint and a given timeout of 1 minute, the actual request timeout will be 1 minute. +- With 3 endpoints and a given timeout of 1 minute, the actual request timeout will be 20 seconds. +- With 8 endpoints and a given timeout of 1 minute, the actual request timeout will be 20 seconds. + +For most requests you want an actual request timeout of at least 30 seconds. + +### Secure connections (SSL) + +The driver supports endpoints that use SSL using the `https` URL scheme. + +The following example shows how to connect to a server that has a secure endpoint using +a self-signed certificate. + +```go +conn, err := http.NewConnection(http.ConnectionConfig{ + Endpoints: []string{"https://localhost:8529"}, + TLSConfig: &tls.Config{InsecureSkipVerify: true}, +}) +if err != nil { + // Handle error +} +c, err := driver.NewClient(driver.ClientConfig{ + Connection: conn, +}) +if err != nil { + // Handle error +} +``` + +# Sample requests + +## Connecting to ArangoDB + +```go +conn, err := http.NewConnection(http.ConnectionConfig{ + Endpoints: []string{"http://localhost:8529"}, + TLSConfig: &tls.Config{ /*...*/ }, +}) +if err != nil { + // Handle error +} +c, err := driver.NewClient(driver.ClientConfig{ + Connection: conn, + Authentication: driver.BasicAuthentication("user", "password"), +}) +if err != nil { + // Handle error +} +``` + +## Opening a database + +```go +ctx := context.Background() +db, err := client.Database(ctx, "myDB") +if err != nil { + // handle error +} +``` + +## Opening a collection + +```go +ctx := context.Background() +col, err := db.Collection(ctx, "myCollection") +if err != nil { + // handle error +} +``` + +## Checking if a collection exists + +```go +ctx := context.Background() +found, err := db.CollectionExists(ctx, "myCollection") +if err != nil { + // handle error +} +``` + +## 
Creating a collection + +```go +ctx := context.Background() +options := &driver.CreateCollectionOptions{ /* ... */ } +col, err := db.CreateCollection(ctx, "myCollection", options) +if err != nil { + // handle error +} +``` + +## Reading a document from a collection + +```go +var doc MyDocument +ctx := context.Background() +meta, err := col.ReadDocument(ctx, myDocumentKey, &doc) +if err != nil { + // handle error +} +``` + +## Reading a document from a collection with an explicit revision + +```go +var doc MyDocument +revCtx := driver.WithRevision(ctx, "mySpecificRevision") +meta, err := col.ReadDocument(revCtx, myDocumentKey, &doc) +if err != nil { + // handle error +} +``` + +## Creating a document + +```go +doc := MyDocument{ + Name: "jan", + Counter: 23, +} +ctx := context.Background() +meta, err := col.CreateDocument(ctx, doc) +if err != nil { + // handle error +} +fmt.Printf("Created document with key '%s', revision '%s'\n", meta.Key, meta.Rev) +``` + +## Removing a document + +```go +ctx := context.Background() +err := col.RemoveDocument(revCtx, myDocumentKey) +if err != nil { + // handle error +} +``` + +## Removing a document with an explicit revision + +```go +revCtx := driver.WithRevision(ctx, "mySpecificRevision") +err := col.RemoveDocument(revCtx, myDocumentKey) +if err != nil { + // handle error +} +``` + +## Updating a document + +```go +ctx := context.Background() +patch := map[string]interface{}{ + "Name": "Frank", +} +meta, err := col.UpdateDocument(ctx, myDocumentKey, patch) +if err != nil { + // handle error +} +``` + +## Querying documents, one document at a time + +```go +ctx := context.Background() +query := "FOR d IN myCollection LIMIT 10 RETURN d" +cursor, err := db.Query(ctx, query, nil) +if err != nil { + // handle error +} +defer cursor.Close() +for { + var doc MyDocument + meta, err := cursor.ReadDocument(ctx, &doc) + if driver.IsNoMoreDocuments(err) { + break + } else if err != nil { + // handle other errors + } + fmt.Printf("Got doc 
with key '%s' from query\n", meta.Key) +} +``` + +## Querying documents, fetching total count + +```go +ctx := driver.WithQueryCount(context.Background()) +query := "FOR d IN myCollection RETURN d" +cursor, err := db.Query(ctx, query, nil) +if err != nil { + // handle error +} +defer cursor.Close() +fmt.Printf("Query yields %d documents\n", cursor.Count()) +``` + +## Querying documents, with bind variables + +```go +ctx := context.Background() +query := "FOR d IN myCollection FILTER d.Name == @name RETURN d" +bindVars := map[string]interface{}{ + "name": "Some name", +} +cursor, err := db.Query(ctx, query, bindVars) +if err != nil { + // handle error +} +defer cursor.Close() +... +``` + diff --git a/deps/github.com/arangodb/go-driver/VERSION b/deps/github.com/arangodb/go-driver/VERSION new file mode 100644 index 000000000..81b7d49d9 --- /dev/null +++ b/deps/github.com/arangodb/go-driver/VERSION @@ -0,0 +1 @@ +0.9.0+git \ No newline at end of file diff --git a/deps/github.com/arangodb/go-driver/authentication.go b/deps/github.com/arangodb/go-driver/authentication.go new file mode 100644 index 000000000..44c379b11 --- /dev/null +++ b/deps/github.com/arangodb/go-driver/authentication.go @@ -0,0 +1,114 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +type AuthenticationType int + +const ( + // AuthenticationTypeBasic uses username+password basic authentication + AuthenticationTypeBasic AuthenticationType = iota + // AuthenticationTypeJWT uses username+password JWT token based authentication + AuthenticationTypeJWT + // AuthenticationTypeRaw uses a raw value for the Authorization header + AuthenticationTypeRaw +) + +// Authentication implements a kind of authentication. +type Authentication interface { + // Returns the type of authentication + Type() AuthenticationType + // Get returns a configuration property of the authentication. + // Supported properties depend on type of authentication. + Get(property string) string +} + +// BasicAuthentication creates an authentication implementation based on the given username & password. +func BasicAuthentication(userName, password string) Authentication { + return &userNameAuthentication{ + authType: AuthenticationTypeBasic, + userName: userName, + password: password, + } +} + +// JWTAuthentication creates a JWT token authentication implementation based on the given username & password. +func JWTAuthentication(userName, password string) Authentication { + return &userNameAuthentication{ + authType: AuthenticationTypeJWT, + userName: userName, + password: password, + } +} + +// basicAuthentication implements HTTP Basic authentication. +type userNameAuthentication struct { + authType AuthenticationType + userName string + password string +} + +// Returns the type of authentication +func (a *userNameAuthentication) Type() AuthenticationType { + return a.authType +} + +// Get returns a configuration property of the authentication. +// Supported properties depend on type of authentication. 
+func (a *userNameAuthentication) Get(property string) string { + switch property { + case "username": + return a.userName + case "password": + return a.password + default: + return "" + } +} + +// RawAuthentication creates a raw authentication implementation based on the given value for the Authorization header. +func RawAuthentication(value string) Authentication { + return &rawAuthentication{ + value: value, + } +} + +// rawAuthentication implements Raw authentication. +type rawAuthentication struct { + value string +} + +// Returns the type of authentication +func (a *rawAuthentication) Type() AuthenticationType { + return AuthenticationTypeRaw +} + +// Get returns a configuration property of the authentication. +// Supported properties depend on type of authentication. +func (a *rawAuthentication) Get(property string) string { + switch property { + case "value": + return a.value + default: + return "" + } +} diff --git a/deps/github.com/arangodb/go-driver/client.go b/deps/github.com/arangodb/go-driver/client.go new file mode 100644 index 000000000..872abca30 --- /dev/null +++ b/deps/github.com/arangodb/go-driver/client.go @@ -0,0 +1,104 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import ( + "context" + "fmt" + "sort" + "strings" + "time" +) + +// Client provides access to a single arangodb database server, or an entire cluster of arangodb servers. +type Client interface { + // SynchronizeEndpoints fetches all endpoints from an ArangoDB cluster and updates the + // connection to use those endpoints. + // When this client is connected to a single server, nothing happens. + // When this client is connected to a cluster of servers, the connection will be updated to reflect + // the layout of the cluster. + // This function requires ArangoDB 3.1.15 or up. + SynchronizeEndpoints(ctx context.Context) error + + // Connection returns the connection used by this client + Connection() Connection + + // Database functions + ClientDatabases + + // User functions + ClientUsers + + // Cluster functions + ClientCluster + + // Individual server information functions + ClientServerInfo + + // Server/cluster administration functions + ClientServerAdmin + + // Replication functions + ClientReplication +} + +// ClientConfig contains all settings needed to create a client. +type ClientConfig struct { + // Connection is the actual server/cluster connection. + // See http.NewConnection. + Connection Connection + // Authentication implements authentication on the server. + Authentication Authentication + // SynchronizeEndpointsInterval is the interval between automatisch synchronization of endpoints. + // If this value is 0, no automatic synchronization is performed. + // If this value is > 0, automatic synchronization is started on a go routine. + // This feature requires ArangoDB 3.1.15 or up. + SynchronizeEndpointsInterval time.Duration +} + +// VersionInfo describes the version of a database server. +type VersionInfo struct { + // This will always contain "arango" + Server string `json:"server,omitempty"` + // The server version string. 
The string has the format "major.minor.sub". + // Major and minor will be numeric, and sub may contain a number or a textual version. + Version Version `json:"version,omitempty"` + // Type of license of the server + License string `json:"license,omitempty"` + // Optional additional details. This is returned only if the context is configured using WithDetails. + Details map[string]interface{} `json:"details,omitempty"` +} + +// String creates a string representation of the given VersionInfo. +func (v VersionInfo) String() string { + result := fmt.Sprintf("%s, version %s, license %s", v.Server, v.Version, v.License) + if len(v.Details) > 0 { + lines := make([]string, 0, len(v.Details)) + for k, v := range v.Details { + lines = append(lines, fmt.Sprintf("%s: %v", k, v)) + } + sort.Strings(lines) + result = result + "\n" + strings.Join(lines, "\n") + } + return result +} diff --git a/deps/github.com/arangodb/go-driver/client_cluster.go b/deps/github.com/arangodb/go-driver/client_cluster.go new file mode 100644 index 000000000..a6c879d2c --- /dev/null +++ b/deps/github.com/arangodb/go-driver/client_cluster.go @@ -0,0 +1,33 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import "context" + +// ClientCluster provides methods needed to access cluster functionality from a client. +type ClientCluster interface { + // Cluster provides access to cluster wide specific operations. + // To use this interface, an ArangoDB cluster is required. + // If this method is a called without a cluster, a PreconditionFailed error is returned. + Cluster(ctx context.Context) (Cluster, error) +} diff --git a/deps/github.com/arangodb/go-driver/client_cluster_impl.go b/deps/github.com/arangodb/go-driver/client_cluster_impl.go new file mode 100644 index 000000000..5ac0ee62a --- /dev/null +++ b/deps/github.com/arangodb/go-driver/client_cluster_impl.go @@ -0,0 +1,46 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import ( + "context" +) + +// Cluster provides access to cluster wide specific operations. +// To use this interface, an ArangoDB cluster is required. +// If this method is a called without a cluster, a PreconditionFailed error is returned. 
+func (c *client) Cluster(ctx context.Context) (Cluster, error) { + role, err := c.ServerRole(ctx) + if err != nil { + return nil, WithStack(err) + } + if role == ServerRoleSingle || role == ServerRoleSingleActive || role == ServerRoleSinglePassive { + // Standalone server, this is wrong + return nil, WithStack(newArangoError(412, 0, "Cluster expected, found SINGLE server")) + } + cl, err := newCluster(c.conn) + if err != nil { + return nil, WithStack(err) + } + return cl, nil +} diff --git a/deps/github.com/arangodb/go-driver/client_databases.go b/deps/github.com/arangodb/go-driver/client_databases.go new file mode 100644 index 000000000..4da0b59e1 --- /dev/null +++ b/deps/github.com/arangodb/go-driver/client_databases.go @@ -0,0 +1,65 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import "context" + +// ClientDatabases provides access to the databases in a single arangodb database server, or an entire cluster of arangodb servers. +type ClientDatabases interface { + // Database opens a connection to an existing database. + // If no database with given name exists, an NotFoundError is returned. + Database(ctx context.Context, name string) (Database, error) + + // DatabaseExists returns true if a database with given name exists. 
+ DatabaseExists(ctx context.Context, name string) (bool, error) + + // Databases returns a list of all databases found by the client. + Databases(ctx context.Context) ([]Database, error) + + // AccessibleDatabases returns a list of all databases that can be accessed by the authenticated user. + AccessibleDatabases(ctx context.Context) ([]Database, error) + + // CreateDatabase creates a new database with given name and opens a connection to it. + // If the a database with given name already exists, a DuplicateError is returned. + CreateDatabase(ctx context.Context, name string, options *CreateDatabaseOptions) (Database, error) +} + +// CreateDatabaseOptions contains options that customize the creating of a database. +type CreateDatabaseOptions struct { + // List of users to initially create for the new database. User information will not be changed for users that already exist. + // If users is not specified or does not contain any users, a default user root will be created with an empty string password. + // This ensures that the new database will be accessible after it is created. + Users []CreateDatabaseUserOptions `json:"users,omitempty"` +} + +// CreateDatabaseUserOptions contains options for creating a single user for a database. +type CreateDatabaseUserOptions struct { + // Loginname of the user to be created + UserName string `json:"user,omitempty"` + // The user password as a string. If not specified, it will default to an empty string. + Password string `json:"passwd,omitempty"` + // A flag indicating whether the user account should be activated or not. The default value is true. If set to false, the user won't be able to log into the database. + Active *bool `json:"active,omitempty"` + // A JSON object with extra user information. The data contained in extra will be stored for the user but not be interpreted further by ArangoDB. 
+ Extra interface{} `json:"extra,omitempty"` +} diff --git a/deps/github.com/arangodb/go-driver/client_databases_impl.go b/deps/github.com/arangodb/go-driver/client_databases_impl.go new file mode 100644 index 000000000..4bcde2bef --- /dev/null +++ b/deps/github.com/arangodb/go-driver/client_databases_impl.go @@ -0,0 +1,153 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import ( + "context" + "path" +) + +// Database opens a connection to an existing database. +// If no database with given name exists, an NotFoundError is returned. +func (c *client) Database(ctx context.Context, name string) (Database, error) { + escapedName := pathEscape(name) + req, err := c.conn.NewRequest("GET", path.Join("_db", escapedName, "_api/database/current")) + if err != nil { + return nil, WithStack(err) + } + resp, err := c.conn.Do(ctx, req) + if err != nil { + return nil, WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return nil, WithStack(err) + } + db, err := newDatabase(name, c.conn) + if err != nil { + return nil, WithStack(err) + } + return db, nil +} + +// DatabaseExists returns true if a database with given name exists. 
+func (c *client) DatabaseExists(ctx context.Context, name string) (bool, error) { + escapedName := pathEscape(name) + req, err := c.conn.NewRequest("GET", path.Join("_db", escapedName, "_api/database/current")) + if err != nil { + return false, WithStack(err) + } + resp, err := c.conn.Do(ctx, req) + if err != nil { + return false, WithStack(err) + } + if err := resp.CheckStatus(200); err == nil { + return true, nil + } else if IsNotFound(err) { + return false, nil + } else { + return false, WithStack(err) + } +} + +type getDatabaseResponse struct { + Result []string `json:"result,omitempty"` +} + +// Databases returns a list of all databases found by the client. +func (c *client) Databases(ctx context.Context) ([]Database, error) { + result, err := listDatabases(ctx, c.conn, path.Join("/_db/_system/_api/database")) + if err != nil { + return nil, WithStack(err) + } + return result, nil +} + +// AccessibleDatabases returns a list of all databases that can be accessed by the authenticated user. +func (c *client) AccessibleDatabases(ctx context.Context) ([]Database, error) { + result, err := listDatabases(ctx, c.conn, path.Join("/_db/_system/_api/database/user")) + if err != nil { + return nil, WithStack(err) + } + return result, nil +} + +// listDatabases returns a list of databases using a GET to the given path. 
+func listDatabases(ctx context.Context, conn Connection, path string) ([]Database, error) { + req, err := conn.NewRequest("GET", path) + if err != nil { + return nil, WithStack(err) + } + resp, err := conn.Do(ctx, req) + if err != nil { + return nil, WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return nil, WithStack(err) + } + var data getDatabaseResponse + if err := resp.ParseBody("", &data); err != nil { + return nil, WithStack(err) + } + result := make([]Database, 0, len(data.Result)) + for _, name := range data.Result { + db, err := newDatabase(name, conn) + if err != nil { + return nil, WithStack(err) + } + result = append(result, db) + } + return result, nil +} + +// CreateDatabase creates a new database with given name and opens a connection to it. +// If the a database with given name already exists, a DuplicateError is returned. +func (c *client) CreateDatabase(ctx context.Context, name string, options *CreateDatabaseOptions) (Database, error) { + input := struct { + CreateDatabaseOptions + Name string `json:"name"` + }{ + Name: name, + } + if options != nil { + input.CreateDatabaseOptions = *options + } + req, err := c.conn.NewRequest("POST", path.Join("_db/_system/_api/database")) + if err != nil { + return nil, WithStack(err) + } + if _, err := req.SetBody(input); err != nil { + return nil, WithStack(err) + } + resp, err := c.conn.Do(ctx, req) + if err != nil { + return nil, WithStack(err) + } + if err := resp.CheckStatus(201); err != nil { + return nil, WithStack(err) + } + db, err := newDatabase(name, c.conn) + if err != nil { + return nil, WithStack(err) + } + return db, nil +} diff --git a/deps/github.com/arangodb/go-driver/client_impl.go b/deps/github.com/arangodb/go-driver/client_impl.go new file mode 100644 index 000000000..71c9b6fda --- /dev/null +++ b/deps/github.com/arangodb/go-driver/client_impl.go @@ -0,0 +1,135 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the 
Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import ( + "context" + "time" + + "github.com/arangodb/go-driver/util" +) + +// NewClient creates a new Client based on the given config setting. +func NewClient(config ClientConfig) (Client, error) { + if config.Connection == nil { + return nil, WithStack(InvalidArgumentError{Message: "Connection is not set"}) + } + conn := config.Connection + if config.Authentication != nil { + var err error + conn, err = conn.SetAuthentication(config.Authentication) + if err != nil { + return nil, WithStack(err) + } + } + c := &client{ + conn: conn, + } + if config.SynchronizeEndpointsInterval > 0 { + go c.autoSynchronizeEndpoints(config.SynchronizeEndpointsInterval) + } + return c, nil +} + +// client implements the Client interface. +type client struct { + conn Connection +} + +// Connection returns the connection used by this client +func (c *client) Connection() Connection { + return c.conn +} + +// SynchronizeEndpoints fetches all endpoints from an ArangoDB cluster and updates the +// connection to use those endpoints. +// When this client is connected to a single server, nothing happens. +// When this client is connected to a cluster of servers, the connection will be updated to reflect +// the layout of the cluster. 
+func (c *client) SynchronizeEndpoints(ctx context.Context) error { + role, err := c.ServerRole(ctx) + if err != nil { + return WithStack(err) + } + if role == ServerRoleSingle { + // Standalone server, do nothing + return nil + } + + // Cluster mode, fetch endpoints + cep, err := c.clusterEndpoints(ctx) + if err != nil { + return WithStack(err) + } + var endpoints []string + for _, ep := range cep.Endpoints { + endpoints = append(endpoints, util.FixupEndpointURLScheme(ep.Endpoint)) + } + + // Update connection + if err := c.conn.UpdateEndpoints(endpoints); err != nil { + return WithStack(err) + } + + return nil +} + +// autoSynchronizeEndpoints performs automatic endpoint synchronization. +func (c *client) autoSynchronizeEndpoints(interval time.Duration) { + for { + // SynchronizeEndpoints endpoints + c.SynchronizeEndpoints(nil) + + // Wait a bit + time.Sleep(interval) + } +} + +type clusterEndpointsResponse struct { + Endpoints []clusterEndpoint `json:"endpoints,omitempty"` +} + +type clusterEndpoint struct { + Endpoint string `json:"endpoint,omitempty"` +} + +// clusterEndpoints returns the endpoints of a cluster. 
+func (c *client) clusterEndpoints(ctx context.Context) (clusterEndpointsResponse, error) { + req, err := c.conn.NewRequest("GET", "_api/cluster/endpoints") + if err != nil { + return clusterEndpointsResponse{}, WithStack(err) + } + applyContextSettings(ctx, req) + resp, err := c.conn.Do(ctx, req) + if err != nil { + return clusterEndpointsResponse{}, WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return clusterEndpointsResponse{}, WithStack(err) + } + var data clusterEndpointsResponse + if err := resp.ParseBody("", &data); err != nil { + return clusterEndpointsResponse{}, WithStack(err) + } + return data, nil +} diff --git a/deps/github.com/arangodb/go-driver/client_replication.go b/deps/github.com/arangodb/go-driver/client_replication.go new file mode 100644 index 000000000..bf6e7fea5 --- /dev/null +++ b/deps/github.com/arangodb/go-driver/client_replication.go @@ -0,0 +1,29 @@ +// +// DISCLAIMER +// +// Copyright 2018 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +// ClientReplication provides methods needed to access replication functionality from a client. +type ClientReplication interface { + // Replication provides access to replication specific operations. 
+ Replication() Replication +} diff --git a/deps/github.com/arangodb/go-driver/client_replication_impl.go b/deps/github.com/arangodb/go-driver/client_replication_impl.go new file mode 100644 index 000000000..03db2a4ad --- /dev/null +++ b/deps/github.com/arangodb/go-driver/client_replication_impl.go @@ -0,0 +1,28 @@ +// +// DISCLAIMER +// +// Copyright 2018 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +// Replication provides access to replication specific operations. +func (c *client) Replication() Replication { + return c +} diff --git a/deps/github.com/arangodb/go-driver/client_server_admin.go b/deps/github.com/arangodb/go-driver/client_server_admin.go new file mode 100644 index 000000000..415026733 --- /dev/null +++ b/deps/github.com/arangodb/go-driver/client_server_admin.go @@ -0,0 +1,48 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import "context" + +// ClientServerAdmin provides access to server administration functions of an arangodb database server +// or an entire cluster of arangodb servers. +type ClientServerAdmin interface { + // ServerMode returns the current mode in which the server/cluster is operating. + // This call needs ArangoDB 3.3 and up. + ServerMode(ctx context.Context) (ServerMode, error) + // SetServerMode changes the current mode in which the server/cluster is operating. + // This call needs a client that uses JWT authentication. + // This call needs ArangoDB 3.3 and up. + SetServerMode(ctx context.Context, mode ServerMode) error +} + +type ServerMode string + +const ( + // ServerModeDefault is the normal mode of the database in which read and write requests + // are allowed. + ServerModeDefault ServerMode = "default" + // ServerModeReadOnly is the mode in which all modifications to the database are blocked. + // Behavior is the same as user that has read-only access to all databases & collections. 
+ ServerModeReadOnly ServerMode = "readonly" +) diff --git a/deps/github.com/arangodb/go-driver/client_server_admin_impl.go b/deps/github.com/arangodb/go-driver/client_server_admin_impl.go new file mode 100644 index 000000000..bbeec577e --- /dev/null +++ b/deps/github.com/arangodb/go-driver/client_server_admin_impl.go @@ -0,0 +1,81 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import ( + "context" +) + +type serverModeResponse struct { + Mode ServerMode `json:"mode"` +} + +type serverModeRequest struct { + Mode ServerMode `json:"mode"` +} + +// ServerMode returns the current mode in which the server/cluster is operating. +// This call needs ArangoDB 3.3 and up. +func (c *client) ServerMode(ctx context.Context) (ServerMode, error) { + req, err := c.conn.NewRequest("GET", "_admin/server/mode") + if err != nil { + return "", WithStack(err) + } + resp, err := c.conn.Do(ctx, req) + if err != nil { + return "", WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return "", WithStack(err) + } + var result serverModeResponse + if err := resp.ParseBody("", &result); err != nil { + return "", WithStack(err) + } + return result.Mode, nil +} + +// SetServerMode changes the current mode in which the server/cluster is operating. 
+// This call needs a client that uses JWT authentication. +// This call needs ArangoDB 3.3 and up. +func (c *client) SetServerMode(ctx context.Context, mode ServerMode) error { + req, err := c.conn.NewRequest("PUT", "_admin/server/mode") + if err != nil { + return WithStack(err) + } + input := serverModeRequest{ + Mode: mode, + } + req, err = req.SetBody(input) + if err != nil { + return WithStack(err) + } + resp, err := c.conn.Do(ctx, req) + if err != nil { + return WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return WithStack(err) + } + return nil +} diff --git a/deps/github.com/arangodb/go-driver/client_server_info.go b/deps/github.com/arangodb/go-driver/client_server_info.go new file mode 100644 index 000000000..179f89873 --- /dev/null +++ b/deps/github.com/arangodb/go-driver/client_server_info.go @@ -0,0 +1,57 @@ +// +// DISCLAIMER +// +// Copyright 2018 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import "context" + +// ClientServerInfo provides access to information about a single ArangoDB server. +// When your client uses multiple endpoints, it is undefined which server +// will respond to requests of this interface. +type ClientServerInfo interface { + // Version returns version information from the connected database server. 
+ // Use WithDetails to configure a context that will include additional details in the returned VersionInfo. + Version(ctx context.Context) (VersionInfo, error) + + // ServerRole returns the role of the server that answers the request. + ServerRole(ctx context.Context) (ServerRole, error) +} + +// ServerRole is the role of an arangod server +type ServerRole string + +const ( + // ServerRoleSingle indicates that the server is a single-server instance + ServerRoleSingle ServerRole = "Single" + // ServerRoleSingleActive indicates that the server is the leader of a single-server resilient pair + ServerRoleSingleActive ServerRole = "SingleActive" + // ServerRoleSinglePassive indicates that the server is a follower of a single-server resilient pair + ServerRoleSinglePassive ServerRole = "SinglePassive" + // ServerRoleDBServer indicates that the server is a dbserver within a cluster + ServerRoleDBServer ServerRole = "DBServer" + // ServerRoleCoordinator indicates that the server is a coordinator within a cluster + ServerRoleCoordinator ServerRole = "Coordinator" + // ServerRoleAgent indicates that the server is an agent within a cluster + ServerRoleAgent ServerRole = "Agent" + // ServerRoleUndefined indicates that the role of the server cannot be determined + ServerRoleUndefined ServerRole = "Undefined" +) diff --git a/deps/github.com/arangodb/go-driver/client_server_info_impl.go b/deps/github.com/arangodb/go-driver/client_server_info_impl.go new file mode 100644 index 000000000..611fc86c5 --- /dev/null +++ b/deps/github.com/arangodb/go-driver/client_server_info_impl.go @@ -0,0 +1,125 @@ +// +// DISCLAIMER +// +// Copyright 2018 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import ( + "context" +) + +// Version returns version information from the connected database server. +func (c *client) Version(ctx context.Context) (VersionInfo, error) { + req, err := c.conn.NewRequest("GET", "_api/version") + if err != nil { + return VersionInfo{}, WithStack(err) + } + applyContextSettings(ctx, req) + resp, err := c.conn.Do(ctx, req) + if err != nil { + return VersionInfo{}, WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return VersionInfo{}, WithStack(err) + } + var data VersionInfo + if err := resp.ParseBody("", &data); err != nil { + return VersionInfo{}, WithStack(err) + } + return data, nil +} + +// roleResponse contains the response body of the `/admin/server/role` api. 
+type roleResponse struct { + // Role of the server within a cluster + Role string `json:"role,omitempty"` + Mode string `json:"mode,omitempty"` +} + +// asServerRole converts the response into a ServerRole +func (r roleResponse) asServerRole(ctx context.Context, c *client) (ServerRole, error) { + switch r.Role { + case "SINGLE": + switch r.Mode { + case "resilient": + if err := c.echo(ctx); IsNoLeader(err) { + return ServerRoleSinglePassive, nil + } else if err != nil { + return ServerRoleUndefined, WithStack(err) + } + return ServerRoleSingleActive, nil + default: + return ServerRoleSingle, nil + } + case "PRIMARY": + return ServerRoleDBServer, nil + case "COORDINATOR": + return ServerRoleCoordinator, nil + case "AGENT": + return ServerRoleAgent, nil + case "UNDEFINED": + return ServerRoleUndefined, nil + default: + return ServerRoleUndefined, nil + } +} + +// ServerRole returns the role of the server that answers the request. +func (c *client) ServerRole(ctx context.Context) (ServerRole, error) { + req, err := c.conn.NewRequest("GET", "_admin/server/role") + if err != nil { + return ServerRoleUndefined, WithStack(err) + } + applyContextSettings(ctx, req) + resp, err := c.conn.Do(ctx, req) + if err != nil { + return ServerRoleUndefined, WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return ServerRoleUndefined, WithStack(err) + } + var data roleResponse + if err := resp.ParseBody("", &data); err != nil { + return ServerRoleUndefined, WithStack(err) + } + role, err := data.asServerRole(ctx, c) + if err != nil { + return ServerRoleUndefined, WithStack(err) + } + return role, nil +} + +// echo performs a request to the _admin/echo endpoint, returning nil on success. 
+func (c *client) echo(ctx context.Context) error { + req, err := c.conn.NewRequest("GET", "_admin/echo") + if err != nil { + return WithStack(err) + } + applyContextSettings(ctx, req) + resp, err := c.conn.Do(ctx, req) + if err != nil { + return WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return WithStack(err) + } + return nil +} diff --git a/deps/github.com/arangodb/go-driver/client_users.go b/deps/github.com/arangodb/go-driver/client_users.go new file mode 100644 index 000000000..b891b44ca --- /dev/null +++ b/deps/github.com/arangodb/go-driver/client_users.go @@ -0,0 +1,52 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import "context" + +// ClientUsers provides access to the users in a single arangodb database server, or an entire cluster of arangodb servers. +type ClientUsers interface { + // User opens a connection to an existing user. + // If no user with given name exists, an NotFoundError is returned. + User(ctx context.Context, name string) (User, error) + + // UserExists returns true if a user with given name exists. + UserExists(ctx context.Context, name string) (bool, error) + + // Users returns a list of all users found by the client. 
+ Users(ctx context.Context) ([]User, error) + + // CreateUser creates a new user with given name and opens a connection to it. + // If a user with given name already exists, a Conflict error is returned. + CreateUser(ctx context.Context, name string, options *UserOptions) (User, error) +} + +// UserOptions contains options for creating a new user, updating or replacing a user. +type UserOptions struct { + // The user password as a string. If not specified, it will default to an empty string. + Password string `json:"passwd,omitempty"` + // A flag indicating whether the user account should be activated or not. The default value is true. If set to false, the user won't be able to log into the database. + Active *bool `json:"active,omitempty"` + // A JSON object with extra user information. The data contained in extra will be stored for the user but not be interpreted further by ArangoDB. + Extra interface{} `json:"extra,omitempty"` +} diff --git a/deps/github.com/arangodb/go-driver/client_users_impl.go b/deps/github.com/arangodb/go-driver/client_users_impl.go new file mode 100644 index 000000000..4a67b1913 --- /dev/null +++ b/deps/github.com/arangodb/go-driver/client_users_impl.go @@ -0,0 +1,143 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import ( + "context" + "path" +) + +// User opens a connection to an existing user. +// If no user with given name exists, a NotFoundError is returned. +func (c *client) User(ctx context.Context, name string) (User, error) { + escapedName := pathEscape(name) + req, err := c.conn.NewRequest("GET", path.Join("_api/user", escapedName)) + if err != nil { + return nil, WithStack(err) + } + resp, err := c.conn.Do(ctx, req) + if err != nil { + return nil, WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return nil, WithStack(err) + } + var data userData + if err := resp.ParseBody("", &data); err != nil { + return nil, WithStack(err) + } + u, err := newUser(data, c.conn) + if err != nil { + return nil, WithStack(err) + } + return u, nil +} + +// UserExists returns true if a user with given name exists. +func (c *client) UserExists(ctx context.Context, name string) (bool, error) { + escapedName := pathEscape(name) + req, err := c.conn.NewRequest("GET", path.Join("_api", "user", escapedName)) + if err != nil { + return false, WithStack(err) + } + resp, err := c.conn.Do(ctx, req) + if err != nil { + return false, WithStack(err) + } + if err := resp.CheckStatus(200); err == nil { + return true, nil + } else if IsNotFound(err) { + return false, nil + } else { + return false, WithStack(err) + } +} + +type listUsersResponse struct { + Result []userData `json:"result,omitempty"` +} + +// Users returns a list of all users found by the client. 
+func (c *client) Users(ctx context.Context) ([]User, error) { + req, err := c.conn.NewRequest("GET", "/_api/user") + if err != nil { + return nil, WithStack(err) + } + resp, err := c.conn.Do(ctx, req) + if err != nil { + return nil, WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return nil, WithStack(err) + } + var data listUsersResponse + if err := resp.ParseBody("", &data); err != nil { + return nil, WithStack(err) + } + result := make([]User, 0, len(data.Result)) + for _, userData := range data.Result { + u, err := newUser(userData, c.conn) + if err != nil { + return nil, WithStack(err) + } + result = append(result, u) + } + return result, nil +} + +// CreateUser creates a new user with given name and opens a connection to it. +// If a user with given name already exists, a DuplicateError is returned. +func (c *client) CreateUser(ctx context.Context, name string, options *UserOptions) (User, error) { + input := struct { + UserOptions + Name string `json:"user"` + }{ + Name: name, + } + if options != nil { + input.UserOptions = *options + } + req, err := c.conn.NewRequest("POST", path.Join("_api/user")) + if err != nil { + return nil, WithStack(err) + } + if _, err := req.SetBody(input); err != nil { + return nil, WithStack(err) + } + resp, err := c.conn.Do(ctx, req) + if err != nil { + return nil, WithStack(err) + } + if err := resp.CheckStatus(201); err != nil { + return nil, WithStack(err) + } + var data userData + if err := resp.ParseBody("", &data); err != nil { + return nil, WithStack(err) + } + u, err := newUser(data, c.conn) + if err != nil { + return nil, WithStack(err) + } + return u, nil +} diff --git a/deps/github.com/arangodb/go-driver/cluster.go b/deps/github.com/arangodb/go-driver/cluster.go new file mode 100644 index 000000000..5323fdbe4 --- /dev/null +++ b/deps/github.com/arangodb/go-driver/cluster.go @@ -0,0 +1,204 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the 
Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import ( + "context" + "time" +) + +// Cluster provides access to cluster wide specific operations. +// To use this interface, an ArangoDB cluster is required. +type Cluster interface { + // Get the cluster configuration & health + Health(ctx context.Context) (ClusterHealth, error) + + // Get the inventory of the cluster containing all collections (with entire details) of a database. + DatabaseInventory(ctx context.Context, db Database) (DatabaseInventory, error) + + // MoveShard moves a single shard of the given collection from server `fromServer` to + // server `toServer`. + MoveShard(ctx context.Context, col Collection, shard ShardID, fromServer, toServer ServerID) error +} + +// ServerID identifies an arangod server in a cluster. +type ServerID string + +// ClusterHealth contains health information for all servers in a cluster. +type ClusterHealth struct { + // Unique identifier of the entire cluster. + // This ID is created when the cluster was first created. + ID string `json:"ClusterId"` + // Health per server + Health map[ServerID]ServerHealth `json:"Health"` +} + +// ServerHealth contains health information of a single server in a cluster. 
+type ServerHealth struct { + Endpoint string `json:"Endpoint"` + LastHeartbeatAcked time.Time `json:"LastHeartbeatAcked"` + LastHeartbeatSent time.Time `json:"LastHeartbeatSent"` + LastHeartbeatStatus string `json:"LastHeartbeatStatus"` + Role ServerRole `json:"Role"` + ShortName string `json:"ShortName"` + Status ServerStatus `json:"Status"` + CanBeDeleted bool `json:"CanBeDeleted"` + HostID string `json:"Host,omitempty"` +} + +// ServerStatus describes the health status of a server +type ServerStatus string + +const ( + // ServerStatusGood indicates server is in good state + ServerStatusGood ServerStatus = "GOOD" + // ServerStatusBad indicates server has missed 1 heartbeat + ServerStatusBad ServerStatus = "BAD" + // ServerStatusFailed indicates server has been declared failed by the supervision, this happens after about 15s being bad. + ServerStatusFailed ServerStatus = "FAILED" +) + +// DatabaseInventory describes a detailed state of the collections & shards of a specific database within a cluster. +type DatabaseInventory struct { + // Details of all collections + Collections []InventoryCollection `json:"collections,omitempty"` +} + +// IsReady returns true if the IsReady flag of all collections is set. +func (i DatabaseInventory) IsReady() bool { + for _, c := range i.Collections { + if !c.IsReady { + return false + } + } + return true +} + +// PlanVersion returns the plan version of the first collection in the given inventory. +func (i DatabaseInventory) PlanVersion() int64 { + if len(i.Collections) == 0 { + return 0 + } + return i.Collections[0].PlanVersion +} + +// CollectionByName returns the InventoryCollection with given name. +// Return false if not found. 
+func (i DatabaseInventory) CollectionByName(name string) (InventoryCollection, bool) { + for _, c := range i.Collections { + if c.Parameters.Name == name { + return c, true + } + } + return InventoryCollection{}, false +} + +// InventoryCollection is a single element of a DatabaseInventory, containing all information +// of a specific collection. +type InventoryCollection struct { + Parameters InventoryCollectionParameters `json:"parameters"` + Indexes []InventoryIndex `json:"indexes,omitempty"` + PlanVersion int64 `json:"planVersion,omitempty"` + IsReady bool `json:"isReady,omitempty"` +} + +// IndexByFieldsAndType returns the InventoryIndex with given fields & type. +// Return false if not found. +func (i InventoryCollection) IndexByFieldsAndType(fields []string, indexType string) (InventoryIndex, bool) { + for _, idx := range i.Indexes { + if idx.Type == indexType && idx.FieldsEqual(fields) { + return idx, true + } + } + return InventoryIndex{}, false +} + +// InventoryCollectionParameters contains all configuration parameters of a collection in a database inventory. 
+type InventoryCollectionParameters struct { + Deleted bool `json:"deleted,omitempty"` + DoCompact bool `json:"doCompact,omitempty"` + ID string `json:"id,omitempty"` + IndexBuckets int `json:"indexBuckets,omitempty"` + Indexes []InventoryIndex `json:"indexes,omitempty"` + IsSmart bool `json:"isSmart,omitempty"` + SmartGraphAttribute string `json:"smartGraphAttribute,omitempty"` + IsSystem bool `json:"isSystem,omitempty"` + IsVolatile bool `json:"isVolatile,omitempty"` + JournalSize int64 `json:"journalSize,omitempty"` + KeyOptions struct { + Type string `json:"type,omitempty"` + AllowUserKeys bool `json:"allowUserKeys,omitempty"` + LastValue int64 `json:"lastValue,omitempty"` + } `json:"keyOptions"` + Name string `json:"name,omitempty"` + NumberOfShards int `json:"numberOfShards,omitempty"` + Path string `json:"path,omitempty"` + PlanID string `json:"planId,omitempty"` + ReplicationFactor int `json:"replicationFactor,omitempty"` + ShardKeys []string `json:"shardKeys,omitempty"` + Shards map[ShardID][]ServerID `json:"shards,omitempty"` + Status CollectionStatus `json:"status,omitempty"` + Type CollectionType `json:"type,omitempty"` + WaitForSync bool `json:"waitForSync,omitempty"` + DistributeShardsLike string `json:"distributeShardsLike,omitempty"` +} + +// ShardID is an internal identifier of a specific shard +type ShardID string + +// InventoryIndex contains all configuration parameters of a single index of a collection in a database inventory. +type InventoryIndex struct { + ID string `json:"id,omitempty"` + Type string `json:"type,omitempty"` + Fields []string `json:"fields,omitempty"` + Unique bool `json:"unique"` + Sparse bool `json:"sparse"` + Deduplicate bool `json:"deduplicate"` + MinLength int `json:"minLength,omitempty"` + GeoJSON bool `json:"geoJson,omitempty"` +} + +// FieldsEqual returns true when the given fields list equals the +// Fields list in the InventoryIndex. +// The order of fields is irrelevant. 
+func (i InventoryIndex) FieldsEqual(fields []string) bool { + return stringSliceEqualsIgnoreOrder(i.Fields, fields) +} + +// stringSliceEqualsIgnoreOrder returns true when the given lists contain the same elements. +// The order of elements is irrelevant. +func stringSliceEqualsIgnoreOrder(a, b []string) bool { + if len(a) != len(b) { + return false + } + bMap := make(map[string]struct{}) + for _, x := range b { + bMap[x] = struct{}{} + } + for _, x := range a { + if _, found := bMap[x]; !found { + return false + } + } + return true +} diff --git a/deps/github.com/arangodb/go-driver/cluster/cluster.go b/deps/github.com/arangodb/go-driver/cluster/cluster.go new file mode 100644 index 000000000..bb42304b5 --- /dev/null +++ b/deps/github.com/arangodb/go-driver/cluster/cluster.go @@ -0,0 +1,362 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package cluster + +import ( + "context" + "fmt" + "math" + "net/http" + "sort" + "strings" + "sync" + "time" + + driver "github.com/arangodb/go-driver" +) + +const ( + keyFollowLeaderRedirect driver.ContextKey = "arangodb-followLeaderRedirect" +) + +// ConnectionConfig provides all configuration options for a cluster connection. 
+type ConnectionConfig struct { + // DefaultTimeout is the timeout used by requests that have no timeout set in the given context. + DefaultTimeout time.Duration +} + +// ServerConnectionBuilder specifies a function called by the cluster connection when it +// needs to create an underlying connection to a specific endpoint. +type ServerConnectionBuilder func(endpoint string) (driver.Connection, error) + +// NewConnection creates a new cluster connection to a cluster of servers. +// The given connections are existing connections to each of the servers. +func NewConnection(config ConnectionConfig, connectionBuilder ServerConnectionBuilder, endpoints []string) (driver.Connection, error) { + if connectionBuilder == nil { + return nil, driver.WithStack(driver.InvalidArgumentError{Message: "Must a connection builder"}) + } + if len(endpoints) == 0 { + return nil, driver.WithStack(driver.InvalidArgumentError{Message: "Must provide at least 1 endpoint"}) + } + if config.DefaultTimeout == 0 { + config.DefaultTimeout = defaultTimeout + } + cConn := &clusterConnection{ + connectionBuilder: connectionBuilder, + defaultTimeout: config.DefaultTimeout, + } + // Initialize endpoints + if err := cConn.UpdateEndpoints(endpoints); err != nil { + return nil, driver.WithStack(err) + } + return cConn, nil +} + +const ( + defaultTimeout = 9 * time.Minute + keyEndpoint = "arangodb-endpoint" +) + +type clusterConnection struct { + connectionBuilder ServerConnectionBuilder + servers []driver.Connection + endpoints []string + current int + mutex sync.RWMutex + defaultTimeout time.Duration + auth driver.Authentication +} + +// NewRequest creates a new request with given method and path. +func (c *clusterConnection) NewRequest(method, path string) (driver.Request, error) { + c.mutex.RLock() + servers := c.servers + c.mutex.RUnlock() + + // It is assumed that all servers used the same protocol. 
+ if len(servers) > 0 { + return servers[0].NewRequest(method, path) + } + return nil, driver.WithStack(driver.ArangoError{ + HasError: true, + Code: http.StatusServiceUnavailable, + ErrorMessage: "no servers available", + }) +} + +// Do performs a given request, returning its response. +func (c *clusterConnection) Do(ctx context.Context, req driver.Request) (driver.Response, error) { + followLeaderRedirect := true + if ctx == nil { + ctx = context.Background() + } else { + if v := ctx.Value(keyFollowLeaderRedirect); v != nil { + if on, ok := v.(bool); ok { + followLeaderRedirect = on + } + } + } + // Timeout management. + // We take the given timeout and divide it in 3 so we allow for other servers + // to give it a try if an earlier server fails. + deadline, hasDeadline := ctx.Deadline() + var timeout time.Duration + if hasDeadline { + timeout = deadline.Sub(time.Now()) + } else { + timeout = c.defaultTimeout + } + + serverCount := len(c.servers) + var specificServer driver.Connection + if v := ctx.Value(keyEndpoint); v != nil { + if endpoint, ok := v.(string); ok { + // Specific endpoint specified + serverCount = 1 + var err error + specificServer, err = c.getSpecificServer(endpoint) + if err != nil { + return nil, driver.WithStack(err) + } + } + } + + timeoutDivider := math.Max(1.0, math.Min(3.0, float64(serverCount))) + attempt := 1 + s := specificServer + if s == nil { + s = c.getCurrentServer() + } + for { + // Send request to specific endpoint with a 1/3 timeout (so we get 3 attempts) + serverCtx, cancel := context.WithTimeout(ctx, time.Duration(float64(timeout)/timeoutDivider)) + resp, err := s.Do(serverCtx, req) + cancel() + + isNoLeaderResponse := false + if err == nil && resp.StatusCode() == 503 { + // Service unavailable, parse the body, perhaps this is a "no leader" + // case where we have to failover. 
+ var aerr driver.ArangoError + if perr := resp.ParseBody("", &aerr); perr == nil && aerr.HasError { + if driver.IsNoLeader(aerr) { + isNoLeaderResponse = true + // Save error in case we have no more servers + err = aerr + } + } + + } + if !isNoLeaderResponse || !followLeaderRedirect { + if err == nil { + // We're done + return resp, nil + } + // No success yet + if driver.IsCanceled(err) { + // Request was cancelled, we return directly. + return nil, driver.WithStack(err) + } + // If we've completely written the request, we return the error, + // otherwise we'll failover to a new server. + if req.Written() { + // Request has been written to network, do not failover + if driver.IsArangoError(err) { + // ArangoError, so we got an error response from server. + return nil, driver.WithStack(err) + } + // Not an ArangoError, so it must be some kind of timeout, network ... error. + return nil, driver.WithStack(&driver.ResponseError{Err: err}) + } + } + + // Failed, try next server + attempt++ + if specificServer != nil { + // A specific server was specified, no failover. + return nil, driver.WithStack(err) + } + if attempt > len(c.servers) { + // We've tried all servers. Giving up. + return nil, driver.WithStack(err) + } + s = c.getNextServer() + } +} + +/*func printError(err error, indent string) { + if err == nil { + return + } + fmt.Printf("%sGot %T %+v\n", indent, err, err) + if xerr, ok := err.(*os.SyscallError); ok { + printError(xerr.Err, indent+" ") + } else if xerr, ok := err.(*net.OpError); ok { + printError(xerr.Err, indent+" ") + } else if xerr, ok := err.(*url.Error); ok { + printError(xerr.Err, indent+" ") + } +}*/ + +// Unmarshal unmarshals the given raw object into the given result interface. 
+func (c *clusterConnection) Unmarshal(data driver.RawObject, result interface{}) error { + c.mutex.RLock() + servers := c.servers + c.mutex.RUnlock() + + if len(servers) > 0 { + if err := c.servers[0].Unmarshal(data, result); err != nil { + return driver.WithStack(err) + } + return nil + } + return driver.WithStack(driver.ArangoError{ + HasError: true, + Code: http.StatusServiceUnavailable, + ErrorMessage: "no servers available", + }) +} + +// Endpoints returns the endpoints used by this connection. +func (c *clusterConnection) Endpoints() []string { + c.mutex.RLock() + defer c.mutex.RUnlock() + + var result []string + for _, s := range c.servers { + result = append(result, s.Endpoints()...) + } + return result +} + +// UpdateEndpoints reconfigures the connection to use the given endpoints. +func (c *clusterConnection) UpdateEndpoints(endpoints []string) error { + if len(endpoints) == 0 { + return driver.WithStack(driver.InvalidArgumentError{Message: "Must provide at least 1 endpoint"}) + } + sort.Strings(endpoints) + if strings.Join(endpoints, ",") == strings.Join(c.endpoints, ",") { + // No changes + return nil + } + + // Create new connections + servers := make([]driver.Connection, 0, len(endpoints)) + for _, ep := range endpoints { + conn, err := c.connectionBuilder(ep) + if err != nil { + return driver.WithStack(err) + } + if c.auth != nil { + conn, err = conn.SetAuthentication(c.auth) + if err != nil { + return driver.WithStack(err) + } + } + servers = append(servers, conn) + } + + // Swap connections + c.mutex.Lock() + defer c.mutex.Unlock() + c.servers = servers + c.endpoints = endpoints + c.current = 0 + + return nil +} + +// Configure the authentication used for this connection. 
+func (c *clusterConnection) SetAuthentication(auth driver.Authentication) (driver.Connection, error) { + c.mutex.Lock() + defer c.mutex.Unlock() + + // Configure underlying servers + newServerConnections := make([]driver.Connection, len(c.servers)) + for i, s := range c.servers { + authConn, err := s.SetAuthentication(auth) + if err != nil { + return nil, driver.WithStack(err) + } + newServerConnections[i] = authConn + } + + // Save authentication + c.auth = auth + c.servers = newServerConnections + + return c, nil +} + +// Protocols returns all protocols used by this connection. +func (c *clusterConnection) Protocols() driver.ProtocolSet { + c.mutex.RLock() + defer c.mutex.RUnlock() + + var result driver.ProtocolSet + for _, s := range c.servers { + for _, p := range s.Protocols() { + if !result.Contains(p) { + result = append(result, p) + } + } + } + return result +} + +// getCurrentServer returns the currently used server. +func (c *clusterConnection) getCurrentServer() driver.Connection { + c.mutex.RLock() + defer c.mutex.RUnlock() + return c.servers[c.current] +} + +// getSpecificServer returns the server with the given endpoint. +func (c *clusterConnection) getSpecificServer(endpoint string) (driver.Connection, error) { + c.mutex.RLock() + defer c.mutex.RUnlock() + + for _, s := range c.servers { + endpoints := s.Endpoints() + found := false + for _, x := range endpoints { + if x == endpoint { + found = true + break + } + } + if found { + return s, nil + } + } + + return nil, driver.WithStack(driver.InvalidArgumentError{Message: fmt.Sprintf("unknown endpoint: %s", endpoint)}) +} + +// getNextServer changes the currently used server and returns the new server. 
+func (c *clusterConnection) getNextServer() driver.Connection { + c.mutex.Lock() + defer c.mutex.Unlock() + c.current = (c.current + 1) % len(c.servers) + return c.servers[c.current] +} diff --git a/deps/github.com/arangodb/go-driver/cluster_impl.go b/deps/github.com/arangodb/go-driver/cluster_impl.go new file mode 100644 index 000000000..d7bfef2ed --- /dev/null +++ b/deps/github.com/arangodb/go-driver/cluster_impl.go @@ -0,0 +1,120 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import ( + "context" + "path" +) + +// newCluster creates a new Cluster implementation. 
+func newCluster(conn Connection) (Cluster, error) {
+	if conn == nil {
+		return nil, WithStack(InvalidArgumentError{Message: "conn is nil"})
+	}
+	return &cluster{
+		conn: conn,
+	}, nil
+}
+
+type cluster struct {
+	conn Connection
+}
+
+// Health returns the health of the cluster.
+func (c *cluster) Health(ctx context.Context) (ClusterHealth, error) {
+	req, err := c.conn.NewRequest("GET", "_admin/cluster/health")
+	if err != nil {
+		return ClusterHealth{}, WithStack(err)
+	}
+	applyContextSettings(ctx, req)
+	resp, err := c.conn.Do(ctx, req)
+	if err != nil {
+		return ClusterHealth{}, WithStack(err)
+	}
+	if err := resp.CheckStatus(200); err != nil {
+		return ClusterHealth{}, WithStack(err)
+	}
+	var result ClusterHealth
+	if err := resp.ParseBody("", &result); err != nil {
+		return ClusterHealth{}, WithStack(err)
+	}
+	return result, nil
+}
+
+// Get the inventory of the cluster containing all collections (with entire details) of a database.
+func (c *cluster) DatabaseInventory(ctx context.Context, db Database) (DatabaseInventory, error) {
+	req, err := c.conn.NewRequest("GET", path.Join("_db", db.Name(), "_api/replication/clusterInventory"))
+	if err != nil {
+		return DatabaseInventory{}, WithStack(err)
+	}
+	applyContextSettings(ctx, req)
+	resp, err := c.conn.Do(ctx, req)
+	if err != nil {
+		return DatabaseInventory{}, WithStack(err)
+	}
+	if err := resp.CheckStatus(200); err != nil {
+		return DatabaseInventory{}, WithStack(err)
+	}
+	var result DatabaseInventory
+	if err := resp.ParseBody("", &result); err != nil {
+		return DatabaseInventory{}, WithStack(err)
+	}
+	return result, nil
+}
+
+type moveShardRequest struct {
+	Database   string   `json:"database"`
+	Collection string   `json:"collection"`
+	Shard      ShardID  `json:"shard"`
+	FromServer ServerID `json:"fromServer"`
+	ToServer   ServerID `json:"toServer"`
+}
+
+// MoveShard moves a single shard of the given collection from server `fromServer` to
+// server `toServer`.
+func (c *cluster) MoveShard(ctx context.Context, col Collection, shard ShardID, fromServer, toServer ServerID) error { + req, err := c.conn.NewRequest("POST", "_admin/cluster/moveShard") + if err != nil { + return WithStack(err) + } + input := moveShardRequest{ + Database: col.Database().Name(), + Collection: col.Name(), + Shard: shard, + FromServer: fromServer, + ToServer: toServer, + } + if _, err := req.SetBody(input); err != nil { + return WithStack(err) + } + applyContextSettings(ctx, req) + resp, err := c.conn.Do(ctx, req) + if err != nil { + return WithStack(err) + } + if err := resp.CheckStatus(202); err != nil { + return WithStack(err) + } + return nil +} diff --git a/deps/github.com/arangodb/go-driver/collection.go b/deps/github.com/arangodb/go-driver/collection.go new file mode 100644 index 000000000..ed0ffa24d --- /dev/null +++ b/deps/github.com/arangodb/go-driver/collection.go @@ -0,0 +1,216 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import ( + "context" + "time" +) + +// Collection provides access to the information of a single collection, all its documents and all its indexes. +type Collection interface { + // Name returns the name of the collection. + Name() string + + // Database returns the database containing the collection. 
+ Database() Database + + // Status fetches the current status of the collection. + Status(ctx context.Context) (CollectionStatus, error) + + // Count fetches the number of document in the collection. + Count(ctx context.Context) (int64, error) + + // Statistics returns the number of documents and additional statistical information about the collection. + Statistics(ctx context.Context) (CollectionStatistics, error) + + // Revision fetches the revision ID of the collection. + // The revision ID is a server-generated string that clients can use to check whether data + // in a collection has changed since the last revision check. + Revision(ctx context.Context) (string, error) + + // Properties fetches extended information about the collection. + Properties(ctx context.Context) (CollectionProperties, error) + + // SetProperties changes properties of the collection. + SetProperties(ctx context.Context, options SetCollectionPropertiesOptions) error + + // Load the collection into memory. + Load(ctx context.Context) error + + // UnLoad the collection from memory. + Unload(ctx context.Context) error + + // Remove removes the entire collection. + // If the collection does not exist, a NotFoundError is returned. + Remove(ctx context.Context) error + + // Truncate removes all documents from the collection, but leaves the indexes intact. + Truncate(ctx context.Context) error + + // All index functions + CollectionIndexes + + // All document functions + CollectionDocuments +} + +// CollectionInfo contains information about a collection +type CollectionInfo struct { + // The identifier of the collection. + ID string `json:"id,omitempty"` + // The name of the collection. + Name string `json:"name,omitempty"` + // The status of the collection + Status CollectionStatus `json:"status,omitempty"` + // The type of the collection + Type CollectionType `json:"type,omitempty"` + // If true then the collection is a system collection. 
+ IsSystem bool `json:"isSystem,omitempty"` +} + +// CollectionProperties contains extended information about a collection. +type CollectionProperties struct { + CollectionInfo + + // WaitForSync; If true then creating, changing or removing documents will wait until the data has been synchronized to disk. + WaitForSync bool `json:"waitForSync,omitempty"` + // DoCompact specifies whether or not the collection will be compacted. + DoCompact bool `json:"doCompact,omitempty"` + // JournalSize is the maximal size setting for journals / datafiles in bytes. + JournalSize int64 `json:"journalSize,omitempty"` + KeyOptions struct { + // Type specifies the type of the key generator. The currently available generators are traditional and autoincrement. + Type KeyGeneratorType `json:"type,omitempty"` + // AllowUserKeys; if set to true, then it is allowed to supply own key values in the _key attribute of a document. + // If set to false, then the key generator is solely responsible for generating keys and supplying own key values in + // the _key attribute of documents is considered an error. + AllowUserKeys bool `json:"allowUserKeys,omitempty"` + } `json:"keyOptions,omitempty"` + // NumberOfShards is the number of shards of the collection. + // Only available in cluster setup. + NumberOfShards int `json:"numberOfShards,omitempty"` + // ShardKeys contains the names of document attributes that are used to determine the target shard for documents. + // Only available in cluster setup. + ShardKeys []string `json:"shardKeys,omitempty"` + // ReplicationFactor contains how many copies of each shard are kept on different DBServers. + // Only available in cluster setup. + ReplicationFactor int `json:"replicationFactor,omitempty"` +} + +// SetCollectionPropertiesOptions contains data for Collection.SetProperties. +type SetCollectionPropertiesOptions struct { + // If true then creating or changing a document will wait until the data has been synchronized to disk. 
+ WaitForSync *bool `json:"waitForSync,omitempty"` + // The maximal size of a journal or datafile in bytes. The value must be at least 1048576 (1 MB). Note that when changing the journalSize value, it will only have an effect for additional journals or datafiles that are created. Already existing journals or datafiles will not be affected. + JournalSize int64 `json:"journalSize,omitempty"` + // ReplicationFactor contains how many copies of each shard are kept on different DBServers. + // Only available in cluster setup. + ReplicationFactor int `json:"replicationFactor,omitempty"` +} + +// CollectionStatus indicates the status of a collection. +type CollectionStatus int + +const ( + CollectionStatusNewBorn = CollectionStatus(1) + CollectionStatusUnloaded = CollectionStatus(2) + CollectionStatusLoaded = CollectionStatus(3) + CollectionStatusUnloading = CollectionStatus(4) + CollectionStatusDeleted = CollectionStatus(5) + CollectionStatusLoading = CollectionStatus(6) +) + +// CollectionStatistics contains the number of documents and additional statistical information about a collection. +type CollectionStatistics struct { + //The number of documents currently present in the collection. + Count int64 `json:"count,omitempty"` + // The maximal size of a journal or datafile in bytes. + JournalSize int64 `json:"journalSize,omitempty"` + Figures struct { + DataFiles struct { + // The number of datafiles. + Count int64 `json:"count,omitempty"` + // The total filesize of datafiles (in bytes). + FileSize int64 `json:"fileSize,omitempty"` + } `json:"datafiles"` + // The number of markers in the write-ahead log for this collection that have not been transferred to journals or datafiles. + UncollectedLogfileEntries int64 `json:"uncollectedLogfileEntries,omitempty"` + // The number of references to documents in datafiles that JavaScript code currently holds. This information can be used for debugging compaction and unload issues. 
+ DocumentReferences int64 `json:"documentReferences,omitempty"` + CompactionStatus struct { + // The action that was performed when the compaction was last run for the collection. This information can be used for debugging compaction issues. + Message string `json:"message,omitempty"` + // The point in time the compaction for the collection was last executed. This information can be used for debugging compaction issues. + Time time.Time `json:"time,omitempty"` + } `json:"compactionStatus"` + Compactors struct { + // The number of compactor files. + Count int64 `json:"count,omitempty"` + // The total filesize of all compactor files (in bytes). + FileSize int64 `json:"fileSize,omitempty"` + } `json:"compactors"` + Dead struct { + // The number of dead documents. This includes document versions that have been deleted or replaced by a newer version. Documents deleted or replaced that are contained the write-ahead log only are not reported in this figure. + Count int64 `json:"count,omitempty"` + // The total number of deletion markers. Deletion markers only contained in the write-ahead log are not reporting in this figure. + Deletion int64 `json:"deletion,omitempty"` + // The total size in bytes used by all dead documents. + Size int64 `json:"size,omitempty"` + } `json:"dead"` + Indexes struct { + // The total number of indexes defined for the collection, including the pre-defined indexes (e.g. primary index). + Count int64 `json:"count,omitempty"` + // The total memory allocated for indexes in bytes. + Size int64 `json:"size,omitempty"` + } `json:"indexes"` + ReadCache struct { + // The number of revisions of this collection stored in the document revisions cache. + Count int64 `json:"count,omitempty"` + // The memory used for storing the revisions of this collection in the document revisions cache (in bytes). This figure does not include the document data but only mappings from document revision ids to cache entry locations. 
+ Size int64 `json:"size,omitempty"` + } `json:"readcache"` + // An optional string value that contains information about which object type is at the head of the collection's cleanup queue. This information can be used for debugging compaction and unload issues. + WaitingFor string `json:"waitingFor,omitempty"` + Alive struct { + // The number of currently active documents in all datafiles and journals of the collection. Documents that are contained in the write-ahead log only are not reported in this figure. + Count int64 `json:"count,omitempty"` + // The total size in bytes used by all active documents of the collection. Documents that are contained in the write-ahead log only are not reported in this figure. + Size int64 `json:"size,omitempty"` + } `json:"alive"` + // The tick of the last marker that was stored in a journal of the collection. This might be 0 if the collection does not yet have a journal. + LastTick int64 `json:"lastTick,omitempty"` + Journals struct { + // The number of journal files. + Count int64 `json:"count,omitempty"` + // The total filesize of all journal files (in bytes). + FileSize int64 `json:"fileSize,omitempty"` + } `json:"journals"` + Revisions struct { + // The number of revisions of this collection managed by the storage engine. + Count int64 `json:"count,omitempty"` + // The memory used for storing the revisions of this collection in the storage engine (in bytes). This figure does not include the document data but only mappings from document revision ids to storage engine datafile positions. 
+ Size int64 `json:"size,omitempty"` + } `json:"revisions"` + } `json:"figures"` +} diff --git a/deps/github.com/arangodb/go-driver/collection_document_impl.go b/deps/github.com/arangodb/go-driver/collection_document_impl.go new file mode 100644 index 000000000..b9a83eb0a --- /dev/null +++ b/deps/github.com/arangodb/go-driver/collection_document_impl.go @@ -0,0 +1,612 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import ( + "context" + "fmt" + "path" + "reflect" +) + +// DocumentExists checks if a document with given key exists in the collection. +func (c *collection) DocumentExists(ctx context.Context, key string) (bool, error) { + if err := validateKey(key); err != nil { + return false, WithStack(err) + } + escapedKey := pathEscape(key) + req, err := c.conn.NewRequest("HEAD", path.Join(c.relPath("document"), escapedKey)) + if err != nil { + return false, WithStack(err) + } + resp, err := c.conn.Do(ctx, req) + if err != nil { + return false, WithStack(err) + } + found := resp.StatusCode() == 200 + return found, nil +} + +// ReadDocument reads a single document with given key from the collection. +// The document data is stored into result, the document meta data is returned. +// If no document exists with given key, a NotFoundError is returned. 
+func (c *collection) ReadDocument(ctx context.Context, key string, result interface{}) (DocumentMeta, error) {
+	if err := validateKey(key); err != nil {
+		return DocumentMeta{}, WithStack(err)
+	}
+	escapedKey := pathEscape(key)
+	req, err := c.conn.NewRequest("GET", path.Join(c.relPath("document"), escapedKey))
+	if err != nil {
+		return DocumentMeta{}, WithStack(err)
+	}
+	resp, err := c.conn.Do(ctx, req)
+	if err != nil {
+		return DocumentMeta{}, WithStack(err)
+	}
+	if err := resp.CheckStatus(200); err != nil {
+		return DocumentMeta{}, WithStack(err)
+	}
+	// Parse metadata
+	var meta DocumentMeta
+	if err := resp.ParseBody("", &meta); err != nil {
+		return DocumentMeta{}, WithStack(err)
+	}
+	// Parse result
+	if result != nil {
+		if err := resp.ParseBody("", result); err != nil {
+			return meta, WithStack(err)
+		}
+	}
+	return meta, nil
+}
+
+// CreateDocument creates a single document in the collection.
+// The document data is loaded from the given document, the document meta data is returned.
+// If the document data already contains a `_key` field, this will be used as key of the new document,
+// otherwise a unique key is created.
+// A ConflictError is returned when a `_key` field contains a duplicate key, or any other field violates an index constraint.
+// To return the NEW document, prepare a context with `WithReturnNew`.
+// To wait until document has been synced to disk, prepare a context with `WithWaitForSync`.
+func (c *collection) CreateDocument(ctx context.Context, document interface{}) (DocumentMeta, error) {
+	if document == nil {
+		return DocumentMeta{}, WithStack(InvalidArgumentError{Message: "document nil"})
+	}
+	req, err := c.conn.NewRequest("POST", c.relPath("document"))
+	if err != nil {
+		return DocumentMeta{}, WithStack(err)
+	}
+	if _, err := req.SetBody(document); err != nil {
+		return DocumentMeta{}, WithStack(err)
+	}
+	cs := applyContextSettings(ctx, req)
+	resp, err := c.conn.Do(ctx, req)
+	if err != nil {
+		return DocumentMeta{}, WithStack(err)
+	}
+	if err := resp.CheckStatus(cs.okStatus(201, 202)); err != nil {
+		return DocumentMeta{}, WithStack(err)
+	}
+	if cs.Silent {
+		// Empty response, we're done
+		return DocumentMeta{}, nil
+	}
+	// Parse metadata
+	var meta DocumentMeta
+	if err := resp.ParseBody("", &meta); err != nil {
+		return DocumentMeta{}, WithStack(err)
+	}
+	// Parse returnNew (if needed)
+	if cs.ReturnNew != nil {
+		if err := resp.ParseBody("new", cs.ReturnNew); err != nil {
+			return meta, WithStack(err)
+		}
+	}
+	return meta, nil
+}
+
+// CreateDocuments creates multiple documents in the collection.
+// The document data is loaded from the given documents slice, the documents meta data is returned.
+// If a documents element already contains a `_key` field, this will be used as key of the new document,
+// otherwise a unique key is created.
+// If a documents element contains a `_key` field with a duplicate key, or any other field violates an index constraint,
+// a ConflictError is returned at its index in the errors slice.
+// To return the NEW documents, prepare a context with `WithReturnNew`. The data argument passed to `WithReturnNew` must be
+// a slice with the same number of entries as the `documents` slice.
+// To wait until document has been synced to disk, prepare a context with `WithWaitForSync`.
+// If the create request itself fails or one of the arguments is invalid, an error is returned.
+func (c *collection) CreateDocuments(ctx context.Context, documents interface{}) (DocumentMetaSlice, ErrorSlice, error) { + documentsVal := reflect.ValueOf(documents) + switch documentsVal.Kind() { + case reflect.Array, reflect.Slice: + // OK + default: + return nil, nil, WithStack(InvalidArgumentError{Message: fmt.Sprintf("documents data must be of kind Array, got %s", documentsVal.Kind())}) + } + documentCount := documentsVal.Len() + req, err := c.conn.NewRequest("POST", c.relPath("document")) + if err != nil { + return nil, nil, WithStack(err) + } + if _, err := req.SetBody(documents); err != nil { + return nil, nil, WithStack(err) + } + cs := applyContextSettings(ctx, req) + resp, err := c.conn.Do(ctx, req) + if err != nil { + return nil, nil, WithStack(err) + } + if err := resp.CheckStatus(cs.okStatus(201, 202)); err != nil { + return nil, nil, WithStack(err) + } + if cs.Silent { + // Empty response, we're done + return nil, nil, nil + } + // Parse response array + metas, errs, err := parseResponseArray(resp, documentCount, cs) + if err != nil { + return nil, nil, WithStack(err) + } + return metas, errs, nil +} + +// UpdateDocument updates a single document with given key in the collection. +// The document meta data is returned. +// To return the NEW document, prepare a context with `WithReturnNew`. +// To return the OLD document, prepare a context with `WithReturnOld`. +// To wait until document has been synced to disk, prepare a context with `WithWaitForSync`. +// If no document exists with given key, a NotFoundError is returned. 
+func (c *collection) UpdateDocument(ctx context.Context, key string, update interface{}) (DocumentMeta, error) { + if err := validateKey(key); err != nil { + return DocumentMeta{}, WithStack(err) + } + if update == nil { + return DocumentMeta{}, WithStack(InvalidArgumentError{Message: "update nil"}) + } + escapedKey := pathEscape(key) + req, err := c.conn.NewRequest("PATCH", path.Join(c.relPath("document"), escapedKey)) + if err != nil { + return DocumentMeta{}, WithStack(err) + } + if _, err := req.SetBody(update); err != nil { + return DocumentMeta{}, WithStack(err) + } + cs := applyContextSettings(ctx, req) + resp, err := c.conn.Do(ctx, req) + if err != nil { + return DocumentMeta{}, WithStack(err) + } + if err := resp.CheckStatus(cs.okStatus(201, 202)); err != nil { + return DocumentMeta{}, WithStack(err) + } + if cs.Silent { + // Empty response, we're done + return DocumentMeta{}, nil + } + // Parse metadata + var meta DocumentMeta + if err := resp.ParseBody("", &meta); err != nil { + return DocumentMeta{}, WithStack(err) + } + // Parse returnOld (if needed) + if cs.ReturnOld != nil { + if err := resp.ParseBody("old", cs.ReturnOld); err != nil { + return meta, WithStack(err) + } + } + // Parse returnNew (if needed) + if cs.ReturnNew != nil { + if err := resp.ParseBody("new", cs.ReturnNew); err != nil { + return meta, WithStack(err) + } + } + return meta, nil +} + +// UpdateDocuments updates multiple document with given keys in the collection. +// The updates are loaded from the given updates slice, the documents meta data are returned. +// To return the NEW documents, prepare a context with `WithReturnNew` with a slice of documents. +// To return the OLD documents, prepare a context with `WithReturnOld` with a slice of documents. +// To wait until documents has been synced to disk, prepare a context with `WithWaitForSync`. +// If no document exists with a given key, a NotFoundError is returned at its errors index. 
+func (c *collection) UpdateDocuments(ctx context.Context, keys []string, updates interface{}) (DocumentMetaSlice, ErrorSlice, error) { + updatesVal := reflect.ValueOf(updates) + switch updatesVal.Kind() { + case reflect.Array, reflect.Slice: + // OK + default: + return nil, nil, WithStack(InvalidArgumentError{Message: fmt.Sprintf("updates data must be of kind Array, got %s", updatesVal.Kind())}) + } + updateCount := updatesVal.Len() + if keys != nil { + if len(keys) != updateCount { + return nil, nil, WithStack(InvalidArgumentError{Message: fmt.Sprintf("expected %d keys, got %d", updateCount, len(keys))}) + } + for _, key := range keys { + if err := validateKey(key); err != nil { + return nil, nil, WithStack(err) + } + } + } + req, err := c.conn.NewRequest("PATCH", c.relPath("document")) + if err != nil { + return nil, nil, WithStack(err) + } + cs := applyContextSettings(ctx, req) + mergeArray, err := createMergeArray(keys, cs.Revisions) + if err != nil { + return nil, nil, WithStack(err) + } + if _, err := req.SetBodyArray(updates, mergeArray); err != nil { + return nil, nil, WithStack(err) + } + resp, err := c.conn.Do(ctx, req) + if err != nil { + return nil, nil, WithStack(err) + } + if err := resp.CheckStatus(cs.okStatus(201, 202)); err != nil { + return nil, nil, WithStack(err) + } + if cs.Silent { + // Empty response, we're done + return nil, nil, nil + } + // Parse response array + metas, errs, err := parseResponseArray(resp, updateCount, cs) + if err != nil { + return nil, nil, WithStack(err) + } + return metas, errs, nil +} + +// ReplaceDocument replaces a single document with given key in the collection with the document given in the document argument. +// The document meta data is returned. +// To return the NEW document, prepare a context with `WithReturnNew`. +// To return the OLD document, prepare a context with `WithReturnOld`. +// To wait until document has been synced to disk, prepare a context with `WithWaitForSync`. 
+// If no document exists with given key, a NotFoundError is returned. +func (c *collection) ReplaceDocument(ctx context.Context, key string, document interface{}) (DocumentMeta, error) { + if err := validateKey(key); err != nil { + return DocumentMeta{}, WithStack(err) + } + if document == nil { + return DocumentMeta{}, WithStack(InvalidArgumentError{Message: "document nil"}) + } + escapedKey := pathEscape(key) + req, err := c.conn.NewRequest("PUT", path.Join(c.relPath("document"), escapedKey)) + if err != nil { + return DocumentMeta{}, WithStack(err) + } + if _, err := req.SetBody(document); err != nil { + return DocumentMeta{}, WithStack(err) + } + cs := applyContextSettings(ctx, req) + resp, err := c.conn.Do(ctx, req) + if err != nil { + return DocumentMeta{}, WithStack(err) + } + if err := resp.CheckStatus(cs.okStatus(201, 202)); err != nil { + return DocumentMeta{}, WithStack(err) + } + if cs.Silent { + // Empty response, we're done + return DocumentMeta{}, nil + } + // Parse metadata + var meta DocumentMeta + if err := resp.ParseBody("", &meta); err != nil { + return DocumentMeta{}, WithStack(err) + } + // Parse returnOld (if needed) + if cs.ReturnOld != nil { + if err := resp.ParseBody("old", cs.ReturnOld); err != nil { + return meta, WithStack(err) + } + } + // Parse returnNew (if needed) + if cs.ReturnNew != nil { + if err := resp.ParseBody("new", cs.ReturnNew); err != nil { + return meta, WithStack(err) + } + } + return meta, nil +} + +// ReplaceDocuments replaces multiple documents with given keys in the collection with the documents given in the documents argument. +// The replacements are loaded from the given documents slice, the documents meta data are returned. +// To return the NEW documents, prepare a context with `WithReturnNew` with a slice of documents. +// To return the OLD documents, prepare a context with `WithReturnOld` with a slice of documents. +// To wait until documents has been synced to disk, prepare a context with `WithWaitForSync`. 
+// If no document exists with a given key, a NotFoundError is returned at its errors index. +func (c *collection) ReplaceDocuments(ctx context.Context, keys []string, documents interface{}) (DocumentMetaSlice, ErrorSlice, error) { + documentsVal := reflect.ValueOf(documents) + switch documentsVal.Kind() { + case reflect.Array, reflect.Slice: + // OK + default: + return nil, nil, WithStack(InvalidArgumentError{Message: fmt.Sprintf("documents data must be of kind Array, got %s", documentsVal.Kind())}) + } + documentCount := documentsVal.Len() + if keys != nil { + if len(keys) != documentCount { + return nil, nil, WithStack(InvalidArgumentError{Message: fmt.Sprintf("expected %d keys, got %d", documentCount, len(keys))}) + } + for _, key := range keys { + if err := validateKey(key); err != nil { + return nil, nil, WithStack(err) + } + } + } + req, err := c.conn.NewRequest("PUT", c.relPath("document")) + if err != nil { + return nil, nil, WithStack(err) + } + cs := applyContextSettings(ctx, req) + mergeArray, err := createMergeArray(keys, cs.Revisions) + if err != nil { + return nil, nil, WithStack(err) + } + if _, err := req.SetBodyArray(documents, mergeArray); err != nil { + return nil, nil, WithStack(err) + } + resp, err := c.conn.Do(ctx, req) + if err != nil { + return nil, nil, WithStack(err) + } + if err := resp.CheckStatus(cs.okStatus(201, 202)); err != nil { + return nil, nil, WithStack(err) + } + if cs.Silent { + // Empty response, we're done + return nil, nil, nil + } + // Parse response array + metas, errs, err := parseResponseArray(resp, documentCount, cs) + if err != nil { + return nil, nil, WithStack(err) + } + return metas, errs, nil +} + +// RemoveDocument removes a single document with given key from the collection. +// The document meta data is returned. +// To return the OLD document, prepare a context with `WithReturnOld`. +// To wait until removal has been synced to disk, prepare a context with `WithWaitForSync`. 
+// If no document exists with given key, a NotFoundError is returned. +func (c *collection) RemoveDocument(ctx context.Context, key string) (DocumentMeta, error) { + if err := validateKey(key); err != nil { + return DocumentMeta{}, WithStack(err) + } + escapedKey := pathEscape(key) + req, err := c.conn.NewRequest("DELETE", path.Join(c.relPath("document"), escapedKey)) + if err != nil { + return DocumentMeta{}, WithStack(err) + } + cs := applyContextSettings(ctx, req) + resp, err := c.conn.Do(ctx, req) + if err != nil { + return DocumentMeta{}, WithStack(err) + } + if err := resp.CheckStatus(cs.okStatus(200, 202)); err != nil { + return DocumentMeta{}, WithStack(err) + } + if cs.Silent { + // Empty response, we're done + return DocumentMeta{}, nil + } + // Parse metadata + var meta DocumentMeta + if err := resp.ParseBody("", &meta); err != nil { + return DocumentMeta{}, WithStack(err) + } + // Parse returnOld (if needed) + if cs.ReturnOld != nil { + if err := resp.ParseBody("old", cs.ReturnOld); err != nil { + return meta, WithStack(err) + } + } + return meta, nil +} + +// RemoveDocuments removes multiple documents with given keys from the collection. +// The document meta data are returned. +// To return the OLD documents, prepare a context with `WithReturnOld` with a slice of documents. +// To wait until removal has been synced to disk, prepare a context with `WithWaitForSync`. +// If no document exists with a given key, a NotFoundError is returned at its errors index. 
+func (c *collection) RemoveDocuments(ctx context.Context, keys []string) (DocumentMetaSlice, ErrorSlice, error) { + for _, key := range keys { + if err := validateKey(key); err != nil { + return nil, nil, WithStack(err) + } + } + keyCount := len(keys) + req, err := c.conn.NewRequest("DELETE", c.relPath("document")) + if err != nil { + return nil, nil, WithStack(err) + } + cs := applyContextSettings(ctx, req) + metaArray, err := createMergeArray(keys, cs.Revisions) + if err != nil { + return nil, nil, WithStack(err) + } + if _, err := req.SetBodyArray(metaArray, nil); err != nil { + return nil, nil, WithStack(err) + } + resp, err := c.conn.Do(ctx, req) + if err != nil { + return nil, nil, WithStack(err) + } + if err := resp.CheckStatus(cs.okStatus(201, 202)); err != nil { + return nil, nil, WithStack(err) + } + if cs.Silent { + // Empty response, we're done + return nil, nil, nil + } + // Parse response array + metas, errs, err := parseResponseArray(resp, keyCount, cs) + if err != nil { + return nil, nil, WithStack(err) + } + return metas, errs, nil +} + +// ImportDocuments imports one or more documents into the collection. +// The document data is loaded from the given documents argument, statistics are returned. +// The documents argument can be one of the following: +// - An array of structs: All structs will be imported as individual documents. +// - An array of maps: All maps will be imported as individual documents. +// To wait until all documents have been synced to disk, prepare a context with `WithWaitForSync`. +// To return details about documents that could not be imported, prepare a context with `WithImportDetails`. 
+func (c *collection) ImportDocuments(ctx context.Context, documents interface{}, options *ImportDocumentOptions) (ImportDocumentStatistics, error) { + documentsVal := reflect.ValueOf(documents) + switch documentsVal.Kind() { + case reflect.Array, reflect.Slice: + // OK + default: + return ImportDocumentStatistics{}, WithStack(InvalidArgumentError{Message: fmt.Sprintf("documents data must be of kind Array, got %s", documentsVal.Kind())}) + } + req, err := c.conn.NewRequest("POST", path.Join(c.db.relPath(), "_api/import")) + if err != nil { + return ImportDocumentStatistics{}, WithStack(err) + } + req.SetQuery("collection", c.name) + req.SetQuery("type", "documents") + if options != nil { + if v := options.FromPrefix; v != "" { + req.SetQuery("fromPrefix", v) + } + if v := options.ToPrefix; v != "" { + req.SetQuery("toPrefix", v) + } + if v := options.Overwrite; v { + req.SetQuery("overwrite", "true") + } + if v := options.OnDuplicate; v != "" { + req.SetQuery("onDuplicate", string(v)) + } + if v := options.Complete; v { + req.SetQuery("complete", "true") + } + } + if _, err := req.SetBodyImportArray(documents); err != nil { + return ImportDocumentStatistics{}, WithStack(err) + } + cs := applyContextSettings(ctx, req) + resp, err := c.conn.Do(ctx, req) + if err != nil { + return ImportDocumentStatistics{}, WithStack(err) + } + if err := resp.CheckStatus(201); err != nil { + return ImportDocumentStatistics{}, WithStack(err) + } + // Parse response + var data ImportDocumentStatistics + if err := resp.ParseBody("", &data); err != nil { + return ImportDocumentStatistics{}, WithStack(err) + } + // Import details (if needed) + if details := cs.ImportDetails; details != nil { + if err := resp.ParseBody("details", details); err != nil { + return ImportDocumentStatistics{}, WithStack(err) + } + } + return data, nil +} + +// createMergeArray returns an array of metadata maps with `_key` and/or `_rev` elements. 
func createMergeArray(keys, revs []string) ([]map[string]interface{}, error) {
	// Nothing to merge when neither keys nor revisions were supplied.
	if keys == nil && revs == nil {
		return nil, nil
	}
	// Only keys supplied: one `_key` entry per key.
	if revs == nil {
		mergeArray := make([]map[string]interface{}, len(keys))
		for i, k := range keys {
			mergeArray[i] = map[string]interface{}{
				"_key": k,
			}
		}
		return mergeArray, nil
	}
	// Only revisions supplied: one `_rev` entry per revision.
	if keys == nil {
		mergeArray := make([]map[string]interface{}, len(revs))
		for i, r := range revs {
			mergeArray[i] = map[string]interface{}{
				"_rev": r,
			}
		}
		return mergeArray, nil
	}
	// Both supplied: they must pair up element-for-element.
	if len(keys) != len(revs) {
		return nil, WithStack(InvalidArgumentError{Message: fmt.Sprintf("#keys must be equal to #revs, got %d, %d", len(keys), len(revs))})
	}
	mergeArray := make([]map[string]interface{}, len(keys))
	for i, k := range keys {
		mergeArray[i] = map[string]interface{}{
			"_key": k,
			"_rev": revs[i],
		}
	}
	return mergeArray, nil

}

// parseResponseArray parses an array response in the given response.
// It returns one DocumentMeta and (optionally) one error per entry, up to count entries.
func parseResponseArray(resp Response, count int, cs contextSettings) (DocumentMetaSlice, ErrorSlice, error) {
	resps, err := resp.ParseArrayBody()
	if err != nil {
		return nil, nil, WithStack(err)
	}
	metas := make(DocumentMetaSlice, count)
	errs := make(ErrorSlice, count)
	returnOldVal := reflect.ValueOf(cs.ReturnOld)
	returnNewVal := reflect.ValueOf(cs.ReturnNew)
	for i := 0; i < count; i++ {
		resp := resps[i]
		var meta DocumentMeta
		// A non-2xx status for this entry is recorded in errs[i]; the other entries are still processed.
		if err := resp.CheckStatus(200, 201, 202); err != nil {
			errs[i] = err
		} else {
			if err := resp.ParseBody("", &meta); err != nil {
				errs[i] = err
			} else {
				metas[i] = meta
				// Parse returnOld (if needed) into the i-th element of the caller-supplied slice.
				if cs.ReturnOld != nil {
					returnOldEntryVal := returnOldVal.Index(i).Addr()
					if err := resp.ParseBody("old", returnOldEntryVal.Interface()); err != nil {
						errs[i] = err
					}
				}
				// Parse returnNew (if needed) into the i-th element of the caller-supplied slice.
				if cs.ReturnNew != nil {
					returnNewEntryVal := returnNewVal.Index(i).Addr()
					if err := resp.ParseBody("new", returnNewEntryVal.Interface()); err != nil {
						errs[i] = err
					}
				}
			}
		}
	}
	return metas, errs, nil
}
diff --git a/deps/github.com/arangodb/go-driver/collection_documents.go b/deps/github.com/arangodb/go-driver/collection_documents.go
new file mode 100644
index 000000000..aef8bc2ef
--- /dev/null
+++ b/deps/github.com/arangodb/go-driver/collection_documents.go
@@ -0,0 +1,167 @@
//
// DISCLAIMER
//
// Copyright 2017 ArangoDB GmbH, Cologne, Germany
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Copyright holder is ArangoDB GmbH, Cologne, Germany
//
// Author Ewout Prangsma
//

package driver

import "context"

// CollectionDocuments provides access to the documents in a single collection.
type CollectionDocuments interface {
	// DocumentExists checks if a document with given key exists in the collection.
	DocumentExists(ctx context.Context, key string) (bool, error)

	// ReadDocument reads a single document with given key from the collection.
	// The document data is stored into result, the document meta data is returned.
	// If no document exists with given key, a NotFoundError is returned.
	ReadDocument(ctx context.Context, key string, result interface{}) (DocumentMeta, error)

	// CreateDocument creates a single document in the collection.
	// The document data is loaded from the given document, the document meta data is returned.
	// If the document data already contains a `_key` field, this will be used as key of the new document,
	// otherwise a unique key is created.
	// A ConflictError is returned when a `_key` field contains a duplicate key, or when any other field violates an index constraint.
	// To return the NEW document, prepare a context with `WithReturnNew`.
	// To wait until document has been synced to disk, prepare a context with `WithWaitForSync`.
	CreateDocument(ctx context.Context, document interface{}) (DocumentMeta, error)

	// CreateDocuments creates multiple documents in the collection.
	// The document data is loaded from the given documents slice, the documents meta data is returned.
	// If a documents element already contains a `_key` field, this will be used as key of the new document,
	// otherwise a unique key is created.
	// If a documents element contains a `_key` field with a duplicate key, or any other field violates an index constraint,
	// a ConflictError is returned at its index in the errors slice.
	// To return the NEW documents, prepare a context with `WithReturnNew`. The data argument passed to `WithReturnNew` must be
	// a slice with the same number of entries as the `documents` slice.
	// To wait until document has been synced to disk, prepare a context with `WithWaitForSync`.
	// If the create request itself fails or one of the arguments is invalid, an error is returned.
	CreateDocuments(ctx context.Context, documents interface{}) (DocumentMetaSlice, ErrorSlice, error)

	// UpdateDocument updates a single document with given key in the collection.
	// The document meta data is returned.
	// To return the NEW document, prepare a context with `WithReturnNew`.
	// To return the OLD document, prepare a context with `WithReturnOld`.
	// To wait until document has been synced to disk, prepare a context with `WithWaitForSync`.
	// If no document exists with given key, a NotFoundError is returned.
	UpdateDocument(ctx context.Context, key string, update interface{}) (DocumentMeta, error)

	// UpdateDocuments updates multiple documents with given keys in the collection.
	// The updates are loaded from the given updates slice, the documents meta data are returned.
	// To return the NEW documents, prepare a context with `WithReturnNew` with a slice of documents.
	// To return the OLD documents, prepare a context with `WithReturnOld` with a slice of documents.
	// To wait until documents have been synced to disk, prepare a context with `WithWaitForSync`.
	// If no document exists with a given key, a NotFoundError is returned at its errors index.
	// If keys is nil, each element in the updates slice must contain a `_key` field.
	UpdateDocuments(ctx context.Context, keys []string, updates interface{}) (DocumentMetaSlice, ErrorSlice, error)

	// ReplaceDocument replaces a single document with given key in the collection with the document given in the document argument.
	// The document meta data is returned.
	// To return the NEW document, prepare a context with `WithReturnNew`.
	// To return the OLD document, prepare a context with `WithReturnOld`.
	// To wait until document has been synced to disk, prepare a context with `WithWaitForSync`.
	// If no document exists with given key, a NotFoundError is returned.
	ReplaceDocument(ctx context.Context, key string, document interface{}) (DocumentMeta, error)

	// ReplaceDocuments replaces multiple documents with given keys in the collection with the documents given in the documents argument.
	// The replacements are loaded from the given documents slice, the documents meta data are returned.
	// To return the NEW documents, prepare a context with `WithReturnNew` with a slice of documents.
	// To return the OLD documents, prepare a context with `WithReturnOld` with a slice of documents.
	// To wait until documents have been synced to disk, prepare a context with `WithWaitForSync`.
	// If no document exists with a given key, a NotFoundError is returned at its errors index.
	// If keys is nil, each element in the documents slice must contain a `_key` field.
	ReplaceDocuments(ctx context.Context, keys []string, documents interface{}) (DocumentMetaSlice, ErrorSlice, error)

	// RemoveDocument removes a single document with given key from the collection.
	// The document meta data is returned.
	// To return the OLD document, prepare a context with `WithReturnOld`.
	// To wait until removal has been synced to disk, prepare a context with `WithWaitForSync`.
	// If no document exists with given key, a NotFoundError is returned.
	RemoveDocument(ctx context.Context, key string) (DocumentMeta, error)

	// RemoveDocuments removes multiple documents with given keys from the collection.
	// The document meta data are returned.
	// To return the OLD documents, prepare a context with `WithReturnOld` with a slice of documents.
	// To wait until removal has been synced to disk, prepare a context with `WithWaitForSync`.
	// If no document exists with a given key, a NotFoundError is returned at its errors index.
	RemoveDocuments(ctx context.Context, keys []string) (DocumentMetaSlice, ErrorSlice, error)

	// ImportDocuments imports one or more documents into the collection.
	// The document data is loaded from the given documents argument, statistics are returned.
	// The documents argument can be one of the following:
	// - An array of structs: All structs will be imported as individual documents.
	// - An array of maps: All maps will be imported as individual documents.
	// To wait until all documents have been synced to disk, prepare a context with `WithWaitForSync`.
	// To return details about documents that could not be imported, prepare a context with `WithImportDetails`.
	ImportDocuments(ctx context.Context, documents interface{}, options *ImportDocumentOptions) (ImportDocumentStatistics, error)
}

// ImportDocumentOptions holds optional options that control the import document process.
type ImportDocumentOptions struct {
	// FromPrefix is an optional prefix for the values in _from attributes. If specified, the value is automatically
	// prepended to each _from input value. This allows specifying just the keys for _from.
	FromPrefix string `json:"fromPrefix,omitempty"`
	// ToPrefix is an optional prefix for the values in _to attributes. If specified, the value is automatically
	// prepended to each _to input value. This allows specifying just the keys for _to.
	ToPrefix string `json:"toPrefix,omitempty"`
	// Overwrite is a flag that if set, then all data in the collection will be removed prior to the import.
	// Note that any existing index definitions will be preserved.
	Overwrite bool `json:"overwrite,omitempty"`
	// OnDuplicate controls what action is carried out in case of a unique key constraint violation.
	// Possible values are:
	// - ImportOnDuplicateError
	// - ImportOnDuplicateUpdate
	// - ImportOnDuplicateReplace
	// - ImportOnDuplicateIgnore
	OnDuplicate ImportOnDuplicate `json:"onDuplicate,omitempty"`
	// Complete is a flag that if set, will make the whole import fail if any error occurs.
	// Otherwise the import will continue even if some documents cannot be imported.
	Complete bool `json:"complete,omitempty"`
}

// ImportOnDuplicate is a type to control what action is carried out in case of a unique key constraint violation.
type ImportOnDuplicate string

const (
	// ImportOnDuplicateError will not import the current document because of the unique key constraint violation.
	// This is the default setting.
	ImportOnDuplicateError = ImportOnDuplicate("error")
	// ImportOnDuplicateUpdate will update an existing document in the database with the data specified in the request.
	// Attributes of the existing document that are not present in the request will be preserved.
	ImportOnDuplicateUpdate = ImportOnDuplicate("update")
	// ImportOnDuplicateReplace will replace an existing document in the database with the data specified in the request.
	ImportOnDuplicateReplace = ImportOnDuplicate("replace")
	// ImportOnDuplicateIgnore will not update an existing document and simply ignore the error caused by a unique key constraint violation.
	ImportOnDuplicateIgnore = ImportOnDuplicate("ignore")
)

// ImportDocumentStatistics holds statistics of an import action.
type ImportDocumentStatistics struct {
	// Created holds the number of documents imported.
	Created int64 `json:"created,omitempty"`
	// Errors holds the number of documents that were not imported due to an error.
	Errors int64 `json:"errors,omitempty"`
	// Empty holds the number of empty lines found in the input (will only contain a value greater zero for types documents or auto).
	Empty int64 `json:"empty,omitempty"`
	// Updated holds the number of updated/replaced documents (in case onDuplicate was set to either update or replace).
	Updated int64 `json:"updated,omitempty"`
	// Ignored holds the number of failed but ignored insert operations (in case onDuplicate was set to ignore).
	Ignored int64 `json:"ignored,omitempty"`
}
diff --git a/deps/github.com/arangodb/go-driver/collection_impl.go b/deps/github.com/arangodb/go-driver/collection_impl.go
new file mode 100644
index 000000000..00264ca76
--- /dev/null
+++ b/deps/github.com/arangodb/go-driver/collection_impl.go
@@ -0,0 +1,265 @@
//
// DISCLAIMER
//
// Copyright 2017 ArangoDB GmbH, Cologne, Germany
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Copyright holder is ArangoDB GmbH, Cologne, Germany
//
// Author Ewout Prangsma
//

package driver

import (
	"context"
	"path"
)

// newCollection creates a new Collection implementation.
// Both name and db are required; an InvalidArgumentError is returned otherwise.
func newCollection(name string, db *database) (Collection, error) {
	if name == "" {
		return nil, WithStack(InvalidArgumentError{Message: "name is empty"})
	}
	if db == nil {
		return nil, WithStack(InvalidArgumentError{Message: "db is nil"})
	}
	return &collection{
		name: name,
		db:   db,
		conn: db.conn,
	}, nil
}

// collection implements the Collection interface for a single named collection in a database.
type collection struct {
	name string     // name of the collection
	db   *database  // database containing the collection
	conn Connection // connection shared with the database
}

// relPath creates the relative path to this collection (`_db/<db-name>/_api/<apiName>/<collection-name>`).
func (c *collection) relPath(apiName string) string {
	escapedName := pathEscape(c.name)
	return path.Join(c.db.relPath(), "_api", apiName, escapedName)
}

// Name returns the name of the collection.
func (c *collection) Name() string {
	return c.name
}

// Database returns the database containing the collection.
func (c *collection) Database() Database {
	return c.db
}

// Status fetches the current status of the collection.
func (c *collection) Status(ctx context.Context) (CollectionStatus, error) {
	req, err := c.conn.NewRequest("GET", c.relPath("collection"))
	if err != nil {
		return CollectionStatus(0), WithStack(err)
	}
	resp, err := c.conn.Do(ctx, req)
	if err != nil {
		return CollectionStatus(0), WithStack(err)
	}
	if err := resp.CheckStatus(200); err != nil {
		return CollectionStatus(0), WithStack(err)
	}
	// The status is carried in the generic collection info document.
	var data CollectionInfo
	if err := resp.ParseBody("", &data); err != nil {
		return CollectionStatus(0), WithStack(err)
	}
	return data.Status, nil
}

// Count fetches the number of documents in the collection.
func (c *collection) Count(ctx context.Context) (int64, error) {
	req, err := c.conn.NewRequest("GET", path.Join(c.relPath("collection"), "count"))
	if err != nil {
		return 0, WithStack(err)
	}
	resp, err := c.conn.Do(ctx, req)
	if err != nil {
		return 0, WithStack(err)
	}
	if err := resp.CheckStatus(200); err != nil {
		return 0, WithStack(err)
	}
	// Only the `count` field of the response is of interest here.
	var data struct {
		Count int64 `json:"count,omitempty"`
	}
	if err := resp.ParseBody("", &data); err != nil {
		return 0, WithStack(err)
	}
	return data.Count, nil
}

// Statistics returns the number of documents and additional statistical information about the collection.
func (c *collection) Statistics(ctx context.Context) (CollectionStatistics, error) {
	req, err := c.conn.NewRequest("GET", path.Join(c.relPath("collection"), "figures"))
	if err != nil {
		return CollectionStatistics{}, WithStack(err)
	}
	resp, err := c.conn.Do(ctx, req)
	if err != nil {
		return CollectionStatistics{}, WithStack(err)
	}
	if err := resp.CheckStatus(200); err != nil {
		return CollectionStatistics{}, WithStack(err)
	}
	var data CollectionStatistics
	if err := resp.ParseBody("", &data); err != nil {
		return CollectionStatistics{}, WithStack(err)
	}
	return data, nil
}

// Revision fetches the revision ID of the collection.
// The revision ID is a server-generated string that clients can use to check whether data
// in a collection has changed since the last revision check.
func (c *collection) Revision(ctx context.Context) (string, error) {
	req, err := c.conn.NewRequest("GET", path.Join(c.relPath("collection"), "revision"))
	if err != nil {
		return "", WithStack(err)
	}
	resp, err := c.conn.Do(ctx, req)
	if err != nil {
		return "", WithStack(err)
	}
	if err := resp.CheckStatus(200); err != nil {
		return "", WithStack(err)
	}
	// Only the `revision` field of the response is of interest here.
	var data struct {
		Revision string `json:"revision,omitempty"`
	}
	if err := resp.ParseBody("", &data); err != nil {
		return "", WithStack(err)
	}
	return data.Revision, nil
}

// Properties fetches extended information about the collection.
func (c *collection) Properties(ctx context.Context) (CollectionProperties, error) {
	req, err := c.conn.NewRequest("GET", path.Join(c.relPath("collection"), "properties"))
	if err != nil {
		return CollectionProperties{}, WithStack(err)
	}
	resp, err := c.conn.Do(ctx, req)
	if err != nil {
		return CollectionProperties{}, WithStack(err)
	}
	if err := resp.CheckStatus(200); err != nil {
		return CollectionProperties{}, WithStack(err)
	}
	var data CollectionProperties
	if err := resp.ParseBody("", &data); err != nil {
		return CollectionProperties{}, WithStack(err)
	}
	return data, nil
}

// SetProperties changes properties of the collection.
func (c *collection) SetProperties(ctx context.Context, options SetCollectionPropertiesOptions) error {
	req, err := c.conn.NewRequest("PUT", path.Join(c.relPath("collection"), "properties"))
	if err != nil {
		return WithStack(err)
	}
	if _, err := req.SetBody(options); err != nil {
		return WithStack(err)
	}
	resp, err := c.conn.Do(ctx, req)
	if err != nil {
		return WithStack(err)
	}
	if err := resp.CheckStatus(200); err != nil {
		return WithStack(err)
	}
	return nil
}

// Load the collection into memory.
+func (c *collection) Load(ctx context.Context) error { + req, err := c.conn.NewRequest("PUT", path.Join(c.relPath("collection"), "load")) + if err != nil { + return WithStack(err) + } + opts := struct { + Count bool `json:"count"` + }{ + Count: false, + } + if _, err := req.SetBody(opts); err != nil { + return WithStack(err) + } + resp, err := c.conn.Do(ctx, req) + if err != nil { + return WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return WithStack(err) + } + return nil +} + +// UnLoad the collection from memory. +func (c *collection) Unload(ctx context.Context) error { + req, err := c.conn.NewRequest("PUT", path.Join(c.relPath("collection"), "unload")) + if err != nil { + return WithStack(err) + } + resp, err := c.conn.Do(ctx, req) + if err != nil { + return WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return WithStack(err) + } + return nil + +} + +// Remove removes the entire collection. +// If the collection does not exist, a NotFoundError is returned. +func (c *collection) Remove(ctx context.Context) error { + req, err := c.conn.NewRequest("DELETE", c.relPath("collection")) + if err != nil { + return WithStack(err) + } + applyContextSettings(ctx, req) + resp, err := c.conn.Do(ctx, req) + if err != nil { + return WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return WithStack(err) + } + return nil +} + +// Truncate removes all documents from the collection, but leaves the indexes intact. 
+func (c *collection) Truncate(ctx context.Context) error { + req, err := c.conn.NewRequest("PUT", path.Join(c.relPath("collection"), "truncate")) + if err != nil { + return WithStack(err) + } + resp, err := c.conn.Do(ctx, req) + if err != nil { + return WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return WithStack(err) + } + return nil +} diff --git a/deps/github.com/arangodb/go-driver/collection_indexes.go b/deps/github.com/arangodb/go-driver/collection_indexes.go new file mode 100644 index 000000000..69d02dcd3 --- /dev/null +++ b/deps/github.com/arangodb/go-driver/collection_indexes.go @@ -0,0 +1,115 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import "context" + +// CollectionIndexes provides access to the indexes in a single collection. +type CollectionIndexes interface { + // Index opens a connection to an existing index within the collection. + // If no index with given name exists, an NotFoundError is returned. + Index(ctx context.Context, name string) (Index, error) + + // IndexExists returns true if an index with given name exists within the collection. + IndexExists(ctx context.Context, name string) (bool, error) + + // Indexes returns a list of all indexes in the collection. 
+	Indexes(ctx context.Context) ([]Index, error) + + // EnsureFullTextIndex creates a fulltext index in the collection, if it does not already exist. + // Fields is a slice of attribute names. Currently, the slice is limited to exactly one attribute. + // The index is returned, together with a boolean indicating if the index was newly created (true) or pre-existing (false). + EnsureFullTextIndex(ctx context.Context, fields []string, options *EnsureFullTextIndexOptions) (Index, bool, error) + + // EnsureGeoIndex creates a geo index in the collection, if it does not already exist. + // Fields is a slice with one or two attribute paths. If it is a slice with one attribute path location, + // then a geo-spatial index on all documents is created using location as path to the coordinates. + // The value of the attribute must be a slice with at least two double values. The slice must contain the latitude (first value) + // and the longitude (second value). All documents, which do not have the attribute path or with value that are not suitable, are ignored. + // If it is a slice with two attribute paths latitude and longitude, then a geo-spatial index on all documents is created + // using latitude and longitude as paths to the latitude and the longitude. The value of the attribute latitude and of the + // attribute longitude must be a double. All documents, which do not have the attribute paths or which values are not suitable, are ignored. + // The index is returned, together with a boolean indicating if the index was newly created (true) or pre-existing (false). + EnsureHashIndex(ctx context.Context, fields []string, options *EnsureHashIndexOptions) (Index, bool, error) + + // EnsurePersistentIndex creates a persistent index in the collection, if it does not already exist. + // Fields is a slice of attribute paths. + // The index is returned, together with a boolean indicating if the index was newly created (true) or pre-existing (false). 
+ EnsureHashIndex(ctx context.Context, fields []string, options *EnsureHashIndexOptions) (Index, bool, error) + + // EnsurePersistentIndex creates a persistent index in the collection, if it does not already exist. + // Fields is a slice of attribute paths. + // The index is returned, together with a boolean indicating if the index was newly created (true) or pre-existing (false). + EnsurePersistentIndex(ctx context.Context, fields []string, options *EnsurePersistentIndexOptions) (Index, bool, error) + + // EnsureSkipListIndex creates a skiplist index in the collection, if it does not already exist. + // Fields is a slice of attribute paths. + // The index is returned, together with a boolean indicating if the index was newly created (true) or pre-existing (false). + EnsureSkipListIndex(ctx context.Context, fields []string, options *EnsureSkipListIndexOptions) (Index, bool, error) +} + +// EnsureFullTextIndexOptions contains specific options for creating a full text index. +type EnsureFullTextIndexOptions struct { + // MinLength is the minimum character length of words to index. Will default to a server-defined + // value if unspecified (0). It is thus recommended to set this value explicitly when creating the index. + MinLength int +} + +// EnsureGeoIndexOptions contains specific options for creating a geo index. +type EnsureGeoIndexOptions struct { + // If a geo-spatial index on a location is constructed and GeoJSON is true, then the order within the array + // is longitude followed by latitude. This corresponds to the format described in http://geojson.org/geojson-spec.html#positions + GeoJSON bool +} + +// EnsureHashIndexOptions contains specific options for creating a hash index. +type EnsureHashIndexOptions struct { + // If true, then create a unique index. + Unique bool + // If true, then create a sparse index. + Sparse bool + // If true, de-duplication of array-values, before being added to the index, will be turned off. 
+ // This flag requires ArangoDB 3.2. + // Note: this setting is only relevant for indexes with array fields (e.g. "fieldName[*]") + NoDeduplicate bool +} + +// EnsurePersistentIndexOptions contains specific options for creating a persistent index. +type EnsurePersistentIndexOptions struct { + // If true, then create a unique index. + Unique bool + // If true, then create a sparse index. + Sparse bool +} + +// EnsureSkipListIndexOptions contains specific options for creating a skip-list index. +type EnsureSkipListIndexOptions struct { + // If true, then create a unique index. + Unique bool + // If true, then create a sparse index. + Sparse bool + // If true, de-duplication of array-values, before being added to the index, will be turned off. + // This flag requires ArangoDB 3.2. + // Note: this setting is only relevant for indexes with array fields (e.g. "fieldName[*]") + NoDeduplicate bool +} diff --git a/deps/github.com/arangodb/go-driver/collection_indexes_impl.go b/deps/github.com/arangodb/go-driver/collection_indexes_impl.go new file mode 100644 index 000000000..4603b5b41 --- /dev/null +++ b/deps/github.com/arangodb/go-driver/collection_indexes_impl.go @@ -0,0 +1,256 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import ( + "context" + "path" +) + +type indexData struct { + ID string `json:"id,omitempty"` + Type string `json:"type"` + Fields []string `json:"fields,omitempty"` + Unique *bool `json:"unique,omitempty"` + Deduplicate *bool `json:"deduplicate,omitempty"` + Sparse *bool `json:"sparse,omitempty"` + GeoJSON *bool `json:"geoJson,omitempty"` + MinLength int `json:"minLength,omitempty"` +} + +type indexListResponse struct { + Indexes []indexData `json:"indexes,omitempty"` +} + +// Index opens a connection to an existing index within the collection. +// If no index with given name exists, an NotFoundError is returned. +func (c *collection) Index(ctx context.Context, name string) (Index, error) { + req, err := c.conn.NewRequest("GET", path.Join(c.relPath("index"), name)) + if err != nil { + return nil, WithStack(err) + } + resp, err := c.conn.Do(ctx, req) + if err != nil { + return nil, WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return nil, WithStack(err) + } + var data indexData + if err := resp.ParseBody("", &data); err != nil { + return nil, WithStack(err) + } + idx, err := newIndex(data.ID, c) + if err != nil { + return nil, WithStack(err) + } + return idx, nil +} + +// IndexExists returns true if an index with given name exists within the collection. +func (c *collection) IndexExists(ctx context.Context, name string) (bool, error) { + req, err := c.conn.NewRequest("GET", path.Join(c.relPath("index"), name)) + if err != nil { + return false, WithStack(err) + } + resp, err := c.conn.Do(ctx, req) + if err != nil { + return false, WithStack(err) + } + if err := resp.CheckStatus(200); err == nil { + return true, nil + } else if IsNotFound(err) { + return false, nil + } else { + return false, WithStack(err) + } +} + +// Indexes returns a list of all indexes in the collection. 
+func (c *collection) Indexes(ctx context.Context) ([]Index, error) { + req, err := c.conn.NewRequest("GET", path.Join(c.db.relPath(), "_api", "index")) + if err != nil { + return nil, WithStack(err) + } + req.SetQuery("collection", c.name) + resp, err := c.conn.Do(ctx, req) + if err != nil { + return nil, WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return nil, WithStack(err) + } + var data indexListResponse + if err := resp.ParseBody("", &data); err != nil { + return nil, WithStack(err) + } + result := make([]Index, 0, len(data.Indexes)) + for _, x := range data.Indexes { + idx, err := newIndex(x.ID, c) + if err != nil { + return nil, WithStack(err) + } + result = append(result, idx) + } + return result, nil +} + +// EnsureFullTextIndex creates a fulltext index in the collection, if it does not already exist. +// +// Fields is a slice of attribute names. Currently, the slice is limited to exactly one attribute. +// The index is returned, together with a boolean indicating if the index was newly created (true) or pre-existing (false). +func (c *collection) EnsureFullTextIndex(ctx context.Context, fields []string, options *EnsureFullTextIndexOptions) (Index, bool, error) { + input := indexData{ + Type: "fulltext", + Fields: fields, + } + if options != nil { + input.MinLength = options.MinLength + } + idx, created, err := c.ensureIndex(ctx, input) + if err != nil { + return nil, false, WithStack(err) + } + return idx, created, nil +} + +// EnsureGeoIndex creates a hash index in the collection, if it does not already exist. +// +// Fields is a slice with one or two attribute paths. If it is a slice with one attribute path location, +// then a geo-spatial index on all documents is created using location as path to the coordinates. +// The value of the attribute must be a slice with at least two double values. The slice must contain the latitude (first value) +// and the longitude (second value). 
All documents, which do not have the attribute path or with value that are not suitable, are ignored. +// If it is a slice with two attribute paths latitude and longitude, then a geo-spatial index on all documents is created +// using latitude and longitude as paths the latitude and the longitude. The value of the attribute latitude and of the +// attribute longitude must a double. All documents, which do not have the attribute paths or which values are not suitable, are ignored. +// The index is returned, together with a boolean indicating if the index was newly created (true) or pre-existing (false). +func (c *collection) EnsureGeoIndex(ctx context.Context, fields []string, options *EnsureGeoIndexOptions) (Index, bool, error) { + input := indexData{ + Type: "geo", + Fields: fields, + } + if options != nil { + input.GeoJSON = &options.GeoJSON + } + idx, created, err := c.ensureIndex(ctx, input) + if err != nil { + return nil, false, WithStack(err) + } + return idx, created, nil +} + +// EnsureHashIndex creates a hash index in the collection, if it does not already exist. +// Fields is a slice of attribute paths. +// The index is returned, together with a boolean indicating if the index was newly created (true) or pre-existing (false). +func (c *collection) EnsureHashIndex(ctx context.Context, fields []string, options *EnsureHashIndexOptions) (Index, bool, error) { + input := indexData{ + Type: "hash", + Fields: fields, + } + off := false + if options != nil { + input.Unique = &options.Unique + input.Sparse = &options.Sparse + if options.NoDeduplicate { + input.Deduplicate = &off + } + } + idx, created, err := c.ensureIndex(ctx, input) + if err != nil { + return nil, false, WithStack(err) + } + return idx, created, nil +} + +// EnsurePersistentIndex creates a persistent index in the collection, if it does not already exist. +// Fields is a slice of attribute paths. 
+// The index is returned, together with a boolean indicating if the index was newly created (true) or pre-existing (false). +func (c *collection) EnsurePersistentIndex(ctx context.Context, fields []string, options *EnsurePersistentIndexOptions) (Index, bool, error) { + input := indexData{ + Type: "persistent", + Fields: fields, + } + if options != nil { + input.Unique = &options.Unique + input.Sparse = &options.Sparse + } + idx, created, err := c.ensureIndex(ctx, input) + if err != nil { + return nil, false, WithStack(err) + } + return idx, created, nil +} + +// EnsureSkipListIndex creates a skiplist index in the collection, if it does not already exist. +// Fields is a slice of attribute paths. +// The index is returned, together with a boolean indicating if the index was newly created (true) or pre-existing (false). +func (c *collection) EnsureSkipListIndex(ctx context.Context, fields []string, options *EnsureSkipListIndexOptions) (Index, bool, error) { + input := indexData{ + Type: "skiplist", + Fields: fields, + } + off := false + if options != nil { + input.Unique = &options.Unique + input.Sparse = &options.Sparse + if options.NoDeduplicate { + input.Deduplicate = &off + } + } + idx, created, err := c.ensureIndex(ctx, input) + if err != nil { + return nil, false, WithStack(err) + } + return idx, created, nil +} + +// ensureIndex creates a persistent index in the collection, if it does not already exist. +// Fields is a slice of attribute paths. +// The index is returned, together with a boolean indicating if the index was newly created (true) or pre-existing (false). 
+func (c *collection) ensureIndex(ctx context.Context, options indexData) (Index, bool, error) { + req, err := c.conn.NewRequest("POST", path.Join(c.db.relPath(), "_api/index")) + if err != nil { + return nil, false, WithStack(err) + } + req.SetQuery("collection", c.name) + if _, err := req.SetBody(options); err != nil { + return nil, false, WithStack(err) + } + resp, err := c.conn.Do(ctx, req) + if err != nil { + return nil, false, WithStack(err) + } + if err := resp.CheckStatus(200, 201); err != nil { + return nil, false, WithStack(err) + } + created := resp.StatusCode() == 201 + var data indexData + if err := resp.ParseBody("", &data); err != nil { + return nil, false, WithStack(err) + } + idx, err := newIndex(data.ID, c) + if err != nil { + return nil, false, WithStack(err) + } + return idx, created, nil +} diff --git a/deps/github.com/arangodb/go-driver/connection.go b/deps/github.com/arangodb/go-driver/connection.go new file mode 100644 index 000000000..915a0f9de --- /dev/null +++ b/deps/github.com/arangodb/go-driver/connection.go @@ -0,0 +1,147 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import ( + "context" + "encoding/json" + "errors" + + velocypack "github.com/arangodb/go-velocypack" +) + +// Connection is a connection to a database server using a specific protocol. +type Connection interface { + // NewRequest creates a new request with given method and path. + NewRequest(method, path string) (Request, error) + + // Do performs a given request, returning its response. + Do(ctx context.Context, req Request) (Response, error) + + // Unmarshal unmarshals the given raw object into the given result interface. + Unmarshal(data RawObject, result interface{}) error + + // Endpoints returns the endpoints used by this connection. + Endpoints() []string + + // UpdateEndpoints reconfigures the connection to use the given endpoints. + UpdateEndpoints(endpoints []string) error + + // Configure the authentication used for this connection. + SetAuthentication(Authentication) (Connection, error) + + // Protocols returns all protocols used by this connection. + Protocols() ProtocolSet +} + +// Request represents the input to a request on the server. +type Request interface { + // SetQuery sets a single query argument of the request. + // Any existing query argument with the same key is overwritten. + SetQuery(key, value string) Request + // SetBody sets the content of the request. + // The protocol of the connection determines what kinds of marshalling is taking place. + // When multiple bodies are given, they are merged, with fields in the first document prevailing. + SetBody(body ...interface{}) (Request, error) + // SetBodyArray sets the content of the request as an array. + // If the given mergeArray is not nil, its elements are merged with the elements in the body array (mergeArray data overrides bodyArray data). + // The merge is NOT recursive. + // The protocol of the connection determines what kinds of marshalling is taking place. 
+ SetBodyArray(bodyArray interface{}, mergeArray []map[string]interface{}) (Request, error) + // SetBodyImportArray sets the content of the request as an array formatted for importing documents. + // The protocol of the connection determines what kinds of marshalling is taking place. + SetBodyImportArray(bodyArray interface{}) (Request, error) + // SetHeader sets a single header arguments of the request. + // Any existing header argument with the same key is overwritten. + SetHeader(key, value string) Request + // Written returns true as soon as this request has been written completely to the network. + // This does not guarantee that the server has received or processed the request. + Written() bool + // Clone creates a new request containing the same data as this request + Clone() Request +} + +// Response represents the response from the server on a given request. +type Response interface { + // StatusCode returns an HTTP compatible status code of the response. + StatusCode() int + // Endpoint returns the endpoint that handled the request. + Endpoint() string + // CheckStatus checks if the status of the response equals to one of the given status codes. + // If so, nil is returned. + // If not, an attempt is made to parse an error response in the body and an error is returned. + CheckStatus(validStatusCodes ...int) error + // Header returns the value of a response header with given key. + // If no such header is found, an empty string is returned. + // On nested Response's, this function will always return an empty string. + Header(key string) string + // ParseBody performs protocol specific unmarshalling of the response data into the given result. + // If the given field is non-empty, the contents of that field will be parsed into the given result. + // This can only be used for requests that return a single object. 
+ ParseBody(field string, result interface{}) error + // ParseArrayBody performs protocol specific unmarshalling of the response array data into individual response objects. + // This can only be used for requests that return an array of objects. + ParseArrayBody() ([]Response, error) +} + +// RawObject is a raw encoded object. +// Connection implementations must be able to unmarshal *RawObject into Go objects. +type RawObject []byte + +// MarshalJSON returns *r as the JSON encoding of r. +func (r *RawObject) MarshalJSON() ([]byte, error) { + return *r, nil +} + +// UnmarshalJSON sets *r to a copy of data. +func (r *RawObject) UnmarshalJSON(data []byte) error { + if r == nil { + return errors.New("RawObject: UnmarshalJSON on nil pointer") + } + *r = append((*r)[0:0], data...) + return nil +} + +// Ensure RawObject implements json.Marshaler & json.Unmarshaler +var _ json.Marshaler = (*RawObject)(nil) +var _ json.Unmarshaler = (*RawObject)(nil) + +// MarshalVPack returns m as the Velocypack encoding of m. +func (r RawObject) MarshalVPack() (velocypack.Slice, error) { + if r == nil { + return velocypack.NullSlice(), nil + } + return velocypack.Slice(r), nil +} + +// UnmarshalVPack sets *m to a copy of data. +func (r *RawObject) UnmarshalVPack(data velocypack.Slice) error { + if r == nil { + return errors.New("velocypack.RawSlice: UnmarshalVPack on nil pointer") + } + *r = append((*r)[0:0], data...) 
+ return nil +} + +var _ velocypack.Marshaler = (*RawObject)(nil) +var _ velocypack.Unmarshaler = (*RawObject)(nil) diff --git a/deps/github.com/arangodb/go-driver/content_type.go b/deps/github.com/arangodb/go-driver/content_type.go new file mode 100644 index 000000000..fe4b0ada7 --- /dev/null +++ b/deps/github.com/arangodb/go-driver/content_type.go @@ -0,0 +1,46 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import "fmt" + +// ContentType identifies the type of encoding to use for the data. 
+type ContentType int + +const ( + // ContentTypeJSON encodes data as json + ContentTypeJSON ContentType = iota + // ContentTypeVelocypack encodes data as Velocypack + ContentTypeVelocypack +) + +func (ct ContentType) String() string { + switch ct { + case ContentTypeJSON: + return "application/json" + case ContentTypeVelocypack: + return "application/x-velocypack" + default: + panic(fmt.Sprintf("Unknown content type %d", int(ct))) + } +} diff --git a/deps/github.com/arangodb/go-driver/context.go b/deps/github.com/arangodb/go-driver/context.go new file mode 100644 index 000000000..3b39bea05 --- /dev/null +++ b/deps/github.com/arangodb/go-driver/context.go @@ -0,0 +1,397 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import ( + "context" + "reflect" + "strconv" + + "github.com/arangodb/go-driver/util" +) + +// ContextKey is an internal type used for holding values in a `context.Context` +// do not use!. 
+type ContextKey string + +const ( + keyRevision ContextKey = "arangodb-revision" + keyRevisions ContextKey = "arangodb-revisions" + keyReturnNew ContextKey = "arangodb-returnNew" + keyReturnOld ContextKey = "arangodb-returnOld" + keySilent ContextKey = "arangodb-silent" + keyWaitForSync ContextKey = "arangodb-waitForSync" + keyDetails ContextKey = "arangodb-details" + keyKeepNull ContextKey = "arangodb-keepNull" + keyMergeObjects ContextKey = "arangodb-mergeObjects" + keyRawResponse ContextKey = "arangodb-rawResponse" + keyImportDetails ContextKey = "arangodb-importDetails" + keyResponse ContextKey = "arangodb-response" + keyEndpoint ContextKey = "arangodb-endpoint" + keyIsRestore ContextKey = "arangodb-isRestore" + keyIsSystem ContextKey = "arangodb-isSystem" + keyIgnoreRevs ContextKey = "arangodb-ignoreRevs" + keyEnforceReplicationFactor ContextKey = "arangodb-enforceReplicationFactor" + keyConfigured ContextKey = "arangodb-configured" + keyFollowLeaderRedirect ContextKey = "arangodb-followLeaderRedirect" + keyDBServerID ContextKey = "arangodb-dbserverID" +) + +// WithRevision is used to configure a context to make document +// functions specify an explicit revision of the document using an `If-Match` condition. +func WithRevision(parent context.Context, revision string) context.Context { + return context.WithValue(contextOrBackground(parent), keyRevision, revision) +} + +// WithRevisions is used to configure a context to make multi-document +// functions specify explicit revisions of the documents. +func WithRevisions(parent context.Context, revisions []string) context.Context { + return context.WithValue(contextOrBackground(parent), keyRevisions, revisions) +} + +// WithReturnNew is used to configure a context to make create, update & replace document +// functions return the new document into the given result. 
+func WithReturnNew(parent context.Context, result interface{}) context.Context { + return context.WithValue(contextOrBackground(parent), keyReturnNew, result) +} + +// WithReturnOld is used to configure a context to make update & replace document +// functions return the old document into the given result. +func WithReturnOld(parent context.Context, result interface{}) context.Context { + return context.WithValue(contextOrBackground(parent), keyReturnOld, result) +} + +// WithDetails is used to configure a context to make Client.Version return additional details. +// You can pass a single (optional) boolean. If that is set to false, you explicitly ask to not provide details. +func WithDetails(parent context.Context, value ...bool) context.Context { + v := true + if len(value) == 1 { + v = value[0] + } + return context.WithValue(contextOrBackground(parent), keyDetails, v) +} + +// WithEndpoint is used to configure a context that forces a request to be executed on a specific endpoint. +// If you specify an endpoint like this, failover is disabled. +// If you specify an unknown endpoint, an InvalidArgumentError is returned from requests. +func WithEndpoint(parent context.Context, endpoint string) context.Context { + endpoint = util.FixupEndpointURLScheme(endpoint) + return context.WithValue(contextOrBackground(parent), keyEndpoint, endpoint) +} + +// WithKeepNull is used to configure a context to make update functions keep null fields (value==true) +// or remove fields with null values (value==false). 
+func WithKeepNull(parent context.Context, value bool) context.Context { + return context.WithValue(contextOrBackground(parent), keyKeepNull, value) +} + +// WithMergeObjects is used to configure a context to make update functions merge objects present in both +// the existing document and the patch document (value==true) or overwrite objects in the existing document +// with objects found in the patch document (value==false) +func WithMergeObjects(parent context.Context, value bool) context.Context { + return context.WithValue(contextOrBackground(parent), keyMergeObjects, value) +} + +// WithSilent is used to configure a context to make functions return an empty result (silent==true), +// instead of a metadata result (silent==false, default). +// You can pass a single (optional) boolean. If that is set to false, you explicitly ask to return metadata result. +func WithSilent(parent context.Context, value ...bool) context.Context { + v := true + if len(value) == 1 { + v = value[0] + } + return context.WithValue(contextOrBackground(parent), keySilent, v) +} + +// WithWaitForSync is used to configure a context to make modification +// functions wait until the data has been synced to disk (or not). +// You can pass a single (optional) boolean. If that is set to false, you explicitly do not wait for +// data to be synced to disk. +func WithWaitForSync(parent context.Context, value ...bool) context.Context { + v := true + if len(value) == 1 { + v = value[0] + } + return context.WithValue(contextOrBackground(parent), keyWaitForSync, v) +} + +// WithRawResponse is used to configure a context that will make all functions store the raw response into a +// buffer. +func WithRawResponse(parent context.Context, value *[]byte) context.Context { + return context.WithValue(contextOrBackground(parent), keyRawResponse, value) +} + +// WithResponse is used to configure a context that will make all functions store the response into the given value. 
+func WithResponse(parent context.Context, value *Response) context.Context { + return context.WithValue(contextOrBackground(parent), keyResponse, value) +} + +// WithImportDetails is used to configure a context that will make import document requests return +// details about documents that could not be imported. +func WithImportDetails(parent context.Context, value *[]string) context.Context { + return context.WithValue(contextOrBackground(parent), keyImportDetails, value) +} + +// WithIsRestore is used to configure a context to make insert functions use the "isRestore=" +// setting. +// Note: This function is intended for internal (replication) use. It is NOT intended to +// be used by normal client. This CAN screw up your database. +func WithIsRestore(parent context.Context, value bool) context.Context { + return context.WithValue(contextOrBackground(parent), keyIsRestore, value) +} + +// WithIsSystem is used to configure a context to make insert functions use the "isSystem=" +// setting. +func WithIsSystem(parent context.Context, value bool) context.Context { + return context.WithValue(contextOrBackground(parent), keyIsSystem, value) +} + +// WithIgnoreRevisions is used to configure a context to make modification +// functions ignore revisions in the update. +// Do not use in combination with WithRevision or WithRevisions. +func WithIgnoreRevisions(parent context.Context, value ...bool) context.Context { + v := true + if len(value) == 1 { + v = value[0] + } + return context.WithValue(contextOrBackground(parent), keyIgnoreRevs, v) +} + +// WithEnforceReplicationFactor is used to configure a context to make adding collections +// fail if the replication factor is too high (default or true) or +// silently accept (false). 
+func WithEnforceReplicationFactor(parent context.Context, value bool) context.Context { + return context.WithValue(contextOrBackground(parent), keyEnforceReplicationFactor, value) +} + +// WithConfigured is used to configure a context to return the configured value of +// a user grant instead of the effective grant. +func WithConfigured(parent context.Context, value ...bool) context.Context { + v := true + if len(value) == 1 { + v = value[0] + } + return context.WithValue(contextOrBackground(parent), keyConfigured, v) +} + +// WithFollowLeaderRedirect is used to configure a context to return turn on/off +// following redirection responses from the server when the request is answered by a follower. +// Default behavior is "on". +func WithFollowLeaderRedirect(parent context.Context, value bool) context.Context { + return context.WithValue(contextOrBackground(parent), keyFollowLeaderRedirect, value) +} + +// WithDBServerID is used to configure a context that includes an ID of a specific DBServer. +func WithDBServerID(parent context.Context, id string) context.Context { + return context.WithValue(contextOrBackground(parent), keyDBServerID, id) +} + +type contextSettings struct { + Silent bool + WaitForSync bool + ReturnOld interface{} + ReturnNew interface{} + Revision string + Revisions []string + ImportDetails *[]string + IsRestore bool + IsSystem bool + IgnoreRevs *bool + EnforceReplicationFactor *bool + Configured *bool + FollowLeaderRedirect *bool + DBServerID string +} + +// applyContextSettings returns the settings configured in the context in the given request. +// It then returns information about the applied settings that may be needed later in API implementation functions. 
func applyContextSettings(ctx context.Context, req Request) contextSettings {
	result := contextSettings{}
	if ctx == nil {
		return result
	}
	// Details
	if v := ctx.Value(keyDetails); v != nil {
		if details, ok := v.(bool); ok {
			req.SetQuery("details", strconv.FormatBool(details))
		}
	}
	// KeepNull
	if v := ctx.Value(keyKeepNull); v != nil {
		if keepNull, ok := v.(bool); ok {
			req.SetQuery("keepNull", strconv.FormatBool(keepNull))
		}
	}
	// MergeObjects
	if v := ctx.Value(keyMergeObjects); v != nil {
		if mergeObjects, ok := v.(bool); ok {
			req.SetQuery("mergeObjects", strconv.FormatBool(mergeObjects))
		}
	}
	// Silent (also recorded in result so callers know not to expect metadata)
	if v := ctx.Value(keySilent); v != nil {
		if silent, ok := v.(bool); ok {
			req.SetQuery("silent", strconv.FormatBool(silent))
			result.Silent = silent
		}
	}
	// WaitForSync (also recorded; callers use it to pick the expected status code)
	if v := ctx.Value(keyWaitForSync); v != nil {
		if waitForSync, ok := v.(bool); ok {
			req.SetQuery("waitForSync", strconv.FormatBool(waitForSync))
			result.WaitForSync = waitForSync
		}
	}
	// ReturnOld (value is an arbitrary destination, so no type assertion here)
	if v := ctx.Value(keyReturnOld); v != nil {
		req.SetQuery("returnOld", "true")
		result.ReturnOld = v
	}
	// ReturnNew (value is an arbitrary destination, so no type assertion here)
	if v := ctx.Value(keyReturnNew); v != nil {
		req.SetQuery("returnNew", "true")
		result.ReturnNew = v
	}
	// If-Match
	if v := ctx.Value(keyRevision); v != nil {
		if rev, ok := v.(string); ok {
			req.SetHeader("If-Match", rev)
			result.Revision = rev
		}
	}
	// Revisions (per-document revisions; server-side revision check is disabled here,
	// the per-document check happens via withDocumentAt/If-Match)
	if v := ctx.Value(keyRevisions); v != nil {
		if revs, ok := v.([]string); ok {
			req.SetQuery("ignoreRevs", "false")
			result.Revisions = revs
		}
	}
	// ImportDetails
	if v := ctx.Value(keyImportDetails); v != nil {
		if details, ok := v.(*[]string); ok {
			req.SetQuery("details", "true")
			result.ImportDetails = details
		}
	}
	// IsRestore
	if v := ctx.Value(keyIsRestore); v != nil {
		if isRestore, ok := v.(bool); ok {
			req.SetQuery("isRestore", strconv.FormatBool(isRestore))
			result.IsRestore = isRestore
		}
	}
	// IsSystem
	if v := ctx.Value(keyIsSystem); v != nil {
		if isSystem, ok := v.(bool); ok {
			req.SetQuery("isSystem", strconv.FormatBool(isSystem))
			result.IsSystem = isSystem
		}
	}
	// IgnoreRevs
	if v := ctx.Value(keyIgnoreRevs); v != nil {
		if ignoreRevs, ok := v.(bool); ok {
			req.SetQuery("ignoreRevs", strconv.FormatBool(ignoreRevs))
			result.IgnoreRevs = &ignoreRevs
		}
	}
	// EnforceReplicationFactor
	if v := ctx.Value(keyEnforceReplicationFactor); v != nil {
		if enforceReplicationFactor, ok := v.(bool); ok {
			req.SetQuery("enforceReplicationFactor", strconv.FormatBool(enforceReplicationFactor))
			result.EnforceReplicationFactor = &enforceReplicationFactor
		}
	}
	// Configured
	if v := ctx.Value(keyConfigured); v != nil {
		if configured, ok := v.(bool); ok {
			req.SetQuery("configured", strconv.FormatBool(configured))
			result.Configured = &configured
		}
	}
	// FollowLeaderRedirect (client-side behavior only; no query parameter is set)
	if v := ctx.Value(keyFollowLeaderRedirect); v != nil {
		if followLeaderRedirect, ok := v.(bool); ok {
			result.FollowLeaderRedirect = &followLeaderRedirect
		}
	}
	// DBServerID
	if v := ctx.Value(keyDBServerID); v != nil {
		if id, ok := v.(string); ok {
			req.SetQuery("DBserver", id)
			result.DBServerID = id
		}
	}
	return result
}

// okStatus returns one of the given status codes depending on the WaitForSync field value.
// If WaitForSync==true, statusWithWaitForSync is returned, otherwise statusWithoutWaitForSync is returned.
func (cs contextSettings) okStatus(statusWithWaitForSync, statusWithoutWaitForSync int) int {
	if cs.WaitForSync {
		return statusWithWaitForSync
	} else {
		return statusWithoutWaitForSync
	}
}

// contextOrBackground returns the given context if it is not nil.
// Returns context.Background() otherwise.
+func contextOrBackground(ctx context.Context) context.Context { + if ctx != nil { + return ctx + } + return context.Background() +} + +// withDocumentAt returns a context derived from the given parent context to be used in multi-document options +// that needs a client side "loop" implementation. +// It handle: +// - WithRevisions +// - WithReturnNew +// - WithReturnOld +func withDocumentAt(ctx context.Context, index int) (context.Context, error) { + if ctx == nil { + return nil, nil + } + // Revisions + if v := ctx.Value(keyRevisions); v != nil { + if revs, ok := v.([]string); ok { + if index >= len(revs) { + return nil, WithStack(InvalidArgumentError{Message: "Index out of range: revisions"}) + } + ctx = WithRevision(ctx, revs[index]) + } + } + // ReturnOld + if v := ctx.Value(keyReturnOld); v != nil { + val := reflect.ValueOf(v) + ctx = WithReturnOld(ctx, val.Index(index).Addr().Interface()) + } + // ReturnNew + if v := ctx.Value(keyReturnNew); v != nil { + val := reflect.ValueOf(v) + ctx = WithReturnNew(ctx, val.Index(index).Addr().Interface()) + } + + return ctx, nil +} diff --git a/deps/github.com/arangodb/go-driver/cursor.go b/deps/github.com/arangodb/go-driver/cursor.go new file mode 100644 index 000000000..da0deeedd --- /dev/null +++ b/deps/github.com/arangodb/go-driver/cursor.go @@ -0,0 +1,49 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
//
// Copyright holder is ArangoDB GmbH, Cologne, Germany
//
// Author Ewout Prangsma
//

package driver

import (
	"context"
	"io"
)

// Cursor is returned from a query, used to iterate over a list of documents.
// Note that a Cursor must always be closed to avoid holding on to resources in the server while they are no longer needed.
type Cursor interface {
	io.Closer

	// HasMore returns true if the next call to ReadDocument does not return a NoMoreDocuments error.
	HasMore() bool

	// ReadDocument reads the next document from the cursor.
	// The document data is stored into result, the document meta data is returned.
	// If the cursor has no more documents, a NoMoreDocuments error is returned.
	// Note: If the query (resulting in this cursor) does not return documents,
	// then the returned DocumentMeta will be empty.
	ReadDocument(ctx context.Context, result interface{}) (DocumentMeta, error)

	// Count returns the total number of result documents available.
	// A valid return value is only available when the cursor has been created with a context that was
	// prepared with `WithQueryCount`.
	Count() int64
}
diff --git a/deps/github.com/arangodb/go-driver/cursor_impl.go b/deps/github.com/arangodb/go-driver/cursor_impl.go
new file mode 100644
index 000000000..62c7edc3e
--- /dev/null
+++ b/deps/github.com/arangodb/go-driver/cursor_impl.go
@@ -0,0 +1,155 @@
//
// DISCLAIMER
//
// Copyright 2017 ArangoDB GmbH, Cologne, Germany
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Copyright holder is ArangoDB GmbH, Cologne, Germany
//
// Author Ewout Prangsma
//

package driver

import (
	"context"
	"path"
	"sync"
	"sync/atomic"
)

// newCursor creates a new Cursor implementation.
func newCursor(data cursorData, endpoint string, db *database) (Cursor, error) {
	if db == nil {
		return nil, WithStack(InvalidArgumentError{Message: "db is nil"})
	}
	return &cursor{
		cursorData: data,
		endpoint:   endpoint,
		db:         db,
		conn:       db.conn,
	}, nil
}

// cursor implements the Cursor interface on top of the server-side cursor API.
type cursor struct {
	cursorData
	endpoint    string     // endpoint used to create the cursor; all follow-up requests must go there
	resultIndex int        // index of the next document in cursorData.Result to hand out
	db          *database
	conn        Connection
	closed      int32      // set to 1 (atomically) once the cursor has been closed
	closeMutex  sync.Mutex // serializes concurrent Close calls
}

// cursorData is the JSON structure returned by the server for a cursor.
type cursorData struct {
	Count   int64        `json:"count,omitempty"`   // the total number of result documents available (only available if the query was executed with the count attribute set)
	ID      string       `json:"id"`                // id of temporary cursor created on the server (optional, see above)
	Result  []*RawObject `json:"result,omitempty"`  // an array of result documents (might be empty if query has no results)
	HasMore bool         `json:"hasMore,omitempty"` // A boolean indicator whether there are more results available for the cursor on the server
}

// relPath creates the relative path to this cursor (`_db/<db-name>/_api/cursor`).
func (c *cursor) relPath() string {
	return path.Join(c.db.relPath(), "_api", "cursor")
}

// HasMore returns true if the next call to ReadDocument does not return a NoMoreDocuments error.
// It is true while there are unread documents in the current batch, or the server reports more batches.
func (c *cursor) HasMore() bool {
	return c.resultIndex < len(c.Result) || c.cursorData.HasMore
}

// Count returns the total number of result documents available.
// A valid return value is only available when the cursor has been created with a context that was
// prepared with `WithQueryCount`.
func (c *cursor) Count() int64 {
	return c.cursorData.Count
}

// Close deletes the cursor and frees the resources associated with it.
+func (c *cursor) Close() error { + if c == nil { + // Avoid panics in the case that someone defer's a close before checking that the cursor is not nil. + return nil + } + if c := atomic.LoadInt32(&c.closed); c != 0 { + return nil + } + c.closeMutex.Lock() + defer c.closeMutex.Unlock() + if c.closed == 0 { + if c.cursorData.ID != "" { + // Force use of initial endpoint + ctx := WithEndpoint(nil, c.endpoint) + + req, err := c.conn.NewRequest("DELETE", path.Join(c.relPath(), c.cursorData.ID)) + if err != nil { + return WithStack(err) + } + resp, err := c.conn.Do(ctx, req) + if err != nil { + return WithStack(err) + } + if err := resp.CheckStatus(202); err != nil { + return WithStack(err) + } + } + atomic.StoreInt32(&c.closed, 1) + } + return nil +} + +// ReadDocument reads the next document from the cursor. +// The document data is stored into result, the document meta data is returned. +// If the cursor has no more documents, a NoMoreDocuments error is returned. +func (c *cursor) ReadDocument(ctx context.Context, result interface{}) (DocumentMeta, error) { + // Force use of initial endpoint + ctx = WithEndpoint(ctx, c.endpoint) + + if c.resultIndex >= len(c.Result) && c.cursorData.HasMore { + // Fetch next batch + req, err := c.conn.NewRequest("PUT", path.Join(c.relPath(), c.cursorData.ID)) + if err != nil { + return DocumentMeta{}, WithStack(err) + } + resp, err := c.conn.Do(ctx, req) + if err != nil { + return DocumentMeta{}, WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return DocumentMeta{}, WithStack(err) + } + var data cursorData + if err := resp.ParseBody("", &data); err != nil { + return DocumentMeta{}, WithStack(err) + } + c.cursorData = data + c.resultIndex = 0 + } + + index := c.resultIndex + if index >= len(c.Result) { + // Out of data + return DocumentMeta{}, WithStack(NoMoreDocumentsError{}) + } + c.resultIndex++ + var meta DocumentMeta + if err := c.conn.Unmarshal(*c.Result[index], &meta); err != nil { + // If a cursor returns 
something other than a document, this will fail. + // Just ignore it. + } + if err := c.conn.Unmarshal(*c.Result[index], result); err != nil { + return DocumentMeta{}, WithStack(err) + } + return meta, nil +} diff --git a/deps/github.com/arangodb/go-driver/database.go b/deps/github.com/arangodb/go-driver/database.go new file mode 100644 index 000000000..f09ed8d67 --- /dev/null +++ b/deps/github.com/arangodb/go-driver/database.go @@ -0,0 +1,92 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import "context" + +// Database provides access to all collections & graphs in a single database. +type Database interface { + // Name returns the name of the database. + Name() string + + // Info fetches information about the database. + Info(ctx context.Context) (DatabaseInfo, error) + + // EngineInfo returns information about the database engine being used. + // Note: When your cluster has multiple endpoints (cluster), you will get information + // from the server that is currently being used. + // If you want to know exactly which server the information is from, use a client + // with only a single endpoint and avoid automatic synchronization of endpoints. + EngineInfo(ctx context.Context) (EngineInfo, error) + + // Remove removes the entire database. 
	// If the database does not exist, a NotFoundError is returned.
	Remove(ctx context.Context) error

	// Collection functions
	DatabaseCollections

	// Graph functions
	DatabaseGraphs

	// Query performs an AQL query, returning a cursor used to iterate over the returned documents.
	// Note that the returned Cursor must always be closed to avoid holding on to resources in the server while they are no longer needed.
	Query(ctx context.Context, query string, bindVars map[string]interface{}) (Cursor, error)

	// ValidateQuery validates an AQL query.
	// When the query is valid, nil is returned, otherwise an error is returned.
	// The query is not executed.
	ValidateQuery(ctx context.Context, query string) error

	// Transaction performs a javascript transaction. The result of the transaction function is returned.
	Transaction(ctx context.Context, action string, options *TransactionOptions) (interface{}, error)
}

// DatabaseInfo contains information about a database
type DatabaseInfo struct {
	// The identifier of the database.
	ID string `json:"id,omitempty"`
	// The name of the database.
	Name string `json:"name,omitempty"`
	// The filesystem path of the database.
	Path string `json:"path,omitempty"`
	// If true then the database is the _system database.
	IsSystem bool `json:"isSystem,omitempty"`
}

// EngineType indicates type of database engine being used.
type EngineType string

const (
	EngineTypeMMFiles = EngineType("mmfiles")
	EngineTypeRocksDB = EngineType("rocksdb")
)

// String returns the engine type as a plain string.
func (t EngineType) String() string {
	return string(t)
}

// EngineInfo contains information about the database engine being used.
type EngineInfo struct {
	Type EngineType `json:"name"`
}
diff --git a/deps/github.com/arangodb/go-driver/database_collections.go b/deps/github.com/arangodb/go-driver/database_collections.go
new file mode 100644
index 000000000..41989f924
--- /dev/null
+++ b/deps/github.com/arangodb/go-driver/database_collections.go
@@ -0,0 +1,130 @@
//
// DISCLAIMER
//
// Copyright 2017 ArangoDB GmbH, Cologne, Germany
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Copyright holder is ArangoDB GmbH, Cologne, Germany
//
// Author Ewout Prangsma
//

package driver

import "context"

// DatabaseCollections provides access to all collections in a single database.
type DatabaseCollections interface {
	// Collection opens a connection to an existing collection within the database.
	// If no collection with given name exists, a NotFoundError is returned.
	Collection(ctx context.Context, name string) (Collection, error)

	// CollectionExists returns true if a collection with given name exists within the database.
	CollectionExists(ctx context.Context, name string) (bool, error)

	// Collections returns a list of all collections in the database.
	Collections(ctx context.Context) ([]Collection, error)

	// CreateCollection creates a new collection with given name and options, and opens a connection to it.
	// If a collection with given name already exists within the database, a DuplicateError is returned.
	CreateCollection(ctx context.Context, name string, options *CreateCollectionOptions) (Collection, error)
}

// CreateCollectionOptions contains options that customize the creating of a collection.
type CreateCollectionOptions struct {
	// The maximal size of a journal or datafile in bytes. The value must be at least 1048576 (1 MiB). (The default is a configuration parameter)
	JournalSize int `json:"journalSize,omitempty"`
	// ReplicationFactor in a cluster (default is 1), this attribute determines how many copies of each shard are kept on different DBServers.
	// The value 1 means that only one copy (no synchronous replication) is kept.
	// A value of k means that k-1 replicas are kept. Any two copies reside on different DBServers.
	// Replication between them is synchronous, that is, every write operation to the "leader" copy will be replicated to all "follower" replicas,
	// before the write operation is reported successful. If a server fails, this is detected automatically
	// and one of the servers holding copies take over, usually without an error being reported.
	ReplicationFactor int `json:"replicationFactor,omitempty"`
	// If true then the data is synchronized to disk before returning from a document create, update, replace or removal operation. (default: false)
	WaitForSync bool `json:"waitForSync,omitempty"`
	// Whether or not the collection will be compacted (default is true)
	DoCompact *bool `json:"doCompact,omitempty"`
	// If true then the collection data is kept in-memory only and not made persistent.
	// Unloading the collection will cause the collection data to be discarded. Stopping or re-starting the server will also
	// cause full loss of data in the collection. Setting this option will make the resulting collection be slightly faster
	// than regular collections because ArangoDB does not enforce any synchronization to disk and does not calculate any
	// CRC checksums for datafiles (as there are no datafiles). This option should therefore be used for cache-type collections only,
	// and not for data that cannot be re-created otherwise. (The default is false)
	IsVolatile bool `json:"isVolatile,omitempty"`
	// In a cluster, this attribute determines which document attributes are used to
	// determine the target shard for documents. Documents are sent to shards based on the values of their shard key attributes.
	// The values of all shard key attributes in a document are hashed, and the hash value is used to determine the target shard.
	// Note: Values of shard key attributes cannot be changed once set. This option is meaningless in a single server setup.
	// The default is []string{"_key"}.
	ShardKeys []string `json:"shardKeys,omitempty"`
	// In a cluster, this value determines the number of shards to create for the collection. In a single server setup, this option is meaningless. (default is 1)
	NumberOfShards int `json:"numberOfShards,omitempty"`
	// If true, create a system collection. In this case collection-name should start with an underscore.
	// End users should normally create non-system collections only. API implementors may be required to create system
	// collections in very special occasions, but normally a regular collection will do. (The default is false)
	IsSystem bool `json:"isSystem,omitempty"`
	// The type of the collection to create. (default is CollectionTypeDocument)
	Type CollectionType `json:"type,omitempty"`
	// The number of buckets into which indexes using a hash table are split. The default is 16 and this number has to be a power
	// of 2 and less than or equal to 1024. For very large collections one should increase this to avoid long pauses when the hash
	// table has to be initially built or resized, since buckets are resized individually and can be initially built in parallel.
	// For example, 64 might be a sensible value for a collection with 100 000 000 documents.
	// Currently, only the edge index respects this value, but other index types might follow in future ArangoDB versions.
	// Changes are applied when the collection is loaded the next time.
	IndexBuckets int `json:"indexBuckets,omitempty"`
	// Specifies how keys in the collection are created.
	KeyOptions *CollectionKeyOptions `json:"keyOptions,omitempty"`
	// This field is used for internal purposes only. DO NOT USE.
	DistributeShardsLike string `json:"distributeShardsLike,omitempty"`
	// Set to create a smart edge or vertex collection.
	// This requires ArangoDB enterprise.
	IsSmart bool `json:"isSmart,omitempty"`
	// This field must be set to the attribute that will be used for sharding or smart graphs.
	// All vertices are required to have this attribute set. Edges derive the attribute from their connected vertices.
	// This requires ArangoDB enterprise.
	SmartGraphAttribute string `json:"smartGraphAttribute,omitempty"`
}

// CollectionType is the type of a collection.
type CollectionType int

const (
	// CollectionTypeDocument specifies a document collection
	CollectionTypeDocument = CollectionType(2)
	// CollectionTypeEdge specifies an edges collection
	CollectionTypeEdge = CollectionType(3)
)

// CollectionKeyOptions specifies ways for creating keys of a collection.
type CollectionKeyOptions struct {
	// If set to true, then it is allowed to supply own key values in the _key attribute of a document.
	// If set to false, then the key generator will solely be responsible for generating keys and supplying own
	// key values in the _key attribute of documents is considered an error.
	AllowUserKeys bool `json:"allowUserKeys,omitempty"`
	// Specifies the type of the key generator. The currently available generators are traditional and autoincrement.
	Type KeyGeneratorType `json:"type,omitempty"`
	// Increment value for autoincrement key generator. Not used for other key generator types.
	Increment int `json:"increment,omitempty"`
	// Initial offset value for autoincrement key generator. Not used for other key generator types.
	Offset int `json:"offset,omitempty"`
}

// KeyGeneratorType is a type of key generated, used in `CollectionKeyOptions`.
type KeyGeneratorType string

const (
	KeyGeneratorTraditional   = KeyGeneratorType("traditional")
	KeyGeneratorAutoIncrement = KeyGeneratorType("autoincrement")
)
diff --git a/deps/github.com/arangodb/go-driver/database_collections_impl.go b/deps/github.com/arangodb/go-driver/database_collections_impl.go
new file mode 100644
index 000000000..3eae4c5c6
--- /dev/null
+++ b/deps/github.com/arangodb/go-driver/database_collections_impl.go
@@ -0,0 +1,136 @@
//
// DISCLAIMER
//
// Copyright 2017 ArangoDB GmbH, Cologne, Germany
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Copyright holder is ArangoDB GmbH, Cologne, Germany
//
// Author Ewout Prangsma
//

package driver

import (
	"context"
	"path"
)

// Collection opens a connection to an existing collection within the database.
// If no collection with given name exists, a NotFoundError is returned.
+func (d *database) Collection(ctx context.Context, name string) (Collection, error) { + escapedName := pathEscape(name) + req, err := d.conn.NewRequest("GET", path.Join(d.relPath(), "_api/collection", escapedName)) + if err != nil { + return nil, WithStack(err) + } + resp, err := d.conn.Do(ctx, req) + if err != nil { + return nil, WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return nil, WithStack(err) + } + coll, err := newCollection(name, d) + if err != nil { + return nil, WithStack(err) + } + return coll, nil +} + +// CollectionExists returns true if a collection with given name exists within the database. +func (d *database) CollectionExists(ctx context.Context, name string) (bool, error) { + escapedName := pathEscape(name) + req, err := d.conn.NewRequest("GET", path.Join(d.relPath(), "_api/collection", escapedName)) + if err != nil { + return false, WithStack(err) + } + resp, err := d.conn.Do(ctx, req) + if err != nil { + return false, WithStack(err) + } + if err := resp.CheckStatus(200); err == nil { + return true, nil + } else if IsNotFound(err) { + return false, nil + } else { + return false, WithStack(err) + } +} + +type getCollectionResponse struct { + Result []CollectionInfo `json:"result,omitempty"` +} + +// Collections returns a list of all collections in the database. 
+func (d *database) Collections(ctx context.Context) ([]Collection, error) { + req, err := d.conn.NewRequest("GET", path.Join(d.relPath(), "_api/collection")) + if err != nil { + return nil, WithStack(err) + } + resp, err := d.conn.Do(ctx, req) + if err != nil { + return nil, WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return nil, WithStack(err) + } + var data getCollectionResponse + if err := resp.ParseBody("", &data); err != nil { + return nil, WithStack(err) + } + result := make([]Collection, 0, len(data.Result)) + for _, info := range data.Result { + col, err := newCollection(info.Name, d) + if err != nil { + return nil, WithStack(err) + } + result = append(result, col) + } + return result, nil +} + +// CreateCollection creates a new collection with given name and options, and opens a connection to it. +// If a collection with given name already exists within the database, a DuplicateError is returned. +func (d *database) CreateCollection(ctx context.Context, name string, options *CreateCollectionOptions) (Collection, error) { + input := struct { + CreateCollectionOptions + Name string `json:"name"` + }{ + Name: name, + } + if options != nil { + input.CreateCollectionOptions = *options + } + req, err := d.conn.NewRequest("POST", path.Join(d.relPath(), "_api/collection")) + if err != nil { + return nil, WithStack(err) + } + if _, err := req.SetBody(input); err != nil { + return nil, WithStack(err) + } + applyContextSettings(ctx, req) + resp, err := d.conn.Do(ctx, req) + if err != nil { + return nil, WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return nil, WithStack(err) + } + col, err := newCollection(name, d) + if err != nil { + return nil, WithStack(err) + } + return col, nil +} diff --git a/deps/github.com/arangodb/go-driver/database_graphs.go b/deps/github.com/arangodb/go-driver/database_graphs.go new file mode 100644 index 000000000..899f5445a --- /dev/null +++ 
b/deps/github.com/arangodb/go-driver/database_graphs.go
@@ -0,0 +1,71 @@
//
// DISCLAIMER
//
// Copyright 2017 ArangoDB GmbH, Cologne, Germany
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Copyright holder is ArangoDB GmbH, Cologne, Germany
//
// Author Ewout Prangsma
//

package driver

import "context"

// DatabaseGraphs provides access to all graphs in a single database.
type DatabaseGraphs interface {
	// Graph opens a connection to an existing graph within the database.
	// If no graph with given name exists, a NotFoundError is returned.
	Graph(ctx context.Context, name string) (Graph, error)

	// GraphExists returns true if a graph with given name exists within the database.
	GraphExists(ctx context.Context, name string) (bool, error)

	// Graphs returns a list of all graphs in the database.
	Graphs(ctx context.Context) ([]Graph, error)

	// CreateGraph creates a new graph with given name and options, and opens a connection to it.
	// If a graph with given name already exists within the database, a DuplicateError is returned.
	CreateGraph(ctx context.Context, name string, options *CreateGraphOptions) (Graph, error)
}

// CreateGraphOptions contains options that customize the creating of a graph.
type CreateGraphOptions struct {
	// OrphanVertexCollections is an array of additional vertex collections used in the graph.
	// These are vertices for which there are no edges linking these vertices with anything.
	OrphanVertexCollections []string
	// EdgeDefinitions is an array of edge definitions for the graph.
	EdgeDefinitions []EdgeDefinition
	// IsSmart defines if the created graph should be smart.
	// This only has effect in Enterprise version.
	IsSmart bool
	// SmartGraphAttribute is the attribute name that is used to smartly shard the vertices of a graph.
	// Every vertex in this Graph has to have this attribute.
	// Cannot be modified later.
	SmartGraphAttribute string
	// NumberOfShards is the number of shards that is used for every collection within this graph.
	// Cannot be modified later.
	NumberOfShards int
}

// EdgeDefinition contains all information needed to define a single edge in a graph.
type EdgeDefinition struct {
	// The name of the edge collection to be used.
	Collection string `json:"collection"`
	// To contains the names of one or more edge collections that can contain target vertices.
	To []string `json:"to"`
	// From contains the names of one or more vertex collections that can contain source vertices.
	From []string `json:"from"`
}
diff --git a/deps/github.com/arangodb/go-driver/database_graphs_impl.go b/deps/github.com/arangodb/go-driver/database_graphs_impl.go
new file mode 100644
index 000000000..b2aa5616d
--- /dev/null
+++ b/deps/github.com/arangodb/go-driver/database_graphs_impl.go
@@ -0,0 +1,158 @@
//
// DISCLAIMER
//
// Copyright 2017 ArangoDB GmbH, Cologne, Germany
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import ( + "context" + "path" +) + +// Graph opens a connection to an existing graph within the database. +// If no graph with given name exists, an NotFoundError is returned. +func (d *database) Graph(ctx context.Context, name string) (Graph, error) { + escapedName := pathEscape(name) + req, err := d.conn.NewRequest("GET", path.Join(d.relPath(), "_api/gharial", escapedName)) + if err != nil { + return nil, WithStack(err) + } + resp, err := d.conn.Do(ctx, req) + if err != nil { + return nil, WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return nil, WithStack(err) + } + g, err := newGraph(name, d) + if err != nil { + return nil, WithStack(err) + } + return g, nil +} + +// GraphExists returns true if a graph with given name exists within the database. +func (d *database) GraphExists(ctx context.Context, name string) (bool, error) { + escapedName := pathEscape(name) + req, err := d.conn.NewRequest("GET", path.Join(d.relPath(), "_api/gharial", escapedName)) + if err != nil { + return false, WithStack(err) + } + resp, err := d.conn.Do(ctx, req) + if err != nil { + return false, WithStack(err) + } + if err := resp.CheckStatus(200); err == nil { + return true, nil + } else if IsNotFound(err) { + return false, nil + } else { + return false, WithStack(err) + } +} + +type getGraphsResponse struct { + Graphs []DocumentMeta `json:"graphs,omitempty"` +} + +// Graphs returns a list of all graphs in the database. 
+func (d *database) Graphs(ctx context.Context) ([]Graph, error) { + req, err := d.conn.NewRequest("GET", path.Join(d.relPath(), "_api/gharial")) + if err != nil { + return nil, WithStack(err) + } + resp, err := d.conn.Do(ctx, req) + if err != nil { + return nil, WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return nil, WithStack(err) + } + var data getGraphsResponse + if err := resp.ParseBody("", &data); err != nil { + return nil, WithStack(err) + } + result := make([]Graph, 0, len(data.Graphs)) + for _, info := range data.Graphs { + g, err := newGraph(info.Key, d) + if err != nil { + return nil, WithStack(err) + } + result = append(result, g) + } + return result, nil +} + +type createGraphOptions struct { + Name string `json:"name"` + OrphanVertexCollections []string `json:"orphanCollections,omitempty"` + EdgeDefinitions []EdgeDefinition `json:"edgeDefinitions,omitempty"` + IsSmart bool `json:"isSmart,omitempty"` + Options *createGraphAdditionalOptions `json:"options,omitempty"` +} + +type createGraphAdditionalOptions struct { + // SmartGraphAttribute is the attribute name that is used to smartly shard the vertices of a graph. + // Every vertex in this Graph has to have this attribute. + // Cannot be modified later. + SmartGraphAttribute string `json:"smartGraphAttribute,omitempty"` + // NumberOfShards is the number of shards that is used for every collection within this graph. + // Cannot be modified later. + NumberOfShards int `json:"numberOfShards,omitempty"` +} + +// CreateGraph creates a new graph with given name and options, and opens a connection to it. +// If a graph with given name already exists within the database, a DuplicateError is returned. 
+func (d *database) CreateGraph(ctx context.Context, name string, options *CreateGraphOptions) (Graph, error) { + input := createGraphOptions{ + Name: name, + } + if options != nil { + input.OrphanVertexCollections = options.OrphanVertexCollections + input.EdgeDefinitions = options.EdgeDefinitions + input.IsSmart = options.IsSmart + if options.SmartGraphAttribute != "" || options.NumberOfShards != 0 { + input.Options = &createGraphAdditionalOptions{ + SmartGraphAttribute: options.SmartGraphAttribute, + NumberOfShards: options.NumberOfShards, + } + } + } + req, err := d.conn.NewRequest("POST", path.Join(d.relPath(), "_api/gharial")) + if err != nil { + return nil, WithStack(err) + } + if _, err := req.SetBody(input); err != nil { + return nil, WithStack(err) + } + resp, err := d.conn.Do(ctx, req) + if err != nil { + return nil, WithStack(err) + } + if err := resp.CheckStatus(201, 202); err != nil { + return nil, WithStack(err) + } + g, err := newGraph(name, d) + if err != nil { + return nil, WithStack(err) + } + return g, nil +} diff --git a/deps/github.com/arangodb/go-driver/database_impl.go b/deps/github.com/arangodb/go-driver/database_impl.go new file mode 100644 index 000000000..b9aa6d4dd --- /dev/null +++ b/deps/github.com/arangodb/go-driver/database_impl.go @@ -0,0 +1,217 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import ( + "context" + "net/http" + "path" +) + +// newDatabase creates a new Database implementation. +func newDatabase(name string, conn Connection) (Database, error) { + if name == "" { + return nil, WithStack(InvalidArgumentError{Message: "name is empty"}) + } + if conn == nil { + return nil, WithStack(InvalidArgumentError{Message: "conn is nil"}) + } + return &database{ + name: name, + conn: conn, + }, nil +} + +// database implements the Database interface. +type database struct { + name string + conn Connection +} + +// relPath creates the relative path to this database (`_db/`) +func (d *database) relPath() string { + escapedName := pathEscape(d.name) + return path.Join("_db", escapedName) +} + +// Name returns the name of the database. +func (d *database) Name() string { + return d.name +} + +// Info fetches information about the database. +func (d *database) Info(ctx context.Context) (DatabaseInfo, error) { + req, err := d.conn.NewRequest("GET", path.Join(d.relPath(), "_api/database/current")) + if err != nil { + return DatabaseInfo{}, WithStack(err) + } + applyContextSettings(ctx, req) + resp, err := d.conn.Do(ctx, req) + if err != nil { + return DatabaseInfo{}, WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return DatabaseInfo{}, WithStack(err) + } + var data DatabaseInfo + if err := resp.ParseBody("result", &data); err != nil { + return DatabaseInfo{}, WithStack(err) + } + return data, nil +} + +// EngineInfo returns information about the database engine being used. +// Note: When your cluster has multiple endpoints (cluster), you will get information +// from the server that is currently being used. +// If you want to know exactly which server the information is from, use a client +// with only a single endpoint and avoid automatic synchronization of endpoints. 
+func (d *database) EngineInfo(ctx context.Context) (EngineInfo, error) { + req, err := d.conn.NewRequest("GET", path.Join(d.relPath(), "_api/engine")) + if err != nil { + return EngineInfo{}, WithStack(err) + } + resp, err := d.conn.Do(ctx, req) + if err != nil { + return EngineInfo{}, WithStack(err) + } + if err := resp.CheckStatus(200, 404); err != nil { + return EngineInfo{}, WithStack(err) + } + if resp.StatusCode() == 404 { + // On version 3.1, this endpoint is not yet supported + return EngineInfo{Type: EngineTypeMMFiles}, nil + } + var data EngineInfo + if err := resp.ParseBody("", &data); err != nil { + return EngineInfo{}, WithStack(err) + } + return data, nil +} + +// Remove removes the entire database. +// If the database does not exist, a NotFoundError is returned. +func (d *database) Remove(ctx context.Context) error { + req, err := d.conn.NewRequest("DELETE", path.Join("_db/_system/_api/database", pathEscape(d.name))) + if err != nil { + return WithStack(err) + } + resp, err := d.conn.Do(ctx, req) + if err != nil { + return WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return WithStack(err) + } + return nil +} + +// Query performs an AQL query, returning a cursor used to iterate over the returned documents. 
+func (d *database) Query(ctx context.Context, query string, bindVars map[string]interface{}) (Cursor, error) { + req, err := d.conn.NewRequest("POST", path.Join(d.relPath(), "_api/cursor")) + if err != nil { + return nil, WithStack(err) + } + input := queryRequest{ + Query: query, + BindVars: bindVars, + } + input.applyContextSettings(ctx) + if _, err := req.SetBody(input); err != nil { + return nil, WithStack(err) + } + resp, err := d.conn.Do(ctx, req) + if err != nil { + return nil, WithStack(err) + } + if err := resp.CheckStatus(201); err != nil { + return nil, WithStack(err) + } + var data cursorData + if err := resp.ParseBody("", &data); err != nil { + return nil, WithStack(err) + } + col, err := newCursor(data, resp.Endpoint(), d) + if err != nil { + return nil, WithStack(err) + } + return col, nil +} + +// ValidateQuery validates an AQL query. +// When the query is valid, nil returned, otherwise an error is returned. +// The query is not executed. +func (d *database) ValidateQuery(ctx context.Context, query string) error { + req, err := d.conn.NewRequest("POST", path.Join(d.relPath(), "_api/query")) + if err != nil { + return WithStack(err) + } + input := parseQueryRequest{ + Query: query, + } + if _, err := req.SetBody(input); err != nil { + return WithStack(err) + } + resp, err := d.conn.Do(ctx, req) + if err != nil { + return WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return WithStack(err) + } + return nil +} + +func (d *database) Transaction(ctx context.Context, action string, options *TransactionOptions) (interface{}, error) { + req, err := d.conn.NewRequest("POST", path.Join(d.relPath(), "_api/transaction")) + if err != nil { + return nil, WithStack(err) + } + input := transactionRequest{Action: action} + if options != nil { + input.MaxTransactionSize = options.MaxTransactionSize + input.LockTimeout = options.LockTimeout + input.WaitForSync = options.WaitForSync + input.IntermediateCommitCount = 
options.IntermediateCommitCount + input.Params = options.Params + input.IntermediateCommitSize = options.IntermediateCommitSize + input.Collections.Read = options.ReadCollections + input.Collections.Write = options.WriteCollections + } + if _, err = req.SetBody(input); err != nil { + return nil, WithStack(err) + } + resp, err := d.conn.Do(ctx, req) + if err != nil { + return nil, WithStack(err) + } + if err = resp.CheckStatus(http.StatusOK); err != nil { + return nil, WithStack(err) + } + + output := &transactionResponse{} + if err = resp.ParseBody("", output); err != nil { + return nil, WithStack(err) + } + + return output.Result, nil +} diff --git a/deps/github.com/arangodb/go-driver/doc.go b/deps/github.com/arangodb/go-driver/doc.go new file mode 100644 index 000000000..2e3f4851e --- /dev/null +++ b/deps/github.com/arangodb/go-driver/doc.go @@ -0,0 +1,44 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +/* +Package driver implements a Go driver for the ArangoDB database. + +To get started, create a connection to the database and wrap a client around it. 
+ + // Create an HTTP connection to the database + conn, err := http.NewConnection(http.ConnectionConfig{ + Endpoints: []string{"http://localhost:8529"}, + }) + if err != nil { + // Handle error + } + // Create a client + c, err := driver.NewClient(driver.ClientConfig{ + Connection: conn, + }) + if err != nil { + // Handle error + } + +*/ +package driver diff --git a/deps/github.com/arangodb/go-driver/edge.go b/deps/github.com/arangodb/go-driver/edge.go new file mode 100644 index 000000000..5f82fd500 --- /dev/null +++ b/deps/github.com/arangodb/go-driver/edge.go @@ -0,0 +1,31 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +// EdgeDocument is a minimal document for use in edge collection. +// You can use this in your own edge document structures completely use your own. +// If you use your own, make sure to include a `_from` and `_to` field. 
+type EdgeDocument struct { + From DocumentID `json:"_from,omitempty"` + To DocumentID `json:"_to,omitempty"` +} diff --git a/deps/github.com/arangodb/go-driver/edge_collection_documents_impl.go b/deps/github.com/arangodb/go-driver/edge_collection_documents_impl.go new file mode 100644 index 000000000..c09d5d7b7 --- /dev/null +++ b/deps/github.com/arangodb/go-driver/edge_collection_documents_impl.go @@ -0,0 +1,537 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import ( + "context" + "fmt" + "path" + "reflect" + "strings" +) + +// DocumentExists checks if a document with given key exists in the collection. +func (c *edgeCollection) DocumentExists(ctx context.Context, key string) (bool, error) { + if result, err := c.rawCollection().DocumentExists(ctx, key); err != nil { + return false, WithStack(err) + } else { + return result, nil + } +} + +// ReadDocument reads a single document with given key from the collection. +// The document data is stored into result, the document meta data is returned. +// If no document exists with given key, a NotFoundError is returned. 
+func (c *edgeCollection) ReadDocument(ctx context.Context, key string, result interface{}) (DocumentMeta, error) {
+ if err := validateKey(key); err != nil {
+ return DocumentMeta{}, WithStack(err)
+ }
+ escapedKey := pathEscape(key)
+ req, err := c.conn.NewRequest("GET", path.Join(c.relPath(), escapedKey))
+ if err != nil {
+ return DocumentMeta{}, WithStack(err)
+ }
+ resp, err := c.conn.Do(ctx, req)
+ if err != nil {
+ return DocumentMeta{}, WithStack(err)
+ }
+ if err := resp.CheckStatus(200); err != nil {
+ return DocumentMeta{}, WithStack(err)
+ }
+ // Parse metadata
+ var meta DocumentMeta
+ if err := resp.ParseBody("edge", &meta); err != nil {
+ return DocumentMeta{}, WithStack(err)
+ }
+ // Parse result
+ if result != nil {
+ if err := resp.ParseBody("edge", result); err != nil {
+ return meta, WithStack(err)
+ }
+ }
+ return meta, nil
+}
+
+// CreateDocument creates a single document in the collection.
+// The document data is loaded from the given document, the document meta data is returned.
+// If the document data already contains a `_key` field, this will be used as key of the new document,
+// otherwise a unique key is created.
+// A ConflictError is returned when a `_key` field contains a duplicate key, or any other field violates an index constraint.
+// To return the NEW document, prepare a context with `WithReturnNew`.
+// To wait until document has been synced to disk, prepare a context with `WithWaitForSync`.
+func (c *edgeCollection) CreateDocument(ctx context.Context, document interface{}) (DocumentMeta, error) {
+ meta, _, err := c.createDocument(ctx, document)
+ if err != nil {
+ return DocumentMeta{}, WithStack(err)
+ }
+ return meta, nil
+}
+
+func (c *edgeCollection) createDocument(ctx context.Context, document interface{}) (DocumentMeta, contextSettings, error) {
+ if document == nil {
+ return DocumentMeta{}, contextSettings{}, WithStack(InvalidArgumentError{Message: "document nil"})
+ }
+ req, err := c.conn.NewRequest("POST", c.relPath())
+ if err != nil {
+ return DocumentMeta{}, contextSettings{}, WithStack(err)
+ }
+ if _, err := req.SetBody(document); err != nil {
+ return DocumentMeta{}, contextSettings{}, WithStack(err)
+ }
+ cs := applyContextSettings(ctx, req)
+ resp, err := c.conn.Do(ctx, req)
+ if err != nil {
+ return DocumentMeta{}, cs, WithStack(err)
+ }
+ if err := resp.CheckStatus(cs.okStatus(201, 202)); err != nil {
+ return DocumentMeta{}, cs, WithStack(err)
+ }
+ if cs.Silent {
+ // Empty response, we're done
+ return DocumentMeta{}, cs, nil
+ }
+ // Parse metadata
+ var meta DocumentMeta
+ if err := resp.ParseBody("edge", &meta); err != nil {
+ return DocumentMeta{}, cs, WithStack(err)
+ }
+ // Parse returnNew (if needed)
+ if cs.ReturnNew != nil {
+ if err := resp.ParseBody("new", cs.ReturnNew); err != nil {
+ return meta, cs, WithStack(err)
+ }
+ }
+ return meta, cs, nil
+}
+
+// CreateDocuments creates multiple documents in the collection.
+// The document data is loaded from the given documents slice, the documents meta data is returned.
+// If a documents element already contains a `_key` field, this will be used as key of the new document,
+// otherwise a unique key is created.
+// If a documents element contains a `_key` field with a duplicate key, or any other field violates an index constraint,
+// a ConflictError is returned at its index in the errors slice.
+// To return the NEW documents, prepare a context with `WithReturnNew`. The data argument passed to `WithReturnNew` must be +// a slice with the same number of entries as the `documents` slice. +// To wait until document has been synced to disk, prepare a context with `WithWaitForSync`. +// If the create request itself fails or one of the arguments is invalid, an error is returned. +func (c *edgeCollection) CreateDocuments(ctx context.Context, documents interface{}) (DocumentMetaSlice, ErrorSlice, error) { + documentsVal := reflect.ValueOf(documents) + switch documentsVal.Kind() { + case reflect.Array, reflect.Slice: + // OK + default: + return nil, nil, WithStack(InvalidArgumentError{Message: fmt.Sprintf("documents data must be of kind Array, got %s", documentsVal.Kind())}) + } + documentCount := documentsVal.Len() + metas := make(DocumentMetaSlice, documentCount) + errs := make(ErrorSlice, documentCount) + silent := false + for i := 0; i < documentCount; i++ { + doc := documentsVal.Index(i) + ctx, err := withDocumentAt(ctx, i) + if err != nil { + return nil, nil, WithStack(err) + } + meta, cs, err := c.createDocument(ctx, doc.Interface()) + if cs.Silent { + silent = true + } else { + metas[i], errs[i] = meta, err + } + } + if silent { + return nil, nil, nil + } + return metas, errs, nil +} + +// UpdateDocument updates a single document with given key in the collection. +// The document meta data is returned. +// To return the NEW document, prepare a context with `WithReturnNew`. +// To return the OLD document, prepare a context with `WithReturnOld`. +// To wait until document has been synced to disk, prepare a context with `WithWaitForSync`. +// If no document exists with given key, a NotFoundError is returned. 
+func (c *edgeCollection) UpdateDocument(ctx context.Context, key string, update interface{}) (DocumentMeta, error) { + meta, _, err := c.updateDocument(ctx, key, update) + if err != nil { + return DocumentMeta{}, WithStack(err) + } + return meta, nil +} + +func (c *edgeCollection) updateDocument(ctx context.Context, key string, update interface{}) (DocumentMeta, contextSettings, error) { + if err := validateKey(key); err != nil { + return DocumentMeta{}, contextSettings{}, WithStack(err) + } + if update == nil { + return DocumentMeta{}, contextSettings{}, WithStack(InvalidArgumentError{Message: "update nil"}) + } + escapedKey := pathEscape(key) + req, err := c.conn.NewRequest("PATCH", path.Join(c.relPath(), escapedKey)) + if err != nil { + return DocumentMeta{}, contextSettings{}, WithStack(err) + } + if _, err := req.SetBody(update); err != nil { + return DocumentMeta{}, contextSettings{}, WithStack(err) + } + cs := applyContextSettings(ctx, req) + resp, err := c.conn.Do(ctx, req) + if err != nil { + return DocumentMeta{}, cs, WithStack(err) + } + if err := resp.CheckStatus(200, 201, 202); err != nil { + return DocumentMeta{}, cs, WithStack(err) + } + if cs.Silent { + // Empty response, we're done + return DocumentMeta{}, cs, nil + } + // Parse metadata + var meta DocumentMeta + if err := resp.ParseBody("edge", &meta); err != nil { + return DocumentMeta{}, cs, WithStack(err) + } + // Parse returnOld (if needed) + if cs.ReturnOld != nil { + if err := resp.ParseBody("old", cs.ReturnOld); err != nil { + return meta, cs, WithStack(err) + } + } + // Parse returnNew (if needed) + if cs.ReturnNew != nil { + if err := resp.ParseBody("new", cs.ReturnNew); err != nil { + return meta, cs, WithStack(err) + } + } + return meta, cs, nil +} + +// UpdateDocuments updates multiple document with given keys in the collection. +// The updates are loaded from the given updates slice, the documents meta data are returned. 
+// To return the NEW documents, prepare a context with `WithReturnNew` with a slice of documents. +// To return the OLD documents, prepare a context with `WithReturnOld` with a slice of documents. +// To wait until documents has been synced to disk, prepare a context with `WithWaitForSync`. +// If no document exists with a given key, a NotFoundError is returned at its errors index. +func (c *edgeCollection) UpdateDocuments(ctx context.Context, keys []string, updates interface{}) (DocumentMetaSlice, ErrorSlice, error) { + updatesVal := reflect.ValueOf(updates) + switch updatesVal.Kind() { + case reflect.Array, reflect.Slice: + // OK + default: + return nil, nil, WithStack(InvalidArgumentError{Message: fmt.Sprintf("updates data must be of kind Array, got %s", updatesVal.Kind())}) + } + updateCount := updatesVal.Len() + if keys != nil { + if len(keys) != updateCount { + return nil, nil, WithStack(InvalidArgumentError{Message: fmt.Sprintf("expected %d keys, got %d", updateCount, len(keys))}) + } + for _, key := range keys { + if err := validateKey(key); err != nil { + return nil, nil, WithStack(err) + } + } + } + metas := make(DocumentMetaSlice, updateCount) + errs := make(ErrorSlice, updateCount) + silent := false + for i := 0; i < updateCount; i++ { + update := updatesVal.Index(i) + ctx, err := withDocumentAt(ctx, i) + if err != nil { + return nil, nil, WithStack(err) + } + var key string + if keys != nil { + key = keys[i] + } else { + var err error + key, err = getKeyFromDocument(update) + if err != nil { + errs[i] = err + continue + } + } + meta, cs, err := c.updateDocument(ctx, key, update.Interface()) + if cs.Silent { + silent = true + } else { + metas[i], errs[i] = meta, err + } + } + if silent { + return nil, nil, nil + } + return metas, errs, nil +} + +// ReplaceDocument replaces a single document with given key in the collection with the document given in the document argument. +// The document meta data is returned. 
+// To return the NEW document, prepare a context with `WithReturnNew`. +// To return the OLD document, prepare a context with `WithReturnOld`. +// To wait until document has been synced to disk, prepare a context with `WithWaitForSync`. +// If no document exists with given key, a NotFoundError is returned. +func (c *edgeCollection) ReplaceDocument(ctx context.Context, key string, document interface{}) (DocumentMeta, error) { + meta, _, err := c.replaceDocument(ctx, key, document) + if err != nil { + return DocumentMeta{}, WithStack(err) + } + return meta, nil +} + +func (c *edgeCollection) replaceDocument(ctx context.Context, key string, document interface{}) (DocumentMeta, contextSettings, error) { + if err := validateKey(key); err != nil { + return DocumentMeta{}, contextSettings{}, WithStack(err) + } + if document == nil { + return DocumentMeta{}, contextSettings{}, WithStack(InvalidArgumentError{Message: "document nil"}) + } + escapedKey := pathEscape(key) + req, err := c.conn.NewRequest("PUT", path.Join(c.relPath(), escapedKey)) + if err != nil { + return DocumentMeta{}, contextSettings{}, WithStack(err) + } + if _, err := req.SetBody(document); err != nil { + return DocumentMeta{}, contextSettings{}, WithStack(err) + } + cs := applyContextSettings(ctx, req) + resp, err := c.conn.Do(ctx, req) + if err != nil { + return DocumentMeta{}, cs, WithStack(err) + } + if err := resp.CheckStatus(cs.okStatus(201, 202)); err != nil { + return DocumentMeta{}, cs, WithStack(err) + } + if cs.Silent { + // Empty response, we're done + return DocumentMeta{}, cs, nil + } + // Parse metadata + var meta DocumentMeta + if err := resp.ParseBody("edge", &meta); err != nil { + return DocumentMeta{}, cs, WithStack(err) + } + // Parse returnOld (if needed) + if cs.ReturnOld != nil { + if err := resp.ParseBody("old", cs.ReturnOld); err != nil { + return meta, cs, WithStack(err) + } + } + // Parse returnNew (if needed) + if cs.ReturnNew != nil { + if err := resp.ParseBody("new", 
cs.ReturnNew); err != nil { + return meta, cs, WithStack(err) + } + } + return meta, cs, nil +} + +// ReplaceDocuments replaces multiple documents with given keys in the collection with the documents given in the documents argument. +// The replacements are loaded from the given documents slice, the documents meta data are returned. +// To return the NEW documents, prepare a context with `WithReturnNew` with a slice of documents. +// To return the OLD documents, prepare a context with `WithReturnOld` with a slice of documents. +// To wait until documents has been synced to disk, prepare a context with `WithWaitForSync`. +// If no document exists with a given key, a NotFoundError is returned at its errors index. +func (c *edgeCollection) ReplaceDocuments(ctx context.Context, keys []string, documents interface{}) (DocumentMetaSlice, ErrorSlice, error) { + documentsVal := reflect.ValueOf(documents) + switch documentsVal.Kind() { + case reflect.Array, reflect.Slice: + // OK + default: + return nil, nil, WithStack(InvalidArgumentError{Message: fmt.Sprintf("documents data must be of kind Array, got %s", documentsVal.Kind())}) + } + documentCount := documentsVal.Len() + if keys != nil { + if len(keys) != documentCount { + return nil, nil, WithStack(InvalidArgumentError{Message: fmt.Sprintf("expected %d keys, got %d", documentCount, len(keys))}) + } + for _, key := range keys { + if err := validateKey(key); err != nil { + return nil, nil, WithStack(err) + } + } + } + metas := make(DocumentMetaSlice, documentCount) + errs := make(ErrorSlice, documentCount) + silent := false + for i := 0; i < documentCount; i++ { + doc := documentsVal.Index(i) + ctx, err := withDocumentAt(ctx, i) + if err != nil { + return nil, nil, WithStack(err) + } + var key string + if keys != nil { + key = keys[i] + } else { + var err error + key, err = getKeyFromDocument(doc) + if err != nil { + errs[i] = err + continue + } + } + meta, cs, err := c.replaceDocument(ctx, key, doc.Interface()) + if 
cs.Silent { + silent = true + } else { + metas[i], errs[i] = meta, err + } + } + if silent { + return nil, nil, nil + } + return metas, errs, nil +} + +// RemoveDocument removes a single document with given key from the collection. +// The document meta data is returned. +// To return the OLD document, prepare a context with `WithReturnOld`. +// To wait until removal has been synced to disk, prepare a context with `WithWaitForSync`. +// If no document exists with given key, a NotFoundError is returned. +func (c *edgeCollection) RemoveDocument(ctx context.Context, key string) (DocumentMeta, error) { + meta, _, err := c.removeDocument(ctx, key) + if err != nil { + return DocumentMeta{}, WithStack(err) + } + return meta, nil +} + +func (c *edgeCollection) removeDocument(ctx context.Context, key string) (DocumentMeta, contextSettings, error) { + if err := validateKey(key); err != nil { + return DocumentMeta{}, contextSettings{}, WithStack(err) + } + escapedKey := pathEscape(key) + req, err := c.conn.NewRequest("DELETE", path.Join(c.relPath(), escapedKey)) + if err != nil { + return DocumentMeta{}, contextSettings{}, WithStack(err) + } + cs := applyContextSettings(ctx, req) + if cs.ReturnOld != nil { + return DocumentMeta{}, contextSettings{}, WithStack(InvalidArgumentError{Message: "ReturnOld is not support when removing edges"}) + } + resp, err := c.conn.Do(ctx, req) + if err != nil { + return DocumentMeta{}, cs, WithStack(err) + } + if err := resp.CheckStatus(cs.okStatus(200, 202)); err != nil { + return DocumentMeta{}, cs, WithStack(err) + } + if cs.Silent { + // Empty response, we're done + return DocumentMeta{}, cs, nil + } + // Parse metadata + var meta DocumentMeta + if err := resp.ParseBody("edge", &meta); err != nil { + return DocumentMeta{}, cs, WithStack(err) + } + // Parse returnOld (if needed) + if cs.ReturnOld != nil { + if err := resp.ParseBody("old", cs.ReturnOld); err != nil { + return meta, cs, WithStack(err) + } + } + return meta, cs, nil +} + +// 
RemoveDocuments removes multiple documents with given keys from the collection. +// The document meta data are returned. +// To return the OLD documents, prepare a context with `WithReturnOld` with a slice of documents. +// To wait until removal has been synced to disk, prepare a context with `WithWaitForSync`. +// If no document exists with a given key, a NotFoundError is returned at its errors index. +func (c *edgeCollection) RemoveDocuments(ctx context.Context, keys []string) (DocumentMetaSlice, ErrorSlice, error) { + keyCount := len(keys) + for _, key := range keys { + if err := validateKey(key); err != nil { + return nil, nil, WithStack(err) + } + } + metas := make(DocumentMetaSlice, keyCount) + errs := make(ErrorSlice, keyCount) + silent := false + for i := 0; i < keyCount; i++ { + key := keys[i] + ctx, err := withDocumentAt(ctx, i) + if err != nil { + return nil, nil, WithStack(err) + } + meta, cs, err := c.removeDocument(ctx, key) + if cs.Silent { + silent = true + } else { + metas[i], errs[i] = meta, err + } + } + if silent { + return nil, nil, nil + } + return metas, errs, nil +} + +// ImportDocuments imports one or more documents into the collection. +// The document data is loaded from the given documents argument, statistics are returned. +// The documents argument can be one of the following: +// - An array of structs: All structs will be imported as individual documents. +// - An array of maps: All maps will be imported as individual documents. +// To wait until all documents have been synced to disk, prepare a context with `WithWaitForSync`. +// To return details about documents that could not be imported, prepare a context with `WithImportDetails`. 
+func (c *edgeCollection) ImportDocuments(ctx context.Context, documents interface{}, options *ImportDocumentOptions) (ImportDocumentStatistics, error) { + stats, err := c.rawCollection().ImportDocuments(ctx, documents, options) + if err != nil { + return ImportDocumentStatistics{}, WithStack(err) + } + return stats, nil +} + +// getKeyFromDocument looks for a `_key` document in the given document and returns it. +func getKeyFromDocument(doc reflect.Value) (string, error) { + if doc.IsNil() { + return "", WithStack(InvalidArgumentError{Message: "Document is nil"}) + } + if doc.Kind() == reflect.Ptr { + doc = doc.Elem() + } + switch doc.Kind() { + case reflect.Struct: + structType := doc.Type() + fieldCount := structType.NumField() + for i := 0; i < fieldCount; i++ { + f := structType.Field(i) + tagParts := strings.Split(f.Tag.Get("json"), ",") + if tagParts[0] == "_key" { + // We found the _key field + keyVal := doc.Field(i) + return keyVal.String(), nil + } + } + return "", WithStack(InvalidArgumentError{Message: "Document contains no '_key' field"}) + case reflect.Map: + keyVal := doc.MapIndex(reflect.ValueOf("_key")) + if keyVal.IsNil() { + return "", WithStack(InvalidArgumentError{Message: "Document contains no '_key' entry"}) + } + return keyVal.String(), nil + default: + return "", WithStack(InvalidArgumentError{Message: fmt.Sprintf("Document must be struct or map. Got %s", doc.Kind())}) + } +} diff --git a/deps/github.com/arangodb/go-driver/edge_collection_impl.go b/deps/github.com/arangodb/go-driver/edge_collection_impl.go new file mode 100644 index 000000000..fad101c6f --- /dev/null +++ b/deps/github.com/arangodb/go-driver/edge_collection_impl.go @@ -0,0 +1,168 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import ( + "context" + "path" +) + +// newEdgeCollection creates a new EdgeCollection implementation. +func newEdgeCollection(name string, g *graph) (Collection, error) { + if name == "" { + return nil, WithStack(InvalidArgumentError{Message: "name is empty"}) + } + if g == nil { + return nil, WithStack(InvalidArgumentError{Message: "g is nil"}) + } + return &edgeCollection{ + name: name, + g: g, + conn: g.db.conn, + }, nil +} + +type edgeCollection struct { + name string + g *graph + conn Connection +} + +// relPath creates the relative path to this edge collection (`_db//_api/gharial//edge/`) +func (c *edgeCollection) relPath() string { + escapedName := pathEscape(c.name) + return path.Join(c.g.relPath(), "edge", escapedName) +} + +// Name returns the name of the edge collection. +func (c *edgeCollection) Name() string { + return c.name +} + +// Database returns the database containing the collection. +func (c *edgeCollection) Database() Database { + return c.g.db +} + +// rawCollection returns a standard document implementation of Collection +// for this edge collection. +func (c *edgeCollection) rawCollection() Collection { + result, _ := newCollection(c.name, c.g.db) + return result +} + +// Status fetches the current status of the collection. 
+func (c *edgeCollection) Status(ctx context.Context) (CollectionStatus, error) { + result, err := c.rawCollection().Status(ctx) + if err != nil { + return CollectionStatus(0), WithStack(err) + } + return result, nil +} + +// Count fetches the number of document in the collection. +func (c *edgeCollection) Count(ctx context.Context) (int64, error) { + result, err := c.rawCollection().Count(ctx) + if err != nil { + return 0, WithStack(err) + } + return result, nil +} + +// Statistics returns the number of documents and additional statistical information about the collection. +func (c *edgeCollection) Statistics(ctx context.Context) (CollectionStatistics, error) { + result, err := c.rawCollection().Statistics(ctx) + if err != nil { + return CollectionStatistics{}, WithStack(err) + } + return result, nil +} + +// Revision fetches the revision ID of the collection. +// The revision ID is a server-generated string that clients can use to check whether data +// in a collection has changed since the last revision check. +func (c *edgeCollection) Revision(ctx context.Context) (string, error) { + result, err := c.rawCollection().Revision(ctx) + if err != nil { + return "", WithStack(err) + } + return result, nil +} + +// Properties fetches extended information about the collection. +func (c *edgeCollection) Properties(ctx context.Context) (CollectionProperties, error) { + result, err := c.rawCollection().Properties(ctx) + if err != nil { + return CollectionProperties{}, WithStack(err) + } + return result, nil +} + +// SetProperties changes properties of the collection. +func (c *edgeCollection) SetProperties(ctx context.Context, options SetCollectionPropertiesOptions) error { + if err := c.rawCollection().SetProperties(ctx, options); err != nil { + return WithStack(err) + } + return nil +} + +// Load the collection into memory. 
+func (c *edgeCollection) Load(ctx context.Context) error { + if err := c.rawCollection().Load(ctx); err != nil { + return WithStack(err) + } + return nil +} + +// UnLoad the collection from memory. +func (c *edgeCollection) Unload(ctx context.Context) error { + if err := c.rawCollection().Unload(ctx); err != nil { + return WithStack(err) + } + return nil +} + +// Remove removes the entire collection. +// If the collection does not exist, a NotFoundError is returned. +func (c *edgeCollection) Remove(ctx context.Context) error { + req, err := c.conn.NewRequest("DELETE", c.relPath()) + if err != nil { + return WithStack(err) + } + resp, err := c.conn.Do(ctx, req) + if err != nil { + return WithStack(err) + } + if err := resp.CheckStatus(201, 202); err != nil { + return WithStack(err) + } + return nil +} + +// Truncate removes all documents from the collection, but leaves the indexes intact. +func (c *edgeCollection) Truncate(ctx context.Context) error { + if err := c.rawCollection().Truncate(ctx); err != nil { + return WithStack(err) + } + return nil +} diff --git a/deps/github.com/arangodb/go-driver/edge_collection_indexes_impl.go b/deps/github.com/arangodb/go-driver/edge_collection_indexes_impl.go new file mode 100644 index 000000000..c7caefbdb --- /dev/null +++ b/deps/github.com/arangodb/go-driver/edge_collection_indexes_impl.go @@ -0,0 +1,116 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import "context" + +// Index opens a connection to an existing index within the collection. +// If no index with given name exists, an NotFoundError is returned. +func (c *edgeCollection) Index(ctx context.Context, name string) (Index, error) { + result, err := c.rawCollection().Index(ctx, name) + if err != nil { + return nil, WithStack(err) + } + return result, nil +} + +// IndexExists returns true if an index with given name exists within the collection. +func (c *edgeCollection) IndexExists(ctx context.Context, name string) (bool, error) { + result, err := c.rawCollection().IndexExists(ctx, name) + if err != nil { + return false, WithStack(err) + } + return result, nil +} + +// Indexes returns a list of all indexes in the collection. +func (c *edgeCollection) Indexes(ctx context.Context) ([]Index, error) { + result, err := c.rawCollection().Indexes(ctx) + if err != nil { + return nil, WithStack(err) + } + return result, nil +} + +// EnsureFullTextIndex creates a fulltext index in the collection, if it does not already exist. +// +// Fields is a slice of attribute names. Currently, the slice is limited to exactly one attribute. +// The index is returned, together with a boolean indicating if the index was newly created (true) or pre-existing (false). +func (c *edgeCollection) EnsureFullTextIndex(ctx context.Context, fields []string, options *EnsureFullTextIndexOptions) (Index, bool, error) { + result, created, err := c.rawCollection().EnsureFullTextIndex(ctx, fields, options) + if err != nil { + return nil, false, WithStack(err) + } + return result, created, nil +} + +// EnsureGeoIndex creates a hash index in the collection, if it does not already exist. +// +// Fields is a slice with one or two attribute paths. 
If it is a slice with one attribute path location, +// then a geo-spatial index on all documents is created using location as path to the coordinates. +// The value of the attribute must be a slice with at least two double values. The slice must contain the latitude (first value) +// and the longitude (second value). All documents, which do not have the attribute path or with value that are not suitable, are ignored. +// If it is a slice with two attribute paths latitude and longitude, then a geo-spatial index on all documents is created +// using latitude and longitude as paths the latitude and the longitude. The value of the attribute latitude and of the +// attribute longitude must a double. All documents, which do not have the attribute paths or which values are not suitable, are ignored. +// The index is returned, together with a boolean indicating if the index was newly created (true) or pre-existing (false). +func (c *edgeCollection) EnsureGeoIndex(ctx context.Context, fields []string, options *EnsureGeoIndexOptions) (Index, bool, error) { + result, created, err := c.rawCollection().EnsureGeoIndex(ctx, fields, options) + if err != nil { + return nil, false, WithStack(err) + } + return result, created, nil +} + +// EnsureHashIndex creates a hash index in the collection, if it does not already exist. +// Fields is a slice of attribute paths. +// The index is returned, together with a boolean indicating if the index was newly created (true) or pre-existing (false). +func (c *edgeCollection) EnsureHashIndex(ctx context.Context, fields []string, options *EnsureHashIndexOptions) (Index, bool, error) { + result, created, err := c.rawCollection().EnsureHashIndex(ctx, fields, options) + if err != nil { + return nil, false, WithStack(err) + } + return result, created, nil +} + +// EnsurePersistentIndex creates a persistent index in the collection, if it does not already exist. +// Fields is a slice of attribute paths. 
+// The index is returned, together with a boolean indicating if the index was newly created (true) or pre-existing (false). +func (c *edgeCollection) EnsurePersistentIndex(ctx context.Context, fields []string, options *EnsurePersistentIndexOptions) (Index, bool, error) { + result, created, err := c.rawCollection().EnsurePersistentIndex(ctx, fields, options) + if err != nil { + return nil, false, WithStack(err) + } + return result, created, nil +} + +// EnsureSkipListIndex creates a skiplist index in the collection, if it does not already exist. +// Fields is a slice of attribute paths. +// The index is returned, together with a boolean indicating if the index was newly created (true) or pre-existing (false). +func (c *edgeCollection) EnsureSkipListIndex(ctx context.Context, fields []string, options *EnsureSkipListIndexOptions) (Index, bool, error) { + result, created, err := c.rawCollection().EnsureSkipListIndex(ctx, fields, options) + if err != nil { + return nil, false, WithStack(err) + } + return result, created, nil +} diff --git a/deps/github.com/arangodb/go-driver/encode-go_1_8.go b/deps/github.com/arangodb/go-driver/encode-go_1_8.go new file mode 100644 index 000000000..1fc2d8348 --- /dev/null +++ b/deps/github.com/arangodb/go-driver/encode-go_1_8.go @@ -0,0 +1,37 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +// +build "go1.8" + +package driver + +import "net/url" + +// pathEscape the given value for use in a URL path. +func pathEscape(s string) string { + return url.PathEscape(s) +} + +// pathUnescape unescapes the given value for use in a URL path. +func pathUnescape(s string) string { + return url.PathUnescape(s) +} diff --git a/deps/github.com/arangodb/go-driver/encode.go b/deps/github.com/arangodb/go-driver/encode.go new file mode 100644 index 000000000..a490ab94e --- /dev/null +++ b/deps/github.com/arangodb/go-driver/encode.go @@ -0,0 +1,38 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +// +build !"go1.8" + +package driver + +import "net/url" + +// Escape the given value for use in a URL path. +func pathEscape(s string) string { + return url.QueryEscape(s) +} + +// pathUnescape unescapes the given value for use in a URL path. 
+func pathUnescape(s string) string { + r, _ := url.QueryUnescape(s) + return r +} diff --git a/deps/github.com/arangodb/go-driver/error.go b/deps/github.com/arangodb/go-driver/error.go new file mode 100644 index 000000000..fa8b698c7 --- /dev/null +++ b/deps/github.com/arangodb/go-driver/error.go @@ -0,0 +1,236 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import ( + "context" + "fmt" + "net" + "net/http" + "net/url" + "os" +) + +// ArangoError is a Go error with arangodb specific error information. +type ArangoError struct { + HasError bool `json:"error"` + Code int `json:"code"` + ErrorNum int `json:"errorNum"` + ErrorMessage string `json:"errorMessage"` +} + +// Error returns the error message of an ArangoError. +func (ae ArangoError) Error() string { + if ae.ErrorMessage != "" { + return ae.ErrorMessage + } + return fmt.Sprintf("ArangoError: Code %d, ErrorNum %d", ae.Code, ae.ErrorNum) +} + +// Timeout returns true when the given error is a timeout error. +func (ae ArangoError) Timeout() bool { + return ae.HasError && (ae.Code == http.StatusRequestTimeout || ae.Code == http.StatusGatewayTimeout) +} + +// Temporary returns true when the given error is a temporary error. 
+func (ae ArangoError) Temporary() bool { + return ae.HasError && ae.Code == http.StatusServiceUnavailable +} + +// newArangoError creates a new ArangoError with given values. +func newArangoError(code, errorNum int, errorMessage string) error { + return ArangoError{ + HasError: true, + Code: code, + ErrorNum: errorNum, + ErrorMessage: errorMessage, + } +} + +// IsArangoError returns true when the given error is an ArangoError. +func IsArangoError(err error) bool { + ae, ok := Cause(err).(ArangoError) + return ok && ae.HasError +} + +// IsArangoErrorWithCode returns true when the given error is an ArangoError and its Code field is equal to the given code. +func IsArangoErrorWithCode(err error, code int) bool { + ae, ok := Cause(err).(ArangoError) + return ok && ae.Code == code +} + +// IsArangoErrorWithErrorNum returns true when the given error is an ArangoError and its ErrorNum field is equal to one of the given numbers. +func IsArangoErrorWithErrorNum(err error, errorNum ...int) bool { + ae, ok := Cause(err).(ArangoError) + if !ok { + return false + } + for _, x := range errorNum { + if ae.ErrorNum == x { + return true + } + } + return false +} + +// IsInvalidRequest returns true if the given error is an ArangoError with code 400, indicating an invalid request. +func IsInvalidRequest(err error) bool { + return IsArangoErrorWithCode(err, 400) +} + +// IsUnauthorized returns true if the given error is an ArangoError with code 401, indicating an unauthorized request. +func IsUnauthorized(err error) bool { + return IsArangoErrorWithCode(err, 401) +} + +// IsForbidden returns true if the given error is an ArangoError with code 403, indicating a forbidden request. +func IsForbidden(err error) bool { + return IsArangoErrorWithCode(err, 403) +} + +// IsNotFound returns true if the given error is an ArangoError with code 404, indicating a object not found. 
+func IsNotFound(err error) bool { + return IsArangoErrorWithCode(err, 404) || IsArangoErrorWithErrorNum(err, 1202, 1203) +} + +// IsConflict returns true if the given error is an ArangoError with code 409, indicating a conflict. +func IsConflict(err error) bool { + return IsArangoErrorWithCode(err, 409) || IsArangoErrorWithErrorNum(err, 1702) +} + +// IsPreconditionFailed returns true if the given error is an ArangoError with code 412, indicating a failed precondition. +func IsPreconditionFailed(err error) bool { + return IsArangoErrorWithCode(err, 412) || IsArangoErrorWithErrorNum(err, 1200, 1210) +} + +// IsNoLeader returns true if the given error is an ArangoError with code 503 error number 1496. +func IsNoLeader(err error) bool { + return IsArangoErrorWithCode(err, 503) && IsArangoErrorWithErrorNum(err, 1496) +} + +// InvalidArgumentError is returned when a go function argument is invalid. +type InvalidArgumentError struct { + Message string +} + +// Error implements the error interface for InvalidArgumentError. +func (e InvalidArgumentError) Error() string { + return e.Message +} + +// IsInvalidArgument returns true if the given error is an InvalidArgumentError. +func IsInvalidArgument(err error) bool { + _, ok := Cause(err).(InvalidArgumentError) + return ok +} + +// NoMoreDocumentsError is returned by Cursor's, when an attempt is made to read documents when there are no more. +type NoMoreDocumentsError struct{} + +// Error implements the error interface for NoMoreDocumentsError. +func (e NoMoreDocumentsError) Error() string { + return "no more documents" +} + +// IsNoMoreDocuments returns true if the given error is an NoMoreDocumentsError. +func IsNoMoreDocuments(err error) bool { + _, ok := Cause(err).(NoMoreDocumentsError) + return ok +} + +// A ResponseError is returned when a request was completely written to a server, but +// the server did not respond, or some kind of network error occurred during the response. 
+type ResponseError struct { + Err error +} + +// Error returns the Error() result of the underlying error. +func (e *ResponseError) Error() string { + return e.Err.Error() +} + +// IsResponse returns true if the given error is (or is caused by) a ResponseError. +func IsResponse(err error) bool { + return isCausedBy(err, func(e error) bool { _, ok := e.(*ResponseError); return ok }) +} + +// IsCanceled returns true if the given error is the result on a cancelled context. +func IsCanceled(err error) bool { + return isCausedBy(err, func(e error) bool { return e == context.Canceled }) +} + +// IsTimeout returns true if the given error is the result on a deadline that has been exceeded. +func IsTimeout(err error) bool { + return isCausedBy(err, func(e error) bool { return e == context.DeadlineExceeded }) +} + +// isCausedBy returns true if the given error returns true on the given predicate, +// unwrapping various standard library error wrappers. +func isCausedBy(err error, p func(error) bool) bool { + if p(err) { + return true + } + err = Cause(err) + for { + if p(err) { + return true + } else if err == nil { + return false + } + if xerr, ok := err.(*ResponseError); ok { + err = xerr.Err + } else if xerr, ok := err.(*url.Error); ok { + err = xerr.Err + } else if xerr, ok := err.(*net.OpError); ok { + err = xerr.Err + } else if xerr, ok := err.(*os.SyscallError); ok { + err = xerr.Err + } else { + return false + } + } +} + +var ( + // WithStack is called on every return of an error to add stacktrace information to the error. + // When setting this function, also set the Cause function. + // The interface of this function is compatible with functions in github.com/pkg/errors. + WithStack = func(err error) error { return err } + // Cause is used to get the root cause of the given error. + // The interface of this function is compatible with functions in github.com/pkg/errors. 
+ Cause = func(err error) error { return err } +) + +// ErrorSlice is a slice of errors +type ErrorSlice []error + +// FirstNonNil returns the first error in the slice that is not nil. +// If all errors in the slice are nil, nil is returned. +func (l ErrorSlice) FirstNonNil() error { + for _, e := range l { + if e != nil { + return e + } + } + return nil +} diff --git a/deps/github.com/arangodb/go-driver/example_client_test.go b/deps/github.com/arangodb/go-driver/example_client_test.go new file mode 100644 index 000000000..dc9d26c50 --- /dev/null +++ b/deps/github.com/arangodb/go-driver/example_client_test.go @@ -0,0 +1,53 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +// +build !auth + +package driver_test + +import ( + "fmt" + "log" + + driver "github.com/arangodb/go-driver" + "github.com/arangodb/go-driver/http" +) + +func ExampleNewClient() { + // Create an HTTP connection to the database + conn, err := http.NewConnection(http.ConnectionConfig{ + Endpoints: []string{"http://localhost:8529"}, + }) + if err != nil { + log.Fatalf("Failed to create HTTP connection: %v", err) + } + // Create a client + c, err := driver.NewClient(driver.ClientConfig{ + Connection: conn, + }) + // Ask the version of the server + versionInfo, err := c.Version(nil) + if err != nil { + log.Fatalf("Failed to get version info: %v", err) + } + fmt.Printf("Database has version '%s' and license '%s'\n", versionInfo.Version, versionInfo.License) +} diff --git a/deps/github.com/arangodb/go-driver/example_context_test.go b/deps/github.com/arangodb/go-driver/example_context_test.go new file mode 100644 index 000000000..fc10b2d9c --- /dev/null +++ b/deps/github.com/arangodb/go-driver/example_context_test.go @@ -0,0 +1,49 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +// +build !auth + +package driver_test + +import ( + "context" + + driver "github.com/arangodb/go-driver" +) + +func ExampleWithRevision(collection driver.Collection) { + var result Book + // Using WithRevision we get an error when the current revision of the document is different. + ctx := driver.WithRevision(context.Background(), "a-specific-revision") + if _, err := collection.ReadDocument(ctx, "someValidKey", &result); err != nil { + // This call will fail when a document does not exist, or when its current revision is different. + } +} + +func ExampleWithSilent(collection driver.Collection) { + var result Book + // Using WithSilent we do not care about any returned meta data. + ctx := driver.WithSilent(context.Background()) + if _, err := collection.ReadDocument(ctx, "someValidKey", &result); err != nil { + // No meta data is returned + } +} diff --git a/deps/github.com/arangodb/go-driver/example_create_document_test.go b/deps/github.com/arangodb/go-driver/example_create_document_test.go new file mode 100644 index 000000000..d9bb35102 --- /dev/null +++ b/deps/github.com/arangodb/go-driver/example_create_document_test.go @@ -0,0 +1,85 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +// +build !auth + +// This example demonstrates how to create a single document. +package driver_test + +import ( + "fmt" + "log" + + driver "github.com/arangodb/go-driver" + "github.com/arangodb/go-driver/http" +) + +type Book struct { + Title string `json:"title"` + NoPages int `json:"no_pages"` +} + +func Example_createDocument() { + conn, err := http.NewConnection(http.ConnectionConfig{ + Endpoints: []string{"http://localhost:8529"}, + }) + if err != nil { + log.Fatalf("Failed to create HTTP connection: %v", err) + } + c, err := driver.NewClient(driver.ClientConfig{ + Connection: conn, + }) + + // Create database + db, err := c.CreateDatabase(nil, "examples_books", nil) + if err != nil { + log.Fatalf("Failed to create database: %v", err) + } + + // Create collection + col, err := db.CreateCollection(nil, "books", nil) + if err != nil { + log.Fatalf("Failed to create collection: %v", err) + } + + // Create document + book := Book{ + Title: "ArangoDB Cookbook", + NoPages: 257, + } + meta, err := col.CreateDocument(nil, book) + if err != nil { + log.Fatalf("Failed to create document: %v", err) + } + fmt.Printf("Created document in collection '%s' in database '%s'\n", col.Name(), db.Name()) + + // Read the document back + var result Book + if _, err := col.ReadDocument(nil, meta.Key, &result); err != nil { + log.Fatalf("Failed to read document: %v", err) + } + fmt.Printf("Read book '%+v'\n", result) + + // Output: + // Created document in collection 'books' in database 'examples_books' + // Read book '{Title:ArangoDB Cookbook NoPages:257}' +} diff --git a/deps/github.com/arangodb/go-driver/example_create_documents_test.go b/deps/github.com/arangodb/go-driver/example_create_documents_test.go new file mode 100644 index 000000000..ec745bd33 --- /dev/null +++ b/deps/github.com/arangodb/go-driver/example_create_documents_test.go @@ -0,0 +1,90 @@ +// +// DISCLAIMER +// +// 
Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +// +build !auth + +// This example demonstrates how to create multiple documents at once. +package driver_test + +import ( + "flag" + "fmt" + "log" + "strings" + + driver "github.com/arangodb/go-driver" + "github.com/arangodb/go-driver/http" +) + +type User struct { + Name string `json:"name"` + Age int `json:"age"` +} + +func Example_createDocuments() { + flag.Parse() + conn, err := http.NewConnection(http.ConnectionConfig{ + Endpoints: []string{"http://localhost:8529"}, + }) + if err != nil { + log.Fatalf("Failed to create HTTP connection: %v", err) + } + c, err := driver.NewClient(driver.ClientConfig{ + Connection: conn, + }) + + // Create database + db, err := c.CreateDatabase(nil, "examples_users", nil) + if err != nil { + log.Fatalf("Failed to create database: %v", err) + } + + // Create collection + col, err := db.CreateCollection(nil, "users", nil) + if err != nil { + log.Fatalf("Failed to create collection: %v", err) + } + + // Create documents + users := []User{ + User{ + Name: "John", + Age: 65, + }, + User{ + Name: "Tina", + Age: 25, + }, + User{ + Name: "George", + Age: 31, + }, + } + metas, errs, err := col.CreateDocuments(nil, users) + if err != nil { + log.Fatalf("Failed to create documents: %v", err) + } else if err := errs.FirstNonNil(); err != nil { 
+ log.Fatalf("Failed to create documents: first error: %v", err) + } + + fmt.Printf("Created documents with keys '%s' in collection '%s' in database '%s'\n", strings.Join(metas.Keys(), ","), col.Name(), db.Name()) +} diff --git a/deps/github.com/arangodb/go-driver/example_error_test.go b/deps/github.com/arangodb/go-driver/example_error_test.go new file mode 100644 index 000000000..d45c05db3 --- /dev/null +++ b/deps/github.com/arangodb/go-driver/example_error_test.go @@ -0,0 +1,46 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +// +build !auth + +package driver_test + +import ( + "context" + + driver "github.com/arangodb/go-driver" +) + +func ExampleIsNotFound(collection driver.Collection) { + var result Book + if _, err := collection.ReadDocument(nil, "keyDoesNotExist", &result); driver.IsNotFound(err) { + // No document with given key exists + } +} + +func ExampleIsPreconditionFailed(collection driver.Collection) { + var result Book + ctx := driver.WithRevision(context.Background(), "an-old-revision") + if _, err := collection.ReadDocument(ctx, "someValidKey", &result); driver.IsPreconditionFailed(err) { + // Document is found, but its revision is incorrect + } +} diff --git a/deps/github.com/arangodb/go-driver/graph.go b/deps/github.com/arangodb/go-driver/graph.go new file mode 100644 index 000000000..55c01f398 --- /dev/null +++ b/deps/github.com/arangodb/go-driver/graph.go @@ -0,0 +1,41 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import "context" + +// Graph provides access to all edge & vertex collections of a single graph in a database. +type Graph interface { + // Name returns the name of the graph. + Name() string + + // Remove removes the entire graph. 
+ // If the graph does not exist, a NotFoundError is returned. + Remove(ctx context.Context) error + + // Edge collection functions + GraphEdgeCollections + + // Vertex collection functions + GraphVertexCollections +} diff --git a/deps/github.com/arangodb/go-driver/graph_edge_collections.go b/deps/github.com/arangodb/go-driver/graph_edge_collections.go new file mode 100644 index 000000000..2515e2e2a --- /dev/null +++ b/deps/github.com/arangodb/go-driver/graph_edge_collections.go @@ -0,0 +1,57 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import "context" + +// GraphEdgeCollections provides access to all edge collections of a single graph in a database. +type GraphEdgeCollections interface { + // EdgeCollection opens a connection to an existing edge-collection within the graph. + // If no edge-collection with given name exists, an NotFoundError is returned. + // Note: When calling Remove on the returned Collection, the collection is removed from the graph. Not from the database. + EdgeCollection(ctx context.Context, name string) (Collection, VertexConstraints, error) + + // EdgeCollectionExists returns true if an edge-collection with given name exists within the graph. 
+ EdgeCollectionExists(ctx context.Context, name string) (bool, error) + + // EdgeCollections returns all edge collections of this graph + // Note: When calling Remove on any of the returned Collection's, the collection is removed from the graph. Not from the database. + EdgeCollections(ctx context.Context) ([]Collection, []VertexConstraints, error) + + // CreateEdgeCollection creates an edge collection in the graph. + // collection: The name of the edge collection to be used. + // constraints.From: contains the names of one or more vertex collections that can contain source vertices. + // constraints.To: contains the names of one or more edge collections that can contain target vertices. + CreateEdgeCollection(ctx context.Context, collection string, constraints VertexConstraints) (Collection, error) + + // SetVertexConstraints modifies the vertex constraints of an existing edge collection in the graph. + SetVertexConstraints(ctx context.Context, collection string, constraints VertexConstraints) error +} + +// VertexConstraints limit the vertex collection you can use in an edge. +type VertexConstraints struct { + // From contains names of vertex collection that are allowed to be used in the From part of an edge. + From []string + // To contains names of vertex collection that are allowed to be used in the To part of an edge. + To []string +} diff --git a/deps/github.com/arangodb/go-driver/graph_edge_collections_impl.go b/deps/github.com/arangodb/go-driver/graph_edge_collections_impl.go new file mode 100644 index 000000000..bf26a14ad --- /dev/null +++ b/deps/github.com/arangodb/go-driver/graph_edge_collections_impl.go @@ -0,0 +1,180 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import ( + "context" + "path" +) + +type getGraphResponse struct { + Graph struct { + EdgeDefinitions []EdgeDefinition `json:"edgeDefinitions,omitempty"` + } `json:"graph"` +} + +// EdgeCollection opens a connection to an existing edge-collection within the graph. +// If no edge-collection with given name exists, an NotFoundError is returned. +func (g *graph) EdgeCollection(ctx context.Context, name string) (Collection, VertexConstraints, error) { + req, err := g.conn.NewRequest("GET", g.relPath()) + if err != nil { + return nil, VertexConstraints{}, WithStack(err) + } + resp, err := g.conn.Do(ctx, req) + if err != nil { + return nil, VertexConstraints{}, WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return nil, VertexConstraints{}, WithStack(err) + } + var data getGraphResponse + if err := resp.ParseBody("", &data); err != nil { + return nil, VertexConstraints{}, WithStack(err) + } + for _, n := range data.Graph.EdgeDefinitions { + if n.Collection == name { + ec, err := newEdgeCollection(name, g) + if err != nil { + return nil, VertexConstraints{}, WithStack(err) + } + constraints := VertexConstraints{ + From: n.From, + To: n.To, + } + return ec, constraints, nil + } + } + return nil, VertexConstraints{}, WithStack(newArangoError(404, 0, "not found")) +} + +// EdgeCollectionExists returns true if an edge-collection with given name exists within the graph. 
+func (g *graph) EdgeCollectionExists(ctx context.Context, name string) (bool, error) { + req, err := g.conn.NewRequest("GET", g.relPath()) + if err != nil { + return false, WithStack(err) + } + resp, err := g.conn.Do(ctx, req) + if err != nil { + return false, WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return false, WithStack(err) + } + var data getGraphResponse + if err := resp.ParseBody("", &data); err != nil { + return false, WithStack(err) + } + for _, n := range data.Graph.EdgeDefinitions { + if n.Collection == name { + return true, nil + } + } + return false, nil +} + +// EdgeCollections returns all edge collections of this graph +func (g *graph) EdgeCollections(ctx context.Context) ([]Collection, []VertexConstraints, error) { + req, err := g.conn.NewRequest("GET", g.relPath()) + if err != nil { + return nil, nil, WithStack(err) + } + resp, err := g.conn.Do(ctx, req) + if err != nil { + return nil, nil, WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return nil, nil, WithStack(err) + } + var data getGraphResponse + if err := resp.ParseBody("", &data); err != nil { + return nil, nil, WithStack(err) + } + result := make([]Collection, 0, len(data.Graph.EdgeDefinitions)) + constraints := make([]VertexConstraints, 0, len(data.Graph.EdgeDefinitions)) + for _, n := range data.Graph.EdgeDefinitions { + ec, err := newEdgeCollection(n.Collection, g) + if err != nil { + return nil, nil, WithStack(err) + } + result = append(result, ec) + constraints = append(constraints, VertexConstraints{ + From: n.From, + To: n.To, + }) + } + return result, constraints, nil +} + +// collection: The name of the edge collection to be used. +// from: contains the names of one or more vertex collections that can contain source vertices. +// to: contains the names of one or more edge collections that can contain target vertices. 
+func (g *graph) CreateEdgeCollection(ctx context.Context, collection string, constraints VertexConstraints) (Collection, error) { + req, err := g.conn.NewRequest("POST", path.Join(g.relPath(), "edge")) + if err != nil { + return nil, WithStack(err) + } + input := EdgeDefinition{ + Collection: collection, + From: constraints.From, + To: constraints.To, + } + if _, err := req.SetBody(input); err != nil { + return nil, WithStack(err) + } + resp, err := g.conn.Do(ctx, req) + if err != nil { + return nil, WithStack(err) + } + if err := resp.CheckStatus(201, 202); err != nil { + return nil, WithStack(err) + } + ec, err := newEdgeCollection(collection, g) + if err != nil { + return nil, WithStack(err) + } + return ec, nil +} + +// SetVertexConstraints modifies the vertex constraints of an existing edge collection in the graph. +func (g *graph) SetVertexConstraints(ctx context.Context, collection string, constraints VertexConstraints) error { + req, err := g.conn.NewRequest("PUT", path.Join(g.relPath(), "edge", collection)) + if err != nil { + return WithStack(err) + } + input := EdgeDefinition{ + Collection: collection, + From: constraints.From, + To: constraints.To, + } + if _, err := req.SetBody(input); err != nil { + return WithStack(err) + } + resp, err := g.conn.Do(ctx, req) + if err != nil { + return WithStack(err) + } + if err := resp.CheckStatus(201, 202); err != nil { + return WithStack(err) + } + return nil +} diff --git a/deps/github.com/arangodb/go-driver/graph_impl.go b/deps/github.com/arangodb/go-driver/graph_impl.go new file mode 100644 index 000000000..1e84151dc --- /dev/null +++ b/deps/github.com/arangodb/go-driver/graph_impl.go @@ -0,0 +1,77 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import ( + "context" + "path" +) + +// newGraph creates a new Graph implementation. +func newGraph(name string, db *database) (Graph, error) { + if name == "" { + return nil, WithStack(InvalidArgumentError{Message: "name is empty"}) + } + if db == nil { + return nil, WithStack(InvalidArgumentError{Message: "db is nil"}) + } + return &graph{ + name: name, + db: db, + conn: db.conn, + }, nil +} + +type graph struct { + name string + db *database + conn Connection +} + +// relPath creates the relative path to this graph (`_db//_api/gharial/`) +func (g *graph) relPath() string { + escapedName := pathEscape(g.name) + return path.Join(g.db.relPath(), "_api", "gharial", escapedName) +} + +// Name returns the name of the graph. +func (g *graph) Name() string { + return g.name +} + +// Remove removes the entire graph. +// If the graph does not exist, a NotFoundError is returned. 
+func (g *graph) Remove(ctx context.Context) error { + req, err := g.conn.NewRequest("DELETE", g.relPath()) + if err != nil { + return WithStack(err) + } + resp, err := g.conn.Do(ctx, req) + if err != nil { + return WithStack(err) + } + if err := resp.CheckStatus(201, 202); err != nil { + return WithStack(err) + } + return nil +} diff --git a/deps/github.com/arangodb/go-driver/graph_vertex_collections.go b/deps/github.com/arangodb/go-driver/graph_vertex_collections.go new file mode 100644 index 000000000..f03457c88 --- /dev/null +++ b/deps/github.com/arangodb/go-driver/graph_vertex_collections.go @@ -0,0 +1,44 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import "context" + +// GraphVertexCollections provides access to all vertex collections of a single graph in a database. +type GraphVertexCollections interface { + // VertexCollection opens a connection to an existing vertex-collection within the graph. + // If no vertex-collection with given name exists, an NotFoundError is returned. + // Note: When calling Remove on the returned Collection, the collection is removed from the graph. Not from the database. 
+ VertexCollection(ctx context.Context, name string) (Collection, error) + + // VertexCollectionExists returns true if an vertex-collection with given name exists within the graph. + VertexCollectionExists(ctx context.Context, name string) (bool, error) + + // VertexCollections returns all vertex collections of this graph + // Note: When calling Remove on any of the returned Collection's, the collection is removed from the graph. Not from the database. + VertexCollections(ctx context.Context) ([]Collection, error) + + // CreateVertexCollection creates a vertex collection in the graph. + // collection: The name of the vertex collection to be used. + CreateVertexCollection(ctx context.Context, collection string) (Collection, error) +} diff --git a/deps/github.com/arangodb/go-driver/graph_vertex_collections_impl.go b/deps/github.com/arangodb/go-driver/graph_vertex_collections_impl.go new file mode 100644 index 000000000..296866edc --- /dev/null +++ b/deps/github.com/arangodb/go-driver/graph_vertex_collections_impl.go @@ -0,0 +1,145 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import ( + "context" + "path" +) + +type listVertexCollectionResponse struct { + Collections []string `json:"collections,omitempty"` +} + +// VertexCollection opens a connection to an existing edge-collection within the graph. +// If no edge-collection with given name exists, an NotFoundError is returned. +func (g *graph) VertexCollection(ctx context.Context, name string) (Collection, error) { + req, err := g.conn.NewRequest("GET", path.Join(g.relPath(), "vertex")) + if err != nil { + return nil, WithStack(err) + } + resp, err := g.conn.Do(ctx, req) + if err != nil { + return nil, WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return nil, WithStack(err) + } + var data listVertexCollectionResponse + if err := resp.ParseBody("", &data); err != nil { + return nil, WithStack(err) + } + for _, n := range data.Collections { + if n == name { + ec, err := newVertexCollection(name, g) + if err != nil { + return nil, WithStack(err) + } + return ec, nil + } + } + return nil, WithStack(newArangoError(404, 0, "not found")) +} + +// VertexCollectionExists returns true if an edge-collection with given name exists within the graph. 
+func (g *graph) VertexCollectionExists(ctx context.Context, name string) (bool, error) { + req, err := g.conn.NewRequest("GET", path.Join(g.relPath(), "vertex")) + if err != nil { + return false, WithStack(err) + } + resp, err := g.conn.Do(ctx, req) + if err != nil { + return false, WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return false, WithStack(err) + } + var data listVertexCollectionResponse + if err := resp.ParseBody("", &data); err != nil { + return false, WithStack(err) + } + for _, n := range data.Collections { + if n == name { + return true, nil + } + } + return false, nil +} + +// VertexCollections returns all edge collections of this graph +func (g *graph) VertexCollections(ctx context.Context) ([]Collection, error) { + req, err := g.conn.NewRequest("GET", path.Join(g.relPath(), "vertex")) + if err != nil { + return nil, WithStack(err) + } + resp, err := g.conn.Do(ctx, req) + if err != nil { + return nil, WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return nil, WithStack(err) + } + var data listVertexCollectionResponse + if err := resp.ParseBody("", &data); err != nil { + return nil, WithStack(err) + } + result := make([]Collection, 0, len(data.Collections)) + for _, name := range data.Collections { + ec, err := newVertexCollection(name, g) + if err != nil { + return nil, WithStack(err) + } + result = append(result, ec) + } + return result, nil +} + +// collection: The name of the edge collection to be used. +// from: contains the names of one or more vertex collections that can contain source vertices. +// to: contains the names of one or more edge collections that can contain target vertices. 
+func (g *graph) CreateVertexCollection(ctx context.Context, collection string) (Collection, error) { + req, err := g.conn.NewRequest("POST", path.Join(g.relPath(), "vertex")) + if err != nil { + return nil, WithStack(err) + } + input := struct { + Collection string `json:"collection,omitempty"` + }{ + Collection: collection, + } + if _, err := req.SetBody(input); err != nil { + return nil, WithStack(err) + } + resp, err := g.conn.Do(ctx, req) + if err != nil { + return nil, WithStack(err) + } + if err := resp.CheckStatus(201, 202); err != nil { + return nil, WithStack(err) + } + ec, err := newVertexCollection(collection, g) + if err != nil { + return nil, WithStack(err) + } + return ec, nil +} diff --git a/deps/github.com/arangodb/go-driver/http/authentication.go b/deps/github.com/arangodb/go-driver/http/authentication.go new file mode 100644 index 000000000..76b15c9be --- /dev/null +++ b/deps/github.com/arangodb/go-driver/http/authentication.go @@ -0,0 +1,246 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package http + +import ( + "context" + "encoding/base64" + "fmt" + "sync" + "sync/atomic" + + driver "github.com/arangodb/go-driver" +) + +// Authentication implements a kind of authentication. 
+type httpAuthentication interface { + // Prepare is called before the first request of the given connection is made. + Prepare(ctx context.Context, conn driver.Connection) error + + // Configure is called for every request made on a connection. + Configure(req driver.Request) error +} + +// newBasicAuthentication creates an authentication implementation based on the given username & password. +func newBasicAuthentication(userName, password string) httpAuthentication { + auth := fmt.Sprintf("%s:%s", userName, password) + encoded := base64.StdEncoding.EncodeToString([]byte(auth)) + return &basicAuthentication{ + authorizationValue: "Basic " + encoded, + } +} + +// newJWTAuthentication creates a JWT token authentication implementation based on the given username & password. +func newJWTAuthentication(userName, password string) httpAuthentication { + return &jwtAuthentication{ + userName: userName, + password: password, + } +} + +// newRawAuthentication creates a Raw authentication implementation based on the given value. +func newRawAuthentication(value string) httpAuthentication { + return &basicAuthentication{ + authorizationValue: value, + } +} + +// basicAuthentication implements HTTP Basic authentication. +type basicAuthentication struct { + authorizationValue string +} + +// Prepare is called before the first request of the given connection is made. +func (a *basicAuthentication) Prepare(ctx context.Context, conn driver.Connection) error { + // No need to do anything here + return nil +} + +// Configure is called for every request made on a connection. +func (a *basicAuthentication) Configure(req driver.Request) error { + req.SetHeader("Authorization", a.authorizationValue) + return nil +} + +// jwtAuthentication implements JWT token authentication. 
+type jwtAuthentication struct { + userName string + password string + token string +} + +type jwtOpenRequest struct { + UserName string `json:"username"` + Password string `json:"password"` +} + +type jwtOpenResponse struct { + Token string `json:"jwt"` + MustChangePassword bool `json:"must_change_password,omitempty"` +} + +// Prepare is called before the first request of the given connection is made. +func (a *jwtAuthentication) Prepare(ctx context.Context, conn driver.Connection) error { + // Prepare request + r, err := conn.NewRequest("POST", "/_open/auth") + if err != nil { + return driver.WithStack(err) + } + r.SetBody(jwtOpenRequest{ + UserName: a.userName, + Password: a.password, + }) + + // Perform request + resp, err := conn.Do(ctx, r) + if err != nil { + return driver.WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return driver.WithStack(err) + } + + // Parse response + var data jwtOpenResponse + if err := resp.ParseBody("", &data); err != nil { + return driver.WithStack(err) + } + + // Store token + a.token = data.Token + + // Ok + return nil +} + +// Configure is called for every request made on a connection. +func (a *jwtAuthentication) Configure(req driver.Request) error { + req.SetHeader("Authorization", "bearer "+a.token) + return nil +} + +// newAuthenticatedConnection creates a Connection that applies the given connection on the given underlying connection. +func newAuthenticatedConnection(conn driver.Connection, auth httpAuthentication) (driver.Connection, error) { + if conn == nil { + return nil, driver.WithStack(driver.InvalidArgumentError{Message: "conn is nil"}) + } + if auth == nil { + return nil, driver.WithStack(driver.InvalidArgumentError{Message: "auth is nil"}) + } + return &authenticatedConnection{ + conn: conn, + auth: auth, + }, nil +} + +// authenticatedConnection implements authentication behavior for connections. 
+type authenticatedConnection struct { + conn driver.Connection // Un-authenticated connection + auth httpAuthentication + prepareMutex sync.Mutex + prepared int32 +} + +// NewRequest creates a new request with given method and path. +func (c *authenticatedConnection) NewRequest(method, path string) (driver.Request, error) { + r, err := c.conn.NewRequest(method, path) + if err != nil { + return nil, driver.WithStack(err) + } + return r, nil +} + +// Do performs a given request, returning its response. +func (c *authenticatedConnection) Do(ctx context.Context, req driver.Request) (driver.Response, error) { + if atomic.LoadInt32(&c.prepared) == 0 { + // Probably we're not yet prepared + if err := c.prepare(ctx); err != nil { + // Authentication failed + return nil, driver.WithStack(err) + } + } + // Configure the request for authentication. + if err := c.auth.Configure(req); err != nil { + // Failed to configure request for authentication + return nil, driver.WithStack(err) + } + // Do the authenticated request + resp, err := c.conn.Do(ctx, req) + if err != nil { + return nil, driver.WithStack(err) + } + return resp, nil +} + +// Unmarshal unmarshals the given raw object into the given result interface. +func (c *authenticatedConnection) Unmarshal(data driver.RawObject, result interface{}) error { + if err := c.conn.Unmarshal(data, result); err != nil { + return driver.WithStack(err) + } + return nil +} + +// Endpoints returns the endpoints used by this connection. +func (c *authenticatedConnection) Endpoints() []string { + return c.conn.Endpoints() +} + +// UpdateEndpoints reconfigures the connection to use the given endpoints. +func (c *authenticatedConnection) UpdateEndpoints(endpoints []string) error { + if err := c.conn.UpdateEndpoints(endpoints); err != nil { + return driver.WithStack(err) + } + return nil +} + +// Configure the authentication used for this connection. 
+func (c *authenticatedConnection) SetAuthentication(auth driver.Authentication) (driver.Connection, error) { + result, err := c.conn.SetAuthentication(auth) + if err != nil { + return nil, driver.WithStack(err) + } + return result, nil +} + +// Protocols returns all protocols used by this connection. +func (c *authenticatedConnection) Protocols() driver.ProtocolSet { + return c.conn.Protocols() +} + +// prepare calls Authentication.Prepare if needed. +func (c *authenticatedConnection) prepare(ctx context.Context) error { + c.prepareMutex.Lock() + defer c.prepareMutex.Unlock() + if c.prepared == 0 { + // We need to prepare first + if err := c.auth.Prepare(ctx, c.conn); err != nil { + // Authentication failed + return driver.WithStack(err) + } + // We're now prepared + atomic.StoreInt32(&c.prepared, 1) + } else { + // We're already prepared, do nothing + } + return nil +} diff --git a/deps/github.com/arangodb/go-driver/http/connection.go b/deps/github.com/arangodb/go-driver/http/connection.go new file mode 100644 index 000000000..bb6f70a47 --- /dev/null +++ b/deps/github.com/arangodb/go-driver/http/connection.go @@ -0,0 +1,348 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package http + +import ( + "context" + "crypto/tls" + "encoding/json" + "fmt" + "io/ioutil" + "net" + "net/http" + "net/http/httptrace" + "net/url" + "strings" + "time" + + driver "github.com/arangodb/go-driver" + "github.com/arangodb/go-driver/cluster" + "github.com/arangodb/go-driver/util" + velocypack "github.com/arangodb/go-velocypack" +) + +const ( + DefaultMaxIdleConnsPerHost = 64 + + keyRawResponse driver.ContextKey = "arangodb-rawResponse" + keyResponse driver.ContextKey = "arangodb-response" +) + +// ConnectionConfig provides all configuration options for a HTTP connection. +type ConnectionConfig struct { + // Endpoints holds 1 or more URL's used to connect to the database. + // In case of a connection to an ArangoDB cluster, you must provide the URL's of all coordinators. + Endpoints []string + // TLSConfig holds settings used to configure a TLS (HTTPS) connection. + // This is only used for endpoints using the HTTPS scheme. + TLSConfig *tls.Config + // Transport allows the use of a custom round tripper. + // If Transport is not of type `*http.Transport`, the `TLSConfig` property is not used. + // Otherwise a `TLSConfig` property other than `nil` will overwrite the `TLSClientConfig` + // property of `Transport`. + // + // When using a custom `http.Transport`, make sure to set the `MaxIdleConnsPerHost` field at least as + // high as the maximum number of concurrent requests you will make to your database. + // A lower number will cause the golang runtime to create additional connections and close them + // directly after use, resulting in a large number of connections in `TIME_WAIT` state. + // When this value is not set, the driver will set it to 64 automatically. 
+ Transport http.RoundTripper + // FailOnRedirect; if set, redirect will not be followed, instead the status code is returned as error + FailOnRedirect bool + // Cluster configuration settings + cluster.ConnectionConfig + // ContentType specified type of content encoding to use. + ContentType driver.ContentType +} + +// NewConnection creates a new HTTP connection based on the given configuration settings. +func NewConnection(config ConnectionConfig) (driver.Connection, error) { + c, err := cluster.NewConnection(config.ConnectionConfig, func(endpoint string) (driver.Connection, error) { + conn, err := newHTTPConnection(endpoint, config) + if err != nil { + return nil, driver.WithStack(err) + } + return conn, nil + }, config.Endpoints) + if err != nil { + return nil, driver.WithStack(err) + } + return c, nil +} + +// newHTTPConnection creates a new HTTP connection for a single endpoint and the remainder of the given configuration settings. +func newHTTPConnection(endpoint string, config ConnectionConfig) (driver.Connection, error) { + endpoint = util.FixupEndpointURLScheme(endpoint) + u, err := url.Parse(endpoint) + if err != nil { + return nil, driver.WithStack(err) + } + var httpTransport *http.Transport + if config.Transport != nil { + httpTransport, _ = config.Transport.(*http.Transport) + } else { + httpTransport = &http.Transport{ + // Copy default values from http.DefaultTransport + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).DialContext, + MaxIdleConns: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + } + config.Transport = httpTransport + } + if httpTransport != nil { + if httpTransport.MaxIdleConnsPerHost == 0 { + // Raise the default number of idle connections per host since in a database application + // it is very likely that you want more than 2 concurrent connections to a 
host. + // We raise it to avoid the extra concurrent connections being closed directly + // after use, resulting in a lot of connection in `TIME_WAIT` state. + httpTransport.MaxIdleConnsPerHost = DefaultMaxIdleConnsPerHost + } + defaultMaxIdleConns := 3 * DefaultMaxIdleConnsPerHost + if httpTransport.MaxIdleConns > 0 && httpTransport.MaxIdleConns < defaultMaxIdleConns { + // For a cluster scenario we assume the use of 3 coordinators (don't know the exact number here) + // and derive the maximum total number of idle connections from that. + httpTransport.MaxIdleConns = defaultMaxIdleConns + } + if config.TLSConfig != nil { + httpTransport.TLSClientConfig = config.TLSConfig + } + } + httpClient := &http.Client{ + Transport: config.Transport, + } + if config.FailOnRedirect { + httpClient.CheckRedirect = func(req *http.Request, via []*http.Request) error { + return driver.ArangoError{ + HasError: true, + Code: http.StatusFound, + ErrorNum: 0, + ErrorMessage: "Redirect not allowed", + } + } + } + c := &httpConnection{ + endpoint: *u, + contentType: config.ContentType, + client: httpClient, + } + return c, nil +} + +// httpConnection implements an HTTP + JSON connection to an arangodb server. +type httpConnection struct { + endpoint url.URL + contentType driver.ContentType + client *http.Client +} + +// String returns the endpoint as string +func (c *httpConnection) String() string { + return c.endpoint.String() +} + +// NewRequest creates a new request with given method and path. +func (c *httpConnection) NewRequest(method, path string) (driver.Request, error) { + switch method { + case "GET", "POST", "DELETE", "HEAD", "PATCH", "PUT", "OPTIONS": + // Ok + default: + return nil, driver.WithStack(driver.InvalidArgumentError{Message: fmt.Sprintf("Invalid method '%s'", method)}) + } + ct := c.contentType + if ct != driver.ContentTypeJSON && strings.Contains(path, "_api/gharial") { + // Currently (3.1.18) calls to this API do not work well with vpack. 
+ ct = driver.ContentTypeJSON + } + switch ct { + case driver.ContentTypeJSON: + r := &httpJSONRequest{ + method: method, + path: path, + } + return r, nil + case driver.ContentTypeVelocypack: + r := &httpVPackRequest{ + method: method, + path: path, + } + return r, nil + default: + return nil, driver.WithStack(fmt.Errorf("Unsupported content type %d", int(c.contentType))) + } +} + +// Do performs a given request, returning its response. +func (c *httpConnection) Do(ctx context.Context, req driver.Request) (driver.Response, error) { + httpReq, ok := req.(httpRequest) + if !ok { + return nil, driver.WithStack(driver.InvalidArgumentError{Message: "request is not a httpRequest"}) + } + r, err := httpReq.createHTTPRequest(c.endpoint) + rctx := ctx + if rctx == nil { + rctx = context.Background() + } + rctx = httptrace.WithClientTrace(rctx, &httptrace.ClientTrace{ + WroteRequest: func(info httptrace.WroteRequestInfo) { + httpReq.WroteRequest(info) + }, + }) + r = r.WithContext(rctx) + if err != nil { + return nil, driver.WithStack(err) + } + resp, err := c.client.Do(r) + if err != nil { + return nil, driver.WithStack(err) + } + var rawResponse *[]byte + if ctx != nil { + if v := ctx.Value(keyRawResponse); v != nil { + if buf, ok := v.(*[]byte); ok { + rawResponse = buf + } + } + } + + // Read response body + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, driver.WithStack(err) + } + if rawResponse != nil { + *rawResponse = body + } + + ct := resp.Header.Get("Content-Type") + var httpResp driver.Response + switch strings.Split(ct, ";")[0] { + case "application/json", "application/x-arango-dump": + httpResp = &httpJSONResponse{resp: resp, rawResponse: body} + case "application/x-velocypack": + httpResp = &httpVPackResponse{resp: resp, rawResponse: body} + default: + if resp.StatusCode == http.StatusUnauthorized { + // When unauthorized the server sometimes return a `text/plain` response. 
+ return nil, driver.WithStack(driver.ArangoError{ + HasError: true, + Code: resp.StatusCode, + ErrorMessage: string(body), + }) + } + // Handle empty 'text/plain' body as empty JSON object + if len(body) == 0 { + body = []byte("{}") + if rawResponse != nil { + *rawResponse = body + } + httpResp = &httpJSONResponse{resp: resp, rawResponse: body} + } else { + return nil, driver.WithStack(fmt.Errorf("Unsupported content type '%s' with status %d and content '%s'", ct, resp.StatusCode, string(body))) + } + } + if ctx != nil { + if v := ctx.Value(keyResponse); v != nil { + if respPtr, ok := v.(*driver.Response); ok { + *respPtr = httpResp + } + } + } + return httpResp, nil +} + +// Unmarshal unmarshals the given raw object into the given result interface. +func (c *httpConnection) Unmarshal(data driver.RawObject, result interface{}) error { + ct := c.contentType + if ct == driver.ContentTypeVelocypack && len(data) >= 2 { + // Poor mans auto detection of json + l := len(data) + if (data[0] == '{' && data[l-1] == '}') || (data[0] == '[' && data[l-1] == ']') { + ct = driver.ContentTypeJSON + } + } + switch ct { + case driver.ContentTypeJSON: + if err := json.Unmarshal(data, result); err != nil { + return driver.WithStack(err) + } + case driver.ContentTypeVelocypack: + //panic(velocypack.Slice(data)) + if err := velocypack.Unmarshal(velocypack.Slice(data), result); err != nil { + return driver.WithStack(err) + } + default: + return driver.WithStack(fmt.Errorf("Unsupported content type %d", int(c.contentType))) + } + return nil +} + +// Endpoints returns the endpoints used by this connection. +func (c *httpConnection) Endpoints() []string { + return []string{c.endpoint.String()} +} + +// UpdateEndpoints reconfigures the connection to use the given endpoints. +func (c *httpConnection) UpdateEndpoints(endpoints []string) error { + // Do nothing here. + // The real updating is done in cluster Connection. 
+ return nil +} + +// Configure the authentication used for this connection. +func (c *httpConnection) SetAuthentication(auth driver.Authentication) (driver.Connection, error) { + var httpAuth httpAuthentication + switch auth.Type() { + case driver.AuthenticationTypeBasic: + userName := auth.Get("username") + password := auth.Get("password") + httpAuth = newBasicAuthentication(userName, password) + case driver.AuthenticationTypeJWT: + userName := auth.Get("username") + password := auth.Get("password") + httpAuth = newJWTAuthentication(userName, password) + case driver.AuthenticationTypeRaw: + value := auth.Get("value") + httpAuth = newRawAuthentication(value) + default: + return nil, driver.WithStack(fmt.Errorf("Unsupported authentication type %d", int(auth.Type()))) + } + + result, err := newAuthenticatedConnection(c, httpAuth) + if err != nil { + return nil, driver.WithStack(err) + } + return result, nil +} + +// Protocols returns all protocols used by this connection. +func (c *httpConnection) Protocols() driver.ProtocolSet { + return driver.ProtocolSet{driver.ProtocolHTTP} +} diff --git a/deps/github.com/arangodb/go-driver/http/mergeObject.go b/deps/github.com/arangodb/go-driver/http/mergeObject.go new file mode 100644 index 000000000..c11dda23a --- /dev/null +++ b/deps/github.com/arangodb/go-driver/http/mergeObject.go @@ -0,0 +1,84 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package http + +import ( + "encoding/json" + + driver "github.com/arangodb/go-driver" +) + +// mergeObject is a helper used to merge 2 objects into JSON. +type mergeObject struct { + Object interface{} + Merge interface{} +} + +func (m mergeObject) MarshalJSON() ([]byte, error) { + m1, err := toMap(m.Object) + if err != nil { + return nil, driver.WithStack(err) + } + m2, err := toMap(m.Merge) + if err != nil { + return nil, driver.WithStack(err) + } + var merged map[string]interface{} + // If m1 an empty object? + if len(m1) == 0 { + merged = m2 + } else if len(m2) == 0 { + merged = m1 + } else { + // Merge + merged = make(map[string]interface{}) + for k, v := range m1 { + merged[k] = v + } + for k, v := range m2 { + merged[k] = v + } + } + // Marshal merged map + data, err := json.Marshal(merged) + if err != nil { + return nil, driver.WithStack(err) + } + return data, nil +} + +// toMap converts the given object to a map (using JSON marshal/unmarshal when needed) +func toMap(object interface{}) (map[string]interface{}, error) { + if m, ok := object.(map[string]interface{}); ok { + return m, nil + } + data, err := json.Marshal(object) + if err != nil { + return nil, driver.WithStack(err) + } + var m map[string]interface{} + if err := json.Unmarshal(data, &m); err != nil { + return nil, driver.WithStack(err) + } + return m, nil +} diff --git a/deps/github.com/arangodb/go-driver/http/request.go b/deps/github.com/arangodb/go-driver/http/request.go new file mode 100644 index 000000000..e82ad8ed7 --- /dev/null +++ b/deps/github.com/arangodb/go-driver/http/request.go @@ -0,0 +1,38 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in 
compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package http + +import ( + "net/http" + "net/http/httptrace" + "net/url" +) + +// httpRequest implements driver.Request using standard golang http requests. +type httpRequest interface { + // createHTTPRequest creates a golang http.Request based on the configured arguments. + createHTTPRequest(endpoint url.URL) (*http.Request, error) + // WroteRequest implements the WroteRequest function of an httptrace. + // It sets written to true. + WroteRequest(httptrace.WroteRequestInfo) +} diff --git a/deps/github.com/arangodb/go-driver/http/request_json.go b/deps/github.com/arangodb/go-driver/http/request_json.go new file mode 100644 index 000000000..712ed3ea2 --- /dev/null +++ b/deps/github.com/arangodb/go-driver/http/request_json.go @@ -0,0 +1,242 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package http + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "net/http" + "net/http/httptrace" + "net/url" + "reflect" + "strconv" + "strings" + + driver "github.com/arangodb/go-driver" +) + +// httpRequest implements driver.Request using standard golang http requests. +type httpJSONRequest struct { + method string + path string + q url.Values + hdr map[string]string + body []byte + written bool +} + +// Clone creates a new request containing the same data as this request +func (r *httpJSONRequest) Clone() driver.Request { + clone := *r + clone.q = url.Values{} + for k, v := range r.q { + for _, x := range v { + clone.q.Add(k, x) + } + } + if clone.hdr != nil { + clone.hdr = make(map[string]string) + for k, v := range r.hdr { + clone.hdr[k] = v + } + } + return &clone +} + +// SetQuery sets a single query argument of the request. +// Any existing query argument with the same key is overwritten. +func (r *httpJSONRequest) SetQuery(key, value string) driver.Request { + if r.q == nil { + r.q = url.Values{} + } + r.q.Set(key, value) + return r +} + +// SetBody sets the content of the request. +// The protocol of the connection determines what kinds of marshalling is taking place. +func (r *httpJSONRequest) SetBody(body ...interface{}) (driver.Request, error) { + switch len(body) { + case 0: + return r, driver.WithStack(fmt.Errorf("Must provide at least 1 body")) + case 1: + if data, err := json.Marshal(body[0]); err != nil { + return r, driver.WithStack(err) + } else { + r.body = data + } + return r, nil + case 2: + mo := mergeObject{Object: body[1], Merge: body[0]} + if data, err := json.Marshal(mo); err != nil { + return r, driver.WithStack(err) + } else { + r.body = data + } + return r, nil + default: + return r, driver.WithStack(fmt.Errorf("Must provide at most 2 bodies")) + } + +} + +// SetBodyArray sets the content of the request as an array. 
+// If the given mergeArray is not nil, its elements are merged with the elements in the body array (mergeArray data overrides bodyArray data). +// The protocol of the connection determines what kinds of marshalling is taking place. +func (r *httpJSONRequest) SetBodyArray(bodyArray interface{}, mergeArray []map[string]interface{}) (driver.Request, error) { + bodyArrayVal := reflect.ValueOf(bodyArray) + switch bodyArrayVal.Kind() { + case reflect.Array, reflect.Slice: + // OK + default: + return nil, driver.WithStack(driver.InvalidArgumentError{Message: fmt.Sprintf("bodyArray must be slice, got %s", bodyArrayVal.Kind())}) + } + if mergeArray == nil { + // Simple case; just marshal bodyArray directly. + if data, err := json.Marshal(bodyArray); err != nil { + return r, driver.WithStack(err) + } else { + r.body = data + } + return r, nil + } + // Complex case, mergeArray is not nil + elementCount := bodyArrayVal.Len() + mergeObjects := make([]mergeObject, elementCount) + for i := 0; i < elementCount; i++ { + mergeObjects[i] = mergeObject{ + Object: bodyArrayVal.Index(i).Interface(), + Merge: mergeArray[i], + } + } + // Now marshal merged array + if data, err := json.Marshal(mergeObjects); err != nil { + return r, driver.WithStack(err) + } else { + r.body = data + } + return r, nil +} + +// SetBodyImportArray sets the content of the request as an array formatted for importing documents. +// The protocol of the connection determines what kinds of marshalling is taking place. 
+func (r *httpJSONRequest) SetBodyImportArray(bodyArray interface{}) (driver.Request, error) { + bodyArrayVal := reflect.ValueOf(bodyArray) + switch bodyArrayVal.Kind() { + case reflect.Array, reflect.Slice: + // OK + default: + return nil, driver.WithStack(driver.InvalidArgumentError{Message: fmt.Sprintf("bodyArray must be slice, got %s", bodyArrayVal.Kind())}) + } + // Render elements + elementCount := bodyArrayVal.Len() + buf := &bytes.Buffer{} + encoder := json.NewEncoder(buf) + for i := 0; i < elementCount; i++ { + entryVal := bodyArrayVal.Index(i) + if isNil(entryVal) { + buf.WriteString("\n") + } else { + if err := encoder.Encode(entryVal.Interface()); err != nil { + return nil, driver.WithStack(err) + } + } + } + r.body = buf.Bytes() + return r, nil +} + +func isNil(v reflect.Value) bool { + switch v.Kind() { + case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.Interface, reflect.Slice: + return v.IsNil() + default: + return false + } +} + +// SetHeader sets a single header arguments of the request. +// Any existing header argument with the same key is overwritten. +func (r *httpJSONRequest) SetHeader(key, value string) driver.Request { + if r.hdr == nil { + r.hdr = make(map[string]string) + } + r.hdr[key] = value + return r +} + +// Written returns true as soon as this request has been written completely to the network. +// This does not guarantee that the server has received or processed the request. +func (r *httpJSONRequest) Written() bool { + return r.written +} + +// WroteRequest implements the WroteRequest function of an httptrace. +// It sets written to true. +func (r *httpJSONRequest) WroteRequest(httptrace.WroteRequestInfo) { + r.written = true +} + +// createHTTPRequest creates a golang http.Request based on the configured arguments. 
+func (r *httpJSONRequest) createHTTPRequest(endpoint url.URL) (*http.Request, error) { + r.written = false + u := endpoint + u.Path = "" + url := u.String() + if !strings.HasSuffix(url, "/") { + url = url + "/" + } + p := r.path + if strings.HasPrefix(p, "/") { + p = p[1:] + } + url = url + p + if r.q != nil { + q := r.q.Encode() + if len(q) > 0 { + url = url + "?" + q + } + } + var body io.Reader + if r.body != nil { + body = bytes.NewReader(r.body) + } + req, err := http.NewRequest(r.method, url, body) + if err != nil { + return nil, driver.WithStack(err) + } + + if r.hdr != nil { + for k, v := range r.hdr { + req.Header.Set(k, v) + } + } + + if r.body != nil { + req.Header.Set("Content-Length", strconv.Itoa(len(r.body))) + req.Header.Set("Content-Type", "application/json") + } + return req, nil +} diff --git a/deps/github.com/arangodb/go-driver/http/request_json_test.go b/deps/github.com/arangodb/go-driver/http/request_json_test.go new file mode 100644 index 000000000..22ff075bb --- /dev/null +++ b/deps/github.com/arangodb/go-driver/http/request_json_test.go @@ -0,0 +1,125 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package http + +import ( + "strings" + "testing" +) + +type Sample struct { + Title string `json:"a"` + Age int `json:"b,omitempty"` +} + +func TestSetBodyImportArrayStructs(t *testing.T) { + r := &httpJSONRequest{} + docs := []Sample{ + Sample{"Foo", 2}, + Sample{"Dunn", 23}, + Sample{"Short", 0}, + Sample{"Sample", 45}, + } + expected := strings.Join([]string{ + `{"a":"Foo","b":2}`, + `{"a":"Dunn","b":23}`, + `{"a":"Short"}`, + `{"a":"Sample","b":45}`, + }, "\n") + if _, err := r.SetBodyImportArray(docs); err != nil { + t.Fatalf("SetBodyImportArray failed: %v", err) + } + data := strings.TrimSpace(string(r.body)) + if data != expected { + t.Errorf("Encoding failed: Expected\n%s\nGot\n%s\n", expected, data) + } +} + +func TestSetBodyImportArrayStructPtrs(t *testing.T) { + r := &httpJSONRequest{} + docs := []*Sample{ + &Sample{"Foo", 2}, + &Sample{"Dunn", 23}, + &Sample{"Short", 0}, + &Sample{"Sample", 45}, + } + expected := strings.Join([]string{ + `{"a":"Foo","b":2}`, + `{"a":"Dunn","b":23}`, + `{"a":"Short"}`, + `{"a":"Sample","b":45}`, + }, "\n") + if _, err := r.SetBodyImportArray(docs); err != nil { + t.Fatalf("SetBodyImportArray failed: %v", err) + } + data := strings.TrimSpace(string(r.body)) + if data != expected { + t.Errorf("Encoding failed: Expected\n%s\nGot\n%s\n", expected, data) + } +} + +func TestSetBodyImportArrayStructPtrsNil(t *testing.T) { + r := &httpJSONRequest{} + docs := []*Sample{ + &Sample{"Foo", 2}, + nil, + &Sample{"Dunn", 23}, + &Sample{"Short", 0}, + nil, + &Sample{"Sample", 45}, + } + expected := strings.Join([]string{ + `{"a":"Foo","b":2}`, + ``, + `{"a":"Dunn","b":23}`, + `{"a":"Short"}`, + ``, + `{"a":"Sample","b":45}`, + }, "\n") + if _, err := r.SetBodyImportArray(docs); err != nil { + t.Fatalf("SetBodyImportArray failed: %v", err) + } + data := strings.TrimSpace(string(r.body)) + if data != expected { + t.Errorf("Encoding failed: 
Expected\n%s\nGot\n%s\n", expected, data) + } +} + +func TestSetBodyImportArrayMaps(t *testing.T) { + r := &httpJSONRequest{} + docs := []map[string]interface{}{ + map[string]interface{}{"a": 5, "b": "c", "c": true}, + map[string]interface{}{"a": 77, "c": false}, + } + expected := strings.Join([]string{ + `{"a":5,"b":"c","c":true}`, + `{"a":77,"c":false}`, + }, "\n") + if _, err := r.SetBodyImportArray(docs); err != nil { + t.Fatalf("SetBodyImportArray failed: %v", err) + } + data := strings.TrimSpace(string(r.body)) + if data != expected { + t.Errorf("Encoding failed: Expected\n%s\nGot\n%s\n", expected, data) + } +} diff --git a/deps/github.com/arangodb/go-driver/http/request_vpack.go b/deps/github.com/arangodb/go-driver/http/request_vpack.go new file mode 100644 index 000000000..d9fdf7017 --- /dev/null +++ b/deps/github.com/arangodb/go-driver/http/request_vpack.go @@ -0,0 +1,228 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package http + +import ( + "bytes" + "fmt" + "io" + "net/http" + "net/http/httptrace" + "net/url" + "reflect" + "strconv" + "strings" + + driver "github.com/arangodb/go-driver" + velocypack "github.com/arangodb/go-velocypack" +) + +// httpVPackRequest implements driver.Request using standard golang http requests. 
+type httpVPackRequest struct { + method string + path string + q url.Values + hdr map[string]string + body []byte + written bool +} + +// Clone creates a new request containing the same data as this request +func (r *httpVPackRequest) Clone() driver.Request { + clone := *r + clone.q = url.Values{} + for k, v := range r.q { + for _, x := range v { + clone.q.Add(k, x) + } + } + if clone.hdr != nil { + clone.hdr = make(map[string]string) + for k, v := range r.hdr { + clone.hdr[k] = v + } + } + return &clone +} + +// SetQuery sets a single query argument of the request. +// Any existing query argument with the same key is overwritten. +func (r *httpVPackRequest) SetQuery(key, value string) driver.Request { + if r.q == nil { + r.q = url.Values{} + } + r.q.Set(key, value) + return r +} + +// SetBody sets the content of the request. +// The protocol of the connection determines what kinds of marshalling is taking place. +func (r *httpVPackRequest) SetBody(body ...interface{}) (driver.Request, error) { + switch len(body) { + case 0: + return r, driver.WithStack(fmt.Errorf("Must provide at least 1 body")) + case 1: + if data, err := velocypack.Marshal(body[0]); err != nil { + return r, driver.WithStack(err) + } else { + r.body = data + } + return r, nil + case 2: + mo := mergeObject{Object: body[1], Merge: body[0]} + if data, err := velocypack.Marshal(mo); err != nil { + return r, driver.WithStack(err) + } else { + r.body = data + } + return r, nil + default: + return r, driver.WithStack(fmt.Errorf("Must provide at most 2 bodies")) + } + +} + +// SetBodyArray sets the content of the request as an array. +// If the given mergeArray is not nil, its elements are merged with the elements in the body array (mergeArray data overrides bodyArray data). +// The protocol of the connection determines what kinds of marshalling is taking place. 
+func (r *httpVPackRequest) SetBodyArray(bodyArray interface{}, mergeArray []map[string]interface{}) (driver.Request, error) { + bodyArrayVal := reflect.ValueOf(bodyArray) + switch bodyArrayVal.Kind() { + case reflect.Array, reflect.Slice: + // OK + default: + return nil, driver.WithStack(driver.InvalidArgumentError{Message: fmt.Sprintf("bodyArray must be slice, got %s", bodyArrayVal.Kind())}) + } + if mergeArray == nil { + // Simple case; just marshal bodyArray directly. + if data, err := velocypack.Marshal(bodyArray); err != nil { + return r, driver.WithStack(err) + } else { + r.body = data + } + return r, nil + } + // Complex case, mergeArray is not nil + elementCount := bodyArrayVal.Len() + mergeObjects := make([]mergeObject, elementCount) + for i := 0; i < elementCount; i++ { + mergeObjects[i] = mergeObject{ + Object: bodyArrayVal.Index(i).Interface(), + Merge: mergeArray[i], + } + } + // Now marshal merged array + if data, err := velocypack.Marshal(mergeObjects); err != nil { + return r, driver.WithStack(err) + } else { + r.body = data + } + return r, nil +} + +// SetBodyImportArray sets the content of the request as an array formatted for importing documents. +// The protocol of the connection determines what kinds of marshalling is taking place. +func (r *httpVPackRequest) SetBodyImportArray(bodyArray interface{}) (driver.Request, error) { + bodyArrayVal := reflect.ValueOf(bodyArray) + switch bodyArrayVal.Kind() { + case reflect.Array, reflect.Slice: + // OK + default: + return nil, driver.WithStack(driver.InvalidArgumentError{Message: fmt.Sprintf("bodyArray must be slice, got %s", bodyArrayVal.Kind())}) + } + // Render elements + buf := &bytes.Buffer{} + encoder := velocypack.NewEncoder(buf) + if err := encoder.Encode(bodyArray); err != nil { + return nil, driver.WithStack(err) + } + r.body = buf.Bytes() + r.SetQuery("type", "list") + return r, nil +} + +// SetHeader sets a single header arguments of the request. 
+// Any existing header argument with the same key is overwritten. +func (r *httpVPackRequest) SetHeader(key, value string) driver.Request { + if r.hdr == nil { + r.hdr = make(map[string]string) + } + r.hdr[key] = value + return r +} + +// Written returns true as soon as this request has been written completely to the network. +// This does not guarantee that the server has received or processed the request. +func (r *httpVPackRequest) Written() bool { + return r.written +} + +// WroteRequest implements the WroteRequest function of an httptrace. +// It sets written to true. +func (r *httpVPackRequest) WroteRequest(httptrace.WroteRequestInfo) { + r.written = true +} + +// createHTTPRequest creates a golang http.Request based on the configured arguments. +func (r *httpVPackRequest) createHTTPRequest(endpoint url.URL) (*http.Request, error) { + r.written = false + u := endpoint + u.Path = "" + url := u.String() + if !strings.HasSuffix(url, "/") { + url = url + "/" + } + p := r.path + if strings.HasPrefix(p, "/") { + p = p[1:] + } + url = url + p + if r.q != nil { + q := r.q.Encode() + if len(q) > 0 { + url = url + "?" 
+ q + } + } + var body io.Reader + if r.body != nil { + body = bytes.NewReader(r.body) + } + req, err := http.NewRequest(r.method, url, body) + if err != nil { + return nil, driver.WithStack(err) + } + + if r.hdr != nil { + for k, v := range r.hdr { + req.Header.Set(k, v) + } + } + + req.Header.Set("Accept", "application/x-velocypack") + //req.Header.Set("Accept", "application/json") + if r.body != nil { + req.Header.Set("Content-Length", strconv.Itoa(len(r.body))) + req.Header.Set("Content-Type", "application/x-velocypack") + } + return req, nil +} diff --git a/deps/github.com/arangodb/go-driver/http/response_json.go b/deps/github.com/arangodb/go-driver/http/response_json.go new file mode 100644 index 000000000..2087171e9 --- /dev/null +++ b/deps/github.com/arangodb/go-driver/http/response_json.go @@ -0,0 +1,205 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package http + +import ( + "encoding/json" + "fmt" + "net/http" + "reflect" + "strings" + + driver "github.com/arangodb/go-driver" +) + +// httpJSONResponse implements driver.Response for standard golang JSON encoded http responses. 
+type httpJSONResponse struct { + resp *http.Response + rawResponse []byte + bodyObject map[string]*json.RawMessage + bodyArray []map[string]*json.RawMessage +} + +// StatusCode returns an HTTP compatible status code of the response. +func (r *httpJSONResponse) StatusCode() int { + return r.resp.StatusCode +} + +// Endpoint returns the endpoint that handled the request. +func (r *httpJSONResponse) Endpoint() string { + u := *r.resp.Request.URL + u.Path = "" + return u.String() +} + +// CheckStatus checks if the status of the response equals to one of the given status codes. +// If so, nil is returned. +// If not, an attempt is made to parse an error response in the body and an error is returned. +func (r *httpJSONResponse) CheckStatus(validStatusCodes ...int) error { + for _, x := range validStatusCodes { + if x == r.resp.StatusCode { + // Found valid status code + return nil + } + } + // Invalid status code, try to parse arango error response. + var aerr driver.ArangoError + if err := r.ParseBody("", &aerr); err == nil && aerr.HasError { + // Found correct arango error. + return aerr + } + + // We do not have a valid error code, so we can only create one based on the HTTP status code. + return driver.ArangoError{ + HasError: true, + Code: r.resp.StatusCode, + ErrorMessage: fmt.Sprintf("Unexpected status code %d", r.resp.StatusCode), + } +} + +// Header returns the value of a response header with given key. +// If no such header is found, an empty string is returned. +func (r *httpJSONResponse) Header(key string) string { + return r.resp.Header.Get(key) +} + +// ParseBody performs protocol specific unmarshalling of the response data into the given result. +// If the given field is non-empty, the contents of that field will be parsed into the given result. 
+func (r *httpJSONResponse) ParseBody(field string, result interface{}) error { + if r.bodyObject == nil { + bodyMap := make(map[string]*json.RawMessage) + if err := json.Unmarshal(r.rawResponse, &bodyMap); err != nil { + return driver.WithStack(err) + } + r.bodyObject = bodyMap + } + if result != nil { + if err := parseBody(r.bodyObject, field, result); err != nil { + return driver.WithStack(err) + } + } + return nil +} + +// ParseArrayBody performs protocol specific unmarshalling of the response array data into individual response objects. +// This can only be used for requests that return an array of objects. +func (r *httpJSONResponse) ParseArrayBody() ([]driver.Response, error) { + if r.bodyArray == nil { + var bodyArray []map[string]*json.RawMessage + if err := json.Unmarshal(r.rawResponse, &bodyArray); err != nil { + return nil, driver.WithStack(err) + } + r.bodyArray = bodyArray + } + resps := make([]driver.Response, len(r.bodyArray)) + for i, x := range r.bodyArray { + resps[i] = &httpJSONResponseElement{bodyObject: x} + } + return resps, nil +} + +func parseBody(bodyObject map[string]*json.RawMessage, field string, result interface{}) error { + if field != "" { + // Unmarshal only a specific field + raw, ok := bodyObject[field] + if !ok || raw == nil { + // Field not found, silently ignored + return nil + } + // Unmarshal field + if err := json.Unmarshal(*raw, result); err != nil { + return driver.WithStack(err) + } + return nil + } + // Unmarshal entire body + rv := reflect.ValueOf(result) + if rv.Kind() != reflect.Ptr || rv.IsNil() { + return &json.InvalidUnmarshalError{Type: reflect.TypeOf(result)} + } + objValue := rv.Elem() + switch objValue.Kind() { + case reflect.Struct: + if err := decodeObjectFields(objValue, bodyObject); err != nil { + return driver.WithStack(err) + } + case reflect.Map: + if err := decodeMapFields(objValue, bodyObject); err != nil { + return driver.WithStack(err) + } + default: + return &json.InvalidUnmarshalError{Type: 
reflect.TypeOf(result)} + } + return nil +} + +// decodeObjectFields decodes fields from the given body into a objValue of kind struct. +func decodeObjectFields(objValue reflect.Value, body map[string]*json.RawMessage) error { + objValueType := objValue.Type() + for i := 0; i != objValue.NumField(); i++ { + f := objValueType.Field(i) + if f.Anonymous { + // Recurse into fields of anonymous field + if err := decodeObjectFields(objValue.Field(i), body); err != nil { + return driver.WithStack(err) + } + } else { + // Decode individual field + jsonName := strings.Split(f.Tag.Get("json"), ",")[0] + if jsonName == "" { + jsonName = f.Name + } else if jsonName == "-" { + continue + } + raw, ok := body[jsonName] + if ok && raw != nil { + field := objValue.Field(i) + if err := json.Unmarshal(*raw, field.Addr().Interface()); err != nil { + return driver.WithStack(err) + } + } + } + } + return nil +} + +// decodeMapFields decodes fields from the given body into a mapValue of kind map. +func decodeMapFields(val reflect.Value, body map[string]*json.RawMessage) error { + mapVal := val + if mapVal.IsNil() { + valType := val.Type() + mapType := reflect.MapOf(valType.Key(), valType.Elem()) + mapVal = reflect.MakeMap(mapType) + } + for jsonName, raw := range body { + var value interface{} + if raw != nil { + if err := json.Unmarshal(*raw, &value); err != nil { + return driver.WithStack(err) + } + } + mapVal.SetMapIndex(reflect.ValueOf(jsonName), reflect.ValueOf(value)) + } + val.Set(mapVal) + return nil +} diff --git a/deps/github.com/arangodb/go-driver/http/response_json_element.go b/deps/github.com/arangodb/go-driver/http/response_json_element.go new file mode 100644 index 000000000..17b50e1fb --- /dev/null +++ b/deps/github.com/arangodb/go-driver/http/response_json_element.go @@ -0,0 +1,113 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in 
compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package http + +import ( + "encoding/json" + "fmt" + + driver "github.com/arangodb/go-driver" +) + +// httpJSONResponseElement implements driver.Response for an entry of an array response. +type httpJSONResponseElement struct { + statusCode *int + bodyObject map[string]*json.RawMessage +} + +// StatusCode returns an HTTP compatible status code of the response. +func (r *httpJSONResponseElement) StatusCode() int { + if r.statusCode == nil { + statusCode := 200 + // Look for "error" field + if errorFieldJSON, found := r.bodyObject["error"]; found { + var hasError bool + if err := json.Unmarshal(*errorFieldJSON, &hasError); err == nil && hasError { + // We have an error, look for code field + statusCode = 500 + if codeFieldJSON, found := r.bodyObject["code"]; found { + var code int + if err := json.Unmarshal(*codeFieldJSON, &code); err == nil { + statusCode = code + } + } + } + } + r.statusCode = &statusCode + } + return *r.statusCode +} + +// Endpoint returns the endpoint that handled the request. +func (r *httpJSONResponseElement) Endpoint() string { + return "" +} + +// CheckStatus checks if the status of the response equals to one of the given status codes. +// If so, nil is returned. +// If not, an attempt is made to parse an error response in the body and an error is returned. 
+func (r *httpJSONResponseElement) CheckStatus(validStatusCodes ...int) error { + statusCode := r.StatusCode() + for _, x := range validStatusCodes { + if x == statusCode { + // Found valid status code + return nil + } + } + // Invalid status code, try to parse arango error response. + var aerr driver.ArangoError + if err := r.ParseBody("", &aerr); err == nil && aerr.HasError { + // Found correct arango error. + return aerr + } + + // We do not have a valid error code, so we can only create one based on the HTTP status code. + return driver.ArangoError{ + HasError: true, + Code: statusCode, + ErrorMessage: fmt.Sprintf("Unexpected status code %d", statusCode), + } +} + +// Header returns the value of a response header with given key. +// If no such header is found, an empty string is returned. +func (r *httpJSONResponseElement) Header(key string) string { + return "" +} + +// ParseBody performs protocol specific unmarshalling of the response data into the given result. +// If the given field is non-empty, the contents of that field will be parsed into the given result. +func (r *httpJSONResponseElement) ParseBody(field string, result interface{}) error { + if result != nil { + if err := parseBody(r.bodyObject, field, result); err != nil { + return driver.WithStack(err) + } + } + return nil +} + +// ParseArrayBody performs protocol specific unmarshalling of the response array data into individual response objects. +// This can only be used for requests that return an array of objects. 
+func (r *httpJSONResponseElement) ParseArrayBody() ([]driver.Response, error) { + return nil, driver.WithStack(driver.InvalidArgumentError{Message: "ParseArrayBody not allowed"}) +} diff --git a/deps/github.com/arangodb/go-driver/http/response_vpack.go b/deps/github.com/arangodb/go-driver/http/response_vpack.go new file mode 100644 index 000000000..f87cf261b --- /dev/null +++ b/deps/github.com/arangodb/go-driver/http/response_vpack.go @@ -0,0 +1,149 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package http + +import ( + "fmt" + "net/http" + + driver "github.com/arangodb/go-driver" + velocypack "github.com/arangodb/go-velocypack" +) + +// httpVPackResponse implements driver.Response for standard golang Velocypack encoded http responses. +type httpVPackResponse struct { + resp *http.Response + rawResponse []byte + slice velocypack.Slice + bodyArray []driver.Response +} + +// StatusCode returns an HTTP compatible status code of the response. +func (r *httpVPackResponse) StatusCode() int { + return r.resp.StatusCode +} + +// Endpoint returns the endpoint that handled the request. +func (r *httpVPackResponse) Endpoint() string { + u := *r.resp.Request.URL + u.Path = "" + return u.String() +} + +// CheckStatus checks if the status of the response equals to one of the given status codes. 
+// If so, nil is returned. +// If not, an attempt is made to parse an error response in the body and an error is returned. +func (r *httpVPackResponse) CheckStatus(validStatusCodes ...int) error { + for _, x := range validStatusCodes { + if x == r.resp.StatusCode { + // Found valid status code + return nil + } + } + // Invalid status code, try to parse arango error response. + var aerr driver.ArangoError + if err := r.ParseBody("", &aerr); err == nil && aerr.HasError { + // Found correct arango error. + return aerr + } + + // We do not have a valid error code, so we can only create one based on the HTTP status code. + return driver.ArangoError{ + HasError: true, + Code: r.resp.StatusCode, + ErrorMessage: fmt.Sprintf("Unexpected status code %d", r.resp.StatusCode), + } +} + +// Header returns the value of a response header with given key. +// If no such header is found, an empty string is returned. +func (r *httpVPackResponse) Header(key string) string { + return r.resp.Header.Get(key) +} + +// ParseBody performs protocol specific unmarshalling of the response data into the given result. +// If the given field is non-empty, the contents of that field will be parsed into the given result. +func (r *httpVPackResponse) ParseBody(field string, result interface{}) error { + slice, err := r.getSlice() + if err != nil { + return driver.WithStack(err) + } + if field != "" { + var err error + slice, err = slice.Get(field) + if err != nil { + return driver.WithStack(err) + } + if slice.IsNone() { + // Field not found + return nil + } + } + if result != nil { + if err := velocypack.Unmarshal(slice, result); err != nil { + return driver.WithStack(err) + } + } + return nil +} + +// ParseArrayBody performs protocol specific unmarshalling of the response array data into individual response objects. +// This can only be used for requests that return an array of objects. 
+func (r *httpVPackResponse) ParseArrayBody() ([]driver.Response, error) { + if r.bodyArray == nil { + slice, err := r.getSlice() + if err != nil { + return nil, driver.WithStack(err) + } + l, err := slice.Length() + if err != nil { + return nil, driver.WithStack(err) + } + + bodyArray := make([]driver.Response, 0, l) + it, err := velocypack.NewArrayIterator(slice) + if err != nil { + return nil, driver.WithStack(err) + } + for it.IsValid() { + v, err := it.Value() + if err != nil { + return nil, driver.WithStack(err) + } + bodyArray = append(bodyArray, &httpVPackResponseElement{slice: v}) + it.Next() + } + r.bodyArray = bodyArray + } + + return r.bodyArray, nil +} + +// getSlice reads the slice from the response if needed. +func (r *httpVPackResponse) getSlice() (velocypack.Slice, error) { + if r.slice == nil { + r.slice = velocypack.Slice(r.rawResponse) + //fmt.Println(r.slice) + } + return r.slice, nil +} diff --git a/deps/github.com/arangodb/go-driver/http/response_vpack_element.go b/deps/github.com/arangodb/go-driver/http/response_vpack_element.go new file mode 100644 index 000000000..1b81d7ea6 --- /dev/null +++ b/deps/github.com/arangodb/go-driver/http/response_vpack_element.go @@ -0,0 +1,123 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package http + +import ( + "fmt" + + driver "github.com/arangodb/go-driver" + velocypack "github.com/arangodb/go-velocypack" +) + +// httpVPackResponseElement implements driver.Response for an entry of an array response. +type httpVPackResponseElement struct { + statusCode *int + slice velocypack.Slice +} + +// StatusCode returns an HTTP compatible status code of the response. +func (r *httpVPackResponseElement) StatusCode() int { + if r.statusCode == nil { + statusCode := 200 + // Look for "error" field + if errorFieldSlice, _ := r.slice.Get("error"); !errorFieldSlice.IsNone() { + if hasError, err := errorFieldSlice.GetBool(); err == nil && hasError { + // We have an error, look for code field + statusCode = 500 + if codeFieldSlice, _ := r.slice.Get("code"); !codeFieldSlice.IsNone() { + if code, err := codeFieldSlice.GetInt(); err == nil { + statusCode = int(code) + } + } + } + } + r.statusCode = &statusCode + } + return *r.statusCode +} + +// Endpoint returns the endpoint that handled the request. +func (r *httpVPackResponseElement) Endpoint() string { + return "" +} + +// CheckStatus checks if the status of the response equals to one of the given status codes. +// If so, nil is returned. +// If not, an attempt is made to parse an error response in the body and an error is returned. +func (r *httpVPackResponseElement) CheckStatus(validStatusCodes ...int) error { + statusCode := r.StatusCode() + for _, x := range validStatusCodes { + if x == statusCode { + // Found valid status code + return nil + } + } + // Invalid status code, try to parse arango error response. + var aerr driver.ArangoError + if err := r.ParseBody("", &aerr); err == nil && aerr.HasError { + // Found correct arango error. + return aerr + } + + // We do not have a valid error code, so we can only create one based on the HTTP status code. 
+ return driver.ArangoError{ + HasError: true, + Code: statusCode, + ErrorMessage: fmt.Sprintf("Unexpected status code %d", statusCode), + } +} + +// Header returns the value of a response header with given key. +// If no such header is found, an empty string is returned. +func (r *httpVPackResponseElement) Header(key string) string { + return "" +} + +// ParseBody performs protocol specific unmarshalling of the response data into the given result. +// If the given field is non-empty, the contents of that field will be parsed into the given result. +func (r *httpVPackResponseElement) ParseBody(field string, result interface{}) error { + slice := r.slice + if field != "" { + var err error + slice, err = slice.Get(field) + if err != nil { + return driver.WithStack(err) + } + if slice.IsNone() { + // Field not found + return nil + } + } + if result != nil { + if err := velocypack.Unmarshal(slice, result); err != nil { + return driver.WithStack(err) + } + } + return nil +} + +// ParseArrayBody performs protocol specific unmarshalling of the response array data into individual response objects. +// This can only be used for requests that return an array of objects. +func (r *httpVPackResponseElement) ParseArrayBody() ([]driver.Response, error) { + return nil, driver.WithStack(driver.InvalidArgumentError{Message: "ParseArrayBody not allowed"}) +} diff --git a/deps/github.com/arangodb/go-driver/id.go b/deps/github.com/arangodb/go-driver/id.go new file mode 100644 index 000000000..69f0f4573 --- /dev/null +++ b/deps/github.com/arangodb/go-driver/id.go @@ -0,0 +1,92 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import ( + "fmt" + "strings" +) + +// DocumentID references a document in a collection. +// Format: collection/_key +type DocumentID string + +// String returns a string representation of the document ID. +func (id DocumentID) String() string { + return string(id) +} + +// Validate validates the given id. +func (id DocumentID) Validate() error { + if id == "" { + return WithStack(fmt.Errorf("DocumentID is empty")) + } + parts := strings.Split(string(id), "/") + if len(parts) != 2 { + return WithStack(fmt.Errorf("Expected 'collection/key', got '%s'", string(id))) + } + if parts[0] == "" { + return WithStack(fmt.Errorf("Collection part of '%s' is empty", string(id))) + } + if parts[1] == "" { + return WithStack(fmt.Errorf("Key part of '%s' is empty", string(id))) + } + return nil +} + +// ValidateOrEmpty validates the given id unless it is empty. +// In case of empty, nil is returned. +func (id DocumentID) ValidateOrEmpty() error { + if id == "" { + return nil + } + if err := id.Validate(); err != nil { + return WithStack(err) + } + return nil +} + +// IsEmpty returns true if the given ID is empty, false otherwise. +func (id DocumentID) IsEmpty() bool { + return id == "" +} + +// Collection returns the collection part of the ID. +func (id DocumentID) Collection() string { + parts := strings.Split(string(id), "/") + return pathUnescape(parts[0]) +} + +// Key returns the key part of the ID. 
+func (id DocumentID) Key() string { + parts := strings.Split(string(id), "/") + if len(parts) == 2 { + return pathUnescape(parts[1]) + } + return "" +} + +// NewDocumentID creates a new document ID from the given collection, key pair. +func NewDocumentID(collection, key string) DocumentID { + return DocumentID(pathEscape(collection) + "/" + pathEscape(key)) +} diff --git a/deps/github.com/arangodb/go-driver/index.go b/deps/github.com/arangodb/go-driver/index.go new file mode 100644 index 000000000..47691b6e7 --- /dev/null +++ b/deps/github.com/arangodb/go-driver/index.go @@ -0,0 +1,35 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import "context" + +// Index provides access to a single index in a single collection. +type Index interface { + // Name returns the name of the index. + Name() string + + // Remove removes the entire index. + // If the index does not exist, a NotFoundError is returned. 
+ Remove(ctx context.Context) error +} diff --git a/deps/github.com/arangodb/go-driver/index_impl.go b/deps/github.com/arangodb/go-driver/index_impl.go new file mode 100644 index 000000000..e1301934b --- /dev/null +++ b/deps/github.com/arangodb/go-driver/index_impl.go @@ -0,0 +1,84 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import ( + "context" + "path" + "strings" +) + +// newIndex creates a new Index implementation. +func newIndex(id string, col *collection) (Index, error) { + if id == "" { + return nil, WithStack(InvalidArgumentError{Message: "id is empty"}) + } + parts := strings.Split(id, "/") + if len(parts) != 2 { + return nil, WithStack(InvalidArgumentError{Message: "id must be `collection/name`"}) + } + if col == nil { + return nil, WithStack(InvalidArgumentError{Message: "col is nil"}) + } + return &index{ + id: id, + col: col, + db: col.db, + conn: col.conn, + }, nil +} + +type index struct { + id string + db *database + col *collection + conn Connection +} + +// relPath creates the relative path to this index (`_db//_api/index`) +func (i *index) relPath() string { + return path.Join(i.db.relPath(), "_api", "index") +} + +// Name returns the name of the index. 
+func (i *index) Name() string { + parts := strings.Split(i.id, "/") + return parts[1] +} + +// Remove removes the entire index. +// If the index does not exist, a NotFoundError is returned. +func (i *index) Remove(ctx context.Context) error { + req, err := i.conn.NewRequest("DELETE", path.Join(i.relPath(), i.id)) + if err != nil { + return WithStack(err) + } + resp, err := i.conn.Do(ctx, req) + if err != nil { + return WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return WithStack(err) + } + return nil +} diff --git a/deps/github.com/arangodb/go-driver/main_test.go b/deps/github.com/arangodb/go-driver/main_test.go new file mode 100644 index 000000000..89336162f --- /dev/null +++ b/deps/github.com/arangodb/go-driver/main_test.go @@ -0,0 +1,55 @@ +package driver_test + +import ( + "context" + "log" + "os" + "testing" + "time" + + driver "github.com/arangodb/go-driver" + "github.com/arangodb/go-driver/http" +) + +// TestMain creates a simple connection and waits for the server to be ready. +// This avoids a lot of clutter code in the examples. +func TestMain(m *testing.M) { + // Wait for database connection to be ready. + conn, err := http.NewConnection(http.ConnectionConfig{ + Endpoints: []string{"http://localhost:8529"}, + }) + if err != nil { + log.Fatalf("Failed to create HTTP connection: %v", err) + } + c, err := driver.NewClient(driver.ClientConfig{ + Connection: conn, + }) + + waitUntilServerAvailable(context.Background(), c) + + os.Exit(m.Run()) +} + +// waitUntilServerAvailable keeps waiting until the server/cluster that the client is addressing is available.
+func waitUntilServerAvailable(ctx context.Context, c driver.Client) bool { + instanceUp := make(chan bool) + go func() { + for { + verCtx, cancel := context.WithTimeout(ctx, time.Second*5) + if _, err := c.Version(verCtx); err == nil { + cancel() + instanceUp <- true + return + } else { + cancel() + time.Sleep(time.Second) + } + } + }() + select { + case up := <-instanceUp: + return up + case <-ctx.Done(): + return false + } +} diff --git a/deps/github.com/arangodb/go-driver/meta.go b/deps/github.com/arangodb/go-driver/meta.go new file mode 100644 index 000000000..550abd9ae --- /dev/null +++ b/deps/github.com/arangodb/go-driver/meta.go @@ -0,0 +1,68 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +// DocumentMeta contains all meta data used to identify a document. +type DocumentMeta struct { + Key string `json:"_key,omitempty"` + ID DocumentID `json:"_id,omitempty"` + Rev string `json:"_rev,omitempty"` +} + +// validateKey returns an error if the given key is empty or otherwise invalid. +func validateKey(key string) error { + if key == "" { + return WithStack(InvalidArgumentError{Message: "key is empty"}) + } + return nil +} + +// DocumentMetaSlice is a slice of DocumentMeta elements +type DocumentMetaSlice []DocumentMeta + +// Keys returns the keys of all elements.
+func (l DocumentMetaSlice) Keys() []string { + keys := make([]string, len(l)) + for i, m := range l { + keys[i] = m.Key + } + return keys +} + +// Revs returns the revisions of all elements. +func (l DocumentMetaSlice) Revs() []string { + revs := make([]string, len(l)) + for i, m := range l { + revs[i] = m.Rev + } + return revs +} + +// IDs returns the ID's of all elements. +func (l DocumentMetaSlice) IDs() []DocumentID { + ids := make([]DocumentID, len(l)) + for i, m := range l { + ids[i] = m.ID + } + return ids +} diff --git a/deps/github.com/arangodb/go-driver/protocol.go b/deps/github.com/arangodb/go-driver/protocol.go new file mode 100644 index 000000000..62702b26b --- /dev/null +++ b/deps/github.com/arangodb/go-driver/protocol.go @@ -0,0 +1,56 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +type Protocol int + +const ( + ProtocolHTTP Protocol = iota + ProtocolVST1_0 + ProtocolVST1_1 +) + +// ProtocolSet is a set of protocols. +type ProtocolSet []Protocol + +// Contains returns true if the given protocol is contained in the given set, false otherwise. 
+func (ps ProtocolSet) Contains(p Protocol) bool { + for _, x := range ps { + if x == p { + return true + } + } + return false +} + +// ContainsAny returns true if any of the given protocols is contained in the given set, false otherwise. +func (ps ProtocolSet) ContainsAny(p ...Protocol) bool { + for _, x := range ps { + for _, y := range p { + if x == y { + return true + } + } + } + return false +} diff --git a/deps/github.com/arangodb/go-driver/query.go b/deps/github.com/arangodb/go-driver/query.go new file mode 100644 index 000000000..43af0152f --- /dev/null +++ b/deps/github.com/arangodb/go-driver/query.go @@ -0,0 +1,154 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import ( + "context" + "time" +) + +const ( + keyQueryCount = "arangodb-query-count" + keyQueryBatchSize = "arangodb-query-batchSize" + keyQueryCache = "arangodb-query-cache" + keyQueryMemoryLimit = "arangodb-query-memoryLimit" + keyQueryTTL = "arangodb-query-ttl" +) + +// WithQueryCount is used to configure a context that will set the Count of a query request, +// If value is not given it defaults to true. 
+func WithQueryCount(parent context.Context, value ...bool) context.Context { + v := true + if len(value) > 0 { + v = value[0] + } + return context.WithValue(contextOrBackground(parent), keyQueryCount, v) +} + +// WithQueryBatchSize is used to configure a context that will set the BatchSize of a query request, +func WithQueryBatchSize(parent context.Context, value int) context.Context { + return context.WithValue(contextOrBackground(parent), keyQueryBatchSize, value) +} + +// WithQueryCache is used to configure a context that will set the Cache of a query request, +// If value is not given it defaults to true. +func WithQueryCache(parent context.Context, value ...bool) context.Context { + v := true + if len(value) > 0 { + v = value[0] + } + return context.WithValue(contextOrBackground(parent), keyQueryCache, v) +} + +// WithQueryMemoryLimit is used to configure a context that will set the MemoryLimit of a query request, +func WithQueryMemoryLimit(parent context.Context, value int64) context.Context { + return context.WithValue(contextOrBackground(parent), keyQueryMemoryLimit, value) +} + +// WithQueryTTL is used to configure a context that will set the TTL of a query request, +func WithQueryTTL(parent context.Context, value time.Duration) context.Context { + return context.WithValue(contextOrBackground(parent), keyQueryTTL, value) +} + +type queryRequest struct { + // indicates whether the number of documents in the result set should be returned in the "count" attribute of the result. + // Calculating the "count" attribute might have a performance impact for some queries in the future so this option is + // turned off by default, and "count" is only returned when requested. + Count bool `json:"count,omitempty"` + // maximum number of result documents to be transferred from the server to the client in one roundtrip. + // If this attribute is not set, a server-controlled default value will be used. A batchSize value of 0 is disallowed.
+ BatchSize int `json:"batchSize,omitempty"` + // flag to determine whether the AQL query cache shall be used. If set to false, then any query cache lookup + // will be skipped for the query. If set to true, it will lead to the query cache being checked for the query + // if the query cache mode is either on or demand. + Cache bool `json:"cache,omitempty"` + // the maximum number of memory (measured in bytes) that the query is allowed to use. If set, then the query will fail + // with error "resource limit exceeded" in case it allocates too much memory. A value of 0 indicates that there is no memory limit. + MemoryLimit int64 `json:"memoryLimit,omitempty"` + // The time-to-live for the cursor (in seconds). The cursor will be removed on the server automatically after the specified + // amount of time. This is useful to ensure garbage collection of cursors that are not fully fetched by clients. + // If not set, a server-defined value will be used. + TTL float64 `json:"ttl,omitempty"` + // contains the query string to be executed + Query string `json:"query"` + // key/value pairs representing the bind parameters. + BindVars map[string]interface{} `json:"bindVars,omitempty"` + Options struct { + // If set to true, then the additional query profiling information will be returned in the sub-attribute profile of the + // extra return attribute if the query result is not served from the query cache. + Profile bool `json:"profile,omitempty"` + // A list of to-be-included or to-be-excluded optimizer rules can be put into this attribute, telling the optimizer to include or exclude specific rules. + // To disable a rule, prefix its name with a -, to enable a rule, prefix it with a +. There is also a pseudo-rule all, which will match all optimizer rules. + OptimizerRules string `json:"optimizer.rules,omitempty"` + // This enterprise parameter allows to configure how long a DBServer will have time to bring the satellite collections + // involved in the query into sync. 
The default value is 60.0 (seconds). When the max time has been reached the query will be stopped. + SatelliteSyncWait float64 `json:"satelliteSyncWait,omitempty"` + // if set to true and the query contains a LIMIT clause, then the result will have an extra attribute with the sub-attributes + // stats and fullCount, { ... , "extra": { "stats": { "fullCount": 123 } } }. The fullCount attribute will contain the number + // of documents in the result before the last LIMIT in the query was applied. It can be used to count the number of documents + // that match certain filter criteria, but only return a subset of them, in one go. It is thus similar to MySQL's SQL_CALC_FOUND_ROWS hint. + // Note that setting the option will disable a few LIMIT optimizations and may lead to more documents being processed, and + // thus make queries run longer. Note that the fullCount attribute will only be present in the result if the query has a LIMIT clause + // and the LIMIT clause is actually used in the query. + FullCount bool `json:"fullCount,omitempty"` + // Limits the maximum number of plans that are created by the AQL query optimizer. + MaxPlans int `json:"maxPlans,omitempty"` + } `json:"options,omitempty"` +} + +// applyContextSettings fills fields in the queryRequest from the given context. 
+func (q *queryRequest) applyContextSettings(ctx context.Context) { + if ctx == nil { + return + } + if rawValue := ctx.Value(keyQueryCount); rawValue != nil { + if value, ok := rawValue.(bool); ok { + q.Count = value + } + } + if rawValue := ctx.Value(keyQueryBatchSize); rawValue != nil { + if value, ok := rawValue.(int); ok { + q.BatchSize = value + } + } + if rawValue := ctx.Value(keyQueryCache); rawValue != nil { + if value, ok := rawValue.(bool); ok { + q.Cache = value + } + } + if rawValue := ctx.Value(keyQueryMemoryLimit); rawValue != nil { + if value, ok := rawValue.(int64); ok { + q.MemoryLimit = value + } + } + if rawValue := ctx.Value(keyQueryTTL); rawValue != nil { + if value, ok := rawValue.(time.Duration); ok { + q.TTL = value.Seconds() + } + } +} + +type parseQueryRequest struct { + // contains the query string to be executed + Query string `json:"query"` +} diff --git a/deps/github.com/arangodb/go-driver/replication.go b/deps/github.com/arangodb/go-driver/replication.go new file mode 100644 index 000000000..18769a01f --- /dev/null +++ b/deps/github.com/arangodb/go-driver/replication.go @@ -0,0 +1,35 @@ +// +// DISCLAIMER +// +// Copyright 2018 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import ( + "context" +) + +// Replication provides access to replication related operations. 
+type Replication interface {
+	// Get the inventory of the server containing all collections (with entire details) of a database.
+	// When this function is called on a coordinator in a cluster, an ID of a DBServer must be provided
+	// using a context that is prepared with `WithDBServerID`.
+	DatabaseInventory(ctx context.Context, db Database) (DatabaseInventory, error)
+}
diff --git a/deps/github.com/arangodb/go-driver/replication_impl.go b/deps/github.com/arangodb/go-driver/replication_impl.go
new file mode 100644
index 000000000..3ea72dba1
--- /dev/null
+++ b/deps/github.com/arangodb/go-driver/replication_impl.go
@@ -0,0 +1,49 @@
+//
+// DISCLAIMER
+//
+// Copyright 2018 ArangoDB GmbH, Cologne, Germany
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Copyright holder is ArangoDB GmbH, Cologne, Germany
+//
+// Author Ewout Prangsma
+//
+
+package driver
+
+import (
+	"context"
+	"path"
+)
+
+// Get the inventory of a server containing all collections (with entire details) of a database. 
+func (c *client) DatabaseInventory(ctx context.Context, db Database) (DatabaseInventory, error) { + req, err := c.conn.NewRequest("GET", path.Join("_db", db.Name(), "_api/replication/inventory")) + if err != nil { + return DatabaseInventory{}, WithStack(err) + } + applyContextSettings(ctx, req) + resp, err := c.conn.Do(ctx, req) + if err != nil { + return DatabaseInventory{}, WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return DatabaseInventory{}, WithStack(err) + } + var result DatabaseInventory + if err := resp.ParseBody("", &result); err != nil { + return DatabaseInventory{}, WithStack(err) + } + return result, nil +} diff --git a/deps/github.com/arangodb/go-driver/test/benchmark_collection_test.go b/deps/github.com/arangodb/go-driver/test/benchmark_collection_test.go new file mode 100644 index 000000000..934569f9b --- /dev/null +++ b/deps/github.com/arangodb/go-driver/test/benchmark_collection_test.go @@ -0,0 +1,72 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "fmt" + "testing" +) + +// BenchmarkCollectionExists measures the CollectionExists operation. 
+func BenchmarkCollectionExists(b *testing.B) { + c := createClientFromEnv(b, true) + db := ensureDatabase(nil, c, "collection_test", nil, b) + col := ensureCollection(nil, db, "collection_exist_test", nil, b) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + if _, err := db.CollectionExists(nil, col.Name()); err != nil { + b.Errorf("CollectionExists failed: %s", describe(err)) + } + } +} + +// BenchmarkCollection measures the Collection operation. +func BenchmarkCollection(b *testing.B) { + c := createClientFromEnv(b, true) + db := ensureDatabase(nil, c, "collection_test", nil, b) + col := ensureCollection(nil, db, "collection_test", nil, b) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + if _, err := db.Collection(nil, col.Name()); err != nil { + b.Errorf("Collection failed: %s", describe(err)) + } + } +} + +// BenchmarkCollections measures the Collections operation. +func BenchmarkCollections(b *testing.B) { + c := createClientFromEnv(b, true) + db := ensureDatabase(nil, c, "collection_test", nil, b) + for i := 0; i < 10; i++ { + ensureCollection(nil, db, fmt.Sprintf("col%d", i), nil, b) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + if _, err := db.Collections(nil); err != nil { + b.Errorf("Collections failed: %s", describe(err)) + } + } +} diff --git a/deps/github.com/arangodb/go-driver/test/benchmark_document_test.go b/deps/github.com/arangodb/go-driver/test/benchmark_document_test.go new file mode 100644 index 000000000..414bc39c0 --- /dev/null +++ b/deps/github.com/arangodb/go-driver/test/benchmark_document_test.go @@ -0,0 +1,138 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import "testing" + +// BenchmarkCreateDocument measures the CreateDocument operation for a simple document. +func BenchmarkCreateDocument(b *testing.B) { + c := createClientFromEnv(b, true) + db := ensureDatabase(nil, c, "document_test", nil, b) + col := ensureCollection(nil, db, "document_test", nil, b) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + doc := UserDoc{ + "Jan", + 40 + i, + } + if _, err := col.CreateDocument(nil, doc); err != nil { + b.Fatalf("Failed to create new document: %s", describe(err)) + } + } +} + +// BenchmarkCreateDocumentParallel measures parallel CreateDocument operations for a simple document. +func BenchmarkCreateDocumentParallel(b *testing.B) { + c := createClientFromEnv(b, true) + db := ensureDatabase(nil, c, "document_test", nil, b) + col := ensureCollection(nil, db, "document_test", nil, b) + + b.SetParallelism(100) + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + doc := UserDoc{ + "Jan", + 40, + } + if _, err := col.CreateDocument(nil, doc); err != nil { + b.Fatalf("Failed to create new document: %s", describe(err)) + } + } + }) +} + +// BenchmarkReadDocument measures the ReadDocument operation for a simple document. 
+func BenchmarkReadDocument(b *testing.B) { + c := createClientFromEnv(b, true) + db := ensureDatabase(nil, c, "document_test", nil, b) + col := ensureCollection(nil, db, "document_test", nil, b) + doc := UserDoc{ + "Jan", + 40, + } + meta, err := col.CreateDocument(nil, doc) + if err != nil { + b.Fatalf("Failed to create new document: %s", describe(err)) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + var result UserDoc + if _, err := col.ReadDocument(nil, meta.Key, &result); err != nil { + b.Errorf("Failed to read document: %s", describe(err)) + } + } +} + +// BenchmarkReadDocumentParallel measures parallel ReadDocument operations for a simple document. +func BenchmarkReadDocumentParallel(b *testing.B) { + c := createClientFromEnv(b, true) + db := ensureDatabase(nil, c, "document_test", nil, b) + col := ensureCollection(nil, db, "document_test", nil, b) + doc := UserDoc{ + "Jan", + 40, + } + meta, err := col.CreateDocument(nil, doc) + if err != nil { + b.Fatalf("Failed to create new document: %s", describe(err)) + } + + b.SetParallelism(100) + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + var result UserDoc + if _, err := col.ReadDocument(nil, meta.Key, &result); err != nil { + b.Errorf("Failed to read document: %s", describe(err)) + } + } + }) +} + +// BenchmarkRemoveDocument measures the RemoveDocument operation for a simple document. 
+func BenchmarkRemoveDocument(b *testing.B) { + c := createClientFromEnv(b, true) + db := ensureDatabase(nil, c, "document_test", nil, b) + col := ensureCollection(nil, db, "document_test", nil, b) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + // Create document (we don't measure that) + b.StopTimer() + doc := UserDoc{ + "Jan", + 40 + i, + } + meta, err := col.CreateDocument(nil, doc) + if err != nil { + b.Fatalf("Failed to create new document: %s", describe(err)) + } + + // Now do the real test + b.StartTimer() + if _, err := col.RemoveDocument(nil, meta.Key); err != nil { + b.Errorf("Failed to remove document: %s", describe(err)) + } + } +} diff --git a/deps/github.com/arangodb/go-driver/test/client_test.go b/deps/github.com/arangodb/go-driver/test/client_test.go new file mode 100644 index 000000000..6e6d9b892 --- /dev/null +++ b/deps/github.com/arangodb/go-driver/test/client_test.go @@ -0,0 +1,301 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "context" + "crypto/tls" + httplib "net/http" + "os" + "strconv" + "strings" + "sync" + "testing" + "time" + + driver "github.com/arangodb/go-driver" + "github.com/arangodb/go-driver/http" + "github.com/arangodb/go-driver/vst" + "github.com/arangodb/go-driver/vst/protocol" +) + +var ( + logEndpointsOnce sync.Once +) + +// skipBelowVersion skips the test if the current server version is less than +// the given version. +func skipBelowVersion(c driver.Client, version driver.Version, t *testing.T) { + x, err := c.Version(nil) + if err != nil { + t.Fatalf("Failed to get version info: %s", describe(err)) + } + if x.Version.CompareTo(version) < 0 { + t.Skipf("Skipping below version '%s', got version '%s'", version, x.Version) + } +} + +// getEndpointsFromEnv returns the endpoints specified in the TEST_ENDPOINTS +// environment variable. +func getEndpointsFromEnv(t testEnv) []string { + eps := strings.Split(os.Getenv("TEST_ENDPOINTS"), ",") + if len(eps) == 0 { + t.Fatal("No endpoints found in environment variable TEST_ENDPOINTS") + } + return eps +} + +// getContentTypeFromEnv returns the content-type specified in the TEST_CONTENT_TYPE +// environment variable (json|vpack). +func getContentTypeFromEnv(t testEnv) driver.ContentType { + switch ct := os.Getenv("TEST_CONTENT_TYPE"); ct { + case "vpack": + return driver.ContentTypeVelocypack + case "json", "": + return driver.ContentTypeJSON + default: + t.Fatalf("Unknown content type '%s'", ct) + return 0 + } +} + +// createAuthenticationFromEnv initializes an authentication specified in the TEST_AUTHENTICATION +// environment variable. 
+func createAuthenticationFromEnv(t testEnv) driver.Authentication { + authSpec := os.Getenv("TEST_AUTHENTICATION") + if authSpec == "" { + return nil + } + parts := strings.Split(authSpec, ":") + switch parts[0] { + case "basic": + if len(parts) != 3 { + t.Fatalf("Expected username & password for basic authentication") + } + return driver.BasicAuthentication(parts[1], parts[2]) + case "jwt": + if len(parts) != 3 { + t.Fatalf("Expected username & password for jwt authentication") + } + return driver.JWTAuthentication(parts[1], parts[2]) + default: + t.Fatalf("Unknown authentication: '%s'", parts[0]) + return nil + } +} + +// createConnectionFromEnv initializes a Connection from information specified in environment variables. +func createConnectionFromEnv(t testEnv) driver.Connection { + connSpec := os.Getenv("TEST_CONNECTION") + connVer := os.Getenv("TEST_CVERSION") + switch connSpec { + case "vst": + var version protocol.Version + switch connVer { + case "1.0", "": + version = protocol.Version1_0 + case "1.1": + version = protocol.Version1_1 + default: + t.Fatalf("Unknown connection version '%s'", connVer) + } + config := vst.ConnectionConfig{ + Endpoints: getEndpointsFromEnv(t), + TLSConfig: &tls.Config{InsecureSkipVerify: true}, + Transport: protocol.TransportConfig{ + Version: version, + }, + } + conn, err := vst.NewConnection(config) + if err != nil { + t.Fatalf("Failed to create new vst connection: %s", describe(err)) + } + return conn + + case "http", "": + config := http.ConnectionConfig{ + Endpoints: getEndpointsFromEnv(t), + TLSConfig: &tls.Config{InsecureSkipVerify: true}, + ContentType: getContentTypeFromEnv(t), + } + conn, err := http.NewConnection(config) + if err != nil { + t.Fatalf("Failed to create new http connection: %s", describe(err)) + } + return conn + + default: + t.Fatalf("Unknown connection type: '%s'", connSpec) + return nil + } +} + +// createClientFromEnv initializes a Client from information specified in environment variables. 
+func createClientFromEnv(t testEnv, waitUntilReady bool, connection ...*driver.Connection) driver.Client { + conn := createConnectionFromEnv(t) + if len(connection) == 1 { + *connection[0] = conn + } + c, err := driver.NewClient(driver.ClientConfig{ + Connection: conn, + Authentication: createAuthenticationFromEnv(t), + }) + if err != nil { + t.Fatalf("Failed to create new client: %s", describe(err)) + } + if waitUntilReady { + timeout := 3 * time.Minute + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + if up := waitUntilServerAvailable(ctx, c, t); !up { + t.Fatalf("Connection is not available in %s", timeout) + } + // Synchronize endpoints + if err := c.SynchronizeEndpoints(context.Background()); err != nil { + t.Errorf("Failed to synchronize endpoints: %s", describe(err)) + } else { + logEndpointsOnce.Do(func() { + t.Logf("Found endpoints: %v", conn.Endpoints()) + }) + } + } + return c +} + +// waitUntilServerAvailable keeps waiting until the server/cluster that the client is addressing is available. +func waitUntilServerAvailable(ctx context.Context, c driver.Client, t testEnv) bool { + instanceUp := make(chan bool) + go func() { + for { + verCtx, cancel := context.WithTimeout(ctx, time.Second*5) + if _, err := c.Version(verCtx); err == nil { + //t.Logf("Found version %s", v.Version) + cancel() + instanceUp <- true + return + } else { + cancel() + //t.Logf("Version failed: %s %#v", describe(err), err) + time.Sleep(time.Second) + } + } + }() + select { + case up := <-instanceUp: + return up + case <-ctx.Done(): + return false + } +} + +// TestCreateClientHttpConnection creates an HTTP connection to the environment specified +// endpoints and creates a client for that. 
+func TestCreateClientHttpConnection(t *testing.T) { + conn, err := http.NewConnection(http.ConnectionConfig{ + Endpoints: getEndpointsFromEnv(t), + TLSConfig: &tls.Config{InsecureSkipVerify: true}, + }) + if err != nil { + t.Fatalf("Failed to create new http connection: %s", describe(err)) + } + _, err = driver.NewClient(driver.ClientConfig{ + Connection: conn, + Authentication: createAuthenticationFromEnv(t), + }) + if err != nil { + t.Fatalf("Failed to create new client: %s", describe(err)) + } +} + +// TestCreateClientHttpConnectionCustomTransport creates an HTTP connection to the environment specified +// endpoints with a custom HTTP roundtripper and creates a client for that. +func TestCreateClientHttpConnectionCustomTransport(t *testing.T) { + conn, err := http.NewConnection(http.ConnectionConfig{ + Endpoints: getEndpointsFromEnv(t), + Transport: &httplib.Transport{}, + TLSConfig: &tls.Config{InsecureSkipVerify: true}, + }) + if err != nil { + t.Fatalf("Failed to create new http connection: %s", describe(err)) + } + c, err := driver.NewClient(driver.ClientConfig{ + Connection: conn, + Authentication: createAuthenticationFromEnv(t), + }) + if err != nil { + t.Fatalf("Failed to create new client: %s", describe(err)) + } + timeout := 3 * time.Minute + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + if up := waitUntilServerAvailable(ctx, c, t); !up { + t.Fatalf("Connection is not available in %s", timeout) + } + if info, err := c.Version(driver.WithDetails(ctx)); err != nil { + t.Errorf("Version failed: %s", describe(err)) + } else { + t.Logf("Got server version %s", info) + } +} + +// TestResponseHeader checks the Response.Header function. 
+func TestResponseHeader(t *testing.T) { + c := createClientFromEnv(t, true) + ctx := context.Background() + + version, err := c.Version(nil) + if err != nil { + t.Fatalf("Version failed: %s", describe(err)) + } + isv33p := version.Version.CompareTo("3.3") >= 0 + if !isv33p { + t.Skip("This test requires version 3.3") + } else { + var resp driver.Response + db := ensureDatabase(ctx, c, "_system", nil, t) + col := ensureCollection(ctx, db, "response_header_test", nil, t) + + // `ETag` header must contain the `_rev` of the new document in quotes. + doc := map[string]string{ + "Test": "TestResponseHeader", + "Intent": "Check Response.Header", + } + meta, err := col.CreateDocument(driver.WithResponse(ctx, &resp), doc) + if err != nil { + t.Fatalf("CreateDocument failed: %s", describe(err)) + } + expectedETag := strconv.Quote(meta.Rev) + if x := resp.Header("ETag"); x != expectedETag { + t.Errorf("Unexpected result from Header('ETag'), got '%s', expected '%s'", x, expectedETag) + } + if x := resp.Header("Etag"); x != expectedETag { + t.Errorf("Unexpected result from Header('Etag'), got '%s', expected '%s'", x, expectedETag) + } + if x := resp.Header("etag"); x != expectedETag { + t.Errorf("Unexpected result from Header('etag'), got '%s', expected '%s'", x, expectedETag) + } + if x := resp.Header("ETAG"); x != expectedETag { + t.Errorf("Unexpected result from Header('ETAG'), got '%s', expected '%s'", x, expectedETag) + } + } +} diff --git a/deps/github.com/arangodb/go-driver/test/cluster.sh b/deps/github.com/arangodb/go-driver/test/cluster.sh new file mode 100755 index 000000000..b3c5fa209 --- /dev/null +++ b/deps/github.com/arangodb/go-driver/test/cluster.sh @@ -0,0 +1,54 @@ +#!/bin/bash + +if [ -z "$TESTCONTAINER" ]; then + echo "TESTCONTAINER environment variable must be set" + exit 1 +fi + +NAMESPACE=${TESTCONTAINER}-ns +STARTERVOLUME=${TESTCONTAINER}-vol +STARTERCONTAINER=${TESTCONTAINER}-s +CMD=$1 +DOCKERARGS= +STARTERARGS= + +# Cleanup +docker rm -f -v $(docker ps 
-a | grep ${TESTCONTAINER} | awk '{print $1}') &> /dev/null +docker volume rm -f ${STARTERVOLUME} &> /dev/null + +if [ "$CMD" == "start" ]; then + if [ -z "$ARANGODB" ]; then + echo "ARANGODB environment variable must be set" + exit 1 + fi + + # Create volumes + docker volume create ${STARTERVOLUME} &> /dev/null + + # Setup args + if [ -n "$JWTSECRET" ]; then + if [ -z "$TMPDIR" ]; then + echo "TMPDIR environment variable must be set" + exit 1 + fi + JWTSECRETFILE="$TMPDIR/$TESTCONTAINER-jwtsecret" + echo "$JWTSECRET" > ${JWTSECRETFILE} + DOCKERARGS="$DOCKERARGS -v $JWTSECRETFILE:/jwtsecret:ro" + STARTERARGS="$STARTERARGS --auth.jwt-secret=/jwtsecret" + fi + if [ "$SSL" == "auto" ]; then + STARTERARGS="$STARTERARGS --ssl.auto-key" + fi + + # Start network namespace + docker run -d --name=${NAMESPACE} alpine:3.4 sleep 365d + + # Start starters + # arangodb/arangodb-starter 0.7.0 or higher is needed. + docker run -d --name=${STARTERCONTAINER} --net=container:${NAMESPACE} \ + -v ${STARTERVOLUME}:/data -v /var/run/docker.sock:/var/run/docker.sock $DOCKERARGS \ + ${STARTER} \ + --starter.port=7000 --starter.address=127.0.0.1 \ + --docker.image=${ARANGODB} \ + --starter.local --starter.mode=${STARTERMODE} $STARTERARGS +fi diff --git a/deps/github.com/arangodb/go-driver/test/cluster_test.go b/deps/github.com/arangodb/go-driver/test/cluster_test.go new file mode 100644 index 000000000..3a9a253ef --- /dev/null +++ b/deps/github.com/arangodb/go-driver/test/cluster_test.go @@ -0,0 +1,205 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "context" + "testing" + "time" + + driver "github.com/arangodb/go-driver" +) + +// TestClusterHealth tests the Cluster.Health method. +func TestClusterHealth(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + cl, err := c.Cluster(ctx) + if driver.IsPreconditionFailed(err) { + t.Skip("Not a cluster") + } else { + h, err := cl.Health(ctx) + if err != nil { + t.Fatalf("Health failed: %s", describe(err)) + } + if h.ID == "" { + t.Error("Expected cluster ID to be non-empty") + } + agents := 0 + dbservers := 0 + coordinators := 0 + for _, sh := range h.Health { + switch sh.Role { + case driver.ServerRoleAgent: + agents++ + case driver.ServerRoleDBServer: + dbservers++ + case driver.ServerRoleCoordinator: + coordinators++ + } + } + if agents == 0 { + t.Error("Expected at least 1 agent") + } + if dbservers == 0 { + t.Error("Expected at least 1 dbserver") + } + if coordinators == 0 { + t.Error("Expected at least 1 coordinator") + } + } +} + +// TestClusterDatabaseInventory tests the Cluster.DatabaseInventory method. 
+func TestClusterDatabaseInventory(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + cl, err := c.Cluster(ctx) + if driver.IsPreconditionFailed(err) { + t.Skip("Not a cluster") + } else { + db, err := c.Database(ctx, "_system") + if err != nil { + t.Fatalf("Failed to open _system database: %s", describe(err)) + } + h, err := cl.Health(ctx) + if err != nil { + t.Fatalf("Health failed: %s", describe(err)) + } + inv, err := cl.DatabaseInventory(ctx, db) + if err != nil { + t.Fatalf("DatabaseInventory failed: %s", describe(err)) + } + if len(inv.Collections) == 0 { + t.Error("Expected multiple collections, got 0") + } + for _, col := range inv.Collections { + if len(col.Parameters.Shards) == 0 { + t.Errorf("Expected 1 or more shards in collection %s, got 0", col.Parameters.Name) + } + for shardID, dbServers := range col.Parameters.Shards { + for _, serverID := range dbServers { + if _, found := h.Health[serverID]; !found { + t.Errorf("Unexpected dbserver ID for shard '%s': %s", shardID, serverID) + } + } + } + } + } +} + +// TestClusterMoveShard tests the Cluster.MoveShard method. 
+func TestClusterMoveShard(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + cl, err := c.Cluster(ctx) + if driver.IsPreconditionFailed(err) { + t.Skip("Not a cluster") + } else { + db, err := c.Database(ctx, "_system") + if err != nil { + t.Fatalf("Failed to open _system database: %s", describe(err)) + } + col, err := db.CreateCollection(ctx, "test_move_shard", &driver.CreateCollectionOptions{ + NumberOfShards: 12, + }) + if err != nil { + t.Fatalf("CreateCollection failed: %s", describe(err)) + } + h, err := cl.Health(ctx) + if err != nil { + t.Fatalf("Health failed: %s", describe(err)) + } + inv, err := cl.DatabaseInventory(ctx, db) + if err != nil { + t.Fatalf("DatabaseInventory failed: %s", describe(err)) + } + if len(inv.Collections) == 0 { + t.Error("Expected multiple collections, got 0") + } + var targetServerID driver.ServerID + for id, s := range h.Health { + if s.Role == driver.ServerRoleDBServer { + targetServerID = id + break + } + } + if len(targetServerID) == 0 { + t.Fatalf("Failed to find any dbserver") + } + movedShards := 0 + for _, colInv := range inv.Collections { + if colInv.Parameters.Name == col.Name() { + for shardID, dbServers := range colInv.Parameters.Shards { + if dbServers[0] != targetServerID { + movedShards++ + var rawResponse []byte + if err := cl.MoveShard(driver.WithRawResponse(ctx, &rawResponse), col, shardID, dbServers[0], targetServerID); err != nil { + t.Errorf("MoveShard for shard %s in collection %s failed: %s (raw response '%s' %x)", shardID, col.Name(), describe(err), string(rawResponse), rawResponse) + } + } + } + } + } + if movedShards == 0 { + t.Fatal("Expected to have moved at least 1 shard, all seem to be on target server already") + } + // Wait until all shards are on the targetServerID + start := time.Now() + maxTestTime := time.Minute + lastShardsNotOnTargetServerID := movedShards + for { + shardsNotOnTargetServerID := 0 + inv, err := cl.DatabaseInventory(ctx, db) + if err != nil { 
+ t.Errorf("DatabaseInventory failed: %s", describe(err)) + } else { + for _, colInv := range inv.Collections { + if colInv.Parameters.Name == col.Name() { + for shardID, dbServers := range colInv.Parameters.Shards { + if dbServers[0] != targetServerID { + shardsNotOnTargetServerID++ + t.Logf("Shard %s in on %s, wanted %s", shardID, dbServers[0], targetServerID) + } + } + } + } + } + if shardsNotOnTargetServerID == 0 { + // We're done + break + } + if shardsNotOnTargetServerID != lastShardsNotOnTargetServerID { + // Something changed, we give a bit more time + maxTestTime = maxTestTime + time.Second*15 + lastShardsNotOnTargetServerID = shardsNotOnTargetServerID + } + if time.Since(start) > maxTestTime { + t.Errorf("%d shards did not move within %s", shardsNotOnTargetServerID, maxTestTime) + break + } + t.Log("Waiting a bit") + time.Sleep(time.Second * 5) + } + } +} diff --git a/deps/github.com/arangodb/go-driver/test/collection_test.go b/deps/github.com/arangodb/go-driver/test/collection_test.go new file mode 100644 index 000000000..f2d6113cc --- /dev/null +++ b/deps/github.com/arangodb/go-driver/test/collection_test.go @@ -0,0 +1,408 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+//
+// Copyright holder is ArangoDB GmbH, Cologne, Germany
+//
+// Author Ewout Prangsma
+//
+
+package test
+
+import (
+	"context"
+	"fmt"
+	"testing"
+	"time"
+
+	driver "github.com/arangodb/go-driver"
+)
+
+// ensureCollection is a helper to check if a collection exists and create it if needed.
+// It will fail the test when an error occurs.
+func ensureCollection(ctx context.Context, db driver.Database, name string, options *driver.CreateCollectionOptions, t testEnv) driver.Collection {
+	c, err := db.Collection(ctx, name)
+	if driver.IsNotFound(err) {
+		c, err = db.CreateCollection(ctx, name, options)
+		if err != nil {
+			t.Fatalf("Failed to create collection '%s': %s", name, describe(err))
+		}
+	} else if err != nil {
+		t.Fatalf("Failed to open collection '%s': %s", name, describe(err))
+	}
+	return c
+}
+
+// assertCollection is a helper to check if a collection exists and fail if it does not.
+func assertCollection(ctx context.Context, db driver.Database, name string, t *testing.T) driver.Collection {
+	c, err := db.Collection(ctx, name)
+	if driver.IsNotFound(err) {
+		t.Fatalf("Collection '%s': does not exist", name)
+	} else if err != nil {
+		t.Fatalf("Failed to open collection '%s': %s", name, describe(err))
+	}
+	return c
+}
+
+// TestCreateCollection creates a collection and then checks that it exists.
+func TestCreateCollection(t *testing.T) {
+	c := createClientFromEnv(t, true)
+	db := ensureDatabase(nil, c, "collection_test", nil, t)
+	name := "test_create_collection"
+	if _, err := db.CreateCollection(nil, name, nil); err != nil {
+		t.Fatalf("Failed to create collection '%s': %s", name, describe(err))
+	}
+	// Collection must exist now
+	if found, err := db.CollectionExists(nil, name); err != nil {
+		t.Errorf("CollectionExists('%s') failed: %s", name, describe(err))
+	} else if !found {
+		t.Errorf("CollectionExists('%s') return false, expected true", name)
+	}
+}
+
+// TestRemoveCollection creates a collection and then removes it. 
+func TestRemoveCollection(t *testing.T) { + c := createClientFromEnv(t, true) + db := ensureDatabase(nil, c, "collection_test", nil, t) + name := "test_remove_collection" + col, err := db.CreateCollection(nil, name, nil) + if err != nil { + t.Fatalf("Failed to create collection '%s': %s", name, describe(err)) + } + // Collection must exist now + if found, err := db.CollectionExists(nil, name); err != nil { + t.Errorf("CollectionExists('%s') failed: %s", name, describe(err)) + } else if !found { + t.Errorf("CollectionExists('%s') return false, expected true", name) + } + // Now remove it + if err := col.Remove(nil); err != nil { + t.Fatalf("Failed to remove collection '%s': %s", name, describe(err)) + } + // Collection must not exist now + if found, err := db.CollectionExists(nil, name); err != nil { + t.Errorf("CollectionExists('%s') failed: %s", name, describe(err)) + } else if found { + t.Errorf("CollectionExists('%s') return true, expected false", name) + } +} + +// TestLoadUnloadCollection creates a collection and unloads, loads & unloads it. 
+func TestLoadUnloadCollection(t *testing.T) { + c := createClientFromEnv(t, true) + db := ensureDatabase(nil, c, "collection_test", nil, t) + name := "test_load_collection" + col, err := db.CreateCollection(nil, name, nil) + if err != nil { + t.Fatalf("Failed to create collection '%s': %s", name, describe(err)) + } + // Collection must be loaded + if status, err := col.Status(nil); err != nil { + t.Errorf("Status failed: %s", describe(err)) + } else if status != driver.CollectionStatusLoaded { + t.Errorf("Expected status loaded, got %v", status) + } + + // Unload the collection now + if err := col.Unload(nil); err != nil { + t.Errorf("Unload failed: %s", describe(err)) + } + + // Collection must be unloaded + deadline := time.Now().Add(time.Second * 15) + for { + if status, err := col.Status(nil); err != nil { + t.Fatalf("Status failed: %s", describe(err)) + } else if status != driver.CollectionStatusUnloaded { + if time.Now().After(deadline) { + t.Errorf("Expected status unloaded, got %v", status) + break + } else { + time.Sleep(time.Millisecond * 10) + } + } else { + break + } + } + + // Load the collection now + if err := col.Load(nil); err != nil { + t.Errorf("Load failed: %s", describe(err)) + } + + // Collection must be loaded + deadline = time.Now().Add(time.Second * 15) + for { + if status, err := col.Status(nil); err != nil { + t.Fatalf("Status failed: %s", describe(err)) + } else if status != driver.CollectionStatusLoaded { + if time.Now().After(deadline) { + t.Errorf("Expected status loaded, got %v", status) + break + } else { + time.Sleep(time.Millisecond * 10) + } + } else { + break + } + } +} + +// TestCollectionName creates a collection and checks its name +func TestCollectionName(t *testing.T) { + c := createClientFromEnv(t, true) + db := ensureDatabase(nil, c, "collection_test", nil, t) + name := "test_collection_name" + col, err := db.CreateCollection(nil, name, nil) + if err != nil { + t.Fatalf("Failed to create collection '%s': %s", name, 
describe(err)) + } + if col.Name() != name { + t.Errorf("Collection.Name() is wrong, got '%s', expected '%s'", col.Name(), name) + } +} + +// TestCollectionTruncate creates a collection, adds some documents and truncates it. +func TestCollectionTruncate(t *testing.T) { + c := createClientFromEnv(t, true) + db := ensureDatabase(nil, c, "collection_test", nil, t) + name := "test_collection_truncate" + col, err := db.CreateCollection(nil, name, nil) + if err != nil { + t.Fatalf("Failed to create collection '%s': %s", name, describe(err)) + } + + // create some documents + for i := 0; i < 10; i++ { + doc := Book{Title: fmt.Sprintf("Book %d", i)} + if _, err := col.CreateDocument(nil, doc); err != nil { + t.Fatalf("Failed to create document: %s", describe(err)) + } + } + + // count before truncation + if c, err := col.Count(nil); err != nil { + t.Errorf("Failed to count documents: %s", describe(err)) + } else if c != 10 { + t.Errorf("Expected 10 documents, got %d", c) + } + + // Truncate collection + if err := col.Truncate(nil); err != nil { + t.Errorf("Failed to truncate collection: %s", describe(err)) + } + + // count after truncation + if c, err := col.Count(nil); err != nil { + t.Errorf("Failed to count documents: %s", describe(err)) + } else if c != 0 { + t.Errorf("Expected 0 documents, got %d", c) + } +} + +// TestCollectionProperties creates a collection and checks its properties +func TestCollectionProperties(t *testing.T) { + c := createClientFromEnv(t, true) + db := ensureDatabase(nil, c, "collection_test", nil, t) + name := "test_collection_properties" + col, err := db.CreateCollection(nil, name, nil) + if err != nil { + t.Fatalf("Failed to create collection '%s': %s", name, describe(err)) + } + if p, err := col.Properties(nil); err != nil { + t.Errorf("Failed to fetch collection properties: %s", describe(err)) + } else { + if p.ID == "" { + t.Errorf("Got empty collection ID") + } + if p.Name != name { + t.Errorf("Expected name '%s', got '%s'", name, p.Name) 
+ } + if p.Type != driver.CollectionTypeDocument { + t.Errorf("Expected type %d, got %d", driver.CollectionTypeDocument, p.Type) + } + } +} + +// TestCollectionSetProperties creates a collection and modifies its properties +func TestCollectionSetProperties(t *testing.T) { + c := createClientFromEnv(t, true) + db := ensureDatabase(nil, c, "collection_test", nil, t) + name := "test_collection_set_properties" + col, err := db.CreateCollection(nil, name, nil) + if err != nil { + t.Fatalf("Failed to create collection '%s': %s", name, describe(err)) + } + + // Set WaitForSync to false + waitForSync := false + if err := col.SetProperties(nil, driver.SetCollectionPropertiesOptions{WaitForSync: &waitForSync}); err != nil { + t.Fatalf("Failed to set properties: %s", describe(err)) + } + if p, err := col.Properties(nil); err != nil { + t.Errorf("Failed to fetch collection properties: %s", describe(err)) + } else { + if p.WaitForSync != waitForSync { + t.Errorf("Expected WaitForSync %v, got %v", waitForSync, p.WaitForSync) + } + } + + // Set WaitForSync to true + waitForSync = true + if err := col.SetProperties(nil, driver.SetCollectionPropertiesOptions{WaitForSync: &waitForSync}); err != nil { + t.Fatalf("Failed to set properties: %s", describe(err)) + } + if p, err := col.Properties(nil); err != nil { + t.Errorf("Failed to fetch collection properties: %s", describe(err)) + } else { + if p.WaitForSync != waitForSync { + t.Errorf("Expected WaitForSync %v, got %v", waitForSync, p.WaitForSync) + } + } + + // Query engine info (on rocksdb, JournalSize is always 0) + info, err := db.EngineInfo(nil) + if err != nil { + t.Fatalf("Failed to get engine info: %s", describe(err)) + } + + if info.Type == driver.EngineTypeMMFiles { + // Set JournalSize + journalSize := int64(1048576 * 17) + if err := col.SetProperties(nil, driver.SetCollectionPropertiesOptions{JournalSize: journalSize}); err != nil { + t.Fatalf("Failed to set properties: %s", describe(err)) + } + if p, err := 
col.Properties(nil); err != nil { + t.Errorf("Failed to fetch collection properties: %s", describe(err)) + } else { + if p.JournalSize != journalSize { + t.Errorf("Expected JournalSize %v, got %v", journalSize, p.JournalSize) + } + } + + // Set JournalSize again + journalSize = int64(1048576 * 21) + if err := col.SetProperties(nil, driver.SetCollectionPropertiesOptions{JournalSize: journalSize}); err != nil { + t.Fatalf("Failed to set properties: %s", describe(err)) + } + if p, err := col.Properties(nil); err != nil { + t.Errorf("Failed to fetch collection properties: %s", describe(err)) + } else { + if p.JournalSize != journalSize { + t.Errorf("Expected JournalSize %v, got %v", journalSize, p.JournalSize) + } + } + } else { + t.Skipf("JournalSize tests are being skipped on engine type '%s'", info.Type) + } + + // Test replication factor + if _, err := c.Cluster(nil); err == nil { + // Set ReplicationFactor to 2 + replFact := 2 + ctx := driver.WithEnforceReplicationFactor(context.Background(), false) + if err := col.SetProperties(ctx, driver.SetCollectionPropertiesOptions{ReplicationFactor: replFact}); err != nil { + t.Fatalf("Failed to set properties: %s", describe(err)) + } + if p, err := col.Properties(nil); err != nil { + t.Errorf("Failed to fetch collection properties: %s", describe(err)) + } else { + if p.ReplicationFactor != replFact { + t.Errorf("Expected ReplicationFactor %d, got %d", replFact, p.ReplicationFactor) + } + } + + // Set ReplicationFactor back 1 + replFact = 1 + if err := col.SetProperties(ctx, driver.SetCollectionPropertiesOptions{ReplicationFactor: replFact}); err != nil { + t.Fatalf("Failed to set properties: %s", describe(err)) + } + if p, err := col.Properties(nil); err != nil { + t.Errorf("Failed to fetch collection properties: %s", describe(err)) + } else { + if p.ReplicationFactor != replFact { + t.Errorf("Expected ReplicationFactor %d, got %d", replFact, p.ReplicationFactor) + } + } + } else if driver.IsPreconditionFailed(err) { + 
t.Logf("ReplicationFactor tests skipped because we're not running in a cluster") + } else { + t.Errorf("Cluster failed: %s", describe(err)) + } +} + +// TestCollectionRevision creates a collection, checks revision after adding documents. +func TestCollectionRevision(t *testing.T) { + c := createClientFromEnv(t, true) + db := ensureDatabase(nil, c, "collection_test", nil, t) + name := "test_collection_revision" + col, err := db.CreateCollection(nil, name, nil) + if err != nil { + t.Fatalf("Failed to create collection '%s': %s", name, describe(err)) + } + + // create some documents + for i := 0; i < 10; i++ { + before, err := col.Revision(nil) + if err != nil { + t.Fatalf("Failed to fetch before revision: %s", describe(err)) + } + doc := Book{Title: fmt.Sprintf("Book %d", i)} + if _, err := col.CreateDocument(nil, doc); err != nil { + t.Fatalf("Failed to create document: %s", describe(err)) + } + after, err := col.Revision(nil) + if err != nil { + t.Fatalf("Failed to fetch after revision: %s", describe(err)) + } + if before == after { + t.Errorf("Expected revision before, after to be different. Got '%s', '%s'", before, after) + } + } +} + +// TestCollectionStatistics creates a collection, checks statistics after adding documents. 
+func TestCollectionStatistics(t *testing.T) { + c := createClientFromEnv(t, true) + db := ensureDatabase(nil, c, "collection_test", nil, t) + name := "test_collection_statistics" + col, err := db.CreateCollection(nil, name, nil) + if err != nil { + t.Fatalf("Failed to create collection '%s': %s", name, describe(err)) + } + + // create some documents + for i := 0; i < 10; i++ { + before, err := col.Statistics(nil) + if err != nil { + t.Fatalf("Failed to fetch before statistics: %s", describe(err)) + } + doc := Book{Title: fmt.Sprintf("Book %d", i)} + if _, err := col.CreateDocument(nil, doc); err != nil { + t.Fatalf("Failed to create document: %s", describe(err)) + } + after, err := col.Statistics(nil) + if err != nil { + t.Fatalf("Failed to fetch after statistics: %s", describe(err)) + } + if before.Count+1 != after.Count { + t.Errorf("Expected Count before, after to be 1 different. Got %d, %d", before.Count, after.Count) + } + if before.Figures.DataFiles.FileSize > after.Figures.DataFiles.FileSize { + t.Errorf("Expected DataFiles.FileSize before <= after. Got %d, %d", before.Figures.DataFiles.FileSize, after.Figures.DataFiles.FileSize) + } + } +} diff --git a/deps/github.com/arangodb/go-driver/test/context_test.go b/deps/github.com/arangodb/go-driver/test/context_test.go new file mode 100644 index 000000000..b7183e8c1 --- /dev/null +++ b/deps/github.com/arangodb/go-driver/test/context_test.go @@ -0,0 +1,49 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "context" + "testing" + + driver "github.com/arangodb/go-driver" +) + +// TestContextParentNil calls all WithXyz context functions with a nil parent context. +// This must not crash. +func TestContextParentNil(t *testing.T) { + testValue := func(ctx context.Context) { + ctx.Value("foo") + } + + testValue(driver.WithRevision(nil, "rev")) + testValue(driver.WithRevisions(nil, []string{"rev1", "rev2"})) + testValue(driver.WithReturnNew(nil, make(map[string]interface{}))) + testValue(driver.WithReturnOld(nil, make(map[string]interface{}))) + testValue(driver.WithDetails(nil)) + testValue(driver.WithKeepNull(nil, false)) + testValue(driver.WithMergeObjects(nil, true)) + testValue(driver.WithSilent(nil)) + testValue(driver.WithWaitForSync(nil)) + testValue(driver.WithRawResponse(nil, &[]byte{})) +} diff --git a/deps/github.com/arangodb/go-driver/test/cursor_test.go b/deps/github.com/arangodb/go-driver/test/cursor_test.go new file mode 100644 index 000000000..7fc86bfcd --- /dev/null +++ b/deps/github.com/arangodb/go-driver/test/cursor_test.go @@ -0,0 +1,230 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "context" + "reflect" + "testing" + "time" + + driver "github.com/arangodb/go-driver" +) + +type queryTest struct { + Query string + BindVars map[string]interface{} + ExpectSuccess bool + ExpectedDocuments []interface{} + DocumentType reflect.Type +} + +type queryTestContext struct { + Context context.Context + ExpectCount bool +} + +// TestCreateCursor creates several cursors. +func TestCreateCursor(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "cursor_test", nil, t) + + // Create data set + collectionData := map[string][]interface{}{ + "books": []interface{}{ + Book{Title: "Book 01"}, + Book{Title: "Book 02"}, + Book{Title: "Book 03"}, + Book{Title: "Book 04"}, + Book{Title: "Book 05"}, + Book{Title: "Book 06"}, + Book{Title: "Book 07"}, + Book{Title: "Book 08"}, + Book{Title: "Book 09"}, + Book{Title: "Book 10"}, + Book{Title: "Book 11"}, + Book{Title: "Book 12"}, + Book{Title: "Book 13"}, + Book{Title: "Book 14"}, + Book{Title: "Book 15"}, + Book{Title: "Book 16"}, + Book{Title: "Book 17"}, + Book{Title: "Book 18"}, + Book{Title: "Book 19"}, + Book{Title: "Book 20"}, + }, + "users": []interface{}{ + UserDoc{Name: "John", Age: 13}, + UserDoc{Name: "Jake", Age: 25}, + UserDoc{Name: "Clair", Age: 12}, + UserDoc{Name: "Johnny", Age: 42}, + UserDoc{Name: "Blair", Age: 67}, + UserDoc{Name: "Zz", Age: 12}, + }, + } + for colName, colDocs := range collectionData { + col := ensureCollection(ctx, db, colName, nil, t) + if _, _, err := col.CreateDocuments(ctx, colDocs); err != nil { + t.Fatalf("Expected success, got %s", describe(err)) + } + } + + // Setup tests + tests := []queryTest{ + queryTest{ + Query: "FOR d IN books SORT d.Title RETURN d", + ExpectSuccess: true, + ExpectedDocuments: collectionData["books"], + DocumentType: reflect.TypeOf(Book{}), + }, + queryTest{ + Query: "FOR 
d IN books FILTER d.Title==@title SORT d.Title RETURN d", + BindVars: map[string]interface{}{"title": "Book 02"}, + ExpectSuccess: true, + ExpectedDocuments: []interface{}{collectionData["books"][1]}, + DocumentType: reflect.TypeOf(Book{}), + }, + queryTest{ + Query: "FOR d IN books FILTER d.Title==@title SORT d.Title RETURN d", + BindVars: map[string]interface{}{"somethingelse": "Book 02"}, + ExpectSuccess: false, // Unknown `@title` + }, + queryTest{ + Query: "FOR u IN users FILTER u.age>100 SORT u.name RETURN u", + ExpectSuccess: true, + ExpectedDocuments: []interface{}{}, + DocumentType: reflect.TypeOf(UserDoc{}), + }, + queryTest{ + Query: "FOR u IN users FILTER u.age<@maxAge SORT u.name RETURN u", + BindVars: map[string]interface{}{"maxAge": 20}, + ExpectSuccess: true, + ExpectedDocuments: []interface{}{collectionData["users"][2], collectionData["users"][0], collectionData["users"][5]}, + DocumentType: reflect.TypeOf(UserDoc{}), + }, + queryTest{ + Query: "FOR u IN users FILTER u.age<@maxAge SORT u.name RETURN u", + BindVars: map[string]interface{}{"maxage": 20}, + ExpectSuccess: false, // `@maxage` versus `@maxAge` + }, + queryTest{ + Query: "FOR u IN users SORT u.age RETURN u.age", + ExpectedDocuments: []interface{}{12, 12, 13, 25, 42, 67}, + DocumentType: reflect.TypeOf(12), + ExpectSuccess: true, + }, + queryTest{ + Query: "FOR p IN users COLLECT a = p.age WITH COUNT INTO c SORT a RETURN [a, c]", + ExpectedDocuments: []interface{}{[]int{12, 2}, []int{13, 1}, []int{25, 1}, []int{42, 1}, []int{67, 1}}, + DocumentType: reflect.TypeOf([]int{}), + ExpectSuccess: true, + }, + queryTest{ + Query: "FOR u IN users SORT u.name RETURN u.name", + ExpectedDocuments: []interface{}{"Blair", "Clair", "Jake", "John", "Johnny", "Zz"}, + DocumentType: reflect.TypeOf("foo"), + ExpectSuccess: true, + }, + } + + // Setup context alternatives + contexts := []queryTestContext{ + queryTestContext{nil, false}, + queryTestContext{context.Background(), false}, + 
queryTestContext{driver.WithQueryCount(nil), true}, + queryTestContext{driver.WithQueryCount(nil, true), true}, + queryTestContext{driver.WithQueryCount(nil, false), false}, + queryTestContext{driver.WithQueryBatchSize(nil, 1), false}, + queryTestContext{driver.WithQueryCache(nil), false}, + queryTestContext{driver.WithQueryCache(nil, true), false}, + queryTestContext{driver.WithQueryCache(nil, false), false}, + queryTestContext{driver.WithQueryMemoryLimit(nil, 60000), false}, + queryTestContext{driver.WithQueryTTL(nil, time.Minute), false}, + queryTestContext{driver.WithQueryBatchSize(driver.WithQueryCount(nil), 1), true}, + queryTestContext{driver.WithQueryCache(driver.WithQueryCount(driver.WithQueryBatchSize(nil, 2))), true}, + } + + // Run tests for every context alternative + for _, qctx := range contexts { + ctx := qctx.Context + for i, test := range tests { + cursor, err := db.Query(ctx, test.Query, test.BindVars) + if err == nil { + // Close upon exit of the function + defer cursor.Close() + } + if test.ExpectSuccess { + if err != nil { + t.Errorf("Expected success in query %d (%s), got '%s'", i, test.Query, describe(err)) + continue + } + count := cursor.Count() + if qctx.ExpectCount { + if count != int64(len(test.ExpectedDocuments)) { + t.Errorf("Expected count of %d, got %d in query %d (%s)", len(test.ExpectedDocuments), count, i, test.Query) + } + } else { + if count != 0 { + t.Errorf("Expected count of 0, got %d in query %d (%s)", count, i, test.Query) + } + } + var result []interface{} + for { + hasMore := cursor.HasMore() + doc := reflect.New(test.DocumentType) + if _, err := cursor.ReadDocument(ctx, doc.Interface()); driver.IsNoMoreDocuments(err) { + if hasMore { + t.Error("HasMore returned true, but ReadDocument returns a IsNoMoreDocuments error") + } + break + } else if err != nil { + t.Errorf("Failed to result document %d: %s", len(result), describe(err)) + } + if !hasMore { + t.Error("HasMore returned false, but ReadDocument returns a document") 
+ } + result = append(result, doc.Elem().Interface()) + } + if len(result) != len(test.ExpectedDocuments) { + t.Errorf("Expected %d documents, got %d in query %d (%s)", len(test.ExpectedDocuments), len(result), i, test.Query) + } else { + for resultIdx, resultDoc := range result { + if !reflect.DeepEqual(resultDoc, test.ExpectedDocuments[resultIdx]) { + t.Errorf("Unexpected document in query %d (%s) at index %d: got %+v, expected %+v", i, test.Query, resultIdx, resultDoc, test.ExpectedDocuments[resultIdx]) + } + } + } + // Close anyway (this tests calling Close more than once) + if err := cursor.Close(); err != nil { + t.Errorf("Expected success in Close of cursor from query %d (%s), got '%s'", i, test.Query, describe(err)) + } + } else { + if err == nil { + t.Errorf("Expected error in query %d (%s), got '%s'", i, test.Query, describe(err)) + continue + } + } + } + } +} diff --git a/deps/github.com/arangodb/go-driver/test/database_test.go b/deps/github.com/arangodb/go-driver/test/database_test.go new file mode 100644 index 000000000..204a1a1e8 --- /dev/null +++ b/deps/github.com/arangodb/go-driver/test/database_test.go @@ -0,0 +1,138 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "context" + "testing" + + driver "github.com/arangodb/go-driver" +) + +// ensureDatabase is a helper to check if a database exists and create it if needed. +// It will fail the test when an error occurs. +func ensureDatabase(ctx context.Context, c driver.Client, name string, options *driver.CreateDatabaseOptions, t testEnv) driver.Database { + db, err := c.Database(ctx, name) + if driver.IsNotFound(err) { + db, err = c.CreateDatabase(ctx, name, options) + if err != nil { + if driver.IsConflict(err) { + t.Fatalf("Failed to create database (conflict) '%s': %s %#v", name, describe(err), err) + } else { + t.Fatalf("Failed to create database '%s': %s %#v", name, describe(err), err) + } + } + } else if err != nil { + t.Fatalf("Failed to open database '%s': %s", name, describe(err)) + } + return db +} + +// TestCreateDatabase creates a database and then checks that it exists. +func TestCreateDatabase(t *testing.T) { + c := createClientFromEnv(t, true) + name := "create_test1" + if _, err := c.CreateDatabase(nil, name, nil); err != nil { + t.Fatalf("Failed to create database '%s': %s", name, describe(err)) + } + // Database must exist now + if found, err := c.DatabaseExists(nil, name); err != nil { + t.Errorf("DatabaseExists('%s') failed: %s", name, describe(err)) + } else if !found { + t.Errorf("DatabaseExists('%s') return false, expected true", name) + } +} + +// TestRemoveDatabase creates a database and then removes it. 
+func TestRemoveDatabase(t *testing.T) { + c := createClientFromEnv(t, true) + name := "remove_test1" + d, err := c.CreateDatabase(nil, name, nil) + if err != nil { + t.Fatalf("Failed to create database '%s': %s", name, describe(err)) + } + // Database must exist now + if found, err := c.DatabaseExists(nil, name); err != nil { + t.Errorf("DatabaseExists('%s') failed: %s", name, describe(err)) + } else if !found { + t.Errorf("DatabaseExists('%s') return false, expected true", name) + } + + // Remove database + if err := d.Remove(context.Background()); err != nil { + t.Fatalf("Failed to remove database: %s", describe(err)) + } + + // Database must not exist now + if found, err := c.DatabaseExists(nil, name); err != nil { + t.Errorf("DatabaseExists('%s') failed: %s", name, describe(err)) + } else if found { + t.Errorf("DatabaseExists('%s') return true, expected false", name) + } +} + +// TestDatabaseInfo tests Database.Info. +func TestDatabaseInfo(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + + // Test system DB + db := ensureDatabase(ctx, c, "_system", nil, t) + info, err := db.Info(ctx) + if err != nil { + t.Fatalf("Failed to get _system database info: %s", describe(err)) + } + if info.Name != "_system" { + t.Errorf("Invalid Name. Got '%s', expected '_system'", info.Name) + } + if !info.IsSystem { + t.Error("Invalid IsSystem. Got false, expected true") + } + if info.ID == "" { + t.Error("Empty ID") + } + + name := "info_test" + d, err := c.CreateDatabase(ctx, name, nil) + if err != nil { + t.Fatalf("Failed to create database '%s': %s", name, describe(err)) + } + info, err = d.Info(ctx) + if err != nil { + t.Fatalf("Failed to get %s database info: %s", name, describe(err)) + } + if info.Name != name { + t.Errorf("Invalid Name. Got '%s', expected '%s'", info.Name, name) + } + if info.IsSystem { + t.Error("Invalid IsSystem. 
Got true, expected false") + } + if info.ID == "" { + t.Error("Empty ID") + } + + // Cleanup: Remove database + if err := d.Remove(context.Background()); err != nil { + t.Fatalf("Failed to remove database: %s", describe(err)) + } +} diff --git a/deps/github.com/arangodb/go-driver/test/database_transaction_test.go b/deps/github.com/arangodb/go-driver/test/database_transaction_test.go new file mode 100644 index 000000000..be5a2e9dc --- /dev/null +++ b/deps/github.com/arangodb/go-driver/test/database_transaction_test.go @@ -0,0 +1,40 @@ +package test + +import ( + "fmt" + "reflect" + "testing" + + driver "github.com/arangodb/go-driver" +) + +func TestDatabaseTransaction(t *testing.T) { + c := createClientFromEnv(t, true) + skipBelowVersion(c, "3.2", t) + db := ensureDatabase(nil, c, "transaction_test", nil, t) + + testCases := []struct { + name string + action string + options *driver.TransactionOptions + expectResult interface{} + expectError error + }{ + {"ReturnValue", "function () { return 'worked!'; }", nil, "worked!", nil}, + {"ReturnError", "function () { error error; }", nil, nil, fmt.Errorf("missing/invalid action definition for transaction - Uncaught SyntaxError: Unexpected identifier - SyntaxError: Unexpected identifier\n at new Function ()")}, + } + + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + result, err := db.Transaction(nil, testCase.action, testCase.options) + if !reflect.DeepEqual(testCase.expectResult, result) { + t.Errorf("expected result %v, got %v", testCase.expectResult, result) + } + if testCase.expectError != nil { + if testCase.expectError.Error() != err.Error() { + t.Errorf("expected error %v, got %v", testCase.expectError.Error(), err.Error()) + } + } + }) + } +} diff --git a/deps/github.com/arangodb/go-driver/test/document_create_test.go b/deps/github.com/arangodb/go-driver/test/document_create_test.go new file mode 100644 index 000000000..3d5b2e178 --- /dev/null +++ 
b/deps/github.com/arangodb/go-driver/test/document_create_test.go @@ -0,0 +1,158 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "context" + "reflect" + "testing" + + driver "github.com/arangodb/go-driver" +) + +// createDocument creates a document in the given collection, failing the test on error. +func createDocument(ctx context.Context, col driver.Collection, document interface{}, t *testing.T) driver.DocumentMeta { + meta, err := col.CreateDocument(ctx, document) + if err != nil { + t.Fatalf("Failed to create document: %s", describe(err)) + } + return meta +} + +// TestCreateDocument creates a document and then checks that it exists. 
+func TestCreateDocument(t *testing.T) { + c := createClientFromEnv(t, true) + db := ensureDatabase(nil, c, "document_test", nil, t) + col := ensureCollection(nil, db, "document_test", nil, t) + doc := UserDoc{ + "Jan", + 40, + } + meta, err := col.CreateDocument(nil, doc) + if err != nil { + t.Fatalf("Failed to create new document: %s", describe(err)) + } + // Document must exists now + if found, err := col.DocumentExists(nil, meta.Key); err != nil { + t.Fatalf("DocumentExists failed for '%s': %s", meta.Key, describe(err)) + } else if !found { + t.Errorf("DocumentExists returned false for '%s', expected true", meta.Key) + } + // Read document + var readDoc UserDoc + if _, err := col.ReadDocument(nil, meta.Key, &readDoc); err != nil { + t.Fatalf("Failed to read document '%s': %s", meta.Key, describe(err)) + } + if !reflect.DeepEqual(doc, readDoc) { + t.Errorf("Got wrong document. Expected %+v, got %+v", doc, readDoc) + } +} + +// TestCreateDocumentWithKey creates a document with given key and then checks that it exists. +func TestCreateDocumentWithKey(t *testing.T) { + c := createClientFromEnv(t, true) + db := ensureDatabase(nil, c, "document_test", nil, t) + col := ensureCollection(nil, db, "document_withKey_test", nil, t) + doc := UserDocWithKey{ + "jan", + "Jan", + 40, + } + meta, err := col.CreateDocument(nil, doc) + if err != nil { + t.Fatalf("Failed to create new document: %s", describe(err)) + } + // Key must be given key + if meta.Key != doc.Key { + t.Errorf("Expected key to be '%s', got '%s'", doc.Key, meta.Key) + } + // Document must exists now + var readDoc UserDocWithKey + if _, err := col.ReadDocument(nil, meta.Key, &readDoc); err != nil { + t.Fatalf("Failed to read document '%s': %s", meta.Key, describe(err)) + } + if !reflect.DeepEqual(doc, readDoc) { + t.Errorf("Got wrong document. Expected %+v, got %+v", doc, readDoc) + } + + // Retry creating the document with same key. This must fail. 
+	if _, err := col.CreateDocument(nil, doc); !driver.IsConflict(err) {
+		t.Fatalf("Expected ConflictError, got %s", describe(err))
+	}
+}
+
+// TestCreateDocumentReturnNew creates a document and checks the document returned in ReturnNew.
+func TestCreateDocumentReturnNew(t *testing.T) {
+	ctx := context.Background()
+	c := createClientFromEnv(t, true)
+	db := ensureDatabase(ctx, c, "document_test", nil, t)
+	col := ensureCollection(ctx, db, "document_test", nil, t)
+	doc := UserDoc{
+		"JanNew",
+		1,
+	}
+	var newDoc UserDoc
+	meta, err := col.CreateDocument(driver.WithReturnNew(ctx, &newDoc), doc)
+	if err != nil {
+		t.Fatalf("Failed to create new document: %s", describe(err))
+	}
+	// NewDoc must equal doc
+	if !reflect.DeepEqual(doc, newDoc) {
+		t.Errorf("Got wrong ReturnNew document. Expected %+v, got %+v", doc, newDoc)
+	}
+	// Document must exist now
+	var readDoc UserDoc
+	if _, err := col.ReadDocument(ctx, meta.Key, &readDoc); err != nil {
+		t.Fatalf("Failed to read document '%s': %s", meta.Key, describe(err))
+	}
+	if !reflect.DeepEqual(doc, readDoc) {
+		t.Errorf("Got wrong document. Expected %+v, got %+v", doc, readDoc)
+	}
+}
+
+// TestCreateDocumentSilent creates a document with WithSilent.
+func TestCreateDocumentSilent(t *testing.T) {
+	ctx := context.Background()
+	c := createClientFromEnv(t, true)
+	db := ensureDatabase(ctx, c, "document_test", nil, t)
+	col := ensureCollection(ctx, db, "document_test", nil, t)
+	doc := UserDoc{
+		"Sjjjj",
+		1,
+	}
+	if meta, err := col.CreateDocument(driver.WithSilent(ctx), doc); err != nil {
+		t.Fatalf("Failed to create new document: %s", describe(err))
+	} else if meta.Key != "" {
+		t.Errorf("Expected empty meta, got %v", meta)
+	}
+}
+
+// TestCreateDocumentNil creates a document with a nil document.
+func TestCreateDocumentNil(t *testing.T) {
+	c := createClientFromEnv(t, true)
+	db := ensureDatabase(nil, c, "document_test", nil, t)
+	col := ensureCollection(nil, db, "document_test", nil, t)
+	if _, err := col.CreateDocument(nil, nil); !driver.IsInvalidArgument(err) {
+		t.Fatalf("Expected InvalidArgumentError, got %s", describe(err))
+	}
+}
diff --git a/deps/github.com/arangodb/go-driver/test/document_remove_test.go b/deps/github.com/arangodb/go-driver/test/document_remove_test.go
new file mode 100644
index 000000000..8b3b227d1
--- /dev/null
+++ b/deps/github.com/arangodb/go-driver/test/document_remove_test.go
@@ -0,0 +1,171 @@
+//
+// DISCLAIMER
+//
+// Copyright 2017 ArangoDB GmbH, Cologne, Germany
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Copyright holder is ArangoDB GmbH, Cologne, Germany
+//
+// Author Ewout Prangsma
+//
+
+package test
+
+import (
+	"context"
+	"reflect"
+	"testing"
+
+	driver "github.com/arangodb/go-driver"
+)
+
+// TestRemoveDocument creates a document, removes it and then checks the removal has succeeded.
+func TestRemoveDocument(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "document_test", nil, t) + col := ensureCollection(ctx, db, "document_test", nil, t) + doc := UserDoc{ + "Piere", + 23, + } + meta, err := col.CreateDocument(ctx, doc) + if err != nil { + t.Fatalf("Failed to create new document: %s", describe(err)) + } + if _, err := col.RemoveDocument(ctx, meta.Key); err != nil { + t.Fatalf("Failed to remove document '%s': %s", meta.Key, describe(err)) + } + // Should no longer exist + var readDoc Account + if _, err := col.ReadDocument(ctx, meta.Key, &readDoc); !driver.IsNotFound(err) { + t.Fatalf("Expected NotFoundError, got %s", describe(err)) + } + // Document must no longer exist + if found, err := col.DocumentExists(ctx, meta.Key); err != nil { + t.Fatalf("DocumentExists failed for '%s': %s", meta.Key, describe(err)) + } else if found { + t.Errorf("DocumentExists returned true for '%s', expected false", meta.Key) + } +} + +// TestRemoveDocumentReturnOld creates a document, removes it checks the ReturnOld value. +func TestRemoveDocumentReturnOld(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "document_test", nil, t) + col := ensureCollection(ctx, db, "document_test", nil, t) + doc := UserDoc{ + "Tim", + 27, + } + meta, err := col.CreateDocument(ctx, doc) + if err != nil { + t.Fatalf("Failed to create new document: %s", describe(err)) + } + var old UserDoc + ctx = driver.WithReturnOld(ctx, &old) + if _, err := col.RemoveDocument(ctx, meta.Key); err != nil { + t.Fatalf("Failed to remove document '%s': %s", meta.Key, describe(err)) + } + // Check old document + if !reflect.DeepEqual(doc, old) { + t.Errorf("Got wrong document. 
Expected %+v, got %+v", doc, old) + } + // Should not longer exist + var readDoc Account + if _, err := col.ReadDocument(ctx, meta.Key, &readDoc); !driver.IsNotFound(err) { + t.Fatalf("Expected NotFoundError, got %s", describe(err)) + } +} + +// TestRemoveDocumentSilent creates a document, removes it with Silent() and then checks the meta is indeed empty. +func TestRemoveDocumentSilent(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "document_test", nil, t) + col := ensureCollection(ctx, db, "document_test", nil, t) + doc := UserDoc{ + "Angela", + 91, + } + meta, err := col.CreateDocument(ctx, doc) + if err != nil { + t.Fatalf("Failed to create new document: %s", describe(err)) + } + ctx = driver.WithSilent(ctx) + if rmeta, err := col.RemoveDocument(ctx, meta.Key); err != nil { + t.Fatalf("Failed to remove document '%s': %s", meta.Key, describe(err)) + } else if rmeta.Key != "" { + t.Errorf("Expected empty meta, got %v", rmeta) + } + // Should not longer exist + var readDoc Account + if _, err := col.ReadDocument(ctx, meta.Key, &readDoc); !driver.IsNotFound(err) { + t.Fatalf("Expected NotFoundError, got %s", describe(err)) + } +} + +// TestRemoveDocumentRevision creates a document, removes it with an incorrect revision. 
+func TestRemoveDocumentRevision(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "document_test", nil, t) + col := ensureCollection(ctx, db, "document_test", nil, t) + doc := UserDoc{ + "DryLake", + 91, + } + meta, err := col.CreateDocument(ctx, doc) + if err != nil { + t.Fatalf("Failed to create new document: %s", describe(err)) + } + + // Replace the document to get another revision + replacement := Book{ + Title: "Jungle book", + } + meta2, err := col.ReplaceDocument(ctx, meta.Key, replacement) + if err != nil { + t.Fatalf("Failed to replace document '%s': %s", meta.Key, describe(err)) + } + + // Try to remove document with initial revision (must fail) + initialRevCtx := driver.WithRevision(ctx, meta.Rev) + if _, err := col.RemoveDocument(initialRevCtx, meta.Key); !driver.IsPreconditionFailed(err) { + t.Fatalf("Expected PreconditionFailedError, got %s", describe(err)) + } + + // Try to remove document with correct revision (must succeed) + replacedRevCtx := driver.WithRevision(ctx, meta2.Rev) + if _, err := col.RemoveDocument(replacedRevCtx, meta.Key); err != nil { + t.Fatalf("Expected success, got %s", describe(err)) + } + + // Should not longer exist + var readDoc Account + if _, err := col.ReadDocument(ctx, meta.Key, &readDoc); !driver.IsNotFound(err) { + t.Fatalf("Expected NotFoundError, got %s", describe(err)) + } +} + +// TestRemoveDocumentKeyEmpty removes a document it with an empty key. 
+func TestRemoveDocumentKeyEmpty(t *testing.T) { + c := createClientFromEnv(t, true) + db := ensureDatabase(nil, c, "document_test", nil, t) + col := ensureCollection(nil, db, "document_test", nil, t) + if _, err := col.RemoveDocument(nil, ""); !driver.IsInvalidArgument(err) { + t.Errorf("Expected InvalidArgumentError, got %s", describe(err)) + } +} diff --git a/deps/github.com/arangodb/go-driver/test/document_replace_test.go b/deps/github.com/arangodb/go-driver/test/document_replace_test.go new file mode 100644 index 000000000..f6d7bbeb9 --- /dev/null +++ b/deps/github.com/arangodb/go-driver/test/document_replace_test.go @@ -0,0 +1,216 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "context" + "reflect" + "testing" + + driver "github.com/arangodb/go-driver" +) + +// TestReplaceDocument creates a document, replaces it and then checks the replacement has succeeded. 
+func TestReplaceDocument(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "document_test", nil, t) + col := ensureCollection(ctx, db, "document_test", nil, t) + doc := UserDoc{ + "Piere", + 23, + } + meta, err := col.CreateDocument(ctx, doc) + if err != nil { + t.Fatalf("Failed to create new document: %s", describe(err)) + } + // Replacement doc + replacement := Account{ + ID: "foo", + User: &UserDoc{}, + } + if _, err := col.ReplaceDocument(ctx, meta.Key, replacement); err != nil { + t.Fatalf("Failed to replace document '%s': %s", meta.Key, describe(err)) + } + // Read replaces document + var readDoc Account + if _, err := col.ReadDocument(ctx, meta.Key, &readDoc); err != nil { + t.Fatalf("Failed to read document '%s': %s", meta.Key, describe(err)) + } + if !reflect.DeepEqual(replacement, readDoc) { + t.Errorf("Got wrong document. Expected %+v, got %+v", replacement, readDoc) + } +} + +// TestReplaceDocumentReturnOld creates a document, replaces it checks the ReturnOld value. +func TestReplaceDocumentReturnOld(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "document_test", nil, t) + col := ensureCollection(ctx, db, "document_test", nil, t) + doc := UserDoc{ + "Tim", + 27, + } + meta, err := col.CreateDocument(ctx, doc) + if err != nil { + t.Fatalf("Failed to create new document: %s", describe(err)) + } + // Replace document + replacement := Book{ + Title: "Golang 1.8", + } + var old UserDoc + ctx = driver.WithReturnOld(ctx, &old) + if _, err := col.ReplaceDocument(ctx, meta.Key, replacement); err != nil { + t.Fatalf("Failed to replace document '%s': %s", meta.Key, describe(err)) + } + // Check old document + if !reflect.DeepEqual(doc, old) { + t.Errorf("Got wrong document. Expected %+v, got %+v", doc, old) + } +} + +// TestReplaceDocumentReturnNew creates a document, replaces it checks the ReturnNew value. 
+func TestReplaceDocumentReturnNew(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "document_test", nil, t) + col := ensureCollection(ctx, db, "document_test", nil, t) + doc := UserDoc{ + "Tim", + 27, + } + meta, err := col.CreateDocument(ctx, doc) + if err != nil { + t.Fatalf("Failed to create new document: %s", describe(err)) + } + // Update document + replacement := Book{ + Title: "Golang 1.8", + } + var newDoc Book + ctx = driver.WithReturnNew(ctx, &newDoc) + if _, err := col.ReplaceDocument(ctx, meta.Key, replacement); err != nil { + t.Fatalf("Failed to replace document '%s': %s", meta.Key, describe(err)) + } + // Check new document + expected := replacement + if !reflect.DeepEqual(expected, newDoc) { + t.Errorf("Got wrong document. Expected %+v, got %+v", expected, newDoc) + } +} + +// TestReplaceDocumentSilent creates a document, replaces it with Silent() and then checks the meta is indeed empty. +func TestReplaceDocumentSilent(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "document_test", nil, t) + col := ensureCollection(ctx, db, "document_test", nil, t) + doc := UserDoc{ + "Angela", + 91, + } + meta, err := col.CreateDocument(ctx, doc) + if err != nil { + t.Fatalf("Failed to create new document: %s", describe(err)) + } + // Update document + replacement := Book{ + Title: "Jungle book", + } + ctx = driver.WithSilent(ctx) + if meta, err := col.ReplaceDocument(ctx, meta.Key, replacement); err != nil { + t.Fatalf("Failed to replace document '%s': %s", meta.Key, describe(err)) + } else if meta.Key != "" { + t.Errorf("Expected empty meta, got %v", meta) + } +} + +// TestReplaceDocumentRevision creates a document, replaces it with a specific (correct) revision. +// Then it attempts a replacement with an incorrect revision which must fail. 
+func TestReplaceDocumentRevision(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "document_test", nil, t) + col := ensureCollection(ctx, db, "document_test", nil, t) + doc := UserDoc{ + "Revision", + 33, + } + meta, err := col.CreateDocument(ctx, doc) + if err != nil { + t.Fatalf("Failed to create new document: %s", describe(err)) + } + + // Replace document with correct revision + replacement := Book{ + Title: "Jungle book", + } + initialRevCtx := driver.WithRevision(ctx, meta.Rev) + var replacedRevCtx context.Context + if meta2, err := col.ReplaceDocument(initialRevCtx, meta.Key, replacement); err != nil { + t.Fatalf("Failed to replace document '%s': %s", meta.Key, describe(err)) + } else { + replacedRevCtx = driver.WithRevision(ctx, meta2.Rev) + if meta2.Rev == meta.Rev { + t.Errorf("Expected revision to change, got initial revision '%s', replaced revision '%s'", meta.Rev, meta2.Rev) + } + } + + // Replace document with incorrect revision + replacement.Title = "Wrong deal" + if _, err := col.ReplaceDocument(initialRevCtx, meta.Key, replacement); !driver.IsPreconditionFailed(err) { + t.Errorf("Expected PreconditionFailedError, got %s", describe(err)) + } + + // Replace document once more with correct revision + replacement.Title = "Good deal" + if _, err := col.ReplaceDocument(replacedRevCtx, meta.Key, replacement); err != nil { + t.Errorf("Expected success, got %s", describe(err)) + } +} + +// TestReplaceDocumentKeyEmpty replaces a document it with an empty key. 
+func TestReplaceDocumentKeyEmpty(t *testing.T) { + c := createClientFromEnv(t, true) + db := ensureDatabase(nil, c, "document_test", nil, t) + col := ensureCollection(nil, db, "document_test", nil, t) + // Update document + replacement := map[string]interface{}{ + "name": "Updated", + } + if _, err := col.ReplaceDocument(nil, "", replacement); !driver.IsInvalidArgument(err) { + t.Errorf("Expected InvalidArgumentError, got %s", describe(err)) + } +} + +// TestReplaceDocumentUpdateNil replaces a document it with a nil update. +func TestReplaceDocumentUpdateNil(t *testing.T) { + c := createClientFromEnv(t, true) + db := ensureDatabase(nil, c, "document_test", nil, t) + col := ensureCollection(nil, db, "document_test", nil, t) + if _, err := col.ReplaceDocument(nil, "validKey", nil); !driver.IsInvalidArgument(err) { + t.Errorf("Expected InvalidArgumentError, got %s", describe(err)) + } +} diff --git a/deps/github.com/arangodb/go-driver/test/document_update_test.go b/deps/github.com/arangodb/go-driver/test/document_update_test.go new file mode 100644 index 000000000..6c3a7fe5d --- /dev/null +++ b/deps/github.com/arangodb/go-driver/test/document_update_test.go @@ -0,0 +1,298 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "context" + "reflect" + "testing" + + driver "github.com/arangodb/go-driver" +) + +// TestUpdateDocument1 creates a document, updates it and then checks the update has succeeded. +func TestUpdateDocument1(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "document_test", nil, t) + col := ensureCollection(ctx, db, "document_test", nil, t) + doc := UserDoc{ + "Piere", + 23, + } + meta, err := col.CreateDocument(ctx, doc) + if err != nil { + t.Fatalf("Failed to create new document: %s", describe(err)) + } + // Update document + update := map[string]interface{}{ + "name": "Updated", + } + if _, err := col.UpdateDocument(ctx, meta.Key, update); err != nil { + t.Fatalf("Failed to update document '%s': %s", meta.Key, describe(err)) + } + // Read updated document + var readDoc UserDoc + if _, err := col.ReadDocument(ctx, meta.Key, &readDoc); err != nil { + t.Fatalf("Failed to read document '%s': %s", meta.Key, describe(err)) + } + doc.Name = "Updated" + if !reflect.DeepEqual(doc, readDoc) { + t.Errorf("Got wrong document. Expected %+v, got %+v", doc, readDoc) + } +} + +// TestUpdateDocumentReturnOld creates a document, updates it checks the ReturnOld value. 
+func TestUpdateDocumentReturnOld(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "document_test", nil, t) + col := ensureCollection(ctx, db, "document_test", nil, t) + doc := UserDoc{ + "Tim", + 27, + } + meta, err := col.CreateDocument(ctx, doc) + if err != nil { + t.Fatalf("Failed to create new document: %s", describe(err)) + } + // Update document + update := map[string]interface{}{ + "name": "Updated", + } + var old UserDoc + ctx = driver.WithReturnOld(ctx, &old) + if _, err := col.UpdateDocument(ctx, meta.Key, update); err != nil { + t.Fatalf("Failed to update document '%s': %s", meta.Key, describe(err)) + } + // Check old document + if !reflect.DeepEqual(doc, old) { + t.Errorf("Got wrong document. Expected %+v, got %+v", doc, old) + } +} + +// TestUpdateDocumentReturnNew creates a document, updates it checks the ReturnNew value. +func TestUpdateDocumentReturnNew(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "document_test", nil, t) + col := ensureCollection(ctx, db, "document_test", nil, t) + doc := UserDoc{ + "Tim", + 27, + } + meta, err := col.CreateDocument(ctx, doc) + if err != nil { + t.Fatalf("Failed to create new document: %s", describe(err)) + } + // Update document + update := map[string]interface{}{ + "name": "Updated", + } + var newDoc UserDoc + ctx = driver.WithReturnNew(ctx, &newDoc) + if _, err := col.UpdateDocument(ctx, meta.Key, update); err != nil { + t.Fatalf("Failed to update document '%s': %s", meta.Key, describe(err)) + } + // Check new document + expected := doc + expected.Name = "Updated" + if !reflect.DeepEqual(expected, newDoc) { + t.Errorf("Got wrong document. Expected %+v, got %+v", expected, newDoc) + } +} + +// TestUpdateDocumentKeepNullTrue creates a document, updates it with KeepNull(true) and then checks the update has succeeded. 
+func TestUpdateDocumentKeepNullTrue(t *testing.T) { + ctx := context.Background() + var conn driver.Connection + c := createClientFromEnv(t, true, &conn) + db := ensureDatabase(ctx, c, "document_test", nil, t) + col := ensureCollection(ctx, db, "document_test", nil, t) + doc := Account{ + ID: "1234", + User: &UserDoc{ + "Mathilda", + 45, + }, + } + meta, err := col.CreateDocument(ctx, doc) + if err != nil { + t.Fatalf("Failed to create new document: %s", describe(err)) + } + // Update document + update := map[string]interface{}{ + "id": "5678", + "user": nil, + } + if _, err := col.UpdateDocument(driver.WithKeepNull(ctx, true), meta.Key, update); err != nil { + t.Fatalf("Failed to update document '%s': %s", meta.Key, describe(err)) + } + // Read updated document + var readDoc map[string]interface{} + var rawResponse []byte + ctx = driver.WithRawResponse(ctx, &rawResponse) + if _, err := col.ReadDocument(ctx, meta.Key, &readDoc); err != nil { + t.Fatalf("Failed to read document '%s': %s", meta.Key, describe(err)) + } + // We parse to this type of map, since unmarshalling nil values to a map of type map[string]interface{} + // will cause the entry to be deleted. + var jsonMap map[string]*driver.RawObject + if err := conn.Unmarshal(rawResponse, &jsonMap); err != nil { + t.Fatalf("Failed to parse raw response: %s", describe(err)) + } + if raw, found := jsonMap["user"]; !found { + t.Errorf("Expected user to be found but got not found") + } else if raw != nil { + t.Errorf("Expected user to be found and nil, got %s", string(*raw)) + } +} + +// TestUpdateDocumentKeepNullFalse creates a document, updates it with KeepNull(false) and then checks the update has succeeded. 
+func TestUpdateDocumentKeepNullFalse(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "document_test", nil, t) + col := ensureCollection(ctx, db, "document_test", nil, t) + doc := Account{ + ID: "1234", + User: &UserDoc{ + "Mathilda", + 45, + }, + } + meta, err := col.CreateDocument(ctx, doc) + if err != nil { + t.Fatalf("Failed to create new document: %s", describe(err)) + } + // Update document + update := map[string]interface{}{ + "id": "5678", + "user": nil, + } + if _, err := col.UpdateDocument(driver.WithKeepNull(ctx, false), meta.Key, update); err != nil { + t.Fatalf("Failed to update document '%s': %s", meta.Key, describe(err)) + } + // Read updated document + readDoc := doc + if _, err := col.ReadDocument(ctx, meta.Key, &readDoc); err != nil { + t.Fatalf("Failed to read document '%s': %s", meta.Key, describe(err)) + } + if readDoc.User == nil { + t.Errorf("Expected user to be untouched, got %v", readDoc.User) + } +} + +// TestUpdateDocumentSilent creates a document, updates it with Silent() and then checks the meta is indeed empty. +func TestUpdateDocumentSilent(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "document_test", nil, t) + col := ensureCollection(ctx, db, "document_test", nil, t) + doc := UserDoc{ + "Angela", + 91, + } + meta, err := col.CreateDocument(ctx, doc) + if err != nil { + t.Fatalf("Failed to create new document: %s", describe(err)) + } + // Update document + update := map[string]interface{}{ + "age": "61", + } + ctx = driver.WithSilent(ctx) + if meta, err := col.UpdateDocument(ctx, meta.Key, update); err != nil { + t.Fatalf("Failed to update document '%s': %s", meta.Key, describe(err)) + } else if meta.Key != "" { + t.Errorf("Expected empty meta, got %v", meta) + } +} + +// TestUpdateDocumentRevision creates a document, updates it with a specific (correct) revision. 
+// Then it attempts an update with an incorrect revision which must fail. +func TestUpdateDocumentRevision(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "document_test", nil, t) + col := ensureCollection(ctx, db, "document_test", nil, t) + doc := UserDoc{ + "Revision", + 33, + } + meta, err := col.CreateDocument(ctx, doc) + if err != nil { + t.Fatalf("Failed to create new document: %s", describe(err)) + } + + // Update document with correct revision + update := map[string]interface{}{ + "age": 34, + } + initialRevCtx := driver.WithRevision(ctx, meta.Rev) + var updatedRevCtx context.Context + if meta2, err := col.UpdateDocument(initialRevCtx, meta.Key, update); err != nil { + t.Fatalf("Failed to update document '%s': %s", meta.Key, describe(err)) + } else { + updatedRevCtx = driver.WithRevision(ctx, meta2.Rev) + if meta2.Rev == meta.Rev { + t.Errorf("Expected revision to change, got initial revision '%s', updated revision '%s'", meta.Rev, meta2.Rev) + } + } + + // Update document with incorrect revision + update["age"] = 35 + if _, err := col.UpdateDocument(initialRevCtx, meta.Key, update); !driver.IsPreconditionFailed(err) { + t.Errorf("Expected PreconditionFailedError, got %s", describe(err)) + } + + // Update document once more with correct revision + update["age"] = 36 + if _, err := col.UpdateDocument(updatedRevCtx, meta.Key, update); err != nil { + t.Errorf("Expected success, got %s", describe(err)) + } +} + +// TestUpdateDocumentKeyEmpty updates a document it with an empty key. 
+func TestUpdateDocumentKeyEmpty(t *testing.T) { + c := createClientFromEnv(t, true) + db := ensureDatabase(nil, c, "document_test", nil, t) + col := ensureCollection(nil, db, "document_test", nil, t) + // Update document + update := map[string]interface{}{ + "name": "Updated", + } + if _, err := col.UpdateDocument(nil, "", update); !driver.IsInvalidArgument(err) { + t.Errorf("Expected InvalidArgumentError, got %s", describe(err)) + } +} + +// TestUpdateDocumentUpdateNil updates a document it with a nil update. +func TestUpdateDocumentUpdateNil(t *testing.T) { + c := createClientFromEnv(t, true) + db := ensureDatabase(nil, c, "document_test", nil, t) + col := ensureCollection(nil, db, "document_test", nil, t) + if _, err := col.UpdateDocument(nil, "validKey", nil); !driver.IsInvalidArgument(err) { + t.Errorf("Expected InvalidArgumentError, got %s", describe(err)) + } +} diff --git a/deps/github.com/arangodb/go-driver/test/documents_create_test.go b/deps/github.com/arangodb/go-driver/test/documents_create_test.go new file mode 100644 index 000000000..e10b36075 --- /dev/null +++ b/deps/github.com/arangodb/go-driver/test/documents_create_test.go @@ -0,0 +1,169 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "context" + "reflect" + "testing" + + driver "github.com/arangodb/go-driver" +) + +// TestCreateDocuments creates a document and then checks that it exists. +func TestCreateDocuments(t *testing.T) { + c := createClientFromEnv(t, true) + db := ensureDatabase(nil, c, "document_test", nil, t) + col := ensureCollection(nil, db, "documents_test", nil, t) + docs := []UserDoc{ + UserDoc{ + "Jan", + 40, + }, + UserDoc{ + "Foo", + 41, + }, + UserDoc{ + "Frank", + 42, + }, + } + metas, errs, err := col.CreateDocuments(nil, docs) + if err != nil { + t.Fatalf("Failed to create new documents: %s", describe(err)) + } else if len(metas) != len(docs) { + t.Errorf("Expected %d metas, got %d", len(docs), len(metas)) + } else { + for i := 0; i < len(docs); i++ { + if err := errs[i]; err != nil { + t.Errorf("Expected no error at index %d, got %s", i, describe(err)) + } + + // Document must exists now + var readDoc UserDoc + if _, err := col.ReadDocument(nil, metas[i].Key, &readDoc); err != nil { + t.Fatalf("Failed to read document '%s': %s", metas[i].Key, describe(err)) + } + if !reflect.DeepEqual(docs[i], readDoc) { + t.Errorf("Got wrong document. Expected %+v, got %+v", docs[i], readDoc) + } + } + } +} + +// TestCreateDocumentsReturnNew creates a document and checks the document returned in in ReturnNew. 
+func TestCreateDocumentsReturnNew(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "document_test", nil, t) + col := ensureCollection(ctx, db, "documents_test", nil, t) + docs := []UserDoc{ + UserDoc{ + "Sjjjj", + 1, + }, + UserDoc{ + "Mies", + 2, + }, + } + newDocs := make([]UserDoc, len(docs)) + metas, errs, err := col.CreateDocuments(driver.WithReturnNew(ctx, newDocs), docs) + if err != nil { + t.Fatalf("Failed to create new documents: %s", describe(err)) + } else if len(metas) != len(docs) { + t.Errorf("Expected %d metas, got %d", len(docs), len(metas)) + } else { + for i := 0; i < len(docs); i++ { + if err := errs[i]; err != nil { + t.Errorf("Expected no error at index %d, got %s", i, describe(err)) + } + // NewDoc must equal doc + if !reflect.DeepEqual(docs[i], newDocs[i]) { + t.Errorf("Got wrong ReturnNew document. Expected %+v, got %+v", docs[i], newDocs[i]) + } + // Document must exists now + var readDoc UserDoc + if _, err := col.ReadDocument(ctx, metas[i].Key, &readDoc); err != nil { + t.Fatalf("Failed to read document '%s': %s", metas[i].Key, describe(err)) + } + if !reflect.DeepEqual(docs[i], readDoc) { + t.Errorf("Got wrong document. Expected %+v, got %+v", docs[i], readDoc) + } + } + } +} + +// TestCreateDocumentsSilent creates a document with WithSilent. 
+func TestCreateDocumentsSilent(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "document_test", nil, t) + col := ensureCollection(ctx, db, "documents_test", nil, t) + docs := []UserDoc{ + UserDoc{ + "Sjjjj", + 1, + }, + UserDoc{ + "Mies", + 2, + }, + } + if metas, errs, err := col.CreateDocuments(driver.WithSilent(ctx), docs); err != nil { + t.Fatalf("Failed to create new documents: %s", describe(err)) + } else { + if len(metas) != 0 { + t.Errorf("Expected 0 metas, got %d", len(metas)) + } + if len(errs) != 0 { + t.Errorf("Expected 0 errors, got %d", len(errs)) + } + } +} + +// TestCreateDocumentsNil creates multiple documents with a nil documents input. +func TestCreateDocumentsNil(t *testing.T) { + c := createClientFromEnv(t, true) + db := ensureDatabase(nil, c, "document_test", nil, t) + col := ensureCollection(nil, db, "documents_test", nil, t) + if _, _, err := col.CreateDocuments(nil, nil); !driver.IsInvalidArgument(err) { + t.Errorf("Expected InvalidArgumentError, got %s", describe(err)) + } +} + +// TestCreateDocumentsNonSlice creates multiple documents with a non-slice documents input. 
+func TestCreateDocumentsNonSlice(t *testing.T) { + c := createClientFromEnv(t, true) + db := ensureDatabase(nil, c, "document_test", nil, t) + col := ensureCollection(nil, db, "documents_test", nil, t) + var obj UserDoc + if _, _, err := col.CreateDocuments(nil, &obj); !driver.IsInvalidArgument(err) { + t.Errorf("Expected InvalidArgumentError, got %s", describe(err)) + } + var m map[string]interface{} + if _, _, err := col.CreateDocuments(nil, &m); !driver.IsInvalidArgument(err) { + t.Errorf("Expected InvalidArgumentError, got %s", describe(err)) + } +} diff --git a/deps/github.com/arangodb/go-driver/test/documents_import_test.go b/deps/github.com/arangodb/go-driver/test/documents_import_test.go new file mode 100644 index 000000000..1d0840412 --- /dev/null +++ b/deps/github.com/arangodb/go-driver/test/documents_import_test.go @@ -0,0 +1,561 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "testing" + + driver "github.com/arangodb/go-driver" +) + +// TestImportDocumentsWithKeys imports documents and then checks that it exists. 
+func TestImportDocumentsWithKeys(t *testing.T) { + c := createClientFromEnv(t, true) + db := ensureDatabase(nil, c, "document_test", nil, t) + col := ensureCollection(nil, db, "import_withKeys_test", nil, t) + docs := []UserDocWithKey{ + UserDocWithKey{ + "jan", + "Jan", + 40, + }, + UserDocWithKey{ + "foo", + "Foo", + 41, + }, + UserDocWithKey{ + "frank", + "Frank", + 42, + }, + } + + var raw []byte + ctx := driver.WithRawResponse(nil, &raw) + stats, err := col.ImportDocuments(ctx, docs, nil) + if err != nil { + t.Fatalf("Failed to import documents: %s %#v", describe(err), err) + } else { + if stats.Created != int64(len(docs)) { + t.Errorf("Expected %d created documents, got %d (json %s)", len(docs), stats.Created, formatRawResponse(raw)) + } + if stats.Errors != 0 { + t.Errorf("Expected %d error documents, got %d (json %s)", 0, stats.Errors, formatRawResponse(raw)) + } + if stats.Empty != 0 { + t.Errorf("Expected %d empty documents, got %d (json %s)", 0, stats.Empty, formatRawResponse(raw)) + } + } +} + +// TestImportDocumentsWithoutKeys imports documents and then checks that it exists. 
+func TestImportDocumentsWithoutKeys(t *testing.T) { + c := createClientFromEnv(t, true) + db := ensureDatabase(nil, c, "document_test", nil, t) + col := ensureCollection(nil, db, "import_withoutKeys_test", nil, t) + docs := []UserDoc{ + UserDoc{ + "Jan", + 40, + }, + UserDoc{ + "Foo", + 41, + }, + UserDoc{ + "Frank", + 42, + }, + } + + var raw []byte + ctx := driver.WithRawResponse(nil, &raw) + stats, err := col.ImportDocuments(ctx, docs, nil) + if err != nil { + t.Fatalf("Failed to import documents: %s", describe(err)) + } else { + if stats.Created != int64(len(docs)) { + t.Errorf("Expected %d created documents, got %d (json %s)", len(docs), stats.Created, formatRawResponse(raw)) + } + if stats.Errors != 0 { + t.Errorf("Expected %d error documents, got %d (json %s)", 0, stats.Errors, formatRawResponse(raw)) + } + if stats.Empty != 0 { + t.Errorf("Expected %d empty documents, got %d (json %s)", 0, stats.Empty, formatRawResponse(raw)) + } + } +} + +// TestImportDocumentsEmptyEntries imports documents and then checks that it exists. 
+func TestImportDocumentsEmptyEntries(t *testing.T) { + if getContentTypeFromEnv(t) == driver.ContentTypeVelocypack { + t.Skip("Not supported on vpack") + } + c := createClientFromEnv(t, true) + db := ensureDatabase(nil, c, "document_test", nil, t) + col := ensureCollection(nil, db, "import_emptyEntries_test", nil, t) + docs := []*UserDocWithKey{ + &UserDocWithKey{ + "jan", + "Jan", + 40, + }, + &UserDocWithKey{ + "foo", + "Foo", + 41, + }, + nil, + &UserDocWithKey{ + "frank", + "Frank", + 42, + }, + } + + var raw []byte + ctx := driver.WithRawResponse(nil, &raw) + stats, err := col.ImportDocuments(ctx, docs, nil) + if err != nil { + t.Fatalf("Failed to import documents: %s", describe(err)) + } else { + if stats.Created != int64(len(docs))-1 { + t.Errorf("Expected %d created documents, got %d (json %s)", len(docs)-1, stats.Created, formatRawResponse(raw)) + } + if stats.Errors != 0 { + t.Errorf("Expected %d error documents, got %d (json %s)", 0, stats.Errors, formatRawResponse(raw)) + } + if stats.Empty != 1 { + t.Errorf("Expected %d empty documents, got %d (json %s)", 1, stats.Empty, formatRawResponse(raw)) + } + } +} + +// TestImportDocumentsInvalidEntries imports documents and then checks that it exists. 
+func TestImportDocumentsInvalidEntries(t *testing.T) { + if getContentTypeFromEnv(t) == driver.ContentTypeVelocypack { + t.Skip("Not supported on vpack") + } + c := createClientFromEnv(t, true) + db := ensureDatabase(nil, c, "document_test", nil, t) + col := ensureCollection(nil, db, "import_invalidEntries_test", nil, t) + docs := []interface{}{ + &UserDocWithKey{ + "jan", + "Jan", + 40, + }, + []string{"array", "is", "invalid"}, + &UserDocWithKey{ + "foo", + "Foo", + 41, + }, + "string is not valid", + nil, + &UserDocWithKey{ + "frank", + "Frank", + 42, + }, + } + + var raw []byte + ctx := driver.WithRawResponse(nil, &raw) + stats, err := col.ImportDocuments(ctx, docs, nil) + if err != nil { + t.Fatalf("Failed to import documents: %s", describe(err)) + } else { + if stats.Created != int64(len(docs))-3 { + t.Errorf("Expected %d created documents, got %d (json %s)", len(docs)-3, stats.Created, formatRawResponse(raw)) + } + if stats.Errors != 2 { + t.Errorf("Expected %d error documents, got %d (json %s)", 2, stats.Errors, formatRawResponse(raw)) + } + if stats.Empty != 1 { + t.Errorf("Expected %d empty documents, got %d (json %s)", 1, stats.Empty, formatRawResponse(raw)) + } + } +} + +// TestImportDocumentsDuplicateEntries imports documents and then checks that it exists. 
+func TestImportDocumentsDuplicateEntries(t *testing.T) { + c := createClientFromEnv(t, true) + db := ensureDatabase(nil, c, "document_test", nil, t) + col := ensureCollection(nil, db, "import_duplicateEntries_test", nil, t) + docs := []interface{}{ + &UserDocWithKey{ + "jan", + "Jan", + 40, + }, + &UserDocWithKey{ + "jan", + "Jan", + 40, + }, + } + + var raw []byte + ctx := driver.WithRawResponse(nil, &raw) + stats, err := col.ImportDocuments(ctx, docs, nil) + if err != nil { + t.Fatalf("Failed to import documents: %s", describe(err)) + } else { + if stats.Created != 1 { + t.Errorf("Expected %d created documents, got %d (json %s)", 1, stats.Created, formatRawResponse(raw)) + } + if stats.Errors != 1 { + t.Errorf("Expected %d error documents, got %d (json %s)", 1, stats.Errors, formatRawResponse(raw)) + } + if stats.Empty != 0 { + t.Errorf("Expected %d empty documents, got %d (json %s)", 0, stats.Empty, formatRawResponse(raw)) + } + if stats.Updated != 0 { + t.Errorf("Expected %d updated documents, got %d (json %s)", 0, stats.Updated, formatRawResponse(raw)) + } + if stats.Ignored != 0 { + t.Errorf("Expected %d ignored documents, got %d (json %s)", 0, stats.Ignored, formatRawResponse(raw)) + } + } +} + +// TestImportDocumentsDuplicateEntriesComplete imports documents and then checks that it exists. 
+func TestImportDocumentsDuplicateEntriesComplete(t *testing.T) { + c := createClientFromEnv(t, true) + db := ensureDatabase(nil, c, "document_test", nil, t) + col := ensureCollection(nil, db, "import_duplicateEntriesComplete_test", nil, t) + docs := []interface{}{ + &UserDocWithKey{ + "jan", + "Jan", + 40, + }, + &UserDocWithKey{ + "jan", + "Jan", + 40, + }, + } + + var raw []byte + ctx := driver.WithRawResponse(nil, &raw) + if _, err := col.ImportDocuments(ctx, docs, &driver.ImportDocumentOptions{ + Complete: true, + }); !driver.IsConflict(err) { + t.Errorf("Expected ConflictError, got %s", describe(err)) + } +} + +// TestImportDocumentsDuplicateEntriesUpdate imports documents and then checks that it exists. +func TestImportDocumentsDuplicateEntriesUpdate(t *testing.T) { + c := createClientFromEnv(t, true) + db := ensureDatabase(nil, c, "document_test", nil, t) + col := ensureCollection(nil, db, "import_duplicateEntriesUpdate_test", nil, t) + docs := []interface{}{ + &UserDocWithKey{ + "jan", + "Jan", + 40, + }, + map[string]interface{}{ + "_key": "jan", + "name": "Jan2", + }, + } + + var raw []byte + ctx := driver.WithRawResponse(nil, &raw) + stats, err := col.ImportDocuments(ctx, docs, &driver.ImportDocumentOptions{ + OnDuplicate: driver.ImportOnDuplicateUpdate, + }) + if err != nil { + t.Fatalf("Failed to import documents: %s", describe(err)) + } else { + if stats.Created != 1 { + t.Errorf("Expected %d created documents, got %d (json %s)", 1, stats.Created, formatRawResponse(raw)) + } + if stats.Errors != 0 { + t.Errorf("Expected %d error documents, got %d (json %s)", 0, stats.Errors, formatRawResponse(raw)) + } + if stats.Empty != 0 { + t.Errorf("Expected %d empty documents, got %d (json %s)", 0, stats.Empty, formatRawResponse(raw)) + } + if stats.Updated != 1 { + t.Errorf("Expected %d updated documents, got %d (json %s)", 1, stats.Updated, formatRawResponse(raw)) + } + if stats.Ignored != 0 { + t.Errorf("Expected %d ignored documents, got %d (json %s)", 0, 
stats.Ignored, formatRawResponse(raw)) + } + + var user UserDocWithKey + if _, err := col.ReadDocument(nil, "jan", &user); err != nil { + t.Errorf("ReadDocument failed: %s", describe(err)) + } else { + if user.Name != "Jan2" { + t.Errorf("Expected Name to be 'Jan2', got '%s'", user.Name) + } + if user.Age != 40 { + t.Errorf("Expected Age to be 40, got %d", user.Age) + } + } + } +} + +// TestImportDocumentsDuplicateEntriesReplace imports documents and then checks that it exists. +func TestImportDocumentsDuplicateEntriesReplace(t *testing.T) { + c := createClientFromEnv(t, true) + db := ensureDatabase(nil, c, "document_test", nil, t) + col := ensureCollection(nil, db, "import_duplicateEntriesReplace_test", nil, t) + docs := []interface{}{ + &UserDocWithKey{ + "jan", + "Jan", + 40, + }, + map[string]interface{}{ + "_key": "jan", + "name": "Jan2", + }, + } + + var raw []byte + ctx := driver.WithRawResponse(nil, &raw) + stats, err := col.ImportDocuments(ctx, docs, &driver.ImportDocumentOptions{ + OnDuplicate: driver.ImportOnDuplicateReplace, + }) + if err != nil { + t.Fatalf("Failed to import documents: %s", describe(err)) + } else { + if stats.Created != 1 { + t.Errorf("Expected %d created documents, got %d (json %s)", 1, stats.Created, formatRawResponse(raw)) + } + if stats.Errors != 0 { + t.Errorf("Expected %d error documents, got %d (json %s)", 0, stats.Errors, formatRawResponse(raw)) + } + if stats.Empty != 0 { + t.Errorf("Expected %d empty documents, got %d (json %s)", 0, stats.Empty, formatRawResponse(raw)) + } + if stats.Updated != 1 { + t.Errorf("Expected %d updated documents, got %d (json %s)", 1, stats.Updated, formatRawResponse(raw)) + } + if stats.Ignored != 0 { + t.Errorf("Expected %d ignored documents, got %d (json %s)", 0, stats.Ignored, formatRawResponse(raw)) + } + + var user UserDocWithKey + if _, err := col.ReadDocument(nil, "jan", &user); err != nil { + t.Errorf("ReadDocument failed: %s", describe(err)) + } else { + if user.Name != "Jan2" { + 
t.Errorf("Expected Name to be 'Jan2', got '%s'", user.Name) + } + if user.Age != 0 { + t.Errorf("Expected Age to be 0, got %d", user.Age) + } + } + } +} + +// TestImportDocumentsDuplicateEntriesIgnore imports documents and then checks that it exists. +func TestImportDocumentsDuplicateEntriesIgnore(t *testing.T) { + c := createClientFromEnv(t, true) + db := ensureDatabase(nil, c, "document_test", nil, t) + col := ensureCollection(nil, db, "import_duplicateEntriesIgnore_test", nil, t) + docs := []interface{}{ + &UserDocWithKey{ + "jan", + "Jan", + 40, + }, + map[string]interface{}{ + "_key": "jan", + "name": "Jan2", + }, + } + + var raw []byte + ctx := driver.WithRawResponse(nil, &raw) + stats, err := col.ImportDocuments(ctx, docs, &driver.ImportDocumentOptions{ + OnDuplicate: driver.ImportOnDuplicateIgnore, + }) + if err != nil { + t.Fatalf("Failed to import documents: %s", describe(err)) + } else { + if stats.Created != 1 { + t.Errorf("Expected %d created documents, got %d (json %s)", 1, stats.Created, formatRawResponse(raw)) + } + if stats.Errors != 0 { + t.Errorf("Expected %d error documents, got %d (json %s)", 0, stats.Errors, formatRawResponse(raw)) + } + if stats.Empty != 0 { + t.Errorf("Expected %d empty documents, got %d (json %s)", 0, stats.Empty, formatRawResponse(raw)) + } + if stats.Updated != 0 { + t.Errorf("Expected %d updated documents, got %d (json %s)", 0, stats.Updated, formatRawResponse(raw)) + } + if stats.Ignored != 1 { + t.Errorf("Expected %d ignored documents, got %d (json %s)", 1, stats.Ignored, formatRawResponse(raw)) + } + + var user UserDocWithKey + if _, err := col.ReadDocument(nil, "jan", &user); err != nil { + t.Errorf("ReadDocument failed: %s", describe(err)) + } else { + if user.Name != "Jan" { + t.Errorf("Expected Name to be 'Jan', got '%s'", user.Name) + } + if user.Age != 40 { + t.Errorf("Expected Age to be 40, got %d", user.Age) + } + } + } +} + +// TestImportDocumentsDetails imports documents and then checks that it exists. 
+func TestImportDocumentsDetails(t *testing.T) { + c := createClientFromEnv(t, true) + db := ensureDatabase(nil, c, "document_test", nil, t) + col := ensureCollection(nil, db, "import_details_test", nil, t) + docs := []interface{}{ + &UserDocWithKey{ + "jan", + "Jan", + 40, + }, + map[string]interface{}{ + "_key": "jan", + "name": "Jan2", + }, + } + + var raw []byte + var details []string + ctx := driver.WithImportDetails(driver.WithRawResponse(nil, &raw), &details) + stats, err := col.ImportDocuments(ctx, docs, nil) + if err != nil { + t.Fatalf("Failed to import documents: %s", describe(err)) + } else { + if stats.Created != 1 { + t.Errorf("Expected %d created documents, got %d (json %s)", 1, stats.Created, formatRawResponse(raw)) + } + if stats.Errors != 1 { + t.Errorf("Expected %d error documents, got %d (json %s)", 1, stats.Errors, formatRawResponse(raw)) + } + if stats.Empty != 0 { + t.Errorf("Expected %d empty documents, got %d (json %s)", 0, stats.Empty, formatRawResponse(raw)) + } + if stats.Updated != 0 { + t.Errorf("Expected %d updated documents, got %d (json %s)", 0, stats.Updated, formatRawResponse(raw)) + } + if stats.Ignored != 0 { + t.Errorf("Expected %d ignored documents, got %d (json %s)", 0, stats.Ignored, formatRawResponse(raw)) + } + + detailsExpected := `at position 1: creating document failed with error 'unique constraint violated', offending document: {"_key":"jan","name":"Jan2"}` + if len(details) != 1 { + t.Errorf("Expected 1 details, to %d", len(details)) + } else if details[0] != detailsExpected { + t.Errorf("Expected details[0] to be '%s', got '%s'", detailsExpected, details[0]) + } + } +} + +// TestImportDocumentsOverwriteYes imports documents and then checks that it exists. 
+func TestImportDocumentsOverwriteYes(t *testing.T) { + c := createClientFromEnv(t, true) + db := ensureDatabase(nil, c, "document_test", nil, t) + col := ensureCollection(nil, db, "import_overwriteYes_test", nil, t) + docs := []interface{}{ + &UserDoc{ + "Jan", + 40, + }, + map[string]interface{}{ + "name": "Jan2", + }, + } + + for i := 0; i < 3; i++ { + var raw []byte + var details []string + ctx := driver.WithImportDetails(driver.WithRawResponse(nil, &raw), &details) + stats, err := col.ImportDocuments(ctx, docs, &driver.ImportDocumentOptions{ + Overwrite: true, + }) + if err != nil { + t.Fatalf("Failed to import documents: %s", describe(err)) + } else { + if stats.Created != 2 { + t.Errorf("Expected %d created documents, got %d (json %s)", 2, stats.Created, formatRawResponse(raw)) + } + } + + countExpected := int64(2) + if count, err := col.Count(nil); err != nil { + t.Errorf("Failed to count documents: %s", describe(err)) + } else if count != countExpected { + t.Errorf("Expected count to be %d in round %d, got %d", countExpected, i, count) + } + } +} + +// TestImportDocumentsOverwriteNo imports documents and then checks that it exists. 
+func TestImportDocumentsOverwriteNo(t *testing.T) { + c := createClientFromEnv(t, true) + db := ensureDatabase(nil, c, "document_test", nil, t) + col := ensureCollection(nil, db, "import_overwriteNo_test", nil, t) + docs := []interface{}{ + &UserDoc{ + "Jan", + 40, + }, + map[string]interface{}{ + "name": "Jan2", + }, + } + + for i := 0; i < 3; i++ { + var raw []byte + var details []string + ctx := driver.WithImportDetails(driver.WithRawResponse(nil, &raw), &details) + stats, err := col.ImportDocuments(ctx, docs, &driver.ImportDocumentOptions{ + Overwrite: false, + }) + if err != nil { + t.Fatalf("Failed to import documents: %s", describe(err)) + } else { + if stats.Created != 2 { + t.Errorf("Expected %d created documents, got %d (json %s)", 2, stats.Created, formatRawResponse(raw)) + } + } + + countExpected := int64(2 * (i + 1)) + if count, err := col.Count(nil); err != nil { + t.Errorf("Failed to count documents: %s", describe(err)) + } else if count != countExpected { + t.Errorf("Expected count to be %d in round %d, got %d", countExpected, i, count) + } + } +} diff --git a/deps/github.com/arangodb/go-driver/test/documents_remove_test.go b/deps/github.com/arangodb/go-driver/test/documents_remove_test.go new file mode 100644 index 000000000..c32391817 --- /dev/null +++ b/deps/github.com/arangodb/go-driver/test/documents_remove_test.go @@ -0,0 +1,229 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Copyright holder is ArangoDB GmbH, Cologne, Germany
+//
+// Author Ewout Prangsma
+//
+
+package test
+
+import (
+	"context"
+	"reflect"
+	"testing"
+
+	driver "github.com/arangodb/go-driver"
+)
+
+// TestRemoveDocuments creates documents, removes them and then checks the removal has succeeded.
+func TestRemoveDocuments(t *testing.T) {
+	ctx := context.Background()
+	c := createClientFromEnv(t, true)
+	db := ensureDatabase(ctx, c, "document_test", nil, t)
+	col := ensureCollection(ctx, db, "documents_test", nil, t)
+	docs := []UserDoc{
+		UserDoc{
+			"Piere",
+			23,
+		},
+	}
+	metas, errs, err := col.CreateDocuments(ctx, docs)
+	if err != nil {
+		t.Fatalf("Failed to create new documents: %s", describe(err))
+	} else if err := errs.FirstNonNil(); err != nil {
+		t.Fatalf("Expected no errors, got first: %s", describe(err))
+	}
+	if _, _, err := col.RemoveDocuments(ctx, metas.Keys()); err != nil {
+		t.Fatalf("Failed to remove documents: %s", describe(err))
+	}
+	// Should no longer exist
+	for i, meta := range metas {
+		var readDoc Account
+		if _, err := col.ReadDocument(ctx, meta.Key, &readDoc); !driver.IsNotFound(err) {
+			t.Fatalf("Expected NotFoundError at %d, got %s", i, describe(err))
+		}
+	}
+}
+
+// TestRemoveDocumentsReturnOld creates documents, removes them and checks the ReturnOld value.
+func TestRemoveDocumentsReturnOld(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "document_test", nil, t) + col := ensureCollection(ctx, db, "documents_test", nil, t) + docs := []UserDoc{ + UserDoc{ + "Tim", + 27, + }, + UserDoc{ + "Tom", + 27, + }, + UserDoc{ + "Tam", + 27, + }, + UserDoc{ + "Tum", + 27, + }, + } + metas, errs, err := col.CreateDocuments(ctx, docs) + if err != nil { + t.Fatalf("Failed to create new document: %s", describe(err)) + } else if err := errs.FirstNonNil(); err != nil { + t.Fatalf("Expected no errors, got first: %s", describe(err)) + } + oldDocs := make([]UserDoc, len(docs)) + ctx = driver.WithReturnOld(ctx, oldDocs) + if _, _, err := col.RemoveDocuments(ctx, metas.Keys()); err != nil { + t.Fatalf("Failed to remove documents: %s", describe(err)) + } + // Check old documents + for i, doc := range docs { + if !reflect.DeepEqual(doc, oldDocs[i]) { + t.Errorf("Got wrong document %d. Expected %+v, got %+v", i, doc, oldDocs[i]) + } + // Should not longer exist + var readDoc Account + if _, err := col.ReadDocument(ctx, metas[i].Key, &readDoc); !driver.IsNotFound(err) { + t.Fatalf("Expected NotFoundError at %d, got %s", i, describe(err)) + } + } +} + +// TestRemoveDocumentsSilent creates documents, removes them with Silent() and then checks the meta is indeed empty. 
+func TestRemoveDocumentsSilent(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "document_test", nil, t) + col := ensureCollection(ctx, db, "documents_test", nil, t) + docs := []UserDoc{ + UserDoc{ + "Angela", + 91, + }, + UserDoc{ + "Tommy", + 19, + }, + } + metas, errs, err := col.CreateDocuments(ctx, docs) + if err != nil { + t.Fatalf("Failed to create new documents: %s", describe(err)) + } else if err := errs.FirstNonNil(); err != nil { + t.Fatalf("Expected no errors, got first: %s", describe(err)) + } + ctx = driver.WithSilent(ctx) + if rmetas, rerrs, err := col.RemoveDocuments(ctx, metas.Keys()); err != nil { + t.Fatalf("Failed to remove documents: %s", describe(err)) + } else { + if len(rmetas) > 0 { + t.Errorf("Expected empty metas, got %d", len(rmetas)) + } + if len(rerrs) > 0 { + t.Errorf("Expected empty errors, got %d", len(rerrs)) + } + } + // Should not longer exist + for i, meta := range metas { + var readDoc Account + if _, err := col.ReadDocument(ctx, meta.Key, &readDoc); !driver.IsNotFound(err) { + t.Errorf("Expected NotFoundError at %d, got %s", i, describe(err)) + } + } +} + +// TestRemoveDocumentsRevision creates documents, removes them with an incorrect revisions. 
+func TestRemoveDocumentsRevision(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "document_test", nil, t) + col := ensureCollection(ctx, db, "documents_test", nil, t) + docs := []UserDoc{ + UserDoc{ + "DryLake", + 91, + }, + UserDoc{ + "DryBed", + 91, + }, + } + metas, errs, err := col.CreateDocuments(ctx, docs) + if err != nil { + t.Fatalf("Failed to create new documents: %s", describe(err)) + } else if err := errs.FirstNonNil(); err != nil { + t.Fatalf("Expected no errors, got first: %s", describe(err)) + } + + // Replace the documents to get another revision + replacements := []Book{ + Book{ + Title: "Jungle book", + }, + Book{ + Title: "Another book", + }, + } + metas2, errs2, err := col.ReplaceDocuments(ctx, metas.Keys(), replacements) + if err != nil { + t.Fatalf("Failed to replace documents: %s", describe(err)) + } else if err := errs2.FirstNonNil(); err != nil { + t.Fatalf("Expected no errors, got first: %s", describe(err)) + } + + // Try to remove documents with initial revision (must fail) + initialRevCtx := driver.WithRevisions(ctx, metas.Revs()) + if _, errs, err := col.RemoveDocuments(initialRevCtx, metas.Keys()); err != nil { + t.Fatalf("Expected success, got %s", describe(err)) + } else { + for i, err := range errs { + if !driver.IsPreconditionFailed(err) { + t.Errorf("Expected PreconditionFailedError at %d, got %s", i, describe(err)) + } + } + } + + // Try to remove documents with correct revision (must succeed) + replacedRevCtx := driver.WithRevisions(ctx, metas2.Revs()) + if _, errs, err := col.RemoveDocuments(replacedRevCtx, metas.Keys()); err != nil { + t.Fatalf("Expected success, got %s", describe(err)) + } else if err := errs.FirstNonNil(); err != nil { + t.Fatalf("Expected no errors, got first: %s", describe(err)) + } + + // Should not longer exist + for i, meta := range metas { + var readDoc Account + if _, err := col.ReadDocument(ctx, meta.Key, &readDoc); 
!driver.IsNotFound(err) {
+			t.Errorf("Expected NotFoundError at %d, got %s", i, describe(err))
+		}
+	}
+}
+
+// TestRemoveDocumentsKeyEmpty removes a document with an empty key.
+func TestRemoveDocumentsKeyEmpty(t *testing.T) {
+	c := createClientFromEnv(t, true)
+	db := ensureDatabase(nil, c, "document_test", nil, t)
+	col := ensureCollection(nil, db, "documents_test", nil, t)
+	if _, _, err := col.RemoveDocuments(nil, []string{""}); !driver.IsInvalidArgument(err) {
+		t.Errorf("Expected InvalidArgumentError, got %s", describe(err))
+	}
+}
diff --git a/deps/github.com/arangodb/go-driver/test/documents_replace_test.go b/deps/github.com/arangodb/go-driver/test/documents_replace_test.go
new file mode 100644
index 000000000..2240a9eb4
--- /dev/null
+++ b/deps/github.com/arangodb/go-driver/test/documents_replace_test.go
@@ -0,0 +1,331 @@
+//
+// DISCLAIMER
+//
+// Copyright 2017 ArangoDB GmbH, Cologne, Germany
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Copyright holder is ArangoDB GmbH, Cologne, Germany
+//
+// Author Ewout Prangsma
+//
+
+package test
+
+import (
+	"context"
+	"reflect"
+	"strings"
+	"testing"
+
+	driver "github.com/arangodb/go-driver"
+)
+
+// TestReplaceDocuments creates documents, replaces them and then checks the replacements have succeeded.
+func TestReplaceDocuments(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "document_test", nil, t) + col := ensureCollection(ctx, db, "documents_test", nil, t) + docs := []UserDoc{ + UserDoc{ + "Piere", + 23, + }, + UserDoc{ + "Pioter", + 45, + }, + } + metas, errs, err := col.CreateDocuments(ctx, docs) + if err != nil { + t.Fatalf("Failed to create new document: %s", describe(err)) + } else if err := errs.FirstNonNil(); err != nil { + t.Fatalf("Expected no errors, got first: %s", describe(err)) + } + // Replacement docs + replacements := []Account{ + Account{ + ID: "foo", + User: &UserDoc{}, + }, + Account{ + ID: "foo2", + User: &UserDoc{}, + }, + } + if _, _, err := col.ReplaceDocuments(ctx, metas.Keys(), replacements); err != nil { + t.Fatalf("Failed to replace documents: %s", describe(err)) + } + // Read replaced documents + for i, meta := range metas { + var readDoc Account + if _, err := col.ReadDocument(ctx, meta.Key, &readDoc); err != nil { + t.Fatalf("Failed to read document '%s': %s", meta.Key, describe(err)) + } + if !reflect.DeepEqual(replacements[i], readDoc) { + t.Errorf("Got wrong document %d. Expected %+v, got %+v", i, replacements[i], readDoc) + } + } +} + +// TestReplaceDocumentsReturnOld creates documents, replaces them checks the ReturnOld values. 
+func TestReplaceDocumentsReturnOld(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "document_test", nil, t) + col := ensureCollection(ctx, db, "documents_test", nil, t) + docs := []UserDoc{ + UserDoc{ + "Tim", + 27, + }, + UserDoc{ + "George", + 32, + }, + } + metas, errs, err := col.CreateDocuments(ctx, docs) + if err != nil { + t.Fatalf("Failed to create new documents: %s", describe(err)) + } else if err := errs.FirstNonNil(); err != nil { + t.Fatalf("Expected no errors, got first: %s", describe(err)) + } + // Replace documents + replacements := []Book{ + Book{ + Title: "Golang 1.8", + }, + Book{ + Title: "Dart 1.0", + }, + } + oldDocs := make([]UserDoc, len(docs)) + ctx = driver.WithReturnOld(ctx, oldDocs) + if _, _, err := col.ReplaceDocuments(ctx, metas.Keys(), replacements); err != nil { + t.Fatalf("Failed to replace documents: %s", describe(err)) + } + // Check old document + for i, doc := range docs { + if !reflect.DeepEqual(doc, oldDocs[i]) { + t.Errorf("Got wrong document %d. Expected %+v, got %+v", i, doc, oldDocs[i]) + } + } +} + +// TestReplaceDocumentsReturnNew creates documents, replaces them checks the ReturnNew values. 
+func TestReplaceDocumentsReturnNew(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "document_test", nil, t) + col := ensureCollection(ctx, db, "documents_test", nil, t) + docs := []UserDoc{ + UserDoc{ + "Tim", + 27, + }, + UserDoc{ + "Anna", + 27, + }, + } + metas, errs, err := col.CreateDocuments(ctx, docs) + if err != nil { + t.Fatalf("Failed to create new documents: %s", describe(err)) + } else if err := errs.FirstNonNil(); err != nil { + t.Fatalf("Expected no errors, got first: %s", describe(err)) + } + // Replace documents + replacements := []Book{ + Book{ + Title: "Golang 1.8", + }, + Book{ + Title: "C++ made easy", + }, + } + newDocs := make([]Book, len(docs)) + ctx = driver.WithReturnNew(ctx, newDocs) + if _, _, err := col.ReplaceDocuments(ctx, metas.Keys(), replacements); err != nil { + t.Fatalf("Failed to replace documents: %s", describe(err)) + } + // Check new documents + for i, replacement := range replacements { + expected := replacement + if !reflect.DeepEqual(expected, newDocs[i]) { + t.Errorf("Got wrong document %d. Expected %+v, got %+v", i, expected, newDocs[i]) + } + } +} + +// TestReplaceDocumentsSilent creates documents, replaces them with Silent() and then checks the meta is indeed empty. 
+func TestReplaceDocumentsSilent(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "document_test", nil, t) + col := ensureCollection(ctx, db, "documents_test", nil, t) + docs := []UserDoc{ + UserDoc{ + "Angela", + 91, + }, + UserDoc{ + "Fiona", + 12, + }, + UserDoc{ + "Roos", + 54, + }, + } + metas, errs, err := col.CreateDocuments(ctx, docs) + if err != nil { + t.Fatalf("Failed to create new documents: %s", describe(err)) + } else if err := errs.FirstNonNil(); err != nil { + t.Fatalf("Expected no errors, got first: %s", describe(err)) + } + // Replace documents + replacements := []Book{ + Book{ + Title: "Jungle book", + }, + Book{ + Title: "Database book", + }, + Book{ + Title: "Raft book", + }, + } + ctx = driver.WithSilent(ctx) + if metas, errs, err := col.ReplaceDocuments(ctx, metas.Keys(), replacements); err != nil { + t.Fatalf("Failed to replace documents: %s", describe(err)) + } else { + if len(errs) > 0 { + t.Errorf("Expected 0 errors, got %d", len(errs)) + } + if len(metas) > 0 { + t.Errorf("Expected 0 metas, got %d", len(metas)) + } + } +} + +// TestReplaceDocumentsRevision creates documents, replaces then with a specific (correct) revisions. +// Then it attempts replacements with incorrect revisions which must fail. 
+func TestReplaceDocumentsRevision(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "document_test", nil, t) + col := ensureCollection(ctx, db, "documents_test", nil, t) + docs := []UserDoc{ + UserDoc{ + "Revision", + 33, + }, + UserDoc{ + "Other revision", + 33, + }, + } + metas, errs, err := col.CreateDocuments(ctx, docs) + if err != nil { + t.Fatalf("Failed to create new documents: %s", describe(err)) + } else if err := errs.FirstNonNil(); err != nil { + t.Fatalf("Expected no errors, got first: %s", describe(err)) + } + + // Replace documents with correct revisions + replacements := []Book{ + Book{ + Title: "Jungle book", + }, + Book{ + Title: "Portable book", + }, + } + initialRevCtx := driver.WithRevisions(ctx, metas.Revs()) + var replacedRevCtx context.Context + if metas2, errs, err := col.ReplaceDocuments(initialRevCtx, metas.Keys(), replacements); err != nil { + t.Fatalf("Failed to replace documents: %s", describe(err)) + } else if err := errs.FirstNonNil(); err != nil { + t.Fatalf("Expected no errors, got first: %s", describe(err)) + } else { + replacedRevCtx = driver.WithRevisions(ctx, metas2.Revs()) + if strings.Join(metas2.Revs(), ",") == strings.Join(metas.Revs(), ",") { + t.Errorf("Expected revisions to change, got initial revisions '%s', replaced revisions '%s'", strings.Join(metas.Revs(), ","), strings.Join(metas2.Revs(), ",")) + } + } + + // Replace documents with incorrect revision + replacements[0].Title = "Wrong deal 1" + replacements[1].Title = "Wrong deal 2" + if _, errs, err := col.ReplaceDocuments(initialRevCtx, metas.Keys(), replacements); err != nil { + t.Errorf("Expected success, got %s", describe(err)) + } else { + for i, err := range errs { + if !driver.IsPreconditionFailed(err) { + t.Errorf("Expected PreconditionFailedError at %d, got %s", i, describe(err)) + } + } + } + + // Replace document once more with correct revision + replacements[0].Title = "Good deal 1" + 
replacements[1].Title = "Good deal 2"
+	if _, errs, err := col.ReplaceDocuments(replacedRevCtx, metas.Keys(), replacements); err != nil {
+		t.Errorf("Expected success, got %s", describe(err))
+	} else if err := errs.FirstNonNil(); err != nil {
+		t.Fatalf("Expected no errors, got first: %s", describe(err))
+	}
+}
+
+// TestReplaceDocumentsKeyEmpty replaces a document with an empty key.
+func TestReplaceDocumentsKeyEmpty(t *testing.T) {
+	c := createClientFromEnv(t, true)
+	db := ensureDatabase(nil, c, "document_test", nil, t)
+	col := ensureCollection(nil, db, "documents_test", nil, t)
+	// Replacement document
+	replacement := map[string]interface{}{
+		"name": "Updated",
+	}
+	if _, _, err := col.ReplaceDocuments(nil, []string{""}, replacement); !driver.IsInvalidArgument(err) {
+		t.Errorf("Expected InvalidArgumentError, got %s", describe(err))
+	}
+}
+
+// TestReplaceDocumentsUpdateNil replaces a document with a nil update.
+func TestReplaceDocumentsUpdateNil(t *testing.T) {
+	c := createClientFromEnv(t, true)
+	db := ensureDatabase(nil, c, "document_test", nil, t)
+	col := ensureCollection(nil, db, "documents_test", nil, t)
+	if _, _, err := col.ReplaceDocuments(nil, []string{"validKey"}, nil); !driver.IsInvalidArgument(err) {
+		t.Errorf("Expected InvalidArgumentError, got %s", describe(err))
+	}
+}
+
+// TestReplaceDocumentsUpdateLenDiff replaces documents with a different number of documents than keys.
+func TestReplaceDocumentsUpdateLenDiff(t *testing.T) { + c := createClientFromEnv(t, true) + db := ensureDatabase(nil, c, "document_test", nil, t) + col := ensureCollection(nil, db, "documents_test", nil, t) + replacements := []map[string]interface{}{ + map[string]interface{}{ + "name": "name1", + }, + map[string]interface{}{ + "name": "name2", + }, + } + if _, _, err := col.ReplaceDocuments(nil, []string{"only1"}, replacements); !driver.IsInvalidArgument(err) { + t.Errorf("Expected InvalidArgumentError, got %s", describe(err)) + } +} diff --git a/deps/github.com/arangodb/go-driver/test/documents_update_test.go b/deps/github.com/arangodb/go-driver/test/documents_update_test.go new file mode 100644 index 000000000..f601b3758 --- /dev/null +++ b/deps/github.com/arangodb/go-driver/test/documents_update_test.go @@ -0,0 +1,454 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "context" + "fmt" + "reflect" + "strings" + "testing" + + driver "github.com/arangodb/go-driver" +) + +// TestUpdateDocuments1 creates documents, updates them and then checks the updates have succeeded. 
+func TestUpdateDocuments1(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "document_test", nil, t) + col := ensureCollection(ctx, db, "documents_test", nil, t) + docs := []UserDoc{ + UserDoc{ + "Piere", + 23, + }, + UserDoc{ + "Otto", + 43, + }, + } + metas, errs, err := col.CreateDocuments(ctx, docs) + if err != nil { + t.Fatalf("Failed to create new documents: %s", describe(err)) + } else if err := errs.FirstNonNil(); err != nil { + t.Fatalf("Expected no errors, got first: %s", describe(err)) + } + // Update documents + updates := []map[string]interface{}{ + map[string]interface{}{ + "name": "Updated1", + }, + map[string]interface{}{ + "name": "Updated2", + }, + } + if _, _, err := col.UpdateDocuments(ctx, metas.Keys(), updates); err != nil { + t.Fatalf("Failed to update documents: %s", describe(err)) + } + // Read updated documents + for i, meta := range metas { + var readDoc UserDoc + if _, err := col.ReadDocument(ctx, meta.Key, &readDoc); err != nil { + t.Fatalf("Failed to read document '%s': %s", meta.Key, describe(err)) + } + doc := docs[i] + doc.Name = fmt.Sprintf("Updated%d", i+1) + if !reflect.DeepEqual(doc, readDoc) { + t.Errorf("Got wrong document %d. Expected %+v, got %+v", i, doc, readDoc) + } + } +} + +// TestUpdateDocumentsReturnOld creates documents, updates them checks the ReturnOld values. 
+func TestUpdateDocumentsReturnOld(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "document_test", nil, t) + col := ensureCollection(ctx, db, "documents_test", nil, t) + docs := []UserDoc{ + UserDoc{ + "Tim", + 27, + }, + UserDoc{ + "Foo", + 70, + }, + UserDoc{ + "Mindy", + 70, + }, + } + metas, errs, err := col.CreateDocuments(ctx, docs) + if err != nil { + t.Fatalf("Failed to create new documents: %s", describe(err)) + } else if err := errs.FirstNonNil(); err != nil { + t.Fatalf("Expected no errors, got first: %s", describe(err)) + } + // Update documents + updates := []map[string]interface{}{ + map[string]interface{}{ + "name": "Updated1", + }, + map[string]interface{}{ + "name": "Updated2", + }, + map[string]interface{}{ + "name": "Updated3", + }, + } + oldDocs := make([]UserDoc, len(docs)) + ctx = driver.WithReturnOld(ctx, oldDocs) + if _, _, err := col.UpdateDocuments(ctx, metas.Keys(), updates); err != nil { + t.Fatalf("Failed to update documents: %s", describe(err)) + } + // Check old documents + for i, doc := range docs { + if !reflect.DeepEqual(doc, oldDocs[i]) { + t.Errorf("Got wrong document %d. Expected %+v, got %+v", i, doc, oldDocs[i]) + } + } +} + +// TestUpdateDocumentsReturnNew creates documents, updates them checks the ReturnNew values. 
+func TestUpdateDocumentsReturnNew(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "document_test", nil, t) + col := ensureCollection(ctx, db, "documents_test", nil, t) + docs := []UserDoc{ + UserDoc{ + "Tim", + 27, + }, + UserDoc{ + "Duck", + 21, + }, + UserDoc{ + "Donald", + 53, + }, + } + metas, errs, err := col.CreateDocuments(ctx, docs) + if err != nil { + t.Fatalf("Failed to create new documents: %s", describe(err)) + } else if err := errs.FirstNonNil(); err != nil { + t.Fatalf("Expected no errors, got first: %s", describe(err)) + } + // Update documents + updates := []map[string]interface{}{ + map[string]interface{}{ + "name": "Updated1", + }, + map[string]interface{}{ + "name": "Updated2", + }, + map[string]interface{}{ + "name": "Updated3", + }, + } + newDocs := make([]UserDoc, len(docs)) + ctx = driver.WithReturnNew(ctx, newDocs) + if _, _, err := col.UpdateDocuments(ctx, metas.Keys(), updates); err != nil { + t.Fatalf("Failed to update documents: %s", describe(err)) + } + // Check new documents + for i, doc := range docs { + expected := doc + expected.Name = fmt.Sprintf("Updated%d", i+1) + if !reflect.DeepEqual(expected, newDocs[i]) { + t.Errorf("Got wrong document %d. Expected %+v, got %+v", i, expected, newDocs[i]) + } + } +} + +// TestUpdateDocumentsKeepNullTrue creates documents, updates them with KeepNull(true) and then checks the updates have succeeded. 
+func TestUpdateDocumentsKeepNullTrue(t *testing.T) { + ctx := context.Background() + var conn driver.Connection + c := createClientFromEnv(t, true, &conn) + db := ensureDatabase(ctx, c, "document_test", nil, t) + col := ensureCollection(ctx, db, "documents_test", nil, t) + docs := []Account{ + Account{ + ID: "1234", + User: &UserDoc{ + "Mathilda", + 45, + }, + }, + Account{ + ID: "432", + User: &UserDoc{ + "Clair", + 12, + }, + }, + } + metas, errs, err := col.CreateDocuments(ctx, docs) + if err != nil { + t.Fatalf("Failed to create new documents: %s", describe(err)) + } else if err := errs.FirstNonNil(); err != nil { + t.Fatalf("Expected no errors, got first: %s", describe(err)) + } + + // Update documents + updates := []map[string]interface{}{ + map[string]interface{}{ + "id": "5678", + "user": nil, + }, + map[string]interface{}{ + "id": "742", + "user": nil, + }, + } + if _, _, err := col.UpdateDocuments(driver.WithKeepNull(ctx, true), metas.Keys(), updates); err != nil { + t.Fatalf("Failed to update documents: %s", describe(err)) + } + // Read updated documents + for i, meta := range metas { + var readDoc map[string]interface{} + var rawResponse []byte + ctx = driver.WithRawResponse(ctx, &rawResponse) + if _, err := col.ReadDocument(ctx, meta.Key, &readDoc); err != nil { + t.Fatalf("Failed to read document %d '%s': %s", i, meta.Key, describe(err)) + } + // We parse to this type of map, since unmarshalling nil values to a map of type map[string]interface{} + // will cause the entry to be deleted. 
+ var jsonMap map[string]*driver.RawObject + if err := conn.Unmarshal(rawResponse, &jsonMap); err != nil { + t.Fatalf("Failed to parse raw response: %s", describe(err)) + } + if raw, found := jsonMap["user"]; !found { + t.Errorf("Expected user to be found but got not found") + } else if raw != nil { + t.Errorf("Expected user to be found and nil, got %s", string(*raw)) + } + } +} + +// TestUpdateDocumentsKeepNullFalse creates documents, updates them with KeepNull(false) and then checks the updates have succeeded. +func TestUpdateDocumentsKeepNullFalse(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "document_test", nil, t) + col := ensureCollection(ctx, db, "documents_test", nil, t) + docs := []Account{ + Account{ + ID: "1234", + User: &UserDoc{ + "Mathilda", + 45, + }, + }, + Account{ + ID: "364", + User: &UserDoc{ + "Jo", + 42, + }, + }, + } + metas, errs, err := col.CreateDocuments(ctx, docs) + if err != nil { + t.Fatalf("Failed to create new documents: %s", describe(err)) + } else if err := errs.FirstNonNil(); err != nil { + t.Fatalf("Expected no errors, got first: %s", describe(err)) + } + // Update document + updates := []map[string]interface{}{ + map[string]interface{}{ + "id": "5678", + "user": nil, + }, + map[string]interface{}{ + "id": "753", + "user": nil, + }, + } + if _, _, err := col.UpdateDocuments(driver.WithKeepNull(ctx, false), metas.Keys(), updates); err != nil { + t.Fatalf("Failed to update documents: %s", describe(err)) + } + // Read updated documents + for i, meta := range metas { + readDoc := docs[i] + if _, err := col.ReadDocument(ctx, meta.Key, &readDoc); err != nil { + t.Fatalf("Failed to read document '%s': %s", meta.Key, describe(err)) + } + if readDoc.User == nil { + t.Errorf("Expected user to be untouched, got %v", readDoc.User) + } + } +} + +// TestUpdateDocumentsSilent creates documents, updates them with Silent() and then checks the metas are indeed empty. 
+func TestUpdateDocumentsSilent(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "document_test", nil, t) + col := ensureCollection(ctx, db, "documents_test", nil, t) + docs := []UserDoc{ + UserDoc{ + "Angela", + 91, + }, + UserDoc{ + "Jo", + 19, + }, + } + metas, _, err := col.CreateDocuments(ctx, docs) + if err != nil { + t.Fatalf("Failed to create new documents: %s", describe(err)) + } + // Update documents + updates := []map[string]interface{}{ + map[string]interface{}{ + "age": "61", + }, + map[string]interface{}{ + "age": "16", + }, + } + ctx = driver.WithSilent(ctx) + if metas, errs, err := col.UpdateDocuments(ctx, metas.Keys(), updates); err != nil { + t.Fatalf("Failed to update documents: %s", describe(err)) + } else if err := errs.FirstNonNil(); err != nil { + t.Fatalf("Expected no errors, got first: %s", describe(err)) + } else if strings.Join(metas.Keys(), "") != "" { + t.Errorf("Expected empty meta, got %v", metas) + } +} + +// TestUpdateDocumentsRevision creates documents, updates them with a specific (correct) revisions. +// Then it attempts an update with an incorrect revisions which must fail. 
+func TestUpdateDocumentsRevision(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "document_test", nil, t) + col := ensureCollection(ctx, db, "documents_test", nil, t) + docs := []UserDoc{ + UserDoc{ + "Revision", + 33, + }, + UserDoc{ + "Revision2", + 34, + }, + } + metas, errs, err := col.CreateDocuments(ctx, docs) + if err != nil { + t.Fatalf("Failed to create new document: %s", describe(err)) + } else if len(metas) != len(docs) { + t.Fatalf("Expected %d metas, got %d", len(docs), len(metas)) + } else if err := errs.FirstNonNil(); err != nil { + t.Fatalf("Expected no errors, got first: %s", describe(err)) + } + + // Update documents with correct revisions + updates := []map[string]interface{}{ + map[string]interface{}{ + "age": 34, + }, + map[string]interface{}{ + "age": 77, + }, + } + initialRevCtx := driver.WithRevisions(ctx, metas.Revs()) + var updatedRevCtx context.Context + if metas2, _, err := col.UpdateDocuments(initialRevCtx, metas.Keys(), updates); err != nil { + t.Fatalf("Failed to update documents: %s", describe(err)) + } else { + updatedRevCtx = driver.WithRevisions(ctx, metas2.Revs()) + if strings.Join(metas2.Revs(), ",") == strings.Join(metas.Revs(), ",") { + t.Errorf("Expected revision to change, got initial revision '%s', updated revision '%s'", strings.Join(metas.Revs(), ","), strings.Join(metas2.Revs(), ",")) + } + } + + // Update documents with incorrect revisions + updates[0]["age"] = 35 + var rawResponse []byte + if _, errs, err := col.UpdateDocuments(driver.WithRawResponse(initialRevCtx, &rawResponse), metas.Keys(), updates); err != nil { + t.Errorf("Expected success, got %s", describe(err)) + } else { + for _, err := range errs { + if !driver.IsPreconditionFailed(err) { + t.Errorf("Expected PreconditionFailedError, got %s (resp: %s", describe(err), string(rawResponse)) + } + } + } + + // Update documents once more with correct revisions + updates[0]["age"] = 36 + if _, _, err 
:= col.UpdateDocuments(updatedRevCtx, metas.Keys(), updates); err != nil {
+		t.Errorf("Expected success, got %s", describe(err))
+	}
+}
+
+// TestUpdateDocumentsKeyEmpty updates documents with an empty key.
+func TestUpdateDocumentsKeyEmpty(t *testing.T) {
+	c := createClientFromEnv(t, true)
+	db := ensureDatabase(nil, c, "document_test", nil, t)
+	col := ensureCollection(nil, db, "documents_test", nil, t)
+	// Update document
+	updates := []map[string]interface{}{
+		map[string]interface{}{
+			"name": "Updated",
+		},
+	}
+	if _, _, err := col.UpdateDocuments(nil, []string{""}, updates); !driver.IsInvalidArgument(err) {
+		t.Errorf("Expected InvalidArgumentError, got %s", describe(err))
+	}
+}
+
+// TestUpdateDocumentsUpdateNil updates documents with a nil update.
+func TestUpdateDocumentsUpdateNil(t *testing.T) {
+	c := createClientFromEnv(t, true)
+	db := ensureDatabase(nil, c, "document_test", nil, t)
+	col := ensureCollection(nil, db, "documents_test", nil, t)
+	if _, _, err := col.UpdateDocuments(nil, []string{"validKey"}, nil); !driver.IsInvalidArgument(err) {
+		t.Errorf("Expected InvalidArgumentError, got %s", describe(err))
+	}
+}
+
+// TestUpdateDocumentsUpdateLenDiff updates documents with a different number of updates than keys.
+func TestUpdateDocumentsUpdateLenDiff(t *testing.T) { + c := createClientFromEnv(t, true) + db := ensureDatabase(nil, c, "document_test", nil, t) + col := ensureCollection(nil, db, "documents_test", nil, t) + updates := []map[string]interface{}{ + map[string]interface{}{ + "name": "name1", + }, + map[string]interface{}{ + "name": "name2", + }, + } + if _, _, err := col.UpdateDocuments(nil, []string{"only1"}, updates); !driver.IsInvalidArgument(err) { + t.Errorf("Expected InvalidArgumentError, got %s", describe(err)) + } +} diff --git a/deps/github.com/arangodb/go-driver/test/edge_collection_test.go b/deps/github.com/arangodb/go-driver/test/edge_collection_test.go new file mode 100644 index 000000000..96056975b --- /dev/null +++ b/deps/github.com/arangodb/go-driver/test/edge_collection_test.go @@ -0,0 +1,208 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "context" + "strings" + "testing" + + driver "github.com/arangodb/go-driver" +) + +// ensureEdgeCollection returns the edge collection with given name, creating it if needed. 
+func ensureEdgeCollection(ctx context.Context, g driver.Graph, collection string, from, to []string, t *testing.T) driver.Collection { + ec, _, err := g.EdgeCollection(ctx, collection) + if driver.IsNotFound(err) { + ec, err := g.CreateEdgeCollection(ctx, collection, driver.VertexConstraints{From: from, To: to}) + if err != nil { + t.Fatalf("Failed to create edge collection: %s", describe(err)) + } + return ec + } else if err != nil { + t.Fatalf("Failed to open edge collection: %s", describe(err)) + } + return ec +} + +// TestCreateEdgeCollection creates a graph and then adds an edge collection in it +func TestCreateEdgeCollection(t *testing.T) { + c := createClientFromEnv(t, true) + db := ensureDatabase(nil, c, "edge_collection_test", nil, t) + name := "test_create_edge_collection" + g, err := db.CreateGraph(nil, name, nil) + if err != nil { + t.Fatalf("Failed to create graph '%s': %s", name, describe(err)) + } + + // List edge collections, must be empty + if list, _, err := g.EdgeCollections(nil); err != nil { + t.Errorf("EdgeCollections failed: %s", describe(err)) + } else if len(list) > 0 { + t.Errorf("EdgeCollections return %d edge collections, expected 0", len(list)) + } + + // Now create an edge collection + colName := "create_edge_collection_friends" + if ec, err := g.CreateEdgeCollection(nil, colName, driver.VertexConstraints{From: []string{"person"}, To: []string{"person"}}); err != nil { + t.Errorf("CreateEdgeCollection failed: %s", describe(err)) + } else if ec.Name() != colName { + t.Errorf("Invalid name, expected '%s', got '%s'", colName, ec.Name()) + } + + assertCollection(nil, db, colName, t) + assertCollection(nil, db, "person", t) + + // List edge collections, must be contain 'friends' + if list, constraints, err := g.EdgeCollections(nil); err != nil { + t.Errorf("EdgeCollections failed: %s", describe(err)) + } else { + if len(list) != 1 { + t.Errorf("EdgeCollections return %d edge collections, expected 1", len(list)) + } else if list[0].Name() 
!= colName {
+			t.Errorf("Invalid list[0].name, expected '%s', got '%s'", colName, list[0].Name())
+		}
+		if len(constraints) != 1 {
+			t.Errorf("EdgeCollections return %d constraints, expected 1", len(constraints))
+		} else {
+			if strings.Join(constraints[0].From, ",") != "person" {
+				t.Errorf("Invalid constraints[0].From, expected ['person'], got %q", constraints[0].From)
+			}
+			if strings.Join(constraints[0].To, ",") != "person" {
+				t.Errorf("Invalid constraints[0].To, expected ['person'], got %q", constraints[0].To)
+			}
+		}
+	}
+
+	// Friends edge collection must exist
+	if found, err := g.EdgeCollectionExists(nil, colName); err != nil {
+		t.Errorf("EdgeCollectionExists failed: %s", describe(err))
+	} else if !found {
+		t.Errorf("EdgeCollectionExists return false, expected true")
+	}
+
+	// Open friends edge collection must exist
+	if ec, _, err := g.EdgeCollection(nil, colName); err != nil {
+		t.Errorf("EdgeCollection failed: %s", describe(err))
+	} else if ec.Name() != colName {
+		t.Errorf("EdgeCollection return invalid collection, expected '%s', got '%s'", colName, ec.Name())
+	}
+}
+
+// TestRemoveEdgeCollection creates a graph and then adds an edge collection in it and then removes the edge collection.
+func TestRemoveEdgeCollection(t *testing.T) { + c := createClientFromEnv(t, true) + db := ensureDatabase(nil, c, "edge_collection_test", nil, t) + name := "test_remove_edge_collection" + g, err := db.CreateGraph(nil, name, nil) + if err != nil { + t.Fatalf("Failed to create graph '%s': %s", name, describe(err)) + } + + // Now create an edge collection + colName := "remove_edge_collection_friends" + ec, err := g.CreateEdgeCollection(nil, colName, driver.VertexConstraints{From: []string{"person"}, To: []string{"person"}}) + if err != nil { + t.Fatalf("CreateEdgeCollection failed: %s", describe(err)) + } else if ec.Name() != colName { + t.Errorf("Invalid name, expected '%s', got '%s'", colName, ec.Name()) + } + + // Friends edge collection must exits + if found, err := g.EdgeCollectionExists(nil, colName); err != nil { + t.Errorf("EdgeCollectionExists failed: %s", describe(err)) + } else if !found { + t.Errorf("EdgeCollectionExists return false, expected true") + } + + // Remove edge collection + if err := ec.Remove(nil); err != nil { + t.Errorf("Remove failed: %s", describe(err)) + } + + // Friends edge collection must NOT exits + if found, err := g.EdgeCollectionExists(nil, colName); err != nil { + t.Errorf("EdgeCollectionExists failed: %s", describe(err)) + } else if found { + t.Errorf("EdgeCollectionExists return true, expected false") + } + + // Collection must still exist in database + assertCollection(nil, db, colName, t) +} + +// TestSetVertexConstraints creates a graph and then adds an edge collection in it and then removes the edge collection. 
+func TestSetVertexConstraints(t *testing.T) { + c := createClientFromEnv(t, true) + db := ensureDatabase(nil, c, "edge_collection_test", nil, t) + name := "set_vertex_constraints" + g, err := db.CreateGraph(nil, name, nil) + if err != nil { + t.Fatalf("Failed to create graph '%s': %s", name, describe(err)) + } + + // Now create an edge collection + colName := "set_vertex_constraints_collection" + ec, err := g.CreateEdgeCollection(nil, colName, driver.VertexConstraints{From: []string{"cola"}, To: []string{"colb"}}) + if err != nil { + t.Fatalf("CreateEdgeCollection failed: %s", describe(err)) + } else if ec.Name() != colName { + t.Errorf("Invalid name, expected '%s', got '%s'", colName, ec.Name()) + } + + // Edge collection must exits + if found, err := g.EdgeCollectionExists(nil, colName); err != nil { + t.Errorf("EdgeCollectionExists failed: %s", describe(err)) + } else if !found { + t.Errorf("EdgeCollectionExists return false, expected true") + } + + // Edge collection must have proper constraints + if _, constraints, err := g.EdgeCollection(nil, colName); err != nil { + t.Errorf("EdgeCollection failed: %s", describe(err)) + } else { + if strings.Join(constraints.From, ",") != "cola" { + t.Errorf("Invalid from constraints. Expected ['cola'], got %q", constraints.From) + } + if strings.Join(constraints.To, ",") != "colb" { + t.Errorf("Invalid to constraints. Expected ['colb'], got %q", constraints.To) + } + } + + // Modify constraints + if err := g.SetVertexConstraints(nil, colName, driver.VertexConstraints{From: []string{"colC"}, To: []string{"colD"}}); err != nil { + t.Errorf("SetVertexConstraints failed: %s", describe(err)) + } + + // Edge collection must have modified constraints + if _, constraints, err := g.EdgeCollection(nil, colName); err != nil { + t.Errorf("EdgeCollection failed: %s", describe(err)) + } else { + if strings.Join(constraints.From, ",") != "colC" { + t.Errorf("Invalid from constraints. 
Expected ['colC'], got %q", constraints.From) + } + if strings.Join(constraints.To, ",") != "colD" { + t.Errorf("Invalid to constraints. Expected ['colD'], got %q", constraints.To) + } + } +} diff --git a/deps/github.com/arangodb/go-driver/test/edge_create_test.go b/deps/github.com/arangodb/go-driver/test/edge_create_test.go new file mode 100644 index 000000000..8ebdf31a4 --- /dev/null +++ b/deps/github.com/arangodb/go-driver/test/edge_create_test.go @@ -0,0 +1,176 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "context" + "reflect" + "testing" + + driver "github.com/arangodb/go-driver" +) + +// TestCreateEdge creates an edge and then checks that it exists. 
+func TestCreateEdge(t *testing.T) { + var ctx context.Context + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "edge_test", nil, t) + prefix := "create_edge_test_" + g := ensureGraph(ctx, db, prefix+"graph", nil, t) + ec := ensureEdgeCollection(ctx, g, prefix+"citiesPerState", []string{prefix + "city"}, []string{prefix + "state"}, t) + cities := ensureCollection(ctx, db, prefix+"city", nil, t) + states := ensureCollection(ctx, db, prefix+"state", nil, t) + from := createDocument(ctx, cities, map[string]interface{}{"name": "Venlo"}, t) + to := createDocument(ctx, states, map[string]interface{}{"name": "Limburg"}, t) + + meta, err := ec.CreateDocument(ctx, driver.EdgeDocument{From: from.ID, To: to.ID}) + if err != nil { + t.Fatalf("Failed to create new edge: %s", describe(err)) + } + // Document must exists now + if found, err := ec.DocumentExists(nil, meta.Key); err != nil { + t.Fatalf("DocumentExists failed for '%s': %s", meta.Key, describe(err)) + } else if !found { + t.Errorf("DocumentExists returned false for '%s', expected true", meta.Key) + } + // Read edge + var readDoc driver.EdgeDocument + if _, err := ec.ReadDocument(ctx, meta.Key, &readDoc); err != nil { + t.Fatalf("Failed to read edge '%s': %s", meta.Key, describe(err)) + } else { + if readDoc.From != from.ID { + t.Errorf("Got invalid _from. Expected '%s', got '%s'", from.ID, readDoc.From) + } + if readDoc.To != to.ID { + t.Errorf("Got invalid _to. Expected '%s', got '%s'", to.ID, readDoc.To) + } + } +} + +// TestCreateCustomEdge creates an edge with a custom type and then checks that it exists. 
+func TestCreateCustomEdge(t *testing.T) { + var ctx context.Context + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "edge_test", nil, t) + prefix := "create_custom_edge_" + g := ensureGraph(ctx, db, prefix+"graph", nil, t) + ec := ensureEdgeCollection(ctx, g, prefix+"citiesPerState", []string{prefix + "city"}, []string{prefix + "state"}, t) + cities := ensureCollection(ctx, db, prefix+"city", nil, t) + states := ensureCollection(ctx, db, prefix+"state", nil, t) + from := createDocument(ctx, cities, map[string]interface{}{"name": "Venlo"}, t) + to := createDocument(ctx, states, map[string]interface{}{"name": "Limburg"}, t) + + doc := RouteEdge{ + From: from.ID.String(), + To: to.ID.String(), + Distance: 7, + } + meta, err := ec.CreateDocument(nil, doc) + if err != nil { + t.Fatalf("Failed to create new edge: %s", describe(err)) + } + // Document must exists now + var readDoc RouteEdge + if _, err := ec.ReadDocument(nil, meta.Key, &readDoc); err != nil { + t.Fatalf("Failed to read edge '%s': %s", meta.Key, describe(err)) + } else if !reflect.DeepEqual(doc, readDoc) { + t.Errorf("Got invalid return document. Expected '%+v', got '%+v'", doc, readDoc) + } +} + +// TestCreateEdgeReturnNew creates a document and checks the document returned in in ReturnNew. 
+func TestCreateEdgeReturnNew(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + skipBelowVersion(c, "3.4", t) // See https://github.com/arangodb/arangodb/issues/2363 + db := ensureDatabase(ctx, c, "edge_test", nil, t) + prefix := "create_edge_return_new_" + g := ensureGraph(ctx, db, prefix+"graph", nil, t) + ec := ensureEdgeCollection(ctx, g, prefix+"citiesPerState", []string{prefix + "city"}, []string{prefix + "state"}, t) + cities := ensureCollection(ctx, db, prefix+"city", nil, t) + states := ensureCollection(ctx, db, prefix+"state", nil, t) + from := createDocument(ctx, cities, map[string]interface{}{"name": "Venlo"}, t) + to := createDocument(ctx, states, map[string]interface{}{"name": "Limburg"}, t) + + doc := RouteEdge{ + From: from.ID.String(), + To: to.ID.String(), + Distance: 7, + } + var newDoc RouteEdge + meta, err := ec.CreateDocument(driver.WithReturnNew(ctx, &newDoc), doc) + if err != nil { + t.Fatalf("Failed to create new edge: %s", describe(err)) + } + // NewDoc must equal doc + if !reflect.DeepEqual(doc, newDoc) { + t.Errorf("Got wrong ReturnNew document. Expected %+v, got %+v", doc, newDoc) + } + // Document must exists now + var readDoc RouteEdge + if _, err := ec.ReadDocument(ctx, meta.Key, &readDoc); err != nil { + t.Fatalf("Failed to read document '%s': %s", meta.Key, describe(err)) + } + if !reflect.DeepEqual(doc, readDoc) { + t.Errorf("Got wrong document. Expected %+v, got %+v", doc, readDoc) + } +} + +// TestCreateEdgeSilent creates a document with WithSilent. 
+func TestCreateEdgeSilent(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "edge_test", nil, t) + prefix := "create_edge_silent_" + g := ensureGraph(ctx, db, prefix+"graph", nil, t) + ec := ensureEdgeCollection(ctx, g, prefix+"citiesPerState", []string{prefix + "city"}, []string{prefix + "state"}, t) + cities := ensureCollection(ctx, db, prefix+"city", nil, t) + states := ensureCollection(ctx, db, prefix+"state", nil, t) + from := createDocument(ctx, cities, map[string]interface{}{"name": "Venlo"}, t) + to := createDocument(ctx, states, map[string]interface{}{"name": "Limburg"}, t) + + doc := RouteEdge{ + From: from.ID.String(), + To: to.ID.String(), + Distance: 7, + } + if meta, err := ec.CreateDocument(driver.WithSilent(ctx), doc); err != nil { + t.Fatalf("Failed to create new document: %s", describe(err)) + } else if meta.Key != "" { + t.Errorf("Expected empty meta, got %v", meta) + } +} + +// TestCreateEdgeNil creates a document with a nil document. 
+func TestCreateEdgeNil(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "edge_test", nil, t) + prefix := "create_edge_nil_" + g := ensureGraph(ctx, db, prefix+"graph", nil, t) + ec := ensureEdgeCollection(ctx, g, prefix+"citiesPerState", []string{prefix + "city"}, []string{prefix + "state"}, t) + + if _, err := ec.CreateDocument(nil, nil); !driver.IsInvalidArgument(err) { + t.Fatalf("Expected InvalidArgumentError, got %s", describe(err)) + } +} diff --git a/deps/github.com/arangodb/go-driver/test/edge_remove_test.go b/deps/github.com/arangodb/go-driver/test/edge_remove_test.go new file mode 100644 index 000000000..e8af639c3 --- /dev/null +++ b/deps/github.com/arangodb/go-driver/test/edge_remove_test.go @@ -0,0 +1,201 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "context" + "testing" + + driver "github.com/arangodb/go-driver" +) + +// TestRemoveEdge creates a document, remove it and then checks the removal has succeeded. 
+func TestRemoveEdge(t *testing.T) { + var ctx context.Context + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "edge_test", nil, t) + prefix := "remove_edge_" + g := ensureGraph(ctx, db, prefix+"graph", nil, t) + ec := ensureEdgeCollection(ctx, g, prefix+"citiesPerState", []string{prefix + "city"}, []string{prefix + "state"}, t) + cities := ensureCollection(ctx, db, prefix+"city", nil, t) + states := ensureCollection(ctx, db, prefix+"state", nil, t) + from := createDocument(ctx, cities, map[string]interface{}{"name": "Venlo"}, t) + to := createDocument(ctx, states, map[string]interface{}{"name": "Limburg"}, t) + + doc := RouteEdge{ + From: from.ID.String(), + To: to.ID.String(), + Distance: 32, + } + meta, err := ec.CreateDocument(ctx, doc) + if err != nil { + t.Fatalf("Failed to create new document: %s", describe(err)) + } + if _, err := ec.RemoveDocument(ctx, meta.Key); err != nil { + t.Fatalf("Failed to remove document '%s': %s", meta.Key, describe(err)) + } + // Should not longer exist + var readDoc RouteEdge + if _, err := ec.ReadDocument(ctx, meta.Key, &readDoc); !driver.IsNotFound(err) { + t.Fatalf("Expected NotFoundError, got %s", describe(err)) + } +} + +// TestRemoveEdgeReturnOld creates a document, removes it with ReturnOld, which is an invalid argument. 
+func TestRemoveEdgeReturnOld(t *testing.T) { + var ctx context.Context + c := createClientFromEnv(t, true) + skipBelowVersion(c, "3.4", t) // See https://github.com/arangodb/arangodb/issues/2363 + db := ensureDatabase(ctx, c, "edge_test", nil, t) + prefix := "remove_edge_returnOld_" + g := ensureGraph(ctx, db, prefix+"graph", nil, t) + ec := ensureEdgeCollection(ctx, g, prefix+"citiesPerState", []string{prefix + "city"}, []string{prefix + "state"}, t) + cities := ensureCollection(ctx, db, prefix+"city", nil, t) + states := ensureCollection(ctx, db, prefix+"state", nil, t) + from := createDocument(ctx, cities, map[string]interface{}{"name": "Venlo"}, t) + to := createDocument(ctx, states, map[string]interface{}{"name": "Limburg"}, t) + + doc := RouteEdge{ + From: from.ID.String(), + To: to.ID.String(), + Distance: 32, + } + meta, err := ec.CreateDocument(ctx, doc) + if err != nil { + t.Fatalf("Failed to create new document: %s", describe(err)) + } + var old RouteEdge + ctx = driver.WithReturnOld(ctx, &old) + if _, err := ec.RemoveDocument(ctx, meta.Key); !driver.IsInvalidArgument(err) { + t.Errorf("Expected InvalidArgumentError, got %s", describe(err)) + } +} + +// TestRemoveEdgeSilent creates a document, removes it with Silent() and then checks the meta is indeed empty. 
+func TestRemoveEdgeSilent(t *testing.T) { + var ctx context.Context + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "edge_test", nil, t) + prefix := "remove_edge_silent_" + g := ensureGraph(ctx, db, prefix+"graph", nil, t) + ec := ensureEdgeCollection(ctx, g, prefix+"citiesPerState", []string{prefix + "city"}, []string{prefix + "state"}, t) + cities := ensureCollection(ctx, db, prefix+"city", nil, t) + states := ensureCollection(ctx, db, prefix+"state", nil, t) + from := createDocument(ctx, cities, map[string]interface{}{"name": "Venlo"}, t) + to := createDocument(ctx, states, map[string]interface{}{"name": "Limburg"}, t) + + doc := RouteEdge{ + From: from.ID.String(), + To: to.ID.String(), + Distance: 77, + } + meta, err := ec.CreateDocument(ctx, doc) + if err != nil { + t.Fatalf("Failed to create new document: %s", describe(err)) + } + ctx = driver.WithSilent(ctx) + if rmeta, err := ec.RemoveDocument(ctx, meta.Key); err != nil { + t.Fatalf("Failed to remove document '%s': %s", meta.Key, describe(err)) + } else if rmeta.Key != "" { + t.Errorf("Expected empty meta, got %v", rmeta) + } + // Should not longer exist + var readDoc RouteEdge + if _, err := ec.ReadDocument(ctx, meta.Key, &readDoc); !driver.IsNotFound(err) { + t.Fatalf("Expected NotFoundError, got %s", describe(err)) + } +} + +// TestRemoveEdgeRevision creates a document, removes it with an incorrect revision. 
+func TestRemoveEdgeRevision(t *testing.T) { + var ctx context.Context + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "edge_test", nil, t) + prefix := "remove_edge_revision_" + g := ensureGraph(ctx, db, prefix+"graph", nil, t) + ec := ensureEdgeCollection(ctx, g, prefix+"citiesPerState", []string{prefix + "city"}, []string{prefix + "state"}, t) + cities := ensureCollection(ctx, db, prefix+"city", nil, t) + states := ensureCollection(ctx, db, prefix+"state", nil, t) + from := createDocument(ctx, cities, map[string]interface{}{"name": "Venlo"}, t) + to := createDocument(ctx, states, map[string]interface{}{"name": "Limburg"}, t) + + doc := RouteEdge{ + From: from.ID.String(), + To: to.ID.String(), + Distance: 77, + } + meta, err := ec.CreateDocument(ctx, doc) + if err != nil { + t.Fatalf("Failed to create new document: %s", describe(err)) + } + + // Replace the document to get another revision + replacement := RouteEdge{ + From: to.ID.String(), + To: from.ID.String(), + Distance: 88, + } + meta2, err := ec.ReplaceDocument(ctx, meta.Key, replacement) + if err != nil { + t.Fatalf("Failed to replace document '%s': %s", meta.Key, describe(err)) + } + + // Try to remove document with initial revision (must fail) + initialRevCtx := driver.WithRevision(ctx, meta.Rev) + if _, err := ec.RemoveDocument(initialRevCtx, meta.Key); !driver.IsPreconditionFailed(err) { + t.Fatalf("Expected PreconditionFailedError, got %s", describe(err)) + } + + // Try to remove document with correct revision (must succeed) + replacedRevCtx := driver.WithRevision(ctx, meta2.Rev) + if _, err := ec.RemoveDocument(replacedRevCtx, meta.Key); err != nil { + t.Fatalf("Expected success, got %s", describe(err)) + } + + // Should not longer exist + var readDoc RouteEdge + if _, err := ec.ReadDocument(ctx, meta.Key, &readDoc); !driver.IsNotFound(err) { + t.Fatalf("Expected NotFoundError, got %s", describe(err)) + } + + // Document must not exists now + if found, err := 
ec.DocumentExists(nil, meta.Key); err != nil { + t.Fatalf("DocumentExists failed for '%s': %s", meta.Key, describe(err)) + } else if found { + t.Errorf("DocumentExists returned true for '%s', expected false", meta.Key) + } +} + +// TestRemoveEdgeKeyEmpty removes a document it with an empty key. +func TestRemoveEdgeKeyEmpty(t *testing.T) { + var ctx context.Context + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "edge_test", nil, t) + prefix := "remove_edge_nil_" + g := ensureGraph(ctx, db, prefix+"graph", nil, t) + ec := ensureEdgeCollection(ctx, g, prefix+"citiesPerState", []string{prefix + "city"}, []string{prefix + "state"}, t) + + if _, err := ec.RemoveDocument(nil, ""); !driver.IsInvalidArgument(err) { + t.Errorf("Expected InvalidArgumentError, got %s", describe(err)) + } +} diff --git a/deps/github.com/arangodb/go-driver/test/edge_replace_test.go b/deps/github.com/arangodb/go-driver/test/edge_replace_test.go new file mode 100644 index 000000000..957e85660 --- /dev/null +++ b/deps/github.com/arangodb/go-driver/test/edge_replace_test.go @@ -0,0 +1,275 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "context" + "reflect" + "testing" + + driver "github.com/arangodb/go-driver" +) + +// TestReplaceEdge creates a document, replaces it and then checks the replacement has succeeded. +func TestReplaceEdge(t *testing.T) { + var ctx context.Context + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "edge_test", nil, t) + prefix := "replace_edge_" + g := ensureGraph(ctx, db, prefix+"graph", nil, t) + ec := ensureEdgeCollection(ctx, g, prefix+"citiesPerState", []string{prefix + "city"}, []string{prefix + "state"}, t) + cities := ensureCollection(ctx, db, prefix+"city", nil, t) + states := ensureCollection(ctx, db, prefix+"state", nil, t) + from := createDocument(ctx, cities, map[string]interface{}{"name": "Venlo"}, t) + to := createDocument(ctx, states, map[string]interface{}{"name": "Limburg"}, t) + + doc := RouteEdge{ + From: from.ID.String(), + To: to.ID.String(), + Distance: 123, + } + meta, err := ec.CreateDocument(ctx, doc) + if err != nil { + t.Fatalf("Failed to create new document: %s", describe(err)) + } + // Replacement doc + replacement := RouteEdge{ + From: to.ID.String(), + To: from.ID.String(), + Distance: 567, + } + if _, err := ec.ReplaceDocument(ctx, meta.Key, replacement); err != nil { + t.Fatalf("Failed to replace document '%s': %s", meta.Key, describe(err)) + } + // Read replaces document + var readDoc RouteEdge + if _, err := ec.ReadDocument(ctx, meta.Key, &readDoc); err != nil { + t.Fatalf("Failed to read document '%s': %s", meta.Key, describe(err)) + } + if !reflect.DeepEqual(replacement, readDoc) { + t.Errorf("Got wrong document. Expected %+v, got %+v", replacement, readDoc) + } +} + +// TestReplaceEdgeReturnOld creates a document, replaces it checks the ReturnOld value. 
+func TestReplaceEdgeReturnOld(t *testing.T) { + var ctx context.Context + c := createClientFromEnv(t, true) + skipBelowVersion(c, "3.4", t) // See https://github.com/arangodb/arangodb/issues/2363 + db := ensureDatabase(ctx, c, "edge_test", nil, t) + prefix := "replace_edge_returnOld_" + g := ensureGraph(ctx, db, prefix+"graph", nil, t) + ec := ensureEdgeCollection(ctx, g, prefix+"citiesPerState", []string{prefix + "city"}, []string{prefix + "state"}, t) + cities := ensureCollection(ctx, db, prefix+"city", nil, t) + states := ensureCollection(ctx, db, prefix+"state", nil, t) + from := createDocument(ctx, cities, map[string]interface{}{"name": "Venlo"}, t) + to := createDocument(ctx, states, map[string]interface{}{"name": "Limburg"}, t) + + doc := RouteEdge{ + From: from.ID.String(), + To: to.ID.String(), + Distance: 123, + } + meta, err := ec.CreateDocument(ctx, doc) + if err != nil { + t.Fatalf("Failed to create new document: %s", describe(err)) + } + // Replace document + replacement := RouteEdge{ + From: to.ID.String(), + To: from.ID.String(), + Distance: 246, + } + var old RouteEdge + ctx = driver.WithReturnOld(ctx, &old) + if _, err := ec.ReplaceDocument(ctx, meta.Key, replacement); err != nil { + t.Fatalf("Failed to replace document '%s': %s", meta.Key, describe(err)) + } + // Check old document + if !reflect.DeepEqual(doc, old) { + t.Errorf("Got wrong document. Expected %+v, got %+v", doc, old) + } +} + +// TestReplaceEdgeReturnNew creates a document, replaces it checks the ReturnNew value. 
+func TestReplaceEdgeReturnNew(t *testing.T) { + var ctx context.Context + c := createClientFromEnv(t, true) + skipBelowVersion(c, "3.4", t) // See https://github.com/arangodb/arangodb/issues/2363 + db := ensureDatabase(ctx, c, "edge_test", nil, t) + prefix := "replace_edge_returnNew_" + g := ensureGraph(ctx, db, prefix+"graph", nil, t) + ec := ensureEdgeCollection(ctx, g, prefix+"citiesPerState", []string{prefix + "city"}, []string{prefix + "state"}, t) + cities := ensureCollection(ctx, db, prefix+"city", nil, t) + states := ensureCollection(ctx, db, prefix+"state", nil, t) + from := createDocument(ctx, cities, map[string]interface{}{"name": "Venlo"}, t) + to := createDocument(ctx, states, map[string]interface{}{"name": "Limburg"}, t) + + doc := RouteEdge{ + From: from.ID.String(), + To: to.ID.String(), + Distance: 123, + } + meta, err := ec.CreateDocument(ctx, doc) + if err != nil { + t.Fatalf("Failed to create new document: %s", describe(err)) + } + // Update document + replacement := RouteEdge{ + From: to.ID.String(), + To: from.ID.String(), + Distance: 246, + } + var newDoc RouteEdge + ctx = driver.WithReturnNew(ctx, &newDoc) + if _, err := ec.ReplaceDocument(ctx, meta.Key, replacement); err != nil { + t.Fatalf("Failed to replace document '%s': %s", meta.Key, describe(err)) + } + // Check new document + expected := replacement + if !reflect.DeepEqual(expected, newDoc) { + t.Errorf("Got wrong document. Expected %+v, got %+v", expected, newDoc) + } +} + +// TestReplaceEdgeSilent creates a document, replaces it with Silent() and then checks the meta is indeed empty. 
+func TestReplaceEdgeSilent(t *testing.T) { + var ctx context.Context + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "edge_test", nil, t) + prefix := "replace_edge_returnNew_" + g := ensureGraph(ctx, db, prefix+"graph", nil, t) + ec := ensureEdgeCollection(ctx, g, prefix+"citiesPerState", []string{prefix + "city"}, []string{prefix + "state"}, t) + cities := ensureCollection(ctx, db, prefix+"city", nil, t) + states := ensureCollection(ctx, db, prefix+"state", nil, t) + from := createDocument(ctx, cities, map[string]interface{}{"name": "Venlo"}, t) + to := createDocument(ctx, states, map[string]interface{}{"name": "Limburg"}, t) + + doc := RouteEdge{ + From: from.ID.String(), + To: to.ID.String(), + Distance: 0, + } + meta, err := ec.CreateDocument(ctx, doc) + if err != nil { + t.Fatalf("Failed to create new document: %s", describe(err)) + } + // Update document + replacement := RouteEdge{ + From: to.ID.String(), + To: from.ID.String(), + Distance: -1, + } + ctx = driver.WithSilent(ctx) + if meta, err := ec.ReplaceDocument(ctx, meta.Key, replacement); err != nil { + t.Fatalf("Failed to replace document '%s': %s", meta.Key, describe(err)) + } else if meta.Key != "" { + t.Errorf("Expected empty meta, got %v", meta) + } +} + +// TestReplaceEdgeRevision creates a document, replaces it with a specific (correct) revision. +// Then it attempts a replacement with an incorrect revision which must fail. 
+func TestReplaceEdgeRevision(t *testing.T) { + var ctx context.Context + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "edge_test", nil, t) + prefix := "replace_edge_revision_" + g := ensureGraph(ctx, db, prefix+"graph", nil, t) + ec := ensureEdgeCollection(ctx, g, prefix+"citiesPerState", []string{prefix + "city"}, []string{prefix + "state"}, t) + cities := ensureCollection(ctx, db, prefix+"city", nil, t) + states := ensureCollection(ctx, db, prefix+"state", nil, t) + from := createDocument(ctx, cities, map[string]interface{}{"name": "Venlo"}, t) + to := createDocument(ctx, states, map[string]interface{}{"name": "Limburg"}, t) + + doc := RouteEdge{ + From: from.ID.String(), + To: to.ID.String(), + Distance: 0, + } + meta, err := ec.CreateDocument(ctx, doc) + if err != nil { + t.Fatalf("Failed to create new document: %s", describe(err)) + } + + // Replace document with correct revision + replacement := RouteEdge{ + From: to.ID.String(), + To: from.ID.String(), + Distance: -1, + } + initialRevCtx := driver.WithRevision(ctx, meta.Rev) + var replacedRevCtx context.Context + if meta2, err := ec.ReplaceDocument(initialRevCtx, meta.Key, replacement); err != nil { + t.Fatalf("Failed to replace document '%s': %s", meta.Key, describe(err)) + } else { + replacedRevCtx = driver.WithRevision(ctx, meta2.Rev) + if meta2.Rev == meta.Rev { + t.Errorf("Expected revision to change, got initial revision '%s', replaced revision '%s'", meta.Rev, meta2.Rev) + } + } + + // Replace document with incorrect revision + replacement.Distance = 999 + if _, err := ec.ReplaceDocument(initialRevCtx, meta.Key, replacement); !driver.IsPreconditionFailed(err) { + t.Errorf("Expected PreconditionFailedError, got %s", describe(err)) + } + + // Replace document once more with correct revision + replacement.Distance = 111 + if _, err := ec.ReplaceDocument(replacedRevCtx, meta.Key, replacement); err != nil { + t.Errorf("Expected success, got %s", describe(err)) + } +} + +// 
TestReplaceEdgeKeyEmpty replaces a document it with an empty key. +func TestReplaceEdgeKeyEmpty(t *testing.T) { + var ctx context.Context + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "edge_test", nil, t) + prefix := "replace_edge_keyEmpty_" + g := ensureGraph(ctx, db, prefix+"graph", nil, t) + ec := ensureEdgeCollection(ctx, g, prefix+"citiesPerState", []string{prefix + "city"}, []string{prefix + "state"}, t) + + // Update document + replacement := map[string]interface{}{ + "name": "Updated", + } + if _, err := ec.ReplaceDocument(nil, "", replacement); !driver.IsInvalidArgument(err) { + t.Errorf("Expected InvalidArgumentError, got %s", describe(err)) + } +} + +// TestReplaceEdgeUpdateNil replaces a document it with a nil update. +func TestReplaceEdgeUpdateNil(t *testing.T) { + var ctx context.Context + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "edge_test", nil, t) + prefix := "replace_edge_updateNil_" + g := ensureGraph(ctx, db, prefix+"graph", nil, t) + ec := ensureEdgeCollection(ctx, g, prefix+"citiesPerState", []string{prefix + "city"}, []string{prefix + "state"}, t) + + if _, err := ec.ReplaceDocument(nil, "validKey", nil); !driver.IsInvalidArgument(err) { + t.Errorf("Expected InvalidArgumentError, got %s", describe(err)) + } +} diff --git a/deps/github.com/arangodb/go-driver/test/edge_update_test.go b/deps/github.com/arangodb/go-driver/test/edge_update_test.go new file mode 100644 index 000000000..9ac33b33b --- /dev/null +++ b/deps/github.com/arangodb/go-driver/test/edge_update_test.go @@ -0,0 +1,373 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "context" + "reflect" + "testing" + + driver "github.com/arangodb/go-driver" +) + +// TestUpdateEdge creates a document, updates it and then checks the update has succeeded. +func TestUpdateEdge(t *testing.T) { + var ctx context.Context + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "edge_test", nil, t) + prefix := "update_edge_" + g := ensureGraph(ctx, db, prefix+"graph", nil, t) + ec := ensureEdgeCollection(ctx, g, prefix+"citiesPerState", []string{prefix + "city"}, []string{prefix + "state"}, t) + cities := ensureCollection(ctx, db, prefix+"city", nil, t) + states := ensureCollection(ctx, db, prefix+"state", nil, t) + from := createDocument(ctx, cities, map[string]interface{}{"name": "Venlo"}, t) + to := createDocument(ctx, states, map[string]interface{}{"name": "Limburg"}, t) + + doc := RouteEdge{ + From: from.ID.String(), + To: to.ID.String(), + Distance: 123, + } + meta, err := ec.CreateDocument(ctx, doc) + if err != nil { + t.Fatalf("Failed to create new document: %s", describe(err)) + } + // Update document + update := map[string]interface{}{ + "distance": 555, + } + if _, err := ec.UpdateDocument(ctx, meta.Key, update); err != nil { + t.Fatalf("Failed to update document '%s': %s", meta.Key, describe(err)) + } + // Read updated document + var readDoc RouteEdge + if _, err := ec.ReadDocument(ctx, meta.Key, &readDoc); err != nil { + t.Fatalf("Failed to read document '%s': %s", meta.Key, describe(err)) 
+ } + doc.Distance = 555 + if !reflect.DeepEqual(doc, readDoc) { + t.Errorf("Got wrong document. Expected %+v, got %+v", doc, readDoc) + } +} + +// TestUpdateEdgeReturnOld creates a document, updates it checks the ReturnOld value. +func TestUpdateEdgeReturnOld(t *testing.T) { + var ctx context.Context + c := createClientFromEnv(t, true) + skipBelowVersion(c, "3.4", t) // See https://github.com/arangodb/arangodb/issues/2363 + db := ensureDatabase(ctx, c, "edge_test", nil, t) + prefix := "update_edge_returnOld_" + g := ensureGraph(ctx, db, prefix+"graph", nil, t) + ec := ensureEdgeCollection(ctx, g, prefix+"citiesPerState", []string{prefix + "city"}, []string{prefix + "state"}, t) + cities := ensureCollection(ctx, db, prefix+"city", nil, t) + states := ensureCollection(ctx, db, prefix+"state", nil, t) + from := createDocument(ctx, cities, map[string]interface{}{"name": "Venlo"}, t) + to := createDocument(ctx, states, map[string]interface{}{"name": "Limburg"}, t) + + doc := RouteEdge{ + From: from.ID.String(), + To: to.ID.String(), + Distance: 123, + } + meta, err := ec.CreateDocument(ctx, doc) + if err != nil { + t.Fatalf("Failed to create new document: %s", describe(err)) + } + // Update document + update := map[string]interface{}{ + "distance": 333, + } + var old RouteEdge + ctx = driver.WithReturnOld(ctx, &old) + if _, err := ec.UpdateDocument(ctx, meta.Key, update); err != nil { + t.Fatalf("Failed to update document '%s': %s", meta.Key, describe(err)) + } + // Check old document + if !reflect.DeepEqual(doc, old) { + t.Errorf("Got wrong document. Expected %+v, got %+v", doc, old) + } +} + +// TestUpdateEdgeReturnNew creates a document, updates it checks the ReturnNew value. 
+func TestUpdateEdgeReturnNew(t *testing.T) { + var ctx context.Context + c := createClientFromEnv(t, true) + skipBelowVersion(c, "3.4", t) // See https://github.com/arangodb/arangodb/issues/2363 + db := ensureDatabase(ctx, c, "edge_test", nil, t) + prefix := "update_edge_returnNew_" + g := ensureGraph(ctx, db, prefix+"graph", nil, t) + ec := ensureEdgeCollection(ctx, g, prefix+"citiesPerState", []string{prefix + "city"}, []string{prefix + "state"}, t) + cities := ensureCollection(ctx, db, prefix+"city", nil, t) + states := ensureCollection(ctx, db, prefix+"state", nil, t) + from := createDocument(ctx, cities, map[string]interface{}{"name": "Venlo"}, t) + to := createDocument(ctx, states, map[string]interface{}{"name": "Limburg"}, t) + + doc := RouteEdge{ + From: from.ID.String(), + To: to.ID.String(), + Distance: 123, + } + meta, err := ec.CreateDocument(ctx, doc) + if err != nil { + t.Fatalf("Failed to create new document: %s", describe(err)) + } + // Update document + update := map[string]interface{}{ + "_from": to.ID.String(), + } + var newDoc RouteEdge + ctx = driver.WithReturnNew(ctx, &newDoc) + if _, err := ec.UpdateDocument(ctx, meta.Key, update); err != nil { + t.Fatalf("Failed to update document '%s': %s", meta.Key, describe(err)) + } + // Check new document + expected := doc + expected.From = to.ID.String() + if !reflect.DeepEqual(expected, newDoc) { + t.Errorf("Got wrong document. Expected %+v, got %+v", expected, newDoc) + } +} + +// TestUpdateEdgeKeepNullTrue creates a document, updates it with KeepNull(true) and then checks the update has succeeded. 
+func TestUpdateEdgeKeepNullTrue(t *testing.T) { + var ctx context.Context + var conn driver.Connection + c := createClientFromEnv(t, true, &conn) + db := ensureDatabase(ctx, c, "edge_test", nil, t) + prefix := "update_edge_keepNullTrue_" + g := ensureGraph(ctx, db, prefix+"graph", nil, t) + ec := ensureEdgeCollection(ctx, g, prefix+"citiesPerState", []string{prefix + "city"}, []string{prefix + "state"}, t) + cities := ensureCollection(ctx, db, prefix+"city", nil, t) + states := ensureCollection(ctx, db, prefix+"state", nil, t) + from := createDocument(ctx, cities, map[string]interface{}{"name": "Venlo"}, t) + to := createDocument(ctx, states, map[string]interface{}{"name": "Limburg"}, t) + + doc := AccountEdge{ + From: from.ID.String(), + To: to.ID.String(), + User: &UserDoc{ + "Mathilda", + 45, + }, + } + meta, err := ec.CreateDocument(ctx, doc) + if err != nil { + t.Fatalf("Failed to create new document: %s", describe(err)) + } + // Update document + update := map[string]interface{}{ + "_to": from.ID.String(), + "user": nil, + } + if _, err := ec.UpdateDocument(driver.WithKeepNull(ctx, true), meta.Key, update); err != nil { + t.Fatalf("Failed to update document '%s': %s", meta.Key, describe(err)) + } + // Read updated document + var readDoc map[string]interface{} + var rawResponse []byte + ctx = driver.WithRawResponse(ctx, &rawResponse) + if _, err := ec.ReadDocument(ctx, meta.Key, &readDoc); err != nil { + t.Fatalf("Failed to read document '%s': %s", meta.Key, describe(err)) + } + // We parse to this type of map, since unmarshalling nil values to a map of type map[string]interface{} + // will cause the entry to be deleted. 
+ var jsonMap map[string]*driver.RawObject + if err := conn.Unmarshal(rawResponse, &jsonMap); err != nil { + t.Fatalf("Failed to parse raw response: %s", describe(err)) + } + // Get "edge" field and unmarshal it + if raw, found := jsonMap["edge"]; !found { + t.Errorf("Expected edge to be found but got not found") + } else { + jsonMap = nil + if err := conn.Unmarshal(*raw, &jsonMap); err != nil { + t.Fatalf("Failed to parse raw edge object: %s", describe(err)) + } + if raw, found := jsonMap["user"]; !found { + t.Errorf("Expected user to be found but got not found") + } else if raw != nil { + t.Errorf("Expected user to be found and nil, got %s", string(*raw)) + } + } +} + +// TestUpdateEdgeKeepNullFalse creates a document, updates it with KeepNull(false) and then checks the update has succeeded. +func TestUpdateEdgeKeepNullFalse(t *testing.T) { + var ctx context.Context + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "edge_test", nil, t) + prefix := "update_edge_keepNullFalse_" + g := ensureGraph(ctx, db, prefix+"graph", nil, t) + ec := ensureEdgeCollection(ctx, g, prefix+"citiesPerState", []string{prefix + "city"}, []string{prefix + "state"}, t) + cities := ensureCollection(ctx, db, prefix+"city", nil, t) + states := ensureCollection(ctx, db, prefix+"state", nil, t) + from := createDocument(ctx, cities, map[string]interface{}{"name": "Venlo"}, t) + to := createDocument(ctx, states, map[string]interface{}{"name": "Limburg"}, t) + + doc := AccountEdge{ + From: from.ID.String(), + To: to.ID.String(), + User: &UserDoc{ + "Mathilda", + 45, + }, + } + meta, err := ec.CreateDocument(ctx, doc) + if err != nil { + t.Fatalf("Failed to create new document: %s", describe(err)) + } + // Update document + update := map[string]interface{}{ + "_to": from.ID.String(), + "user": nil, + } + if _, err := ec.UpdateDocument(driver.WithKeepNull(ctx, false), meta.Key, update); err != nil { + t.Fatalf("Failed to update document '%s': %s", meta.Key, describe(err)) + } + // 
Read updated document + readDoc := doc + if _, err := ec.ReadDocument(ctx, meta.Key, &readDoc); err != nil { + t.Fatalf("Failed to read document '%s': %s", meta.Key, describe(err)) + } + if readDoc.User == nil { + t.Errorf("Expected user to be untouched, got %v", readDoc.User) + } +} + +// TestUpdateEdgeSilent creates a document, updates it with Silent() and then checks the meta is indeed empty. +func TestUpdateEdgeSilent(t *testing.T) { + var ctx context.Context + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "edge_test", nil, t) + prefix := "update_edge_silent_" + g := ensureGraph(ctx, db, prefix+"graph", nil, t) + ec := ensureEdgeCollection(ctx, g, prefix+"citiesPerState", []string{prefix + "city"}, []string{prefix + "state"}, t) + cities := ensureCollection(ctx, db, prefix+"city", nil, t) + states := ensureCollection(ctx, db, prefix+"state", nil, t) + from := createDocument(ctx, cities, map[string]interface{}{"name": "Venlo"}, t) + to := createDocument(ctx, states, map[string]interface{}{"name": "Limburg"}, t) + + doc := RouteEdge{ + From: from.ID.String(), + To: to.ID.String(), + Distance: 7, + } + meta, err := ec.CreateDocument(ctx, doc) + if err != nil { + t.Fatalf("Failed to create new document: %s", describe(err)) + } + // Update document + update := map[string]interface{}{ + "distance": 61, + } + ctx = driver.WithSilent(ctx) + if meta, err := ec.UpdateDocument(ctx, meta.Key, update); err != nil { + t.Fatalf("Failed to update document '%s': %s", meta.Key, describe(err)) + } else if meta.Key != "" { + t.Errorf("Expected empty meta, got %v", meta) + } +} + +// TestUpdateEdgeRevision creates a document, updates it with a specific (correct) revision. +// Then it attempts an update with an incorrect revision which must fail. 
+func TestUpdateEdgeRevision(t *testing.T) { + var ctx context.Context + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "edge_test", nil, t) + prefix := "update_edge_revision_" + g := ensureGraph(ctx, db, prefix+"graph", nil, t) + ec := ensureEdgeCollection(ctx, g, prefix+"citiesPerState", []string{prefix + "city"}, []string{prefix + "state"}, t) + cities := ensureCollection(ctx, db, prefix+"city", nil, t) + states := ensureCollection(ctx, db, prefix+"state", nil, t) + from := createDocument(ctx, cities, map[string]interface{}{"name": "Venlo"}, t) + to := createDocument(ctx, states, map[string]interface{}{"name": "Limburg"}, t) + + doc := RouteEdge{ + From: from.ID.String(), + To: to.ID.String(), + Distance: 7, + } + meta, err := ec.CreateDocument(ctx, doc) + if err != nil { + t.Fatalf("Failed to create new document: %s", describe(err)) + } + + // Update document with correct revision + update := map[string]interface{}{ + "distance": 34, + } + initialRevCtx := driver.WithRevision(ctx, meta.Rev) + var updatedRevCtx context.Context + if meta2, err := ec.UpdateDocument(initialRevCtx, meta.Key, update); err != nil { + t.Fatalf("Failed to update document '%s': %s", meta.Key, describe(err)) + } else { + updatedRevCtx = driver.WithRevision(ctx, meta2.Rev) + if meta2.Rev == meta.Rev { + t.Errorf("Expected revision to change, got initial revision '%s', updated revision '%s'", meta.Rev, meta2.Rev) + } + } + + // Update document with incorrect revision + update["distance"] = 35 + if _, err := ec.UpdateDocument(initialRevCtx, meta.Key, update); !driver.IsPreconditionFailed(err) { + t.Errorf("Expected PreconditionFailedError, got %s", describe(err)) + } + + // Update document once more with correct revision + update["distance"] = 36 + if _, err := ec.UpdateDocument(updatedRevCtx, meta.Key, update); err != nil { + t.Errorf("Expected success, got %s", describe(err)) + } +} + +// TestUpdateEdgeKeyEmpty updates a document it with an empty key. 
+func TestUpdateEdgeKeyEmpty(t *testing.T) { + var ctx context.Context + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "edge_test", nil, t) + prefix := "update_edge_keyEmpty_" + g := ensureGraph(ctx, db, prefix+"graph", nil, t) + ec := ensureEdgeCollection(ctx, g, prefix+"citiesPerState", []string{prefix + "city"}, []string{prefix + "state"}, t) + + // Update document + update := map[string]interface{}{ + "name": "Updated", + } + if _, err := ec.UpdateDocument(nil, "", update); !driver.IsInvalidArgument(err) { + t.Errorf("Expected InvalidArgumentError, got %s", describe(err)) + } +} + +// TestUpdateEdgeUpdateNil updates a document it with a nil update. +func TestUpdateEdgeUpdateNil(t *testing.T) { + var ctx context.Context + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "edge_test", nil, t) + prefix := "update_edge_updateNil_" + g := ensureGraph(ctx, db, prefix+"graph", nil, t) + ec := ensureEdgeCollection(ctx, g, prefix+"citiesPerState", []string{prefix + "city"}, []string{prefix + "state"}, t) + + if _, err := ec.UpdateDocument(nil, "validKey", nil); !driver.IsInvalidArgument(err) { + t.Errorf("Expected InvalidArgumentError, got %s", describe(err)) + } +} diff --git a/deps/github.com/arangodb/go-driver/test/edges_create_test.go b/deps/github.com/arangodb/go-driver/test/edges_create_test.go new file mode 100644 index 000000000..2fab94d2a --- /dev/null +++ b/deps/github.com/arangodb/go-driver/test/edges_create_test.go @@ -0,0 +1,216 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "context" + "reflect" + "testing" + + driver "github.com/arangodb/go-driver" +) + +// TestCreateEdges creates documents and then checks that it exists. +func TestCreateEdges(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "edges_test", nil, t) + prefix := "create_edges_" + g := ensureGraph(ctx, db, prefix+"graph", nil, t) + ec := ensureEdgeCollection(ctx, g, prefix+"citiesPerState", []string{prefix + "city"}, []string{prefix + "state"}, t) + cities := ensureCollection(ctx, db, prefix+"city", nil, t) + states := ensureCollection(ctx, db, prefix+"state", nil, t) + from := createDocument(ctx, cities, map[string]interface{}{"name": "Venlo"}, t) + to := createDocument(ctx, states, map[string]interface{}{"name": "Limburg"}, t) + + docs := []RouteEdge{ + RouteEdge{ + From: from.ID.String(), + To: to.ID.String(), + Distance: 40, + }, + RouteEdge{ + From: from.ID.String(), + To: to.ID.String(), + Distance: 68, + }, + RouteEdge{ + From: from.ID.String(), + To: to.ID.String(), + Distance: 21, + }, + } + metas, errs, err := ec.CreateDocuments(ctx, docs) + if err != nil { + t.Fatalf("Failed to create new documents: %s", describe(err)) + } else if len(metas) != len(docs) { + t.Errorf("Expected %d metas, got %d", len(docs), len(metas)) + } else { + for i := 0; i < len(docs); i++ { + if err := errs[i]; err != nil { + t.Errorf("Expected no error at index %d, got %s", i, 
describe(err)) + } + + // Document must exists now + var readDoc RouteEdge + if _, err := ec.ReadDocument(nil, metas[i].Key, &readDoc); err != nil { + t.Fatalf("Failed to read document '%s': %s", metas[i].Key, describe(err)) + } + if !reflect.DeepEqual(docs[i], readDoc) { + t.Errorf("Got wrong document. Expected %+v, got %+v", docs[i], readDoc) + } + } + } +} + +// TestCreateEdgesReturnNew creates documents and checks the document returned in in ReturnNew. +func TestCreateEdgesReturnNew(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + skipBelowVersion(c, "3.4", t) // See https://github.com/arangodb/arangodb/issues/2363 + db := ensureDatabase(ctx, c, "edges_test", nil, t) + prefix := "create_edges_returnNew_" + g := ensureGraph(ctx, db, prefix+"graph", nil, t) + ec := ensureEdgeCollection(ctx, g, prefix+"citiesPerState", []string{prefix + "city"}, []string{prefix + "state"}, t) + cities := ensureCollection(ctx, db, prefix+"city", nil, t) + states := ensureCollection(ctx, db, prefix+"state", nil, t) + from := createDocument(ctx, cities, map[string]interface{}{"name": "Venlo"}, t) + to := createDocument(ctx, states, map[string]interface{}{"name": "Limburg"}, t) + + docs := []RouteEdge{ + RouteEdge{ + From: from.ID.String(), + To: to.ID.String(), + Distance: 40, + }, + RouteEdge{ + From: from.ID.String(), + To: to.ID.String(), + Distance: 68, + }, + RouteEdge{ + From: from.ID.String(), + To: to.ID.String(), + Distance: 21, + }, + } + newDocs := make([]RouteEdge, len(docs)) + metas, errs, err := ec.CreateDocuments(driver.WithReturnNew(ctx, newDocs), docs) + if err != nil { + t.Fatalf("Failed to create new documents: %s", describe(err)) + } else if len(metas) != len(docs) { + t.Errorf("Expected %d metas, got %d", len(docs), len(metas)) + } else { + for i := 0; i < len(docs); i++ { + if err := errs[i]; err != nil { + t.Errorf("Expected no error at index %d, got %s", i, describe(err)) + } + // NewDoc must equal doc + if 
!reflect.DeepEqual(docs[i], newDocs[i]) { + t.Errorf("Got wrong ReturnNew document. Expected %+v, got %+v", docs[i], newDocs[i]) + } + // Document must exists now + var readDoc RouteEdge + if _, err := ec.ReadDocument(ctx, metas[i].Key, &readDoc); err != nil { + t.Fatalf("Failed to read document '%s': %s", metas[i].Key, describe(err)) + } + if !reflect.DeepEqual(docs[i], readDoc) { + t.Errorf("Got wrong document. Expected %+v, got %+v", docs[i], readDoc) + } + } + } +} + +// TestCreateEdgesSilent creates documents with WithSilent. +func TestCreateEdgesSilent(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "edges_test", nil, t) + prefix := "create_edges_silent_" + g := ensureGraph(ctx, db, prefix+"graph", nil, t) + ec := ensureEdgeCollection(ctx, g, prefix+"citiesPerState", []string{prefix + "city"}, []string{prefix + "state"}, t) + cities := ensureCollection(ctx, db, prefix+"city", nil, t) + states := ensureCollection(ctx, db, prefix+"state", nil, t) + from := createDocument(ctx, cities, map[string]interface{}{"name": "Venlo"}, t) + to := createDocument(ctx, states, map[string]interface{}{"name": "Limburg"}, t) + + docs := []RouteEdge{ + RouteEdge{ + From: from.ID.String(), + To: to.ID.String(), + Distance: 40, + }, + RouteEdge{ + From: from.ID.String(), + To: to.ID.String(), + Distance: 68, + }, + RouteEdge{ + From: from.ID.String(), + To: to.ID.String(), + Distance: 21, + }, + } + if metas, errs, err := ec.CreateDocuments(driver.WithSilent(ctx), docs); err != nil { + t.Fatalf("Failed to create new documents: %s", describe(err)) + } else { + if len(metas) != 0 { + t.Errorf("Expected 0 metas, got %d", len(metas)) + } + if len(errs) != 0 { + t.Errorf("Expected 0 errors, got %d", len(errs)) + } + } +} + +// TestCreateEdgesNil creates multiple documents with a nil documents input. 
+func TestCreateEdgesNil(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "edges_test", nil, t) + prefix := "create_edges_nil_" + g := ensureGraph(ctx, db, prefix+"graph", nil, t) + ec := ensureEdgeCollection(ctx, g, prefix+"citiesPerState", []string{prefix + "city"}, []string{prefix + "state"}, t) + if _, _, err := ec.CreateDocuments(nil, nil); !driver.IsInvalidArgument(err) { + t.Errorf("Expected InvalidArgumentError, got %s", describe(err)) + } +} + +// TestCreateEdgesNonSlice creates multiple documents with a non-slice documents input. +func TestCreateEdgesNonSlice(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "edges_test", nil, t) + prefix := "create_edges_nonSlice_" + g := ensureGraph(ctx, db, prefix+"graph", nil, t) + ec := ensureEdgeCollection(ctx, g, prefix+"citiesPerState", []string{prefix + "city"}, []string{prefix + "state"}, t) + + var obj UserDoc + if _, _, err := ec.CreateDocuments(nil, &obj); !driver.IsInvalidArgument(err) { + t.Errorf("Expected InvalidArgumentError, got %s", describe(err)) + } + var m map[string]interface{} + if _, _, err := ec.CreateDocuments(nil, &m); !driver.IsInvalidArgument(err) { + t.Errorf("Expected InvalidArgumentError, got %s", describe(err)) + } +} diff --git a/deps/github.com/arangodb/go-driver/test/edges_import_test.go b/deps/github.com/arangodb/go-driver/test/edges_import_test.go new file mode 100644 index 000000000..55c46f9b9 --- /dev/null +++ b/deps/github.com/arangodb/go-driver/test/edges_import_test.go @@ -0,0 +1,742 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "context" + "fmt" + "testing" + + driver "github.com/arangodb/go-driver" +) + +// TestImportEdgesWithKeys imports documents and then checks that it exists. +func TestImportEdgesWithKeys(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "edges_test", nil, t) + prefix := "import_edges_" + g := ensureGraph(ctx, db, prefix+"graph", nil, t) + col := ensureEdgeCollection(ctx, g, prefix+"citiesPerState", []string{prefix + "city"}, []string{prefix + "state"}, t) + cities := ensureCollection(ctx, db, prefix+"city", nil, t) + states := ensureCollection(ctx, db, prefix+"state", nil, t) + from := createDocument(ctx, cities, map[string]interface{}{"name": "Venlo"}, t) + to := createDocument(ctx, states, map[string]interface{}{"name": "Limburg"}, t) + + docs := []RouteEdgeWithKey{ + RouteEdgeWithKey{ + "edge1", + from.ID.String(), + to.ID.String(), + 40, + }, + RouteEdgeWithKey{ + "edge2", + from.ID.String(), + to.ID.String(), + 50, + }, + RouteEdgeWithKey{ + "edge3", + from.ID.String(), + to.ID.String(), + 60, + }, + } + + var raw []byte + ctx = driver.WithRawResponse(ctx, &raw) + stats, err := col.ImportDocuments(ctx, docs, nil) + if err != nil { + t.Fatalf("Failed to import documents: %s", describe(err)) + } else { + if stats.Created != int64(len(docs)) { + t.Errorf("Expected %d created documents, got %d (json %s)", len(docs), stats.Created, formatRawResponse(raw)) + } + if 
stats.Errors != 0 { + t.Errorf("Expected %d error documents, got %d (json %s)", 0, stats.Errors, formatRawResponse(raw)) + } + if stats.Empty != 0 { + t.Errorf("Expected %d empty documents, got %d (json %s)", 0, stats.Empty, formatRawResponse(raw)) + } + } +} + +// TestImportEdgesWithoutKeys imports documents and then checks that it exists. +func TestImportEdgesWithoutKeys(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "edges_test", nil, t) + prefix := "import_edges_withhoutKeys_" + g := ensureGraph(ctx, db, prefix+"graph", nil, t) + col := ensureEdgeCollection(ctx, g, prefix+"citiesPerState", []string{prefix + "city"}, []string{prefix + "state"}, t) + cities := ensureCollection(ctx, db, prefix+"city", nil, t) + states := ensureCollection(ctx, db, prefix+"state", nil, t) + from := createDocument(ctx, cities, map[string]interface{}{"name": "Venlo"}, t) + to := createDocument(ctx, states, map[string]interface{}{"name": "Limburg"}, t) + + docs := []RouteEdgeWithKey{ + RouteEdgeWithKey{ + "edge1", + from.ID.String(), + to.ID.String(), + 40, + }, + RouteEdgeWithKey{ + "edge2", + from.ID.String(), + to.ID.String(), + 50, + }, + RouteEdgeWithKey{ + "edge3", + from.ID.String(), + to.ID.String(), + 60, + }, + } + + var raw []byte + ctx = driver.WithRawResponse(ctx, &raw) + stats, err := col.ImportDocuments(ctx, docs, nil) + if err != nil { + t.Fatalf("Failed to import documents: %s", describe(err)) + } else { + if stats.Created != int64(len(docs)) { + t.Errorf("Expected %d created documents, got %d (json %s)", len(docs), stats.Created, formatRawResponse(raw)) + } + if stats.Errors != 0 { + t.Errorf("Expected %d error documents, got %d (json %s)", 0, stats.Errors, formatRawResponse(raw)) + } + if stats.Empty != 0 { + t.Errorf("Expected %d empty documents, got %d (json %s)", 0, stats.Empty, formatRawResponse(raw)) + } + } +} + +// TestImportEdgesEmptyEntries imports documents and then checks that it exists. 
+func TestImportEdgesEmptyEntries(t *testing.T) { + if getContentTypeFromEnv(t) == driver.ContentTypeVelocypack { + t.Skip("Not supported on vpack") + } + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "edges_test", nil, t) + prefix := "import_edges_emptyEntries_" + g := ensureGraph(ctx, db, prefix+"graph", nil, t) + col := ensureEdgeCollection(ctx, g, prefix+"citiesPerState", []string{prefix + "city"}, []string{prefix + "state"}, t) + cities := ensureCollection(ctx, db, prefix+"city", nil, t) + states := ensureCollection(ctx, db, prefix+"state", nil, t) + from := createDocument(ctx, cities, map[string]interface{}{"name": "Venlo"}, t) + to := createDocument(ctx, states, map[string]interface{}{"name": "Limburg"}, t) + + docs := []*RouteEdgeWithKey{ + &RouteEdgeWithKey{ + "edge1", + from.ID.String(), + to.ID.String(), + 40, + }, + &RouteEdgeWithKey{ + "edge2", + from.ID.String(), + to.ID.String(), + 50, + }, + nil, + &RouteEdgeWithKey{ + "edge3", + from.ID.String(), + to.ID.String(), + 60, + }, + } + + var raw []byte + ctx = driver.WithRawResponse(ctx, &raw) + stats, err := col.ImportDocuments(ctx, docs, nil) + if err != nil { + t.Fatalf("Failed to import documents: %s", describe(err)) + } else { + if stats.Created != int64(len(docs))-1 { + t.Errorf("Expected %d created documents, got %d (json %s)", len(docs)-1, stats.Created, formatRawResponse(raw)) + } + if stats.Errors != 0 { + t.Errorf("Expected %d error documents, got %d (json %s)", 0, stats.Errors, formatRawResponse(raw)) + } + if stats.Empty != 1 { + t.Errorf("Expected %d empty documents, got %d (json %s)", 1, stats.Empty, formatRawResponse(raw)) + } + } +} + +// TestImportEdgesInvalidEntries imports documents and then checks that it exists. 
+func TestImportEdgesInvalidEntries(t *testing.T) { + if getContentTypeFromEnv(t) == driver.ContentTypeVelocypack { + t.Skip("Not supported on vpack") + } + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "edges_test", nil, t) + prefix := "import_edges_invalidEntries_" + g := ensureGraph(ctx, db, prefix+"graph", nil, t) + col := ensureEdgeCollection(ctx, g, prefix+"citiesPerState", []string{prefix + "city"}, []string{prefix + "state"}, t) + cities := ensureCollection(ctx, db, prefix+"city", nil, t) + states := ensureCollection(ctx, db, prefix+"state", nil, t) + from := createDocument(ctx, cities, map[string]interface{}{"name": "Venlo"}, t) + to := createDocument(ctx, states, map[string]interface{}{"name": "Limburg"}, t) + + docs := []interface{}{ + &RouteEdgeWithKey{ + "edge1", + from.ID.String(), + to.ID.String(), + 40, + }, + []string{"array", "is", "invalid"}, + &RouteEdgeWithKey{ + "edge2", + from.ID.String(), + to.ID.String(), + 50, + }, + "string is not valid", + nil, + &RouteEdgeWithKey{ + "edge3", + from.ID.String(), + to.ID.String(), + 60, + }, + } + + var raw []byte + ctx = driver.WithRawResponse(ctx, &raw) + stats, err := col.ImportDocuments(ctx, docs, nil) + if err != nil { + t.Fatalf("Failed to import documents: %s", describe(err)) + } else { + if stats.Created != int64(len(docs))-3 { + t.Errorf("Expected %d created documents, got %d (json %s)", len(docs)-3, stats.Created, formatRawResponse(raw)) + } + if stats.Errors != 2 { + t.Errorf("Expected %d error documents, got %d (json %s)", 2, stats.Errors, formatRawResponse(raw)) + } + if stats.Empty != 1 { + t.Errorf("Expected %d empty documents, got %d (json %s)", 1, stats.Empty, formatRawResponse(raw)) + } + } +} + +// TestImportEdgesDuplicateEntries imports documents and then checks that it exists. 
+func TestImportEdgesDuplicateEntries(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "edges_test", nil, t) + prefix := "import_edges_duplicateEntries_" + g := ensureGraph(ctx, db, prefix+"graph", nil, t) + col := ensureEdgeCollection(ctx, g, prefix+"citiesPerState", []string{prefix + "city"}, []string{prefix + "state"}, t) + cities := ensureCollection(ctx, db, prefix+"city", nil, t) + states := ensureCollection(ctx, db, prefix+"state", nil, t) + from := createDocument(ctx, cities, map[string]interface{}{"name": "Venlo"}, t) + to := createDocument(ctx, states, map[string]interface{}{"name": "Limburg"}, t) + + docs := []*RouteEdgeWithKey{ + &RouteEdgeWithKey{ + "edge1", + from.ID.String(), + to.ID.String(), + 40, + }, + &RouteEdgeWithKey{ + "edge1", + from.ID.String(), + to.ID.String(), + 40, + }, + } + + var raw []byte + ctx = driver.WithRawResponse(ctx, &raw) + stats, err := col.ImportDocuments(ctx, docs, nil) + if err != nil { + t.Fatalf("Failed to import documents: %s", describe(err)) + } else { + if stats.Created != 1 { + t.Errorf("Expected %d created documents, got %d (json %s)", 1, stats.Created, formatRawResponse(raw)) + } + if stats.Errors != 1 { + t.Errorf("Expected %d error documents, got %d (json %s)", 1, stats.Errors, formatRawResponse(raw)) + } + if stats.Empty != 0 { + t.Errorf("Expected %d empty documents, got %d (json %s)", 0, stats.Empty, formatRawResponse(raw)) + } + if stats.Updated != 0 { + t.Errorf("Expected %d updated documents, got %d (json %s)", 0, stats.Updated, formatRawResponse(raw)) + } + if stats.Ignored != 0 { + t.Errorf("Expected %d ignored documents, got %d (json %s)", 0, stats.Ignored, formatRawResponse(raw)) + } + } +} + +// TestImportEdgesDuplicateEntriesComplete imports documents and then checks that it exists. 
+func TestImportEdgesDuplicateEntriesComplete(t *testing.T) { + if getContentTypeFromEnv(t) == driver.ContentTypeVelocypack { + t.Skip("Not supported on vpack") + } + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "edges_test", nil, t) + prefix := "import_edges_duplicateEntriesComplete_" + g := ensureGraph(ctx, db, prefix+"graph", nil, t) + col := ensureEdgeCollection(ctx, g, prefix+"citiesPerState", []string{prefix + "city"}, []string{prefix + "state"}, t) + cities := ensureCollection(ctx, db, prefix+"city", nil, t) + states := ensureCollection(ctx, db, prefix+"state", nil, t) + from := createDocument(ctx, cities, map[string]interface{}{"name": "Venlo"}, t) + to := createDocument(ctx, states, map[string]interface{}{"name": "Limburg"}, t) + + docs := []*RouteEdgeWithKey{ + &RouteEdgeWithKey{ + "edge1", + from.ID.String(), + to.ID.String(), + 40, + }, + &RouteEdgeWithKey{ + "edge1", + from.ID.String(), + to.ID.String(), + 40, + }, + nil, + } + + var raw []byte + ctx = driver.WithRawResponse(ctx, &raw) + if _, err := col.ImportDocuments(ctx, docs, &driver.ImportDocumentOptions{ + Complete: true, + }); !driver.IsConflict(err) { + t.Errorf("Expected ConflictError, got %s", describe(err)) + } +} + +// TestImportEdgesDuplicateEntriesUpdate imports documents and then checks that it exists. 
+func TestImportEdgesDuplicateEntriesUpdate(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "edges_test", nil, t) + prefix := "import_edges_duplicateEntriesUpdate_" + g := ensureGraph(ctx, db, prefix+"graph", nil, t) + col := ensureEdgeCollection(ctx, g, prefix+"citiesPerState", []string{prefix + "city"}, []string{prefix + "state"}, t) + cities := ensureCollection(ctx, db, prefix+"city", nil, t) + states := ensureCollection(ctx, db, prefix+"state", nil, t) + from := createDocument(ctx, cities, map[string]interface{}{"name": "Venlo"}, t) + to := createDocument(ctx, states, map[string]interface{}{"name": "Limburg"}, t) + + docs := []interface{}{ + &RouteEdgeWithKey{ + "edge1", + from.ID.String(), + to.ID.String(), + 40, + }, + map[string]interface{}{ + "_key": "edge1", + "_from": to.ID.String(), + "_to": from.ID.String(), + }, + } + + var raw []byte + ctx = driver.WithRawResponse(ctx, &raw) + stats, err := col.ImportDocuments(ctx, docs, &driver.ImportDocumentOptions{ + OnDuplicate: driver.ImportOnDuplicateUpdate, + }) + if err != nil { + t.Fatalf("Failed to import documents: %s", describe(err)) + } else { + if stats.Created != 1 { + t.Errorf("Expected %d created documents, got %d (json %s)", 1, stats.Created, formatRawResponse(raw)) + } + if stats.Errors != 0 { + t.Errorf("Expected %d error documents, got %d (json %s)", 0, stats.Errors, formatRawResponse(raw)) + } + if stats.Empty != 0 { + t.Errorf("Expected %d empty documents, got %d (json %s)", 0, stats.Empty, formatRawResponse(raw)) + } + if stats.Updated != 1 { + t.Errorf("Expected %d updated documents, got %d (json %s)", 1, stats.Updated, formatRawResponse(raw)) + } + if stats.Ignored != 0 { + t.Errorf("Expected %d ignored documents, got %d (json %s)", 0, stats.Ignored, formatRawResponse(raw)) + } + + var edge RouteEdgeWithKey + if _, err := col.ReadDocument(nil, "edge1", &edge); err != nil { + t.Errorf("ReadDocument failed: %s", describe(err)) + } 
else { + if edge.From != to.ID.String() { + t.Errorf("Expected From to be '%s', got '%s'", to, edge.From) + } + if edge.Distance != 40 { + t.Errorf("Expected Distance to be 40, got %d", edge.Distance) + } + } + } +} + +// TestImportEdgesDuplicateEntriesReplace imports documents and then checks that it exists. +func TestImportEdgesDuplicateEntriesReplace(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "edges_test", nil, t) + prefix := "import_edges_duplicateEntriesReplace_" + g := ensureGraph(ctx, db, prefix+"graph", nil, t) + col := ensureEdgeCollection(ctx, g, prefix+"citiesPerState", []string{prefix + "city"}, []string{prefix + "state"}, t) + cities := ensureCollection(ctx, db, prefix+"city", nil, t) + states := ensureCollection(ctx, db, prefix+"state", nil, t) + from := createDocument(ctx, cities, map[string]interface{}{"name": "Venlo"}, t) + to := createDocument(ctx, states, map[string]interface{}{"name": "Limburg"}, t) + + docs := []interface{}{ + &RouteEdgeWithKey{ + "edge1", + from.ID.String(), + to.ID.String(), + 40, + }, + map[string]interface{}{ + "_key": "edge1", + "_from": to.ID.String(), + "_to": from.ID.String(), + }, + } + + var raw []byte + ctx = driver.WithRawResponse(ctx, &raw) + stats, err := col.ImportDocuments(ctx, docs, &driver.ImportDocumentOptions{ + OnDuplicate: driver.ImportOnDuplicateReplace, + }) + if err != nil { + t.Fatalf("Failed to import documents: %s", describe(err)) + } else { + if stats.Created != 1 { + t.Errorf("Expected %d created documents, got %d (json %s)", 1, stats.Created, formatRawResponse(raw)) + } + if stats.Errors != 0 { + t.Errorf("Expected %d error documents, got %d (json %s)", 0, stats.Errors, formatRawResponse(raw)) + } + if stats.Empty != 0 { + t.Errorf("Expected %d empty documents, got %d (json %s)", 0, stats.Empty, formatRawResponse(raw)) + } + if stats.Updated != 1 { + t.Errorf("Expected %d updated documents, got %d (json %s)", 1, stats.Updated, 
formatRawResponse(raw)) + } + if stats.Ignored != 0 { + t.Errorf("Expected %d ignored documents, got %d (json %s)", 0, stats.Ignored, formatRawResponse(raw)) + } + + var edge RouteEdgeWithKey + if _, err := col.ReadDocument(nil, "edge1", &edge); err != nil { + t.Errorf("ReadDocument failed: %s", describe(err)) + } else { + if edge.From != to.ID.String() { + t.Errorf("Expected From to be '%s', got '%s'", to, edge.From) + } + if edge.Distance != 0 { + t.Errorf("Expected Distance to be 0, got %d", edge.Distance) + } + } + } +} + +// TestImportEdgesDuplicateEntriesIgnore imports documents and then checks that it exists. +func TestImportEdgesDuplicateEntriesIgnore(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "edges_test", nil, t) + prefix := "import_edges_duplicateEntriesIgnore_" + g := ensureGraph(ctx, db, prefix+"graph", nil, t) + col := ensureEdgeCollection(ctx, g, prefix+"citiesPerState", []string{prefix + "city"}, []string{prefix + "state"}, t) + cities := ensureCollection(ctx, db, prefix+"city", nil, t) + states := ensureCollection(ctx, db, prefix+"state", nil, t) + from := createDocument(ctx, cities, map[string]interface{}{"name": "Venlo"}, t) + to := createDocument(ctx, states, map[string]interface{}{"name": "Limburg"}, t) + + docs := []interface{}{ + &RouteEdgeWithKey{ + "edge1", + from.ID.String(), + to.ID.String(), + 40, + }, + map[string]interface{}{ + "_key": "edge1", + "_from": to.ID.String(), + "_to": from.ID.String(), + }, + } + + var raw []byte + ctx = driver.WithRawResponse(ctx, &raw) + stats, err := col.ImportDocuments(ctx, docs, &driver.ImportDocumentOptions{ + OnDuplicate: driver.ImportOnDuplicateIgnore, + }) + if err != nil { + t.Fatalf("Failed to import documents: %s", describe(err)) + } else { + if stats.Created != 1 { + t.Errorf("Expected %d created documents, got %d (json %s)", 1, stats.Created, formatRawResponse(raw)) + } + if stats.Errors != 0 { + t.Errorf("Expected %d error 
documents, got %d (json %s)", 0, stats.Errors, formatRawResponse(raw)) + } + if stats.Empty != 0 { + t.Errorf("Expected %d empty documents, got %d (json %s)", 0, stats.Empty, formatRawResponse(raw)) + } + if stats.Updated != 0 { + t.Errorf("Expected %d updated documents, got %d (json %s)", 0, stats.Updated, formatRawResponse(raw)) + } + if stats.Ignored != 1 { + t.Errorf("Expected %d ignored documents, got %d (json %s)", 1, stats.Ignored, formatRawResponse(raw)) + } + + var edge RouteEdgeWithKey + if _, err := col.ReadDocument(nil, "edge1", &edge); err != nil { + t.Errorf("ReadDocument failed: %s", describe(err)) + } else { + if edge.From != from.ID.String() { + t.Errorf("Expected From to be '%s', got '%s'", to, edge.From) + } + if edge.Distance != 40 { + t.Errorf("Expected Distance to be 0, got %d", edge.Distance) + } + } + } +} + +// TestImportEdgesDetails imports documents and then checks that it exists. +func TestImportEdgesDetails(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "edges_test", nil, t) + prefix := "import_edges_details_" + g := ensureGraph(ctx, db, prefix+"graph", nil, t) + col := ensureEdgeCollection(ctx, g, prefix+"citiesPerState", []string{prefix + "city"}, []string{prefix + "state"}, t) + cities := ensureCollection(ctx, db, prefix+"city", nil, t) + states := ensureCollection(ctx, db, prefix+"state", nil, t) + from := createDocument(ctx, cities, map[string]interface{}{"_key": "venlo", "name": "Venlo"}, t) + to := createDocument(ctx, states, map[string]interface{}{"_key": "lb", "name": "Limburg"}, t) + + docs := []interface{}{ + &RouteEdgeWithKey{ + "edge1", + from.ID.String(), + to.ID.String(), + 40, + }, + map[string]interface{}{ + "_key": "edge1", + "_from": to.ID.String(), + "_to": from.ID.String(), + }, + } + + var raw []byte + var details []string + ctx = driver.WithImportDetails(driver.WithRawResponse(ctx, &raw), &details) + stats, err := col.ImportDocuments(ctx, docs, nil) 
+ if err != nil { + t.Fatalf("Failed to import documents: %s", describe(err)) + } else { + if stats.Created != 1 { + t.Errorf("Expected %d created documents, got %d (json %s)", 1, stats.Created, formatRawResponse(raw)) + } + if stats.Errors != 1 { + t.Errorf("Expected %d error documents, got %d (json %s)", 1, stats.Errors, formatRawResponse(raw)) + } + if stats.Empty != 0 { + t.Errorf("Expected %d empty documents, got %d (json %s)", 0, stats.Empty, formatRawResponse(raw)) + } + if stats.Updated != 0 { + t.Errorf("Expected %d updated documents, got %d (json %s)", 0, stats.Updated, formatRawResponse(raw)) + } + if stats.Ignored != 0 { + t.Errorf("Expected %d ignored documents, got %d (json %s)", 0, stats.Ignored, formatRawResponse(raw)) + } + + detailsExpected := fmt.Sprintf(`at position 1: creating document failed with error 'unique constraint violated', offending document: {"_from":"%sstate/lb","_key":"edge1","_to":"%scity/venlo"}`, prefix, prefix) + if len(details) != 1 { + t.Errorf("Expected 1 details, to %d", len(details)) + } else if details[0] != detailsExpected { + t.Errorf("Expected details[0] to be '%s', got '%s'", detailsExpected, details[0]) + } + } +} + +// TestImportEdgesOverwriteYes imports documents and then checks that it exists. 
+func TestImportEdgesOverwriteYes(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "edges_test", nil, t) + prefix := "import_edges_overwriteYes_" + g := ensureGraph(ctx, db, prefix+"graph", nil, t) + col := ensureEdgeCollection(ctx, g, prefix+"citiesPerState", []string{prefix + "city"}, []string{prefix + "state"}, t) + cities := ensureCollection(ctx, db, prefix+"city", nil, t) + states := ensureCollection(ctx, db, prefix+"state", nil, t) + from := createDocument(ctx, cities, map[string]interface{}{"name": "Venlo"}, t) + to := createDocument(ctx, states, map[string]interface{}{"name": "Limburg"}, t) + + docs := []interface{}{ + &RouteEdge{ + from.ID.String(), + to.ID.String(), + 40, + }, + map[string]interface{}{ + "_from": to.ID.String(), + "_to": from.ID.String(), + }, + } + + for i := 0; i < 3; i++ { + var raw []byte + var details []string + ctx := driver.WithImportDetails(driver.WithRawResponse(nil, &raw), &details) + stats, err := col.ImportDocuments(ctx, docs, &driver.ImportDocumentOptions{ + Overwrite: true, + }) + if err != nil { + t.Fatalf("Failed to import documents: %s", describe(err)) + } else { + if stats.Created != 2 { + t.Errorf("Expected %d created documents, got %d (json %s)", 2, stats.Created, formatRawResponse(raw)) + } + } + + countExpected := int64(2) + if count, err := col.Count(nil); err != nil { + t.Errorf("Failed to count documents: %s", describe(err)) + } else if count != countExpected { + t.Errorf("Expected count to be %d in round %d, got %d", countExpected, i, count) + } + } +} + +// TestImportEdgesOverwriteNo imports documents and then checks that it exists. 
+func TestImportEdgesOverwriteNo(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "edges_test", nil, t) + prefix := "import_edges_overwriteNo_" + g := ensureGraph(ctx, db, prefix+"graph", nil, t) + col := ensureEdgeCollection(ctx, g, prefix+"citiesPerState", []string{prefix + "city"}, []string{prefix + "state"}, t) + cities := ensureCollection(ctx, db, prefix+"city", nil, t) + states := ensureCollection(ctx, db, prefix+"state", nil, t) + from := createDocument(ctx, cities, map[string]interface{}{"name": "Venlo"}, t) + to := createDocument(ctx, states, map[string]interface{}{"name": "Limburg"}, t) + + docs := []interface{}{ + &RouteEdge{ + from.ID.String(), + to.ID.String(), + 40, + }, + map[string]interface{}{ + "_from": to.ID.String(), + "_to": from.ID.String(), + }, + } + + for i := 0; i < 3; i++ { + var raw []byte + var details []string + ctx := driver.WithImportDetails(driver.WithRawResponse(nil, &raw), &details) + stats, err := col.ImportDocuments(ctx, docs, &driver.ImportDocumentOptions{ + Overwrite: false, + }) + if err != nil { + t.Fatalf("Failed to import documents: %s", describe(err)) + } else { + if stats.Created != 2 { + t.Errorf("Expected %d created documents, got %d (json %s)", 2, stats.Created, formatRawResponse(raw)) + } + } + + countExpected := int64(2 * (i + 1)) + if count, err := col.Count(nil); err != nil { + t.Errorf("Failed to count documents: %s", describe(err)) + } else if count != countExpected { + t.Errorf("Expected count to be %d in round %d, got %d", countExpected, i, count) + } + } +} + +// TestImportEdgesPrefix imports documents and then checks that it exists. 
+func TestImportEdgesPrefix(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "edges_test", nil, t) + prefix := "import_edges_prefix_" + g := ensureGraph(ctx, db, prefix+"graph", nil, t) + col := ensureEdgeCollection(ctx, g, prefix+"citiesPerState", []string{prefix + "city"}, []string{prefix + "state"}, t) + cities := ensureCollection(ctx, db, prefix+"city", nil, t) + states := ensureCollection(ctx, db, prefix+"state", nil, t) + createDocument(ctx, cities, map[string]interface{}{"_key": "venlo", "name": "Venlo"}, t) + createDocument(ctx, states, map[string]interface{}{"_key": "lb", "name": "Limburg"}, t) + + docs := []interface{}{ + &RouteEdge{ + "venlo", + "lb", + 40, + }, + map[string]interface{}{ + "_from": "venlo", + "_to": "lb", + }, + } + + var raw []byte + var details []string + ctx = driver.WithImportDetails(driver.WithRawResponse(ctx, &raw), &details) + stats, err := col.ImportDocuments(ctx, docs, &driver.ImportDocumentOptions{ + FromPrefix: prefix + "city", + ToPrefix: prefix + "state", + }) + if err != nil { + t.Fatalf("Failed to import documents: %s", describe(err)) + } else { + if stats.Created != 2 { + t.Errorf("Expected %d created documents, got %d (json %s)", 2, stats.Created, formatRawResponse(raw)) + } + } + + countExpected := int64(2) + if count, err := col.Count(nil); err != nil { + t.Errorf("Failed to count documents: %s", describe(err)) + } else if count != countExpected { + t.Errorf("Expected count to be %d, got %d", countExpected, count) + } +} diff --git a/deps/github.com/arangodb/go-driver/test/edges_remove_test.go b/deps/github.com/arangodb/go-driver/test/edges_remove_test.go new file mode 100644 index 000000000..f506f865b --- /dev/null +++ b/deps/github.com/arangodb/go-driver/test/edges_remove_test.go @@ -0,0 +1,275 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may 
not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "context" + "testing" + + driver "github.com/arangodb/go-driver" +) + +// TestRemoveEdges creates documents, removes them and then checks the removal has succeeded. +func TestRemoveEdges(t *testing.T) { + var ctx context.Context + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "edges_test", nil, t) + prefix := "remove_edges_" + g := ensureGraph(ctx, db, prefix+"graph", nil, t) + ec := ensureEdgeCollection(ctx, g, prefix+"citiesPerState", []string{prefix + "city"}, []string{prefix + "state"}, t) + cities := ensureCollection(ctx, db, prefix+"city", nil, t) + states := ensureCollection(ctx, db, prefix+"state", nil, t) + from := createDocument(ctx, cities, map[string]interface{}{"name": "Venlo"}, t) + to := createDocument(ctx, states, map[string]interface{}{"name": "Limburg"}, t) + + docs := []RouteEdge{ + RouteEdge{ + From: from.ID.String(), + To: to.ID.String(), + Distance: 40, + }, + RouteEdge{ + From: from.ID.String(), + To: to.ID.String(), + Distance: 68, + }, + RouteEdge{ + From: from.ID.String(), + To: to.ID.String(), + Distance: 21, + }, + } + metas, errs, err := ec.CreateDocuments(ctx, docs) + if err != nil { + t.Fatalf("Failed to create new documents: %s", describe(err)) + } else if err := errs.FirstNonNil(); err != nil { + t.Fatalf("Expected no errors, got first: %s", describe(err)) + } + if _, _, err := ec.RemoveDocuments(ctx, metas.Keys()); err 
!= nil { + t.Fatalf("Failed to remove documents: %s", describe(err)) + } + // Should not longer exist + for i, meta := range metas { + var readDoc Account + if _, err := ec.ReadDocument(ctx, meta.Key, &readDoc); !driver.IsNotFound(err) { + t.Fatalf("Expected NotFoundError at %d, got %s", i, describe(err)) + } + } +} + +// TestRemoveEdgesReturnOld creates documents, removes them checks the ReturnOld value. +func TestRemoveEdgesReturnOld(t *testing.T) { + var ctx context.Context + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "edges_test", nil, t) + skipBelowVersion(c, "3.4", t) // See https://github.com/arangodb/arangodb/issues/2363 + prefix := "remove_edges_returnOld_" + g := ensureGraph(ctx, db, prefix+"graph", nil, t) + ec := ensureEdgeCollection(ctx, g, prefix+"citiesPerState", []string{prefix + "city"}, []string{prefix + "state"}, t) + cities := ensureCollection(ctx, db, prefix+"city", nil, t) + states := ensureCollection(ctx, db, prefix+"state", nil, t) + from := createDocument(ctx, cities, map[string]interface{}{"name": "Venlo"}, t) + to := createDocument(ctx, states, map[string]interface{}{"name": "Limburg"}, t) + + docs := []RouteEdge{ + RouteEdge{ + From: from.ID.String(), + To: to.ID.String(), + Distance: 40, + }, + RouteEdge{ + From: from.ID.String(), + To: to.ID.String(), + Distance: 68, + }, + RouteEdge{ + From: from.ID.String(), + To: to.ID.String(), + Distance: 21, + }, + } + metas, errs, err := ec.CreateDocuments(ctx, docs) + if err != nil { + t.Fatalf("Failed to create new document: %s", describe(err)) + } else if err := errs.FirstNonNil(); err != nil { + t.Fatalf("Expected no errors, got first: %s", describe(err)) + } + oldDocs := make([]RouteEdge, len(docs)) + ctx = driver.WithReturnOld(ctx, oldDocs) + _, errs, err = ec.RemoveDocuments(ctx, metas.Keys()) + if err != nil { + t.Fatalf("Failed to remove documents: %s", describe(err)) + } + // Check errors + for i, err := range errs { + if !driver.IsInvalidArgument(err) { + 
t.Fatalf("Expected InvalidArgumentError at %d, got %s", i, describe(err)) + } + } +} + +// TestRemoveEdgesSilent creates documents, removes them with Silent() and then checks the meta is indeed empty. +func TestRemoveEdgesSilent(t *testing.T) { + var ctx context.Context + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "edges_test", nil, t) + prefix := "remove_edges_silent_" + g := ensureGraph(ctx, db, prefix+"graph", nil, t) + ec := ensureEdgeCollection(ctx, g, prefix+"citiesPerState", []string{prefix + "city"}, []string{prefix + "state"}, t) + cities := ensureCollection(ctx, db, prefix+"city", nil, t) + states := ensureCollection(ctx, db, prefix+"state", nil, t) + from := createDocument(ctx, cities, map[string]interface{}{"name": "Venlo"}, t) + to := createDocument(ctx, states, map[string]interface{}{"name": "Limburg"}, t) + + docs := []RouteEdge{ + RouteEdge{ + From: from.ID.String(), + To: to.ID.String(), + Distance: 40, + }, + RouteEdge{ + From: from.ID.String(), + To: to.ID.String(), + Distance: 21, + }, + } + metas, errs, err := ec.CreateDocuments(ctx, docs) + if err != nil { + t.Fatalf("Failed to create new documents: %s", describe(err)) + } else if err := errs.FirstNonNil(); err != nil { + t.Fatalf("Expected no errors, got first: %s", describe(err)) + } + ctx = driver.WithSilent(ctx) + if rmetas, rerrs, err := ec.RemoveDocuments(ctx, metas.Keys()); err != nil { + t.Fatalf("Failed to remove documents: %s", describe(err)) + } else { + if len(rmetas) > 0 { + t.Errorf("Expected empty metas, got %d", len(rmetas)) + } + if len(rerrs) > 0 { + t.Errorf("Expected empty errors, got %d", len(rerrs)) + } + } + // Should not longer exist + for i, meta := range metas { + var readDoc RouteEdge + if _, err := ec.ReadDocument(ctx, meta.Key, &readDoc); !driver.IsNotFound(err) { + t.Errorf("Expected NotFoundError at %d, got %s", i, describe(err)) + } + } +} + +// TestRemoveEdgesRevision creates documents, removes them with an incorrect revisions. 
+func TestRemoveEdgesRevision(t *testing.T) { + var ctx context.Context + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "edges_test", nil, t) + prefix := "remove_edges_revision_" + g := ensureGraph(ctx, db, prefix+"graph", nil, t) + ec := ensureEdgeCollection(ctx, g, prefix+"citiesPerState", []string{prefix + "city"}, []string{prefix + "state"}, t) + cities := ensureCollection(ctx, db, prefix+"city", nil, t) + states := ensureCollection(ctx, db, prefix+"state", nil, t) + from := createDocument(ctx, cities, map[string]interface{}{"name": "Venlo"}, t) + to := createDocument(ctx, states, map[string]interface{}{"name": "Limburg"}, t) + + docs := []RouteEdge{ + RouteEdge{ + From: from.ID.String(), + To: to.ID.String(), + Distance: 40, + }, + RouteEdge{ + From: from.ID.String(), + To: to.ID.String(), + Distance: 21, + }, + } + metas, errs, err := ec.CreateDocuments(ctx, docs) + if err != nil { + t.Fatalf("Failed to create new documents: %s", describe(err)) + } else if err := errs.FirstNonNil(); err != nil { + t.Fatalf("Expected no errors, got first: %s", describe(err)) + } + + // Replace the documents to get another revision + replacements := []RouteEdge{ + RouteEdge{ + From: from.ID.String(), + To: to.ID.String(), + Distance: 880, + }, + RouteEdge{ + From: from.ID.String(), + To: to.ID.String(), + Distance: 210, + }, + } + metas2, errs2, err := ec.ReplaceDocuments(ctx, metas.Keys(), replacements) + if err != nil { + t.Fatalf("Failed to replace documents: %s", describe(err)) + } else if err := errs2.FirstNonNil(); err != nil { + t.Fatalf("Expected no errors, got first: %s", describe(err)) + } + + // Try to remove documents with initial revision (must fail) + initialRevCtx := driver.WithRevisions(ctx, metas.Revs()) + if _, errs, err := ec.RemoveDocuments(initialRevCtx, metas.Keys()); err != nil { + t.Fatalf("Expected success, got %s", describe(err)) + } else { + for i, err := range errs { + if !driver.IsPreconditionFailed(err) { + t.Errorf("Expected 
PreconditionFailedError at %d, got %s", i, describe(err)) + } + } + } + + // Try to remove documents with correct revision (must succeed) + replacedRevCtx := driver.WithRevisions(ctx, metas2.Revs()) + if _, errs, err := ec.RemoveDocuments(replacedRevCtx, metas.Keys()); err != nil { + t.Fatalf("Expected success, got %s", describe(err)) + } else if err := errs.FirstNonNil(); err != nil { + t.Fatalf("Expected no errors, got first: %s", describe(err)) + } + + // Should not longer exist + for i, meta := range metas { + var readDoc RouteEdge + if _, err := ec.ReadDocument(ctx, meta.Key, &readDoc); !driver.IsNotFound(err) { + t.Errorf("Expected NotFoundError at %d, got %s", i, describe(err)) + } + } +} + +// TestRemoveEdgesKeyEmpty removes a document it with an empty key. +func TestRemoveEdgesKeyEmpty(t *testing.T) { + var ctx context.Context + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "edges_test", nil, t) + prefix := "remove_edges_keyEmpty_" + g := ensureGraph(ctx, db, prefix+"graph", nil, t) + ec := ensureEdgeCollection(ctx, g, prefix+"citiesPerState", []string{prefix + "city"}, []string{prefix + "state"}, t) + + if _, _, err := ec.RemoveDocuments(nil, []string{""}); !driver.IsInvalidArgument(err) { + t.Errorf("Expected InvalidArgumentError, got %s", describe(err)) + } +} diff --git a/deps/github.com/arangodb/go-driver/test/edges_replace_test.go b/deps/github.com/arangodb/go-driver/test/edges_replace_test.go new file mode 100644 index 000000000..4a1b8b921 --- /dev/null +++ b/deps/github.com/arangodb/go-driver/test/edges_replace_test.go @@ -0,0 +1,392 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "context" + "reflect" + "strings" + "testing" + + driver "github.com/arangodb/go-driver" +) + +// TestReplaceEdges creates documents, replaces them and then checks the replacements have succeeded. +func TestReplaceEdges(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "edges_test", nil, t) + prefix := "replace_edges_" + g := ensureGraph(ctx, db, prefix+"graph", nil, t) + ec := ensureEdgeCollection(ctx, g, prefix+"relation", []string{prefix + "male", prefix + "female"}, []string{prefix + "male", prefix + "female"}, t) + male := ensureCollection(ctx, db, prefix+"male", nil, t) + female := ensureCollection(ctx, db, prefix+"female", nil, t) + from := createDocument(ctx, male, map[string]interface{}{"name": "Jan"}, t) + to := createDocument(ctx, female, map[string]interface{}{"name": "Alice"}, t) + + docs := []RelationEdge{ + RelationEdge{ + From: from.ID.String(), + To: to.ID.String(), + Type: "friend", + }, + RelationEdge{ + From: from.ID.String(), + To: to.ID.String(), + Type: "friend", + }, + } + metas, errs, err := ec.CreateDocuments(ctx, docs) + if err != nil { + t.Fatalf("Failed to create new document: %s", describe(err)) + } else if err := errs.FirstNonNil(); err != nil { + t.Fatalf("Expected no errors, got first: %s", describe(err)) + } + // Replacement docs + replacements := []driver.EdgeDocument{ + driver.EdgeDocument{ + From: to.ID, + To: from.ID, + }, + 
driver.EdgeDocument{ + From: to.ID, + To: from.ID, + }, + } + if _, _, err := ec.ReplaceDocuments(ctx, metas.Keys(), replacements); err != nil { + t.Fatalf("Failed to replace documents: %s", describe(err)) + } + // Read replaced documents + for i, meta := range metas { + var readDoc driver.EdgeDocument + if _, err := ec.ReadDocument(ctx, meta.Key, &readDoc); err != nil { + t.Fatalf("Failed to read document '%s': %s", meta.Key, describe(err)) + } + if !reflect.DeepEqual(replacements[i], readDoc) { + t.Errorf("Got wrong document %d. Expected %+v, got %+v", i, replacements[i], readDoc) + } + } +} + +// TestReplaceEdgesReturnOld creates documents, replaces them checks the ReturnOld values. +func TestReplaceEdgesReturnOld(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + skipBelowVersion(c, "3.4", t) // See https://github.com/arangodb/arangodb/issues/2363 + db := ensureDatabase(ctx, c, "edges_test", nil, t) + prefix := "replace_edges_returnOld_" + g := ensureGraph(ctx, db, prefix+"graph", nil, t) + ec := ensureEdgeCollection(ctx, g, prefix+"relation", []string{prefix + "male", prefix + "female"}, []string{prefix + "male", prefix + "female"}, t) + male := ensureCollection(ctx, db, prefix+"male", nil, t) + female := ensureCollection(ctx, db, prefix+"female", nil, t) + from := createDocument(ctx, male, map[string]interface{}{"name": "Jan"}, t) + to := createDocument(ctx, female, map[string]interface{}{"name": "Alice"}, t) + + docs := []RelationEdge{ + RelationEdge{ + From: from.ID.String(), + To: to.ID.String(), + Type: "friend", + }, + RelationEdge{ + From: from.ID.String(), + To: to.ID.String(), + Type: "married", + }, + } + metas, errs, err := ec.CreateDocuments(ctx, docs) + if err != nil { + t.Fatalf("Failed to create new documents: %s", describe(err)) + } else if err := errs.FirstNonNil(); err != nil { + t.Fatalf("Expected no errors, got first: %s", describe(err)) + } + // Replace documents + replacements := []driver.EdgeDocument{ + 
driver.EdgeDocument{ + From: to.ID, + To: from.ID, + }, + driver.EdgeDocument{ + From: to.ID, + To: from.ID, + }, + } + oldDocs := make([]RelationEdge, len(docs)) + ctx = driver.WithReturnOld(ctx, oldDocs) + if _, _, err := ec.ReplaceDocuments(ctx, metas.Keys(), replacements); err != nil { + t.Fatalf("Failed to replace documents: %s", describe(err)) + } + // Check old document + for i, doc := range docs { + if !reflect.DeepEqual(doc, oldDocs[i]) { + t.Errorf("Got wrong document %d. Expected %+v, got %+v", i, doc, oldDocs[i]) + } + } +} + +// TestReplaceEdgesReturnNew creates documents, replaces them checks the ReturnNew values. +func TestReplaceEdgesReturnNew(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + skipBelowVersion(c, "3.4", t) // See https://github.com/arangodb/arangodb/issues/2363 + db := ensureDatabase(ctx, c, "edges_test", nil, t) + prefix := "replace_edges_returnNew_" + g := ensureGraph(ctx, db, prefix+"graph", nil, t) + ec := ensureEdgeCollection(ctx, g, prefix+"relation", []string{prefix + "male", prefix + "female"}, []string{prefix + "male", prefix + "female"}, t) + male := ensureCollection(ctx, db, prefix+"male", nil, t) + female := ensureCollection(ctx, db, prefix+"female", nil, t) + from := createDocument(ctx, male, map[string]interface{}{"name": "Jan"}, t) + to := createDocument(ctx, female, map[string]interface{}{"name": "Alice"}, t) + + docs := []RelationEdge{ + RelationEdge{ + From: from.ID.String(), + To: to.ID.String(), + Type: "friend", + }, + RelationEdge{ + From: from.ID.String(), + To: to.ID.String(), + Type: "married", + }, + } + metas, errs, err := ec.CreateDocuments(ctx, docs) + if err != nil { + t.Fatalf("Failed to create new documents: %s", describe(err)) + } else if err := errs.FirstNonNil(); err != nil { + t.Fatalf("Expected no errors, got first: %s", describe(err)) + } + // Replace documents + replacements := []driver.EdgeDocument{ + driver.EdgeDocument{ + From: to.ID, + To: from.ID, + }, + 
driver.EdgeDocument{ + From: to.ID, + To: from.ID, + }, + } + newDocs := make([]driver.EdgeDocument, len(docs)) + ctx = driver.WithReturnNew(ctx, newDocs) + if _, _, err := ec.ReplaceDocuments(ctx, metas.Keys(), replacements); err != nil { + t.Fatalf("Failed to replace documents: %s", describe(err)) + } + // Check new documents + for i, replacement := range replacements { + expected := replacement + if !reflect.DeepEqual(expected, newDocs[i]) { + t.Errorf("Got wrong document %d. Expected %+v, got %+v", i, expected, newDocs[i]) + } + } +} + +// TestReplaceEdgesSilent creates documents, replaces them with Silent() and then checks the meta is indeed empty. +func TestReplaceEdgesSilent(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "edges_test", nil, t) + prefix := "replace_edges_silent_" + g := ensureGraph(ctx, db, prefix+"graph", nil, t) + ec := ensureEdgeCollection(ctx, g, prefix+"relation", []string{prefix + "male", prefix + "female"}, []string{prefix + "male", prefix + "female"}, t) + male := ensureCollection(ctx, db, prefix+"male", nil, t) + female := ensureCollection(ctx, db, prefix+"female", nil, t) + from := createDocument(ctx, male, map[string]interface{}{"name": "Jan"}, t) + to := createDocument(ctx, female, map[string]interface{}{"name": "Alice"}, t) + + docs := []RelationEdge{ + RelationEdge{ + From: from.ID.String(), + To: to.ID.String(), + Type: "friend", + }, + RelationEdge{ + From: from.ID.String(), + To: to.ID.String(), + Type: "married", + }, + } + metas, errs, err := ec.CreateDocuments(ctx, docs) + if err != nil { + t.Fatalf("Failed to create new documents: %s", describe(err)) + } else if err := errs.FirstNonNil(); err != nil { + t.Fatalf("Expected no errors, got first: %s", describe(err)) + } + // Replace documents + replacements := []driver.EdgeDocument{ + driver.EdgeDocument{ + From: to.ID, + To: from.ID, + }, + driver.EdgeDocument{ + From: to.ID, + To: from.ID, + }, + } + ctx = 
driver.WithSilent(ctx) + if metas, errs, err := ec.ReplaceDocuments(ctx, metas.Keys(), replacements); err != nil { + t.Fatalf("Failed to replace documents: %s", describe(err)) + } else { + if len(errs) > 0 { + t.Errorf("Expected 0 errors, got %d", len(errs)) + } + if len(metas) > 0 { + t.Errorf("Expected 0 metas, got %d", len(metas)) + } + } +} + +// TestReplaceEdgesRevision creates documents, replaces them with specific (correct) revisions. +// Then it attempts replacements with incorrect revisions which must fail. +func TestReplaceEdgesRevision(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "edges_test", nil, t) + prefix := "replace_edges_revision_" + g := ensureGraph(ctx, db, prefix+"graph", nil, t) + ec := ensureEdgeCollection(ctx, g, prefix+"relation", []string{prefix + "male", prefix + "female"}, []string{prefix + "male", prefix + "female"}, t) + male := ensureCollection(ctx, db, prefix+"male", nil, t) + female := ensureCollection(ctx, db, prefix+"female", nil, t) + from := createDocument(ctx, male, map[string]interface{}{"name": "Jan"}, t) + to := createDocument(ctx, female, map[string]interface{}{"name": "Alice"}, t) + + docs := []RelationEdge{ + RelationEdge{ + From: from.ID.String(), + To: to.ID.String(), + Type: "friend", + }, + RelationEdge{ + From: from.ID.String(), + To: to.ID.String(), + Type: "married", + }, + } + metas, errs, err := ec.CreateDocuments(ctx, docs) + if err != nil { + t.Fatalf("Failed to create new documents: %s", describe(err)) + } else if err := errs.FirstNonNil(); err != nil { + t.Fatalf("Expected no errors, got first: %s", describe(err)) + } + + // Replace documents with correct revisions + replacements := []RelationEdge{ + RelationEdge{ + From: from.ID.String(), + To: to.ID.String(), + Type: "old-friend", + }, + RelationEdge{ + From: from.ID.String(), + To: to.ID.String(), + Type: "just-married", + }, + } + initialRevCtx := driver.WithRevisions(ctx, metas.Revs()) 
+ var replacedRevCtx context.Context + if metas2, errs, err := ec.ReplaceDocuments(initialRevCtx, metas.Keys(), replacements); err != nil { + t.Fatalf("Failed to replace documents: %s", describe(err)) + } else if err := errs.FirstNonNil(); err != nil { + t.Fatalf("Expected no errors, got first: %s", describe(err)) + } else { + replacedRevCtx = driver.WithRevisions(ctx, metas2.Revs()) + if strings.Join(metas2.Revs(), ",") == strings.Join(metas.Revs(), ",") { + t.Errorf("Expected revisions to change, got initial revisions '%s', replaced revisions '%s'", strings.Join(metas.Revs(), ","), strings.Join(metas2.Revs(), ",")) + } + } + + // Replace documents with incorrect revision + replacements[0].Type = "Wrong deal 1" + replacements[1].Type = "Wrong deal 2" + if _, errs, err := ec.ReplaceDocuments(initialRevCtx, metas.Keys(), replacements); err != nil { + t.Errorf("Expected success, got %s", describe(err)) + } else { + for i, err := range errs { + if !driver.IsPreconditionFailed(err) { + t.Errorf("Expected PreconditionFailedError at %d, got %s", i, describe(err)) + } + } + } + + // Replace document once more with correct revision + replacements[0].Type = "Good deal 1" + replacements[1].Type = "Good deal 2" + if _, errs, err := ec.ReplaceDocuments(replacedRevCtx, metas.Keys(), replacements); err != nil { + t.Errorf("Expected success, got %s", describe(err)) + } else if err := errs.FirstNonNil(); err != nil { + t.Fatalf("Expected no errors, got first: %s", describe(err)) + } +} + +// TestReplaceEdgesKeyEmpty replaces a document with an empty key. 
+func TestReplaceEdgesKeyEmpty(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "edges_test", nil, t) + prefix := "replace_edges_updateNil_" + g := ensureGraph(ctx, db, prefix+"graph", nil, t) + ec := ensureEdgeCollection(ctx, g, prefix+"relation", []string{prefix + "male", prefix + "female"}, []string{prefix + "male", prefix + "female"}, t) + // Replacement document + replacement := map[string]interface{}{ + "name": "Updated", + } + if _, _, err := ec.ReplaceDocuments(nil, []string{""}, replacement); !driver.IsInvalidArgument(err) { + t.Errorf("Expected InvalidArgumentError, got %s", describe(err)) + } +} + +// TestReplaceEdgesUpdateNil replaces a document with a nil update. +func TestReplaceEdgesUpdateNil(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "edges_test", nil, t) + prefix := "replace_edges_updateNil_" + g := ensureGraph(ctx, db, prefix+"graph", nil, t) + ec := ensureEdgeCollection(ctx, g, prefix+"relation", []string{prefix + "male", prefix + "female"}, []string{prefix + "male", prefix + "female"}, t) + + if _, _, err := ec.ReplaceDocuments(nil, []string{"validKey"}, nil); !driver.IsInvalidArgument(err) { + t.Errorf("Expected InvalidArgumentError, got %s", describe(err)) + } +} + +// TestReplaceEdgesUpdateLenDiff replaces documents with a different number of replacements and keys. 
+func TestReplaceEdgesUpdateLenDiff(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "edges_test", nil, t) + prefix := "replace_edges_updateNil_" + g := ensureGraph(ctx, db, prefix+"graph", nil, t) + ec := ensureEdgeCollection(ctx, g, prefix+"relation", []string{prefix + "male", prefix + "female"}, []string{prefix + "male", prefix + "female"}, t) + + replacements := []map[string]interface{}{ + map[string]interface{}{ + "name": "name1", + }, + map[string]interface{}{ + "name": "name2", + }, + } + if _, _, err := ec.ReplaceDocuments(nil, []string{"only1"}, replacements); !driver.IsInvalidArgument(err) { + t.Errorf("Expected InvalidArgumentError, got %s", describe(err)) + } +} diff --git a/deps/github.com/arangodb/go-driver/test/edges_update_test.go b/deps/github.com/arangodb/go-driver/test/edges_update_test.go new file mode 100644 index 000000000..e324ef394 --- /dev/null +++ b/deps/github.com/arangodb/go-driver/test/edges_update_test.go @@ -0,0 +1,528 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "context" + "fmt" + "reflect" + "strings" + "testing" + + driver "github.com/arangodb/go-driver" +) + +// TestUpdateEdges creates documents, updates them and then checks the updates have succeeded. 
+func TestUpdateEdges(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "edges_test", nil, t) + prefix := "update_edges_" + g := ensureGraph(ctx, db, prefix+"graph", nil, t) + ec := ensureEdgeCollection(ctx, g, prefix+"relation", []string{prefix + "male", prefix + "female"}, []string{prefix + "male", prefix + "female"}, t) + male := ensureCollection(ctx, db, prefix+"male", nil, t) + female := ensureCollection(ctx, db, prefix+"female", nil, t) + from := createDocument(ctx, male, map[string]interface{}{"name": "Jan"}, t) + to := createDocument(ctx, female, map[string]interface{}{"name": "Alice"}, t) + + docs := []RelationEdge{ + RelationEdge{ + From: from.ID.String(), + To: to.ID.String(), + Type: "friend", + }, + RelationEdge{ + From: from.ID.String(), + To: to.ID.String(), + Type: "friend", + }, + } + metas, errs, err := ec.CreateDocuments(ctx, docs) + if err != nil { + t.Fatalf("Failed to create new documents: %s", describe(err)) + } else if err := errs.FirstNonNil(); err != nil { + t.Fatalf("Expected no errors, got first: %s", describe(err)) + } + // Update documents + updates := []map[string]interface{}{ + map[string]interface{}{ + "type": "Updated1", + }, + map[string]interface{}{ + "type": "Updated2", + }, + } + if _, _, err := ec.UpdateDocuments(ctx, metas.Keys(), updates); err != nil { + t.Fatalf("Failed to update documents: %s", describe(err)) + } + // Read updated documents + for i, meta := range metas { + var readDoc RelationEdge + if _, err := ec.ReadDocument(ctx, meta.Key, &readDoc); err != nil { + t.Fatalf("Failed to read document '%s': %s", meta.Key, describe(err)) + } + doc := docs[i] + doc.Type = fmt.Sprintf("Updated%d", i+1) + if !reflect.DeepEqual(doc, readDoc) { + t.Errorf("Got wrong document %d. Expected %+v, got %+v", i, doc, readDoc) + } + } +} + +// TestUpdateEdgesReturnOld creates documents, updates them checks the ReturnOld values. 
+func TestUpdateEdgesReturnOld(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + skipBelowVersion(c, "3.4", t) // See https://github.com/arangodb/arangodb/issues/2363 + db := ensureDatabase(ctx, c, "edges_test", nil, t) + prefix := "update_edges_returnOld_" + g := ensureGraph(ctx, db, prefix+"graph", nil, t) + ec := ensureEdgeCollection(ctx, g, prefix+"relation", []string{prefix + "male", prefix + "female"}, []string{prefix + "male", prefix + "female"}, t) + male := ensureCollection(ctx, db, prefix+"male", nil, t) + female := ensureCollection(ctx, db, prefix+"female", nil, t) + from := createDocument(ctx, male, map[string]interface{}{"name": "Jan"}, t) + to := createDocument(ctx, female, map[string]interface{}{"name": "Alice"}, t) + + docs := []RelationEdge{ + RelationEdge{ + From: from.ID.String(), + To: to.ID.String(), + Type: "friend", + }, + RelationEdge{ + From: from.ID.String(), + To: to.ID.String(), + Type: "friend", + }, + } + metas, errs, err := ec.CreateDocuments(ctx, docs) + if err != nil { + t.Fatalf("Failed to create new documents: %s", describe(err)) + } else if err := errs.FirstNonNil(); err != nil { + t.Fatalf("Expected no errors, got first: %s", describe(err)) + } + // Update documents + updates := []map[string]interface{}{ + map[string]interface{}{ + "type": "Updated1", + }, + map[string]interface{}{ + "type": "Updated2", + }, + } + oldDocs := make([]RelationEdge, len(docs)) + ctx = driver.WithReturnOld(ctx, oldDocs) + if _, _, err := ec.UpdateDocuments(ctx, metas.Keys(), updates); err != nil { + t.Fatalf("Failed to update documents: %s", describe(err)) + } + // Check old documents + for i, doc := range docs { + if !reflect.DeepEqual(doc, oldDocs[i]) { + t.Errorf("Got wrong document %d. Expected %+v, got %+v", i, doc, oldDocs[i]) + } + } +} + +// TestUpdateEdgesReturnNew creates documents, updates them checks the ReturnNew values. 
+func TestUpdateEdgesReturnNew(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + skipBelowVersion(c, "3.4", t) // See https://github.com/arangodb/arangodb/issues/2363 + db := ensureDatabase(ctx, c, "edges_test", nil, t) + prefix := "update_edges_returnOld_" + g := ensureGraph(ctx, db, prefix+"graph", nil, t) + ec := ensureEdgeCollection(ctx, g, prefix+"relation", []string{prefix + "male", prefix + "female"}, []string{prefix + "male", prefix + "female"}, t) + male := ensureCollection(ctx, db, prefix+"male", nil, t) + female := ensureCollection(ctx, db, prefix+"female", nil, t) + from := createDocument(ctx, male, map[string]interface{}{"name": "Jan"}, t) + to := createDocument(ctx, female, map[string]interface{}{"name": "Alice"}, t) + + docs := []RelationEdge{ + RelationEdge{ + From: from.ID.String(), + To: to.ID.String(), + Type: "friend", + }, + RelationEdge{ + From: from.ID.String(), + To: to.ID.String(), + Type: "friend", + }, + } + metas, errs, err := ec.CreateDocuments(ctx, docs) + if err != nil { + t.Fatalf("Failed to create new documents: %s", describe(err)) + } else if err := errs.FirstNonNil(); err != nil { + t.Fatalf("Expected no errors, got first: %s", describe(err)) + } + // Update documents + updates := []map[string]interface{}{ + map[string]interface{}{ + "type": "Updated1", + }, + map[string]interface{}{ + "type": "Updated2", + }, + } + newDocs := make([]RelationEdge, len(docs)) + ctx = driver.WithReturnNew(ctx, newDocs) + if _, _, err := ec.UpdateDocuments(ctx, metas.Keys(), updates); err != nil { + t.Fatalf("Failed to update documents: %s", describe(err)) + } + // Check new documents + for i, doc := range docs { + expected := doc + expected.Type = fmt.Sprintf("Updated%d", i+1) + if !reflect.DeepEqual(expected, newDocs[i]) { + t.Errorf("Got wrong document %d. 
Expected %+v, got %+v", i, expected, newDocs[i]) + } + } +} + +// TestUpdateEdgesKeepNullTrue creates documents, updates them with KeepNull(true) and then checks the updates have succeeded. +func TestUpdateEdgesKeepNullTrue(t *testing.T) { + ctx := context.Background() + var conn driver.Connection + c := createClientFromEnv(t, true, &conn) + db := ensureDatabase(ctx, c, "edges_test", nil, t) + prefix := "update_edges_keepNullTrue_" + g := ensureGraph(ctx, db, prefix+"graph", nil, t) + ec := ensureEdgeCollection(ctx, g, prefix+"relation", []string{prefix + "male", prefix + "female"}, []string{prefix + "male", prefix + "female"}, t) + male := ensureCollection(ctx, db, prefix+"male", nil, t) + female := ensureCollection(ctx, db, prefix+"female", nil, t) + from := createDocument(ctx, male, map[string]interface{}{"name": "Jan"}, t) + to := createDocument(ctx, female, map[string]interface{}{"name": "Alice"}, t) + + docs := []AccountEdge{ + AccountEdge{ + From: from.ID.String(), + To: to.ID.String(), + User: &UserDoc{ + "Greata", + 77, + }, + }, + AccountEdge{ + From: from.ID.String(), + To: to.ID.String(), + User: &UserDoc{ + "Mathilda", + 45, + }, + }, + } + + metas, errs, err := ec.CreateDocuments(ctx, docs) + if err != nil { + t.Fatalf("Failed to create new documents: %s", describe(err)) + } else if err := errs.FirstNonNil(); err != nil { + t.Fatalf("Expected no errors, got first: %s", describe(err)) + } + + // Update documents + updates := []map[string]interface{}{ + map[string]interface{}{ + "to": from.ID.String(), + "user": nil, + }, + map[string]interface{}{ + "from": to.ID.String(), + "user": nil, + }, + } + if _, _, err := ec.UpdateDocuments(driver.WithKeepNull(ctx, true), metas.Keys(), updates); err != nil { + t.Fatalf("Failed to update documents: %s", describe(err)) + } + // Read updated documents + for i, meta := range metas { + var readDoc map[string]interface{} + var rawResponse []byte + ctx = driver.WithRawResponse(ctx, &rawResponse) + if _, err := 
ec.ReadDocument(ctx, meta.Key, &readDoc); err != nil { + t.Fatalf("Failed to read document %d '%s': %s", i, meta.Key, describe(err)) + } + // We parse to this type of map, since unmarshalling nil values to a map of type map[string]interface{} + // will cause the entry to be deleted. + var jsonMap map[string]*driver.RawObject + if err := conn.Unmarshal(rawResponse, &jsonMap); err != nil { + t.Fatalf("Failed to parse raw response: %s", describe(err)) + } + // Get "edge" field and unmarshal it + if raw, found := jsonMap["edge"]; !found { + t.Errorf("Expected edge to be found but got not found") + } else { + jsonMap = nil + if err := conn.Unmarshal(*raw, &jsonMap); err != nil { + t.Fatalf("Failed to parse raw edge object: %s", describe(err)) + } + if raw, found := jsonMap["user"]; !found { + t.Errorf("Expected user to be found but got not found") + } else if raw != nil { + t.Errorf("Expected user to be found and nil, got %s", string(*raw)) + } + } + } +} + +// TestUpdateEdgesKeepNullFalse creates documents, updates them with KeepNull(false) and then checks the updates have succeeded. 
+func TestUpdateEdgesKeepNullFalse(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "edges_test", nil, t) + prefix := "update_edges_keepNullFalse_" + g := ensureGraph(ctx, db, prefix+"graph", nil, t) + ec := ensureEdgeCollection(ctx, g, prefix+"relation", []string{prefix + "male", prefix + "female"}, []string{prefix + "male", prefix + "female"}, t) + male := ensureCollection(ctx, db, prefix+"male", nil, t) + female := ensureCollection(ctx, db, prefix+"female", nil, t) + from := createDocument(ctx, male, map[string]interface{}{"name": "Jan"}, t) + to := createDocument(ctx, female, map[string]interface{}{"name": "Alice"}, t) + + docs := []AccountEdge{ + AccountEdge{ + From: from.ID.String(), + To: to.ID.String(), + User: &UserDoc{ + "Piere", + 77, + }, + }, + AccountEdge{ + From: from.ID.String(), + To: to.ID.String(), + User: &UserDoc{ + "Joan", + 45, + }, + }, + } + + metas, errs, err := ec.CreateDocuments(ctx, docs) + if err != nil { + t.Fatalf("Failed to create new documents: %s", describe(err)) + } else if err := errs.FirstNonNil(); err != nil { + t.Fatalf("Expected no errors, got first: %s", describe(err)) + } + // Update document + updates := []map[string]interface{}{ + map[string]interface{}{ + "to": from.ID.String(), + "user": nil, + }, + map[string]interface{}{ + "from": to.ID.String(), + "user": nil, + }, + } + if _, _, err := ec.UpdateDocuments(driver.WithKeepNull(ctx, false), metas.Keys(), updates); err != nil { + t.Fatalf("Failed to update documents: %s", describe(err)) + } + // Read updated documents + for i, meta := range metas { + readDoc := docs[i] + if _, err := ec.ReadDocument(ctx, meta.Key, &readDoc); err != nil { + t.Fatalf("Failed to read document '%s': %s", meta.Key, describe(err)) + } + if readDoc.User == nil { + t.Errorf("Expected user to be untouched, got %v", readDoc.User) + } + } +} + +// TestUpdateEdgesSilent creates documents, updates them with Silent() and then checks the 
metas are indeed empty. +func TestUpdateEdgesSilent(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "edges_test", nil, t) + prefix := "update_edges_silent_" + g := ensureGraph(ctx, db, prefix+"graph", nil, t) + ec := ensureEdgeCollection(ctx, g, prefix+"relation", []string{prefix + "male", prefix + "female"}, []string{prefix + "male", prefix + "female"}, t) + male := ensureCollection(ctx, db, prefix+"male", nil, t) + female := ensureCollection(ctx, db, prefix+"female", nil, t) + from := createDocument(ctx, male, map[string]interface{}{"name": "Jan"}, t) + to := createDocument(ctx, female, map[string]interface{}{"name": "Alice"}, t) + + docs := []RouteEdge{ + RouteEdge{ + From: from.ID.String(), + To: to.ID.String(), + Distance: 7, + }, + RouteEdge{ + From: from.ID.String(), + To: to.ID.String(), + Distance: 88, + }, + } + metas, _, err := ec.CreateDocuments(ctx, docs) + if err != nil { + t.Fatalf("Failed to create new documents: %s", describe(err)) + } + // Update documents + updates := []map[string]interface{}{ + map[string]interface{}{ + "distance": 61, + }, + map[string]interface{}{ + "distance": 16, + }, + } + ctx = driver.WithSilent(ctx) + if metas, errs, err := ec.UpdateDocuments(ctx, metas.Keys(), updates); err != nil { + t.Fatalf("Failed to update documents: %s", describe(err)) + } else if err := errs.FirstNonNil(); err != nil { + t.Fatalf("Expected no errors, got first: %s", describe(err)) + } else if strings.Join(metas.Keys(), "") != "" { + t.Errorf("Expected empty meta, got %v", metas) + } +} + +// TestUpdateEdgesRevision creates documents, updates them with a specific (correct) revisions. +// Then it attempts an update with an incorrect revisions which must fail. 
+func TestUpdateEdgesRevision(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "edges_test", nil, t) + prefix := "update_edges_revision_" + g := ensureGraph(ctx, db, prefix+"graph", nil, t) + ec := ensureEdgeCollection(ctx, g, prefix+"relation", []string{prefix + "male", prefix + "female"}, []string{prefix + "male", prefix + "female"}, t) + male := ensureCollection(ctx, db, prefix+"male", nil, t) + female := ensureCollection(ctx, db, prefix+"female", nil, t) + from := createDocument(ctx, male, map[string]interface{}{"name": "Jan"}, t) + to := createDocument(ctx, female, map[string]interface{}{"name": "Alice"}, t) + + docs := []RouteEdge{ + RouteEdge{ + From: from.ID.String(), + To: to.ID.String(), + Distance: 7, + }, + RouteEdge{ + From: from.ID.String(), + To: to.ID.String(), + Distance: 88, + }, + } + metas, errs, err := ec.CreateDocuments(ctx, docs) + if err != nil { + t.Fatalf("Failed to create new document: %s", describe(err)) + } else if len(metas) != len(docs) { + t.Fatalf("Expected %d metas, got %d", len(docs), len(metas)) + } else if err := errs.FirstNonNil(); err != nil { + t.Fatalf("Expected no errors, got first: %s", describe(err)) + } + + // Update documents with correct revisions + updates := []map[string]interface{}{ + map[string]interface{}{ + "distance": 34, + }, + map[string]interface{}{ + "distance": 77, + }, + } + initialRevCtx := driver.WithRevisions(ctx, metas.Revs()) + var updatedRevCtx context.Context + if metas2, _, err := ec.UpdateDocuments(initialRevCtx, metas.Keys(), updates); err != nil { + t.Fatalf("Failed to update documents: %s", describe(err)) + } else { + updatedRevCtx = driver.WithRevisions(ctx, metas2.Revs()) + if strings.Join(metas2.Revs(), ",") == strings.Join(metas.Revs(), ",") { + t.Errorf("Expected revision to change, got initial revision '%s', updated revision '%s'", strings.Join(metas.Revs(), ","), strings.Join(metas2.Revs(), ",")) + } + } + + // Update 
documents with incorrect revisions + updates[0]["distance"] = 35 + var rawResponse []byte + if _, errs, err := ec.UpdateDocuments(driver.WithRawResponse(initialRevCtx, &rawResponse), metas.Keys(), updates); err != nil { + t.Errorf("Expected success, got %s", describe(err)) + } else { + for _, err := range errs { + if !driver.IsPreconditionFailed(err) { + t.Errorf("Expected PreconditionFailedError, got %s (resp: %s", describe(err), string(rawResponse)) + } + } + } + + // Update documents once more with correct revisions + updates[0]["distance"] = 36 + if _, _, err := ec.UpdateDocuments(updatedRevCtx, metas.Keys(), updates); err != nil { + t.Errorf("Expected success, got %s", describe(err)) + } +} + +// TestUpdateEdgesKeyEmpty updates documents with an empty key. +func TestUpdateEdgesKeyEmpty(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "edges_test", nil, t) + prefix := "update_edges_keyEmpty_" + g := ensureGraph(ctx, db, prefix+"graph", nil, t) + ec := ensureEdgeCollection(ctx, g, prefix+"relation", []string{prefix + "male", prefix + "female"}, []string{prefix + "male", prefix + "female"}, t) + + // Update document + updates := []map[string]interface{}{ + map[string]interface{}{ + "name": "Updated", + }, + } + if _, _, err := ec.UpdateDocuments(nil, []string{""}, updates); !driver.IsInvalidArgument(err) { + t.Errorf("Expected InvalidArgumentError, got %s", describe(err)) + } +} + +// TestUpdateEdgesUpdateNil updates documents it with a nil update. 
+func TestUpdateEdgesUpdateNil(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "edges_test", nil, t) + prefix := "update_edges_updateNil_" + g := ensureGraph(ctx, db, prefix+"graph", nil, t) + ec := ensureEdgeCollection(ctx, g, prefix+"relation", []string{prefix + "male", prefix + "female"}, []string{prefix + "male", prefix + "female"}, t) + + if _, _, err := ec.UpdateDocuments(nil, []string{"validKey"}, nil); !driver.IsInvalidArgument(err) { + t.Errorf("Expected InvalidArgumentError, got %s", describe(err)) + } +} + +// TestUpdateEdgesUpdateLenDiff updates documents with a different number of updates, keys. +func TestUpdateEdgesUpdateLenDiff(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "edges_test", nil, t) + prefix := "update_edges_updateLenDiff_" + g := ensureGraph(ctx, db, prefix+"graph", nil, t) + ec := ensureEdgeCollection(ctx, g, prefix+"relation", []string{prefix + "male", prefix + "female"}, []string{prefix + "male", prefix + "female"}, t) + + updates := []map[string]interface{}{ + map[string]interface{}{ + "name": "name1", + }, + map[string]interface{}{ + "name": "name2", + }, + } + if _, _, err := ec.UpdateDocuments(nil, []string{"only1"}, updates); !driver.IsInvalidArgument(err) { + t.Errorf("Expected InvalidArgumentError, got %s", describe(err)) + } +} diff --git a/deps/github.com/arangodb/go-driver/test/failover_test.go b/deps/github.com/arangodb/go-driver/test/failover_test.go new file mode 100644 index 000000000..7d3a41793 --- /dev/null +++ b/deps/github.com/arangodb/go-driver/test/failover_test.go @@ -0,0 +1,181 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +// +build failover + +package test + +import ( + "context" + "math/rand" + "reflect" + "strconv" + "strings" + "testing" + "time" + + driver "github.com/arangodb/go-driver" + "github.com/coreos/go-iptables/iptables" +) + +const ( + filterTable = "filter" + chainName = "ARANGOGODRIVER" +) + +// TestFailoverDrop performs various tests while DROP'ng traffic to 1 coordinator. +func TestFailoverDrop(t *testing.T) { + failoverTest("DROP", t) +} + +// TestFailoverReject performs various tests while REJECT'ng traffic to 1 coordinator. 
+func TestFailoverReject(t *testing.T) { + failoverTest("REJECT", t) +} + +func failoverTest(action string, t *testing.T) { + iptc, err := iptables.New() + if err != nil { + t.Fatalf("Failed to create iptables client: %s", describe(err)) + } + createChains(iptc, t) + defer cleanupChains(iptc, t) + + coordinatorPorts := []int{7002, 7007, 7012} + var conn driver.Connection + c := createClientFromEnv(t, true, &conn) + db := ensureDatabase(nil, c, "failover_test", nil, t) + col := ensureCollection(nil, db, strings.ToLower(action)+"_test", nil, t) + + lastEndpoint := "" + endpointChanges := 0 + for i := 0; i < 1000 && endpointChanges < 10; i++ { + port := coordinatorPorts[rand.Intn(len(coordinatorPorts))] + ruleSpec := blockPort(iptc, port, action, t) + + // Perform low lever request and check handling endpoint + for { + var resp driver.Response + ctx := driver.WithResponse(nil, &resp) + ctx, cancel := context.WithTimeout(ctx, time.Second*9) + _, err := c.Version(ctx) + cancel() + if driver.IsResponse(err) { + t.Logf("ResponseError in version request") + continue + } else if err != nil { + t.Fatalf("Cannot execute request: %s", describe(err)) + } + ep := resp.Endpoint() + if ep != lastEndpoint { + lastEndpoint = ep + endpointChanges++ + t.Logf("New server detected: %s", ep) + } + break + } + + // Create document & read it + doc := UserDoc{ + "Jan", + 40, + } + meta, err := col.CreateDocument(nil, doc) + if err != nil { + t.Fatalf("Failed to create new document: %s", describe(err)) + } + // Document must exists now + var readDoc UserDoc + if _, err := col.ReadDocument(nil, meta.Key, &readDoc); err != nil { + t.Fatalf("Failed to read document '%s': %s", meta.Key, describe(err)) + } + if !reflect.DeepEqual(doc, readDoc) { + t.Errorf("Got wrong document. 
Expected %+v, got %+v", doc, readDoc) + } + + removeRuleSpec(iptc, ruleSpec, t) + } +} + +func blockPort(client *iptables.IPTables, port int, action string, t *testing.T) []string { + ruleSpec := []string{ + "-p", "tcp", + "-m", "tcp", "--dport", strconv.Itoa(port), + "-j", action, + } + t.Logf("Denying traffic to TCP port %d", port) + if found, err := client.Exists(filterTable, chainName, ruleSpec...); err != nil { + t.Fatalf("Failed to check existance of rulespec %q: %v", ruleSpec, err) + } else if !found { + if err := client.Insert(filterTable, chainName, 1, ruleSpec...); err != nil { + t.Fatalf("Failed to deny traffic to TCP port %d: %v", port, err) + } + } + return ruleSpec +} + +func removeRuleSpec(client *iptables.IPTables, ruleSpec []string, t *testing.T) { + if found, err := client.Exists(filterTable, chainName, ruleSpec...); err != nil { + t.Fatalf("Failed to check existance of rulespec %q: %v", ruleSpec, err) + } else if found { + if err := client.Delete(filterTable, chainName, ruleSpec...); err != nil { + t.Fatalf("Failed to remove ruleSpec %q: %v", ruleSpec, err) + } + } +} + +func createChains(client *iptables.IPTables, t *testing.T) { + if err := client.ClearChain(filterTable, chainName); err != nil { + t.Fatalf("Failed to create chain: %s", describe(err)) + } + if err := client.Append(filterTable, chainName, "-j", "RETURN"); err != nil { + t.Fatalf("Failed to append RETURN to chain: %s", describe(err)) + } + if err := client.Insert(filterTable, "INPUT", 1, "-j", chainName); err != nil { + t.Fatalf("Failed to insert INPUT chain: %s", describe(err)) + } + if err := client.Insert(filterTable, "FORWARD", 1, "-j", chainName); err != nil { + t.Fatalf("Failed to insert FORWARD OUTPUT chain: %s", describe(err)) + } + if err := client.Insert(filterTable, "OUTPUT", 1, "-j", chainName); err != nil { + t.Fatalf("Failed to insert OUTPUT chain: %s", describe(err)) + } +} + +// cleanupChains removes all generated iptables chain & rules made by createChains. 
+func cleanupChains(client *iptables.IPTables, t *testing.T) { + if err := client.Delete(filterTable, "INPUT", "-j", chainName); err != nil { + t.Logf("Failed to remove INPUT chain rule: %v", err) + } + if err := client.Delete(filterTable, "FORWARD", "-j", chainName); err != nil { + t.Logf("Failed to remove FORWARD chain rule: %v", err) + } + if err := client.Delete(filterTable, "OUTPUT", "-j", chainName); err != nil { + t.Logf("Failed to remove OUTPUT chain rule: %v", err) + } + if err := client.ClearChain(filterTable, chainName); err != nil { + t.Logf("Failed to clear '%s' chain: %v", chainName, err) + } + if err := client.DeleteChain(filterTable, chainName); err != nil { + t.Logf("Failed to remove '%s' chain: %v", chainName, err) + } +} diff --git a/deps/github.com/arangodb/go-driver/test/graph_test.go b/deps/github.com/arangodb/go-driver/test/graph_test.go new file mode 100644 index 000000000..541279417 --- /dev/null +++ b/deps/github.com/arangodb/go-driver/test/graph_test.go @@ -0,0 +1,109 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "context" + "testing" + + driver "github.com/arangodb/go-driver" +) + +// ensureGraph is a helper to check if a graph exists and create if if needed. +// It will fail the test when an error occurs. 
+func ensureGraph(ctx context.Context, db driver.Database, name string, options *driver.CreateGraphOptions, t *testing.T) driver.Graph { + g, err := db.Graph(ctx, name) + if driver.IsNotFound(err) { + g, err = db.CreateGraph(ctx, name, options) + if err != nil { + t.Fatalf("Failed to create graph '%s': %s", name, describe(err)) + } + } else if err != nil { + t.Fatalf("Failed to open graph '%s': %s", name, describe(err)) + } + return g +} + +// TestCreateGraph creates a graph and then checks that it exists. +func TestCreateGraph(t *testing.T) { + c := createClientFromEnv(t, true) + db := ensureDatabase(nil, c, "graph_test", nil, t) + name := "test_create_graph" + if _, err := db.CreateGraph(nil, name, nil); err != nil { + t.Fatalf("Failed to create graph '%s': %s", name, describe(err)) + } + // Graph must exist now + if found, err := db.GraphExists(nil, name); err != nil { + t.Errorf("GraphExists('%s') failed: %s", name, describe(err)) + } else if !found { + t.Errorf("GraphExists('%s') return false, expected true", name) + } + // Graph must be listed + if list, err := db.Graphs(nil); err != nil { + t.Errorf("Graphs failed: %s", describe(err)) + } else { + found := false + for _, g := range list { + if g.Name() == name { + found = true + break + } + } + if !found { + t.Errorf("Graph '%s' not found in list", name) + } + } + // Open graph + if g, err := db.Graph(nil, name); err != nil { + t.Errorf("Graph('%s') failed: %s", name, describe(err)) + } else if g.Name() != name { + t.Errorf("Graph.Name wrong. Expected '%s', got '%s'", name, g.Name()) + } +} + +// TestRemoveGraph creates a graph and then removes it. 
+func TestRemoveGraph(t *testing.T) { + c := createClientFromEnv(t, true) + db := ensureDatabase(nil, c, "graph_test", nil, t) + name := "test_remove_graph" + g, err := db.CreateGraph(nil, name, nil) + if err != nil { + t.Fatalf("Failed to create graph '%s': %s", name, describe(err)) + } + // Graph must exist now + if found, err := db.GraphExists(nil, name); err != nil { + t.Errorf("GraphExists('%s') failed: %s", name, describe(err)) + } else if !found { + t.Errorf("GraphExists('%s') return false, expected true", name) + } + // Now remove it + if err := g.Remove(nil); err != nil { + t.Fatalf("Failed to remove graph '%s': %s", name, describe(err)) + } + // Graph must not exist now + if found, err := db.GraphExists(nil, name); err != nil { + t.Errorf("GraphExists('%s') failed: %s", name, describe(err)) + } else if found { + t.Errorf("GraphExists('%s') return true, expected false", name) + } +} diff --git a/deps/github.com/arangodb/go-driver/test/index_ensure_test.go b/deps/github.com/arangodb/go-driver/test/index_ensure_test.go new file mode 100644 index 000000000..e5a78152a --- /dev/null +++ b/deps/github.com/arangodb/go-driver/test/index_ensure_test.go @@ -0,0 +1,296 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "fmt" + "testing" + + driver "github.com/arangodb/go-driver" +) + +// TestEnsureFullTextIndex creates a collection with a full text index. +func TestEnsureFullTextIndex(t *testing.T) { + c := createClientFromEnv(t, true) + db := ensureDatabase(nil, c, "index_test", nil, t) + + testOptions := []*driver.EnsureFullTextIndexOptions{ + nil, + &driver.EnsureFullTextIndexOptions{MinLength: 2}, + &driver.EnsureFullTextIndexOptions{MinLength: 20}, + } + + for i, options := range testOptions { + col := ensureCollection(nil, db, fmt.Sprintf("fulltext_index_test_%d", i), nil, t) + + idx, created, err := col.EnsureFullTextIndex(nil, []string{"name"}, options) + if err != nil { + t.Fatalf("Failed to create new index: %s", describe(err)) + } + if !created { + t.Error("Expected created to be true, got false") + } + + // Index must exists now + if found, err := col.IndexExists(nil, idx.Name()); err != nil { + t.Fatalf("Failed to check index '%s' exists: %s", idx.Name(), describe(err)) + } else if !found { + t.Errorf("Index '%s' does not exist, expected it to exist", idx.Name()) + } + + // Ensure again, created must be false now + _, created, err = col.EnsureFullTextIndex(nil, []string{"name"}, options) + if err != nil { + t.Fatalf("Failed to re-create index: %s", describe(err)) + } + if created { + t.Error("Expected created to be false, got true") + } + + // Remove index + if err := idx.Remove(nil); err != nil { + t.Fatalf("Failed to remove index '%s': %s", idx.Name(), describe(err)) + } + + // Index must not exists now + if found, err := col.IndexExists(nil, idx.Name()); err != nil { + t.Fatalf("Failed to check index '%s' exists: %s", idx.Name(), describe(err)) + } else if found { + t.Errorf("Index '%s' does exist, expected it not to exist", idx.Name()) + } + } +} + +// TestEnsureGeoIndex creates a collection with a geo index. 
+func TestEnsureGeoIndex(t *testing.T) { + c := createClientFromEnv(t, true) + db := ensureDatabase(nil, c, "index_test", nil, t) + + testOptions := []*driver.EnsureGeoIndexOptions{ + nil, + &driver.EnsureGeoIndexOptions{GeoJSON: true}, + &driver.EnsureGeoIndexOptions{GeoJSON: false}, + } + + for i, options := range testOptions { + col := ensureCollection(nil, db, fmt.Sprintf("geo_index_test_%d", i), nil, t) + + idx, created, err := col.EnsureGeoIndex(nil, []string{"name"}, options) + if err != nil { + t.Fatalf("Failed to create new index: %s", describe(err)) + } + if !created { + t.Error("Expected created to be true, got false") + } + + // Index must exists now + if found, err := col.IndexExists(nil, idx.Name()); err != nil { + t.Fatalf("Failed to check index '%s' exists: %s", idx.Name(), describe(err)) + } else if !found { + t.Errorf("Index '%s' does not exist, expected it to exist", idx.Name()) + } + + // Ensure again, created must be false now + _, created, err = col.EnsureGeoIndex(nil, []string{"name"}, options) + if err != nil { + t.Fatalf("Failed to re-create index: %s", describe(err)) + } + if created { + t.Error("Expected created to be false, got true") + } + + // Remove index + if err := idx.Remove(nil); err != nil { + t.Fatalf("Failed to remove index '%s': %s", idx.Name(), describe(err)) + } + + // Index must not exists now + if found, err := col.IndexExists(nil, idx.Name()); err != nil { + t.Fatalf("Failed to check index '%s' exists: %s", idx.Name(), describe(err)) + } else if found { + t.Errorf("Index '%s' does exist, expected it not to exist", idx.Name()) + } + } +} + +// TestEnsureHashIndex creates a collection with a hash index. 
+func TestEnsureHashIndex(t *testing.T) { + c := createClientFromEnv(t, true) + db := ensureDatabase(nil, c, "index_test", nil, t) + + testOptions := []*driver.EnsureHashIndexOptions{ + nil, + &driver.EnsureHashIndexOptions{Unique: true, Sparse: false}, + &driver.EnsureHashIndexOptions{Unique: true, Sparse: true}, + &driver.EnsureHashIndexOptions{Unique: false, Sparse: false}, + &driver.EnsureHashIndexOptions{Unique: false, Sparse: true}, + } + + for i, options := range testOptions { + col := ensureCollection(nil, db, fmt.Sprintf("hash_index_test_%d", i), nil, t) + + idx, created, err := col.EnsureHashIndex(nil, []string{"name"}, options) + if err != nil { + t.Fatalf("Failed to create new index: %s", describe(err)) + } + if !created { + t.Error("Expected created to be true, got false") + } + + // Index must exists now + if found, err := col.IndexExists(nil, idx.Name()); err != nil { + t.Fatalf("Failed to check index '%s' exists: %s", idx.Name(), describe(err)) + } else if !found { + t.Errorf("Index '%s' does not exist, expected it to exist", idx.Name()) + } + + // Ensure again, created must be false now + _, created, err = col.EnsureHashIndex(nil, []string{"name"}, options) + if err != nil { + t.Fatalf("Failed to re-create index: %s", describe(err)) + } + if created { + t.Error("Expected created to be false, got true") + } + + // Remove index + if err := idx.Remove(nil); err != nil { + t.Fatalf("Failed to remove index '%s': %s", idx.Name(), describe(err)) + } + + // Index must not exists now + if found, err := col.IndexExists(nil, idx.Name()); err != nil { + t.Fatalf("Failed to check index '%s' exists: %s", idx.Name(), describe(err)) + } else if found { + t.Errorf("Index '%s' does exist, expected it not to exist", idx.Name()) + } + } +} + +// TestEnsurePersistentIndex creates a collection with a persistent index. 
+func TestEnsurePersistentIndex(t *testing.T) { + c := createClientFromEnv(t, true) + db := ensureDatabase(nil, c, "index_test", nil, t) + + testOptions := []*driver.EnsurePersistentIndexOptions{ + nil, + &driver.EnsurePersistentIndexOptions{Unique: true, Sparse: false}, + &driver.EnsurePersistentIndexOptions{Unique: true, Sparse: true}, + &driver.EnsurePersistentIndexOptions{Unique: false, Sparse: false}, + &driver.EnsurePersistentIndexOptions{Unique: false, Sparse: true}, + } + + for i, options := range testOptions { + col := ensureCollection(nil, db, fmt.Sprintf("persistent_index_test_%d", i), nil, t) + + idx, created, err := col.EnsurePersistentIndex(nil, []string{"age", "name"}, options) + if err != nil { + t.Fatalf("Failed to create new index: %s", describe(err)) + } + if !created { + t.Error("Expected created to be true, got false") + } + + // Index must exists now + if found, err := col.IndexExists(nil, idx.Name()); err != nil { + t.Fatalf("Failed to check index '%s' exists: %s", idx.Name(), describe(err)) + } else if !found { + t.Errorf("Index '%s' does not exist, expected it to exist", idx.Name()) + } + + // Ensure again, created must be false now + _, created, err = col.EnsurePersistentIndex(nil, []string{"age", "name"}, options) + if err != nil { + t.Fatalf("Failed to re-create index: %s", describe(err)) + } + if created { + t.Error("Expected created to be false, got true") + } + + // Remove index + if err := idx.Remove(nil); err != nil { + t.Fatalf("Failed to remove index '%s': %s", idx.Name(), describe(err)) + } + + // Index must not exists now + if found, err := col.IndexExists(nil, idx.Name()); err != nil { + t.Fatalf("Failed to check index '%s' exists: %s", idx.Name(), describe(err)) + } else if found { + t.Errorf("Index '%s' does exist, expected it not to exist", idx.Name()) + } + } +} + +// TestEnsureSkipListIndex creates a collection with a skiplist index. 
+func TestEnsureSkipListIndex(t *testing.T) { + c := createClientFromEnv(t, true) + db := ensureDatabase(nil, c, "index_test", nil, t) + + testOptions := []*driver.EnsureSkipListIndexOptions{ + nil, + &driver.EnsureSkipListIndexOptions{Unique: true, Sparse: false}, + &driver.EnsureSkipListIndexOptions{Unique: true, Sparse: true}, + &driver.EnsureSkipListIndexOptions{Unique: false, Sparse: false}, + &driver.EnsureSkipListIndexOptions{Unique: false, Sparse: true}, + } + + for i, options := range testOptions { + col := ensureCollection(nil, db, fmt.Sprintf("skiplist_index_test_%d", i), nil, t) + + idx, created, err := col.EnsureSkipListIndex(nil, []string{"name", "title"}, options) + if err != nil { + t.Fatalf("Failed to create new index: %s", describe(err)) + } + if !created { + t.Error("Expected created to be true, got false") + } + + // Index must exists now + if found, err := col.IndexExists(nil, idx.Name()); err != nil { + t.Fatalf("Failed to check index '%s' exists: %s", idx.Name(), describe(err)) + } else if !found { + t.Errorf("Index '%s' does not exist, expected it to exist", idx.Name()) + } + + // Ensure again, created must be false now + _, created, err = col.EnsureSkipListIndex(nil, []string{"name", "title"}, options) + if err != nil { + t.Fatalf("Failed to re-create index: %s", describe(err)) + } + if created { + t.Error("Expected created to be false, got true") + } + + // Remove index + if err := idx.Remove(nil); err != nil { + t.Fatalf("Failed to remove index '%s': %s", idx.Name(), describe(err)) + } + + // Index must not exists now + if found, err := col.IndexExists(nil, idx.Name()); err != nil { + t.Fatalf("Failed to check index '%s' exists: %s", idx.Name(), describe(err)) + } else if found { + t.Errorf("Index '%s' does exist, expected it not to exist", idx.Name()) + } + } +} diff --git a/deps/github.com/arangodb/go-driver/test/indexes_test.go b/deps/github.com/arangodb/go-driver/test/indexes_test.go new file mode 100644 index 000000000..c5ee73903 --- 
/dev/null +++ b/deps/github.com/arangodb/go-driver/test/indexes_test.go @@ -0,0 +1,186 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "context" + "testing" + + driver "github.com/arangodb/go-driver" +) + +// TestCreateFullTextIndex creates a collection with a full text index. +func TestIndexes(t *testing.T) { + c := createClientFromEnv(t, true) + db := ensureDatabase(nil, c, "index_test", nil, t) + col := ensureCollection(nil, db, "indexes_test", nil, t) + + // Create some indexes + if _, _, err := col.EnsureFullTextIndex(nil, []string{"name"}, nil); err != nil { + t.Fatalf("Failed to create new index: %s", describe(err)) + } + if _, _, err := col.EnsureHashIndex(nil, []string{"age", "gender"}, nil); err != nil { + t.Fatalf("Failed to create new index: %s", describe(err)) + } + + // Get list of indexes + if idxs, err := col.Indexes(context.Background()); err != nil { + t.Fatalf("Failed to get indexes: %s", describe(err)) + } else { + if len(idxs) != 3 { + // We made 2 indexes, 1 is always added by the system + t.Errorf("Expected 3 indexes, got %d", len(idxs)) + } + + // Try opening the indexes 1 by 1 + for _, x := range idxs { + if idx, err := col.Index(nil, x.Name()); err != nil { + t.Errorf("Failed to open index '%s': %s", x.Name(), describe(err)) + } else 
if idx.Name() != x.Name() { + t.Errorf("Got different index name. Expected '%s', got '%s'", x.Name(), idx.Name()) + } + } + } + + // Check index count + if stats, err := col.Statistics(nil); err != nil { + t.Fatalf("Statistics failed: %s", describe(err)) + } else if stats.Figures.Indexes.Count != 3 { + // 3 because 1 system index + 2 created above + t.Errorf("Expected 3 indexes, got %d", stats.Figures.Indexes.Count) + } +} + +// TestIndexesDeduplicateHash tests no-deduplicate on hash index. +func TestIndexesDeduplicateHash(t *testing.T) { + c := createClientFromEnv(t, true) + version, err := c.Version(nil) + if err != nil { + t.Fatalf("Version failed: %s", describe(err)) + } + isv32p := version.Version.CompareTo("3.2") >= 0 + if !isv32p { + t.Skip("Test requires 3.2") + } else { + db := ensureDatabase(nil, c, "index_test", nil, t) + + { + // Create some indexes with de-duplication off + col := ensureCollection(nil, db, "indexes_hash_deduplicate_false_test", nil, t) + if _, _, err := col.EnsureHashIndex(nil, []string{"tags[*]"}, &driver.EnsureHashIndexOptions{ + Unique: true, + Sparse: false, + NoDeduplicate: true, + }); err != nil { + t.Fatalf("Failed to create new index: %s", describe(err)) + } + + doc := struct { + Tags []string `json:"tags"` + }{ + Tags: []string{"a", "a", "b"}, + } + if _, err := col.CreateDocument(nil, doc); !driver.IsConflict(err) { + t.Errorf("Expected Conflict error, got %s", describe(err)) + } + } + + { + // Create some indexes with de-duplication on + col := ensureCollection(nil, db, "indexes_hash_deduplicate_true_test", nil, t) + if _, _, err := col.EnsureHashIndex(nil, []string{"tags"}, &driver.EnsureHashIndexOptions{ + Unique: true, + Sparse: false, + NoDeduplicate: false, + }); err != nil { + t.Fatalf("Failed to create new index: %s", describe(err)) + } + + doc := struct { + Tags []string `json:"tags"` + }{ + Tags: []string{"a", "a", "b"}, + } + if _, err := col.CreateDocument(nil, doc); err != nil { + t.Errorf("Expected success, got 
%s", describe(err)) + } + } + } +} + +// TestIndexesDeduplicateSkipList tests no-deduplicate on skiplist index. +func TestIndexesDeduplicateSkipList(t *testing.T) { + c := createClientFromEnv(t, true) + version, err := c.Version(nil) + if err != nil { + t.Fatalf("Version failed: %s", describe(err)) + } + isv32p := version.Version.CompareTo("3.2") >= 0 + if !isv32p { + t.Skip("Test requires 3.2") + } else { + db := ensureDatabase(nil, c, "index_test", nil, t) + + { + // Create some indexes with de-duplication off + col := ensureCollection(nil, db, "indexes_skiplist_deduplicate_false_test", nil, t) + if _, _, err := col.EnsureSkipListIndex(nil, []string{"tags[*]"}, &driver.EnsureSkipListIndexOptions{ + Unique: true, + Sparse: false, + NoDeduplicate: true, + }); err != nil { + t.Fatalf("Failed to create new index: %s", describe(err)) + } + + doc := struct { + Tags []string `json:"tags"` + }{ + Tags: []string{"a", "a", "b"}, + } + if _, err := col.CreateDocument(nil, doc); !driver.IsConflict(err) { + t.Errorf("Expected Conflict error, got %s", describe(err)) + } + } + + { + // Create some indexes with de-duplication on + col := ensureCollection(nil, db, "indexes_skiplist_deduplicate_true_test", nil, t) + if _, _, err := col.EnsureSkipListIndex(nil, []string{"tags"}, &driver.EnsureSkipListIndexOptions{ + Unique: true, + Sparse: false, + NoDeduplicate: false, + }); err != nil { + t.Fatalf("Failed to create new index: %s", describe(err)) + } + + doc := struct { + Tags []string `json:"tags"` + }{ + Tags: []string{"a", "a", "b"}, + } + if _, err := col.CreateDocument(nil, doc); err != nil { + t.Errorf("Expected success, got %s", describe(err)) + } + } + } +} diff --git a/deps/github.com/arangodb/go-driver/test/query_test.go b/deps/github.com/arangodb/go-driver/test/query_test.go new file mode 100644 index 000000000..80134d0aa --- /dev/null +++ b/deps/github.com/arangodb/go-driver/test/query_test.go @@ -0,0 +1,119 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, 
Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "context" + "testing" +) + +type validateQueryTest struct { + Query string + ExpectSuccess bool +} + +// TestValidateQuery validates several AQL queries. +func TestValidateQuery(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "validate_query_test", nil, t) + + // Create data set + collectionData := map[string][]interface{}{ + "books": []interface{}{ + Book{Title: "Book 01"}, + Book{Title: "Book 02"}, + Book{Title: "Book 03"}, + Book{Title: "Book 04"}, + Book{Title: "Book 05"}, + Book{Title: "Book 06"}, + Book{Title: "Book 07"}, + Book{Title: "Book 08"}, + Book{Title: "Book 09"}, + Book{Title: "Book 10"}, + Book{Title: "Book 11"}, + Book{Title: "Book 12"}, + Book{Title: "Book 13"}, + Book{Title: "Book 14"}, + Book{Title: "Book 15"}, + Book{Title: "Book 16"}, + Book{Title: "Book 17"}, + Book{Title: "Book 18"}, + Book{Title: "Book 19"}, + Book{Title: "Book 20"}, + }, + "users": []interface{}{ + UserDoc{Name: "John", Age: 13}, + UserDoc{Name: "Jake", Age: 25}, + UserDoc{Name: "Clair", Age: 12}, + UserDoc{Name: "Johnny", Age: 42}, + UserDoc{Name: "Blair", Age: 67}, + }, + } + for colName, colDocs := range collectionData { + col := ensureCollection(ctx, db, colName, nil, t) + if _, _, err := 
col.CreateDocuments(ctx, colDocs); err != nil { + t.Fatalf("Expected success, got %s", describe(err)) + } + } + + // Setup tests + tests := []validateQueryTest{ + validateQueryTest{ + Query: "FOR d IN books SORT d.Title RETURN d", + ExpectSuccess: true, + }, + validateQueryTest{ + Query: "FOR d IN books FILTER d.Title==@title SORT d.Title RETURN d", + ExpectSuccess: true, + }, + validateQueryTest{ + Query: "FOR u IN users FILTER u.age>>>100 SORT u.name RETURN u", + ExpectSuccess: false, + }, + validateQueryTest{ + Query: "", + ExpectSuccess: false, + }, + /*validateQueryTest{ + Query: "FOR u IN unknown RETURN u", + ExpectSuccess: false, + },*/ + } + + // Run tests for every context alternative + for i, test := range tests { + err := db.ValidateQuery(ctx, test.Query) + if test.ExpectSuccess { + if err != nil { + t.Errorf("Expected success in query %d (%s), got '%s'", i, test.Query, describe(err)) + continue + } + } else { + if err == nil { + t.Errorf("Expected error in query %d (%s), got '%s'", i, test.Query, describe(err)) + continue + } + } + } +} diff --git a/deps/github.com/arangodb/go-driver/test/replication_test.go b/deps/github.com/arangodb/go-driver/test/replication_test.go new file mode 100644 index 000000000..9e64961ae --- /dev/null +++ b/deps/github.com/arangodb/go-driver/test/replication_test.go @@ -0,0 +1,68 @@ +// +// DISCLAIMER +// +// Copyright 2018 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "context" + "testing" + + driver "github.com/arangodb/go-driver" +) + +// TestReplicationDatabaseInventory tests the Replication.DatabaseInventory method. +func TestReplicationDatabaseInventory(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + if _, err := c.Cluster(ctx); err == nil { + // Cluster, not supported for this test + t.Skip("Skipping in cluster") + } else if !driver.IsPreconditionFailed(err) { + t.Errorf("Failed to query cluster: %s", describe(err)) + } else { + // Single server (what we need) + rep := c.Replication() + db, err := c.Database(ctx, "_system") + if err != nil { + t.Fatalf("Failed to open _system database: %s", describe(err)) + } + inv, err := rep.DatabaseInventory(ctx, db) + if err != nil { + t.Fatalf("DatabaseInventory failed: %s", describe(err)) + } + if len(inv.Collections) == 0 { + t.Error("Expected multiple collections, got 0") + } + foundSystemCol := false + for _, col := range inv.Collections { + if col.Parameters.Name == "" { + t.Error("Expected non-empty name") + } + if col.Parameters.IsSystem { + foundSystemCol = true + } + } + if !foundSystemCol { + t.Error("Expected multiple system collections, found none") + } + } +} diff --git a/deps/github.com/arangodb/go-driver/test/server_mode_auth_test.go b/deps/github.com/arangodb/go-driver/test/server_mode_auth_test.go new file mode 100644 index 000000000..ed38131c7 --- /dev/null +++ b/deps/github.com/arangodb/go-driver/test/server_mode_auth_test.go @@ -0,0 +1,160 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +// +build auth + +package test + +import ( + "context" + "testing" + + driver "github.com/arangodb/go-driver" +) + +// TestServerModeAndGrants checks user access grants in combination with +// server mode and WithConfigured. +func TestServerModeAndGrants(t *testing.T) { + c := createClientFromEnv(t, true) + ctx := context.Background() + + version, err := c.Version(nil) + if err != nil { + t.Fatalf("Version failed: %s", describe(err)) + } + isv33p := version.Version.CompareTo("3.3") >= 0 + if !isv33p { + t.Skip("This test requires version 3.3") + } else { + // Get root user + u, err := c.User(ctx, "root") + if err != nil { + t.Fatalf("User('root') failed: %s", describe(err)) + } + + // Initial server mode must be default + if mode, err := c.ServerMode(ctx); err != nil { + t.Fatalf("ServerMode failed: %s", describe(err)) + } else if mode != driver.ServerModeDefault { + t.Errorf("ServerMode returned '%s', but expected '%s'", mode, driver.ServerModeDefault) + } + + // Create simple collection + db := ensureDatabase(ctx, c, "_system", nil, t) + colName := "server_mode_and_grants_test1" + col := ensureCollection(ctx, db, colName, nil, t) + + // Get database & collection access + defaultDBAccess, err := u.GetDatabaseAccess(ctx, db) + if err != nil { + t.Fatalf("GetDatabaseAccess failed: %s", describe(err)) + } + defaultColAccess, err := u.GetCollectionAccess(ctx, col) + if err != nil { + t.Fatalf("GetCollectionAccess failed: %s", describe(err)) + } + + // Get 
database & collection access using WithConfigured + if grant, err := u.GetDatabaseAccess(driver.WithConfigured(ctx), db); err != nil { + t.Fatalf("GetDatabaseAccess(WithConfigured) failed: %s", describe(err)) + } else if grant != defaultDBAccess { + t.Errorf("Database access using WithConfigured differs, got '%s', expected '%s'", grant, defaultDBAccess) + } + if grant, err := u.GetCollectionAccess(driver.WithConfigured(ctx), col); err != nil { + t.Fatalf("GetCollectionAccess(WithConfigured) failed: %s", describe(err)) + } else if grant != defaultColAccess { + t.Errorf("Collection access using WithConfigured differs, got '%s', expected '%s'", grant, defaultColAccess) + } + + // Change server mode to readonly. + if err := c.SetServerMode(ctx, driver.ServerModeReadOnly); err != nil { + t.Fatalf("SetServerMode failed: %s", describe(err)) + } + + // Check server mode, must be readonly + if mode, err := c.ServerMode(ctx); err != nil { + t.Fatalf("ServerMode failed: %s", describe(err)) + } else if mode != driver.ServerModeReadOnly { + t.Errorf("ServerMode returned '%s', but expected '%s'", mode, driver.ServerModeReadOnly) + } + + // Get database & collection access now (must be readonly) + if grant, err := u.GetDatabaseAccess(ctx, db); err != nil { + t.Fatalf("GetDatabaseAccess failed: %s", describe(err)) + } else if grant != driver.GrantReadOnly { + t.Errorf("Database access must be readonly, got '%s'", grant) + } + if grant, err := u.GetCollectionAccess(ctx, col); err != nil { + t.Fatalf("GetCollectionAccess failed: %s", describe(err)) + } else if grant != driver.GrantReadOnly { + t.Errorf("Collection access must be readonly, got '%s'", grant) + } + + // Get database & collection access using WithConfigured (must be same as before) + if grant, err := u.GetDatabaseAccess(driver.WithConfigured(ctx), db); err != nil { + t.Fatalf("GetDatabaseAccess(WithConfigured) failed: %s", describe(err)) + } else if grant != defaultDBAccess { + t.Errorf("Database access using 
WithConfigured differs, got '%s', expected '%s'", grant, defaultDBAccess) + } + if grant, err := u.GetCollectionAccess(driver.WithConfigured(ctx), col); err != nil { + t.Fatalf("GetCollectionAccess(WithConfigured) failed: %s", describe(err)) + } else if grant != defaultColAccess { + t.Errorf("Collection access using WithConfigured differs, got '%s', expected '%s'", grant, defaultColAccess) + } + + // Change server mode back to default. + if err := c.SetServerMode(ctx, driver.ServerModeDefault); err != nil { + t.Fatalf("SetServerMode failed: %s", describe(err)) + } + + // Initial server mode must be default + if mode, err := c.ServerMode(ctx); err != nil { + t.Fatalf("ServerMode failed: %s", describe(err)) + } else if mode != driver.ServerModeDefault { + t.Errorf("ServerMode returned '%s', but expected '%s'", mode, driver.ServerModeDefault) + } + + // Get database & collection access (must now be same as before) + if grant, err := u.GetDatabaseAccess(ctx, db); err != nil { + t.Fatalf("GetDatabaseAccess failed: %s", describe(err)) + } else if grant != defaultDBAccess { + t.Errorf("Database access differs, got '%s', expected '%s'", grant, defaultDBAccess) + } + if grant, err := u.GetCollectionAccess(ctx, col); err != nil { + t.Fatalf("GetCollectionAccess failed: %s", describe(err)) + } else if grant != defaultColAccess { + t.Errorf("Collection access differs, got '%s', expected '%s'", grant, defaultColAccess) + } + + // Get database & collection access with WithConfigured (must now be same as before) + if grant, err := u.GetDatabaseAccess(driver.WithConfigured(ctx), db); err != nil { + t.Fatalf("GetDatabaseAccess(WithConfigured) failed: %s", describe(err)) + } else if grant != defaultDBAccess { + t.Errorf("Database access using WithConfigured differs, got '%s', expected '%s'", grant, defaultDBAccess) + } + if grant, err := u.GetCollectionAccess(driver.WithConfigured(ctx), col); err != nil { + t.Fatalf("GetCollectionAccess(WithConfigured) failed: %s", describe(err)) + } 
else if grant != defaultColAccess { + t.Errorf("Collection access using WithConfigured differs, got '%s', expected '%s'", grant, defaultColAccess) + } + } +} diff --git a/deps/github.com/arangodb/go-driver/test/server_mode_test.go b/deps/github.com/arangodb/go-driver/test/server_mode_test.go new file mode 100644 index 000000000..aca0fa701 --- /dev/null +++ b/deps/github.com/arangodb/go-driver/test/server_mode_test.go @@ -0,0 +1,84 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "context" + "testing" + + driver "github.com/arangodb/go-driver" +) + +// TestServerMode creates a database and checks the various server modes. 
+func TestServerMode(t *testing.T) { + c := createClientFromEnv(t, true) + ctx := context.Background() + + version, err := c.Version(nil) + if err != nil { + t.Fatalf("Version failed: %s", describe(err)) + } + isv33p := version.Version.CompareTo("3.3") >= 0 + if !isv33p { + t.Skip("This test requires version 3.3") + } else { + // Create simple collection + db := ensureDatabase(ctx, c, "_system", nil, t) + colName := "server_mode_test1" + col := ensureCollection(ctx, db, colName, nil, t) + + // Initial server mode must be default + if mode, err := c.ServerMode(ctx); err != nil { + t.Fatalf("ServerMode failed: %s", describe(err)) + } else if mode != driver.ServerModeDefault { + t.Errorf("ServerMode returned '%s', but expected '%s'", mode, driver.ServerModeDefault) + } + + // Change server mode to readonly. + if err := c.SetServerMode(ctx, driver.ServerModeReadOnly); err != nil { + t.Fatalf("SetServerMode failed: %s", describe(err)) + } + + // Try to drop collection now (it must fail) + if err := col.Remove(ctx); !driver.IsForbidden(err) { + t.Fatalf("Collection remove should have return ForbiddenError, got error %s", describe(err)) + } + + // Check server mode, must be readonly + if mode, err := c.ServerMode(ctx); err != nil { + t.Fatalf("ServerMode failed: %s", describe(err)) + } else if mode != driver.ServerModeReadOnly { + t.Errorf("ServerMode returned '%s', but expected '%s'", mode, driver.ServerModeReadOnly) + } + + // Change server mode back to default. 
+ if err := c.SetServerMode(ctx, driver.ServerModeDefault); err != nil { + t.Fatalf("SetServerMode failed: %s", describe(err)) + } + + // Try to drop collection now (it must succeed) + if err := col.Remove(ctx); err != nil { + t.Fatalf("Collection remove failed: %s", describe(err)) + } + } +} diff --git a/deps/github.com/arangodb/go-driver/test/types.go b/deps/github.com/arangodb/go-driver/test/types.go new file mode 100644 index 000000000..474237e84 --- /dev/null +++ b/deps/github.com/arangodb/go-driver/test/types.go @@ -0,0 +1,68 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +type UserDoc struct { + Name string `json:"name"` + Age int `json:"age"` +} + +type UserDocWithKey struct { + Key string `json:"_key"` + Name string `json:"name"` + Age int `json:"age"` +} + +type Account struct { + ID string `json:"id"` + User *UserDoc `json:"user"` +} + +type Book struct { + Title string +} + +type RouteEdge struct { + From string `json:"_from,omitempty"` + To string `json:"_to,omitempty"` + Distance int `json:"distance,omitempty"` +} + +type RouteEdgeWithKey struct { + Key string `json:"_key"` + From string `json:"_from,omitempty"` + To string `json:"_to,omitempty"` + Distance int `json:"distance,omitempty"` +} + +type RelationEdge struct { + From string `json:"_from,omitempty"` + To string `json:"_to,omitempty"` + Type string `json:"type,omitempty"` +} + +type AccountEdge struct { + From string `json:"_from,omitempty"` + To string `json:"_to,omitempty"` + User *UserDoc `json:"user"` +} diff --git a/deps/github.com/arangodb/go-driver/test/user_auth_test.go b/deps/github.com/arangodb/go-driver/test/user_auth_test.go new file mode 100644 index 000000000..29db97099 --- /dev/null +++ b/deps/github.com/arangodb/go-driver/test/user_auth_test.go @@ -0,0 +1,558 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +// +build auth + +package test + +import ( + "context" + "testing" + "time" + + driver "github.com/arangodb/go-driver" +) + +// TestUpdateUserPasswordMyself creates a user and tries to update the password of the authenticated user. +func TestUpdateUserPasswordMyself(t *testing.T) { + var conn driver.Connection + c := createClientFromEnv(t, true, &conn) + version, err := c.Version(nil) + if err != nil { + t.Fatalf("Version failed: %s", describe(err)) + } + isv32p := version.Version.CompareTo("3.2") >= 0 + isVST1_0 := conn.Protocols().Contains(driver.ProtocolVST1_0) + ensureUser(nil, c, "user@TestUpdateUserPasswordMyself", &driver.UserOptions{Password: "foo"}, t) + + authClient, err := driver.NewClient(driver.ClientConfig{ + Connection: createConnectionFromEnv(t), + Authentication: driver.BasicAuthentication("user@TestUpdateUserPasswordMyself", "foo"), + }) + if err != nil { + t.Fatalf("Expected success, got %s", describe(err)) + } + + if isVST1_0 && !isv32p { + t.Skip("Cannot update my own password using VST in 3.1") + } else { + u, err := authClient.User(nil, "user@TestUpdateUserPasswordMyself") + if err != nil { + t.Fatalf("Expected success, got %s", describe(err)) + } + if err := u.Update(context.TODO(), driver.UserOptions{Password: "something"}); err != nil { + t.Errorf("Expected success, got %s", describe(err)) + } + } +} + +// TestUpdateUserPasswordOtherUser creates a user and tries to update the password of another user. 
+func TestUpdateUserPasswordOtherUser(t *testing.T) { + var conn driver.Connection + c := createClientFromEnv(t, true, &conn) + version, err := c.Version(nil) + if err != nil { + t.Fatalf("Version failed: %s", describe(err)) + } + isv32p := version.Version.CompareTo("3.2") >= 0 + isVST1_0 := conn.Protocols().Contains(driver.ProtocolVST1_0) + u1 := ensureUser(nil, c, "user1", &driver.UserOptions{Password: "foo"}, t) + ensureUser(nil, c, "user2", nil, t) + systemDb, err := c.Database(nil, "_system") + if err != nil { + t.Fatalf("Expected success, got %s", describe(err)) + } + + authClient, err := driver.NewClient(driver.ClientConfig{ + Connection: createConnectionFromEnv(t), + Authentication: driver.BasicAuthentication("user1", "foo"), + }) + if err != nil { + t.Fatalf("Expected success, got %s", describe(err)) + } + + if isVST1_0 && !isv32p { + t.Skip("Cannot update other password using VST in 3.1") + } else { + // Right now user1 has no right to access user2 + if _, err := authClient.User(nil, "user2"); !driver.IsForbidden(err) { + t.Fatalf("Expected ForbiddenError, got %s", describe(err)) + } + + // Grant user1 access to _system db, then it should be able to access user2 + if err := u1.SetDatabaseAccess(nil, systemDb, driver.GrantReadWrite); err != nil { + t.Fatalf("Expected success, got %s", describe(err)) + } + + // Now change the password of another user. + // With user1 having rights for _system, this must succeed now + u2, err := authClient.User(nil, "user2") + if err != nil { + t.Fatalf("Expected success, got %s", describe(err)) + } + if err := u2.Update(context.TODO(), driver.UserOptions{Password: "something"}); err != nil { + t.Errorf("Expected success, got %s", describe(err)) + } + } +} + +// TestGrantUserDatabase creates a user & database and granting the user access to the database. 
+func TestGrantUserDatabase(t *testing.T) { + c := createClientFromEnv(t, true) + version, err := c.Version(nil) + if err != nil { + t.Fatalf("Version failed: %s", describe(err)) + } + isv32p := version.Version.CompareTo("3.2") >= 0 + u := ensureUser(nil, c, "grant_user1", &driver.UserOptions{Password: "foo"}, t) + db := ensureDatabase(nil, c, "grant_user_test", nil, t) + + // Grant read/write access + if err := u.SetDatabaseAccess(nil, db, driver.GrantReadWrite); err != nil { + t.Fatalf("SetDatabaseAccess failed: %s", describe(err)) + } + if isv32p { + // Read back access + if grant, err := u.GetDatabaseAccess(nil, db); err != nil { + t.Fatalf("GetDatabaseAccess failed: %s", describe(err)) + } else if grant != driver.GrantReadWrite { + t.Errorf("Database access invalid, expected 'rw', got '%s'", grant) + } + } + + authClient, err := driver.NewClient(driver.ClientConfig{ + Connection: createConnectionFromEnv(t), + Authentication: driver.BasicAuthentication("grant_user1", "foo"), + }) + if err != nil { + t.Fatalf("Expected success, got %s", describe(err)) + } + + // Try to create a collection in the db + authDb, err := authClient.Database(nil, "grant_user_test") + if err != nil { + t.Fatalf("Expected success, got %s", describe(err)) + } + if _, err := authDb.CreateCollection(nil, "some_collection", nil); err != nil { + t.Errorf("Expected success, got %s", describe(err)) + } + + // Now revoke access + if err := u.SetDatabaseAccess(nil, db, driver.GrantNone); err != nil { + t.Fatalf("SetDatabaseAccess failed: %s", describe(err)) + } + if isv32p { + // Read back access + if grant, err := u.GetDatabaseAccess(nil, db); err != nil { + t.Fatalf("GetDatabaseAccess failed: %s", describe(err)) + } else if grant != driver.GrantNone { + t.Errorf("Database access invalid, expected 'none', got '%s'", grant) + } + } + + // Try to access the db, should fail now + if _, err := authClient.Database(nil, "grant_user_test"); !driver.IsUnauthorized(err) { + t.Errorf("Expected 
UnauthorizedError, got %s %#v", describe(err), err) + } + + if isv32p { + // Now grant read-only access + if err := u.SetDatabaseAccess(nil, db, driver.GrantReadOnly); err != nil { + t.Fatalf("SetDatabaseAccess failed: %s", describe(err)) + } + // Read back access + if grant, err := u.GetDatabaseAccess(nil, db); err != nil { + t.Fatalf("GetDatabaseAccess failed: %s", describe(err)) + } else if grant != driver.GrantReadOnly { + t.Errorf("Database access invalid, expected 'ro', got '%s'", grant) + } + // Try to access the db, should succeed + if _, err := authClient.Database(nil, "grant_user_test"); err != nil { + t.Errorf("Expected success, got %s", describe(err)) + } + // Try to create another collection, should fail + if _, err := authDb.CreateCollection(nil, "some_other_collection", nil); !driver.IsForbidden(err) { + t.Errorf("Expected UnauthorizedError, got %s %#v", describe(err), err) + } + } else { + t.Logf("SetDatabaseAccess(ReadOnly) is not supported on versions below 3.2 (got version %s)", version.Version) + } +} + +// TestGrantUserDefaultDatabase creates a user & database and granting the user access to the "default" database. 
+func TestGrantUserDefaultDatabase(t *testing.T) { + c := createClientFromEnv(t, true) + version, err := c.Version(nil) + if err != nil { + t.Fatalf("Version failed: %s", describe(err)) + } + isv32p := version.Version.CompareTo("3.2") >= 0 + if !isv32p { + t.Skipf("This test requires 3.2 or higher, got %s", version.Version) + } + + // We skip this test until Feb-1 + startTestDate := time.Date(2018, time.February, 1, 0, 0, 0, 0, time.UTC) + if time.Now().Before(startTestDate) { + t.Skipf("This test is skipped until %s", startTestDate) + } + + u := ensureUser(nil, c, "grant_user_def", &driver.UserOptions{Password: "foo"}, t) + db := ensureDatabase(nil, c, "grant_user_def_test", nil, t) + // Grant read/write access to default database + if err := u.SetDatabaseAccess(nil, nil, driver.GrantReadWrite); err != nil { + t.Fatalf("SetDatabaseAccess failed: %s", describe(err)) + } + // Read back default database access + if grant, err := u.GetDatabaseAccess(nil, nil); err != nil { + t.Fatalf("GetDatabaseAccess failed: %s", describe(err)) + } else if grant != driver.GrantReadWrite { + t.Errorf("Collection access invalid, expected 'rw', got '%s'", grant) + } + + authClient, err := driver.NewClient(driver.ClientConfig{ + Connection: createConnectionFromEnv(t), + Authentication: driver.BasicAuthentication(u.Name(), "foo"), + }) + if err != nil { + t.Fatalf("Expected success, got %s", describe(err)) + } + + // Try to create a collection in the db, should succeed + authDb, err := authClient.Database(nil, db.Name()) + if err != nil { + t.Fatalf("Expected success, got %s", describe(err)) + } + + authCol, err := authDb.CreateCollection(nil, "books_def_db", nil) + if err != nil { + t.Fatalf("Expected success, got %s", describe(err)) + } + + // Remove explicit grant for db + if err := u.RemoveDatabaseAccess(nil, db); err != nil { + t.Fatalf("Expected success, got %s", describe(err)) + } + + // Remove explicit grant for col + if err := u.RemoveCollectionAccess(nil, authCol); err != nil { 
+ t.Fatalf("Expected success, got %s", describe(err)) + } + + // wait for change to propagate (TODO add a check to the coordinators) + time.Sleep(time.Second * 5) + + // Try to create document in collection, should fail because there are no collection grants for this user and/or collection. + if _, err := authCol.CreateDocument(nil, Book{Title: "I cannot write"}); !driver.IsForbidden(err) { + t.Errorf("Expected failure, got %s", describe(err)) + } + + // Grant read-only access to default database + if err := u.SetDatabaseAccess(nil, nil, driver.GrantReadOnly); err != nil { + t.Fatalf("SetDatabaseAccess failed: %s", describe(err)) + } + // Try to create collection, should fail + if _, err := authDb.CreateCollection(nil, "books_def_ro_db", nil); !driver.IsForbidden(err) { + t.Errorf("Expected failure, got %s", describe(err)) + } + + // Grant no access to default database + if err := u.SetDatabaseAccess(nil, nil, driver.GrantNone); err != nil { + t.Fatalf("SetDatabaseAccess failed: %s", describe(err)) + } + // Try to create collection, should fail + if _, err := authDb.CreateCollection(nil, "books_def_none_db", nil); !driver.IsUnauthorized(err) { + t.Errorf("Expected failure, got %s", describe(err)) + } + + // Remove default database access, should fallback to "no-access" then + if err := u.RemoveDatabaseAccess(nil, nil); err != nil { + t.Fatalf("RemoveDatabaseAccess failed: %s", describe(err)) + } + // Try to create collection, should fail + if _, err := authDb.CreateCollection(nil, "books_def_star_db", nil); !driver.IsUnauthorized(err) { + t.Errorf("Expected failure, got %s", describe(err)) + } +} + +// TestGrantUserCollection creates a user & database & collection and granting the user access to the collection. 
+func TestGrantUserCollection(t *testing.T) { + c := createClientFromEnv(t, true) + version, err := c.Version(nil) + if err != nil { + t.Fatalf("Version failed: %s", describe(err)) + } + isv32p := version.Version.CompareTo("3.2") >= 0 + if !isv32p { + t.Skipf("This test requires 3.2 or higher, got %s", version.Version) + } + + // We skip this test until Feb-1 + startTestDate := time.Date(2018, time.February, 1, 0, 0, 0, 0, time.UTC) + if time.Now().Before(startTestDate) { + t.Skipf("This test is skipped until %s", startTestDate) + } + + u := ensureUser(nil, c, "grant_user_col", &driver.UserOptions{Password: "foo"}, t) + db := ensureDatabase(nil, c, "grant_user_col_test", nil, t) + // Grant read/write access to database + if err := u.SetDatabaseAccess(nil, db, driver.GrantReadWrite); err != nil { + t.Fatalf("SetDatabaseAccess failed: %s", describe(err)) + } + col := ensureCollection(nil, db, "grant_col_test", nil, t) + // Grant read/write access to collection + if err := u.SetCollectionAccess(nil, col, driver.GrantReadWrite); err != nil { + t.Fatalf("SetCollectionAccess failed: %s", describe(err)) + } + // Read back collection access + if grant, err := u.GetCollectionAccess(nil, col); err != nil { + t.Fatalf("GetCollectionAccess failed: %s", describe(err)) + } else if grant != driver.GrantReadWrite { + t.Errorf("Collection access invalid, expected 'rw', got '%s'", grant) + } + + authClient, err := driver.NewClient(driver.ClientConfig{ + Connection: createConnectionFromEnv(t), + Authentication: driver.BasicAuthentication("grant_user_col", "foo"), + }) + if err != nil { + t.Fatalf("Expected success, got %s", describe(err)) + } + + // Try to create a document in the col + authDb, err := authClient.Database(nil, db.Name()) + if err != nil { + t.Fatalf("Expected success, got %s", describe(err)) + } + authCol, err := authDb.Collection(nil, col.Name()) + if err != nil { + t.Fatalf("Expected success, got %s", describe(err)) + } + meta1, err := authCol.CreateDocument(nil, 
Book{Title: "I can write"}) + if err != nil { + t.Errorf("CreateDocument failed: %s", describe(err)) + } + + // Now set collection access to Read-only + if err := u.SetCollectionAccess(nil, col, driver.GrantReadOnly); err != nil { + t.Fatalf("SetCollectionAccess failed: %s", describe(err)) + } + // Read back collection access + if grant, err := u.GetCollectionAccess(nil, col); err != nil { + t.Fatalf("GetCollectionAccess failed: %s", describe(err)) + } else if grant != driver.GrantReadOnly { + t.Errorf("Collection access invalid, expected 'ro', got '%s'", grant) + } + // Try to create another document, should fail + if _, err := authCol.CreateDocument(nil, Book{Title: "I should not be able to write"}); !driver.IsForbidden(err) { + t.Errorf("Expected failure, got: %s", describe(err)) + } + // Try to read back first document, should succeed + var doc Book + if _, err := authCol.ReadDocument(nil, meta1.Key, &doc); err != nil { + t.Errorf("Expected success, got %s", describe(err)) + } + + // Now set collection access to None + if err := u.SetCollectionAccess(nil, col, driver.GrantNone); err != nil { + t.Fatalf("SetCollectionAccess failed: %s", describe(err)) + } + // Read back collection access + if grant, err := u.GetCollectionAccess(nil, col); err != nil { + t.Fatalf("GetCollectionAccess failed: %s", describe(err)) + } else if grant != driver.GrantNone { + t.Errorf("Collection access invalid, expected 'none', got '%s'", grant) + } + // Try to create another document, should fail + if _, err := authCol.CreateDocument(nil, Book{Title: "I should not be able to write"}); !driver.IsForbidden(err) { + t.Errorf("Expected failure, got: %s", describe(err)) + } + // Try to read back first document, should fail + if _, err := authCol.ReadDocument(nil, meta1.Key, &doc); !driver.IsForbidden(err) { + t.Errorf("Expected failure, got %s", describe(err)) + } + + // Now remove explicit collection access + if err := u.RemoveCollectionAccess(nil, col); err != nil { + 
t.Fatalf("RemoveCollectionAccess failed: %s", describe(err)) + } + // Read back collection access + if grant, err := u.GetCollectionAccess(nil, col); err != nil { + t.Fatalf("GetCollectionAccess failed: %s", describe(err)) + } else if grant != driver.GrantNone { + t.Errorf("Collection access invalid, expected 'none', got '%s'", grant) + } + // Try to create another document, should fail + if _, err := authCol.CreateDocument(nil, Book{Title: "I should not be able to write"}); !driver.IsForbidden(err) { + t.Errorf("Expected failure, got: %s", describe(err)) + } + // Try to read back first document, should fail + if _, err := authCol.ReadDocument(nil, meta1.Key, &doc); !driver.IsForbidden(err) { + t.Errorf("Expected failure, got %s", describe(err)) + } + + // Set default collection access to read-only + if err := u.SetCollectionAccess(nil, db, driver.GrantReadOnly); err != nil { + t.Fatalf("SetCollectionAccess failed: %s", describe(err)) + } + if err := u.RemoveCollectionAccess(nil, col); err != nil { + t.Fatalf("RemoveCollectionAccess failed: %s", describe(err)) + } + // Read back collection access + if grant, err := u.GetCollectionAccess(nil, col); err != nil { + t.Fatalf("GetCollectionAccess failed: %s", describe(err)) + } else if grant != driver.GrantReadOnly { + t.Errorf("Collection access invalid, expected 'ro', got '%s'", grant) + } + // Try to create another document, should fail + if _, err := authCol.CreateDocument(nil, Book{Title: "I should not be able to write"}); !driver.IsForbidden(err) { + t.Errorf("Expected failure, got: %s", describe(err)) + } + // Try to read back first document, should succeed + if _, err := authCol.ReadDocument(nil, meta1.Key, &doc); err != nil { + t.Errorf("Expected success, got %s", describe(err)) + } + + // Set default collection access to read-write + if err := u.SetCollectionAccess(nil, db, driver.GrantReadWrite); err != nil { + t.Fatalf("SetCollectionAccess failed: %s", describe(err)) + } + // Read back collection access + if 
grant, err := u.GetCollectionAccess(nil, col); err != nil { + t.Fatalf("GetCollectionAccess failed: %s", describe(err)) + } else if grant != driver.GrantReadWrite { + t.Errorf("Collection access invalid, expected 'rw', got '%s'", grant) + } + // Try to create another document, should succeed + if _, err := authCol.CreateDocument(nil, Book{Title: "I should again be able to write"}); err != nil { + t.Errorf("Expected success, got: %s", describe(err)) + } + // Try to read back first document, should succeed + if _, err := authCol.ReadDocument(nil, meta1.Key, &doc); err != nil { + t.Errorf("Expected success, got %s", describe(err)) + } +} + +// TestUserAccessibleDatabases creates a user & databases and checks the list of accessible databases. +func TestUserAccessibleDatabases(t *testing.T) { + c := createClientFromEnv(t, true) + version, err := c.Version(nil) + if err != nil { + t.Fatalf("Version failed: %s", describe(err)) + } + isv32p := version.Version.CompareTo("3.2") >= 0 + u := ensureUser(nil, c, "accessible_db_user1", nil, t) + db1 := ensureDatabase(nil, c, "accessible_db1", nil, t) + db2 := ensureDatabase(nil, c, "accessible_db2", nil, t) + + contains := func(list []driver.Database, name string) bool { + for _, db := range list { + if db.Name() == name { + return true + } + } + return false + } + + expectListContains := func(listName string, list []driver.Database, name ...string) { + for _, n := range name { + if !contains(list, n) { + t.Errorf("Expected list '%s' to contain '%s', it did not", listName, n) + } + } + } + + expectListNotContains := func(listName string, list []driver.Database, name ...string) { + for _, n := range name { + if contains(list, n) { + t.Errorf("Expected list '%s' to not contain '%s', it did", listName, n) + } + } + } + + // Nothing allowed yet + list, err := u.AccessibleDatabases(nil) + if err != nil { + t.Fatalf("Expected success, got %s", describe(err)) + } + expectListContains("expect-none", list) + 
expectListNotContains("expect-none", list, db1.Name(), db2.Name()) + + // Allow db1 + if err := u.SetDatabaseAccess(nil, db1, driver.GrantReadWrite); err != nil { + t.Fatalf("SetDatabaseAccess failed: %s", describe(err)) + } + + list, err = u.AccessibleDatabases(nil) + if err != nil { + t.Fatalf("Expected success, got %s", describe(err)) + } + expectListContains("expect-db1", list, db1.Name()) + expectListNotContains("expect-db1", list, db2.Name()) + + // allow db2, revoke db1 + if err := u.SetDatabaseAccess(nil, db2, driver.GrantReadWrite); err != nil { + t.Fatalf("SetDatabaseAccess(RW) failed: %s", describe(err)) + } + if err := u.SetDatabaseAccess(nil, db1, driver.GrantNone); err != nil { + t.Fatalf("SetDatabaseAccess(None) failed: %s", describe(err)) + } + + if isv32p { + list, err = u.AccessibleDatabases(nil) + if err != nil { + t.Fatalf("Expected success, got %s", describe(err)) + } + expectListContains("expect-db2", list, db2.Name()) + expectListNotContains("expect-db2", list, db1.Name()) + + // revoke db2 + if err := u.SetDatabaseAccess(nil, db2, driver.GrantNone); err != nil { + t.Fatalf("SetDatabaseAccess(None) failed: %s", describe(err)) + } + + list, err = u.AccessibleDatabases(nil) + if err != nil { + t.Fatalf("Expected success, got %s", describe(err)) + } + expectListContains("expect-none2", list) + expectListNotContains("expect-none2", list, db1.Name(), db2.Name()) + + // grant read-only access to db1, db2 + if err := u.SetDatabaseAccess(nil, db1, driver.GrantReadOnly); err != nil { + t.Fatalf("SetDatabaseAccess(RO) failed: %s", describe(err)) + } + if err := u.SetDatabaseAccess(nil, db2, driver.GrantReadOnly); err != nil { + t.Fatalf("SetDatabaseAccess(RO) failed: %s", describe(err)) + } + + list, err = u.AccessibleDatabases(nil) + if err != nil { + t.Fatalf("Expected success, got %s", describe(err)) + } + expectListContains("expect-db1-db2", list, db1.Name(), db2.Name()) + expectListNotContains("expect-db1-db2", list) + + } else { + t.Logf("Last 
part of test fails on version < 3.2 (got version %s)", version.Version) + } +} diff --git a/deps/github.com/arangodb/go-driver/test/user_test.go b/deps/github.com/arangodb/go-driver/test/user_test.go new file mode 100644 index 000000000..c643cc89f --- /dev/null +++ b/deps/github.com/arangodb/go-driver/test/user_test.go @@ -0,0 +1,238 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "context" + "encoding/json" + "reflect" + "testing" + + driver "github.com/arangodb/go-driver" +) + +// ensureUser is a helper to check if a user exists and create it if needed. +// It will fail the test when an error occurs. +func ensureUser(ctx context.Context, c driver.Client, name string, options *driver.UserOptions, t *testing.T) driver.User { + u, err := c.User(ctx, name) + if driver.IsNotFound(err) { + u, err = c.CreateUser(ctx, name, options) + if err != nil { + t.Fatalf("Failed to create user '%s': %s", name, describe(err)) + } + } else if err != nil { + t.Fatalf("Failed to open user '%s': %s", name, describe(err)) + } + return u +} + +// TestCreateUser creates a user and then checks that it exists. 
+func TestCreateUser(t *testing.T) { + c := createClientFromEnv(t, true) + + tests := map[string]*driver.UserOptions{ + "jan1": nil, + "george": &driver.UserOptions{Password: "foo", Active: boolRef(false)}, + "candy": &driver.UserOptions{Password: "ARANGODB_DEFAULT_ROOT_PASSWORD", Active: boolRef(true)}, + "joe": &driver.UserOptions{Extra: map[string]interface{}{"key": "value", "x": 5}}, + // Some strange names + "ewout/foo": nil, + "admin@api": nil, + "測試用例": nil, + "測試用例@foo": nil, + "_": nil, + //" ": nil, // No longer valid in 3.2 + "/": nil, + } + + for name, options := range tests { + if _, err := c.CreateUser(nil, name, options); err != nil { + t.Fatalf("Failed to create user '%s': %s", name, describe(err)) + } + // User must exist now + if found, err := c.UserExists(nil, name); err != nil { + t.Errorf("UserExists('%s') failed: %s", name, describe(err)) + } else if !found { + t.Errorf("UserExists('%s') return false, expected true", name) + } + + // Must be able to open user + if u, err := c.User(nil, name); err != nil { + t.Errorf("Failed to open user '%s': %s", name, describe(err)) + } else { + if u.Name() != name { + t.Errorf("Invalid name, expected '%s', got '%s'", name, u.Name()) + } + if options != nil { + if options.Active != nil { + if u.IsActive() != *options.Active { + t.Errorf("Invalid active, expected '%v', got '%v'", *options.Active, u.IsActive()) + } + } + var extra map[string]interface{} + if err := u.Extra(&extra); err != nil { + t.Errorf("Expected success, got %s", describe(err)) + } else { + if options.Extra == nil { + if len(extra) != 0 { + t.Errorf("Invalid extra, expected 'nil', got '%+v'", extra) + } + } else { + expected, _ := json.Marshal(options.Extra) + got, _ := json.Marshal(extra) + if string(expected) != string(got) { + t.Errorf("Invalid extra, expected '%s', got '%s'", string(expected), string(got)) + } + } + } + } + if u.IsPasswordChangeNeeded() != false { + t.Errorf("Invalid passwordChangeNeeded, expected 'false', got '%v'", 
u.IsPasswordChangeNeeded()) + } + } + + // Create again (must fail) + if _, err := c.CreateUser(nil, name, options); !driver.IsConflict(err) { + t.Fatalf("Expected ConflictError, got %s", describe(err)) + } + } + + // Fetch all users + users, err := c.Users(nil) + if err != nil { + t.Fatalf("Failed to fetch users: %s", describe(err)) + } + for userName := range tests { + foundUser := false + for _, u := range users { + if u.Name() == userName { + foundUser = true + break + } + } + if !foundUser { + t.Errorf("Cannot find user '%s'", userName) + } + } + + // Now remove the users + for userName := range tests { + u, err := c.User(nil, userName) + if err != nil { + t.Errorf("Expected success, got %s", describe(err)) + } else { + if err := u.Remove(context.Background()); err != nil { + t.Errorf("Failed to remove user '%s': %s", userName, describe(err)) + } + + // User must no longer exist + if found, err := c.UserExists(nil, userName); err != nil { + t.Errorf("Expected success, got %s", describe(err)) + } else if found { + t.Errorf("Expected user '%s' to be NOT found, but it was found", userName) + } + } + } +} + +// TestUpdateUser creates a user and performs various updates. 
+func TestUpdateUser(t *testing.T) { + c := createClientFromEnv(t, true) + u := ensureUser(nil, c, "update_user", nil, t) + + if err := u.Update(context.TODO(), driver.UserOptions{}); err != nil { + t.Errorf("Cannot update user with empty options: %s", describe(err)) + } + + if u.IsActive() != true { + t.Errorf("Expected IsActive to be true, got false") + } + if err := u.Update(context.TODO(), driver.UserOptions{ + Active: boolRef(false), + }); err != nil { + t.Errorf("Cannot update user with Active in options: %s", describe(err)) + } + if u.IsActive() != false { + t.Errorf("Expected IsActive to be false, got true") + } + + if err := u.Update(context.TODO(), driver.UserOptions{ + Active: boolRef(true), + }); err != nil { + t.Errorf("Cannot update user with Active in options: %s", describe(err)) + } + if u.IsActive() != true { + t.Errorf("Expected IsActive to be true, got false") + } + + book := Book{Title: "Testing is fun"} + if err := u.Update(context.TODO(), driver.UserOptions{ + Extra: book, + }); err != nil { + t.Errorf("Cannot update user with Extra in options: %s", describe(err)) + } + var readBook Book + if err := u.Extra(&readBook); err != nil { + t.Errorf("Failed to read extra: %s", describe(err)) + } else if !reflect.DeepEqual(book, readBook) { + t.Errorf("Extra differs; expected '%+v', got '%+v'", book, readBook) + } +} + +// TestReplaceUser creates a user and performs various replacements. 
+func TestReplaceUser(t *testing.T) { + c := createClientFromEnv(t, true) + u := ensureUser(nil, c, "replace_user", nil, t) + + if err := u.Replace(context.TODO(), driver.UserOptions{}); err != nil { + t.Errorf("Cannot replace user with empty options: %s", describe(err)) + } + + if u.IsActive() != true { + t.Errorf("Expected IsActive to be true, got false") + } + if err := u.Replace(context.TODO(), driver.UserOptions{ + Active: boolRef(false), + }); err != nil { + t.Errorf("Cannot replace user with Active in options: %s", describe(err)) + } + if u.IsActive() != false { + t.Errorf("Expected IsActive to be false, got true") + } + + book := Book{Title: "Testing is fun"} + if err := u.Replace(context.TODO(), driver.UserOptions{ + Extra: book, + }); err != nil { + t.Errorf("Cannot replace user with Extra in options: %s", describe(err)) + } + var readBook Book + if err := u.Extra(&readBook); err != nil { + t.Errorf("Failed to read extra: %s", describe(err)) + } else if !reflect.DeepEqual(book, readBook) { + t.Errorf("Extra differs; expected '%+v', got '%+v'", book, readBook) + } + if u.IsActive() != true { + t.Errorf("Expected IsActive to be true, got false") + } +} diff --git a/deps/github.com/arangodb/go-driver/test/util.go b/deps/github.com/arangodb/go-driver/test/util.go new file mode 100644 index 000000000..d764d8ce4 --- /dev/null +++ b/deps/github.com/arangodb/go-driver/test/util.go @@ -0,0 +1,84 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "encoding/hex" + "encoding/json" + "fmt" + "testing" + + driver "github.com/arangodb/go-driver" +) + +type testEnv interface { + Error(message ...interface{}) + Errorf(format string, args ...interface{}) + Fatal(message ...interface{}) + Fatalf(format string, args ...interface{}) + Log(message ...interface{}) + Logf(format string, args ...interface{}) +} + +// boolRef returns a reference to a given boolean +func boolRef(v bool) *bool { + return &v +} + +// assertOK fails the test if the given error is not nil. +func assertOK(err error, t *testing.T) { + if err != nil { + t.Fatalf("Assertion failed: %s", describe(err)) + } +} + +// describe returns a string description of the given error. +func describe(err error) string { + if err == nil { + return "nil" + } + cause := driver.Cause(err) + var msg string + if re, ok := cause.(*driver.ResponseError); ok { + msg = re.Error() + } else { + c, _ := json.Marshal(cause) + msg = string(c) + } + if cause.Error() != err.Error() { + return fmt.Sprintf("%v caused by %v (%v)", err, cause, msg) + } else { + return fmt.Sprintf("%v (%v)", err, msg) + } +} + +func formatRawResponse(raw []byte) string { + l := len(raw) + if l < 2 { + return hex.EncodeToString(raw) + } + if (raw[0] == '{' && raw[l-1] == '}') || (raw[0] == '[' && raw[l-1] == ']') { + return string(raw) + } + return hex.EncodeToString(raw) +} diff --git a/deps/github.com/arangodb/go-driver/test/version_test.go b/deps/github.com/arangodb/go-driver/test/version_test.go new file mode 100644 index 000000000..202eb05a4 --- /dev/null +++ b/deps/github.com/arangodb/go-driver/test/version_test.go @@ -0,0 +1,90 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the 
"License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "testing" + + driver "github.com/arangodb/go-driver" +) + +// TestVersion tests Version functions. +func TestVersion(t *testing.T) { + + tests := []struct { + Input driver.Version + Major int + Minor int + Sub string + SubInt int + SubIsInt bool + }{ + {"1.2.3", 1, 2, "3", 3, true}, + {"", 0, 0, "", 0, false}, + {"1.2.3a", 1, 2, "3a", 0, false}, + {"13.12", 13, 12, "", 0, false}, + } + + for _, test := range tests { + if v := test.Input.Major(); v != test.Major { + t.Errorf("Major failed for '%s', expected %d, got %d", test.Input, test.Major, v) + } + if v := test.Input.Minor(); v != test.Minor { + t.Errorf("Minor failed for '%s', expected %d, got %d", test.Input, test.Minor, v) + } + if v := test.Input.Sub(); v != test.Sub { + t.Errorf("Sub failed for '%s', expected '%s', got '%s'", test.Input, test.Sub, v) + } + if v, vIsInt := test.Input.SubInt(); vIsInt != test.SubIsInt || v != test.SubInt { + t.Errorf("SubInt failed for '%s', expected (%d,%v), got (%d,%v)", test.Input, test.SubInt, test.SubIsInt, v, vIsInt) + } + } +} + +// TestVersionCompareTo tests Version.CompareTo. 
+func TestVersionCompareTo(t *testing.T) { + tests := []struct { + A driver.Version + B driver.Version + Result int + }{ + {"1.2.3", "1.2.3", 0}, + {"1.2", "1.2.3", -1}, + {"1.2.3", "2.3.5", -1}, + {"1.2", "1.1", 1}, + {"1.2.3", "1.1.7", 1}, + {"2.2", "1.2.a", 1}, + {"1", "1.2.3", -1}, + {"1.2.a", "1.2.3", 1}, + {"1.2.3", "1.2.a", -1}, + {"", "", 0}, + {"1", "1", 0}, + {"2.1", "2.1", 0}, + } + + for _, test := range tests { + if r := test.A.CompareTo(test.B); r != test.Result { + t.Errorf("CompareTo('%s', '%s') failed, expected %d, got %d", test.A, test.B, test.Result, r) + } + } +} diff --git a/deps/github.com/arangodb/go-driver/test/vertex_collection_test.go b/deps/github.com/arangodb/go-driver/test/vertex_collection_test.go new file mode 100644 index 000000000..2287cd43a --- /dev/null +++ b/deps/github.com/arangodb/go-driver/test/vertex_collection_test.go @@ -0,0 +1,135 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "context" + "testing" + + driver "github.com/arangodb/go-driver" +) + +// ensureVertexCollection returns the vertex collection with given name, creating it if needed. 
+func ensureVertexCollection(ctx context.Context, g driver.Graph, collection string, t *testing.T) driver.Collection { + ec, err := g.VertexCollection(ctx, collection) + if driver.IsNotFound(err) { + ec, err := g.CreateVertexCollection(ctx, collection) + if err != nil { + t.Fatalf("Failed to create vertex collection: %s", describe(err)) + } + return ec + } else if err != nil { + t.Fatalf("Failed to open vertex collection: %s", describe(err)) + } + return ec +} + +// TestCreateVertexCollection creates a graph and then adds a vertex collection in it +func TestCreateVertexCollection(t *testing.T) { + c := createClientFromEnv(t, true) + db := ensureDatabase(nil, c, "vertex_collection_test", nil, t) + name := "test_create_vertex_collection" + g, err := db.CreateGraph(nil, name, nil) + if err != nil { + t.Fatalf("Failed to create graph '%s': %s", name, describe(err)) + } + + // List vertex collections, must be empty + if list, err := g.VertexCollections(nil); err != nil { + t.Errorf("VertexCollections failed: %s", describe(err)) + } else if len(list) > 0 { + t.Errorf("VertexCollections return %d vertex collections, expected 0", len(list)) + } + + // Now create a vertex collection + if vc, err := g.CreateVertexCollection(nil, "person"); err != nil { + t.Errorf("CreateVertexCollection failed: %s", describe(err)) + } else if vc.Name() != "person" { + t.Errorf("Invalid name, expected 'person', got '%s'", vc.Name()) + } + + // List vertex collections, must be contain 'person' + if list, err := g.VertexCollections(nil); err != nil { + t.Errorf("VertexCollections failed: %s", describe(err)) + } else if len(list) != 1 { + t.Errorf("VertexCollections return %d vertex collections, expected 1", len(list)) + } else if list[0].Name() != "person" { + t.Errorf("Invalid list[0].name, expected 'person', got '%s'", list[0].Name()) + } + + // Person vertex collection must exits + if found, err := g.VertexCollectionExists(nil, "person"); err != nil { + t.Errorf("VertexCollectionExists 
failed: %s", describe(err)) + } else if !found { + t.Errorf("VertexCollectionExists return false, expected true") + } + + // Open person vertex collection must exits + if vc, err := g.VertexCollection(nil, "person"); err != nil { + t.Errorf("VertexCollection failed: %s", describe(err)) + } else if vc.Name() != "person" { + t.Errorf("VertexCollection return invalid collection, expected 'person', got '%s'", vc.Name()) + } +} + +// TestRemoveVertexCollection creates a graph and then adds an vertex collection in it and then removes the vertex collection. +func TestRemoveVertexCollection(t *testing.T) { + c := createClientFromEnv(t, true) + db := ensureDatabase(nil, c, "vertex_collection_test", nil, t) + name := "test_remove_vertex_collection" + g, err := db.CreateGraph(nil, name, nil) + if err != nil { + t.Fatalf("Failed to create graph '%s': %s", name, describe(err)) + } + + // Now create an vertex collection + vc, err := g.CreateVertexCollection(nil, "friends") + if err != nil { + t.Errorf("CreateVertexCollection failed: %s", describe(err)) + } else if vc.Name() != "friends" { + t.Errorf("Invalid name, expected 'friends', got '%s'", vc.Name()) + } + + // Friends vertex collection must exits + if found, err := g.VertexCollectionExists(nil, "friends"); err != nil { + t.Errorf("VertexCollectionExists failed: %s", describe(err)) + } else if !found { + t.Errorf("VertexCollectionExists return false, expected true") + } + + // Remove vertex collection + if err := vc.Remove(nil); err != nil { + t.Errorf("Remove failed: %s", describe(err)) + } + + // Friends vertex collection must NOT exits + if found, err := g.VertexCollectionExists(nil, "friends"); err != nil { + t.Errorf("VertexCollectionExists failed: %s", describe(err)) + } else if found { + t.Errorf("VertexCollectionExists return true, expected false") + } + + // Collection must still exist in database + assertCollection(nil, db, "friends", t) + +} diff --git 
a/deps/github.com/arangodb/go-driver/test/vertex_create_test.go b/deps/github.com/arangodb/go-driver/test/vertex_create_test.go new file mode 100644 index 000000000..0035f0736 --- /dev/null +++ b/deps/github.com/arangodb/go-driver/test/vertex_create_test.go @@ -0,0 +1,126 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "context" + "reflect" + "testing" + + driver "github.com/arangodb/go-driver" +) + +// TestCreateVertex creates an vertex and then checks that it exists. 
+func TestCreateVertex(t *testing.T) { + var ctx context.Context + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "vertex_test", nil, t) + g := ensureGraph(ctx, db, "create_vertex_test", nil, t) + vc := ensureVertexCollection(ctx, g, "books", t) + + book := Book{Title: "Graphs are cool"} + meta, err := vc.CreateDocument(ctx, book) + if err != nil { + t.Fatalf("Failed to create new vertex: %s", describe(err)) + } + // Document must exists now + if found, err := vc.DocumentExists(nil, meta.Key); err != nil { + t.Fatalf("DocumentExists failed for '%s': %s", meta.Key, describe(err)) + } else if !found { + t.Errorf("DocumentExists returned false for '%s', expected true", meta.Key) + } + + // Read document + var readDoc Book + if _, err := vc.ReadDocument(ctx, meta.Key, &readDoc); err != nil { + t.Fatalf("Failed to read vertex '%s': %s", meta.Key, describe(err)) + } else { + if !reflect.DeepEqual(book, readDoc) { + t.Errorf("Got invalid document. Expected '%+v', got '%+v'", book, readDoc) + } + } +} + +// TestCreateVertexReturnNew creates a document and checks the document returned in in ReturnNew. +func TestCreateVertexReturnNew(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + skipBelowVersion(c, "3.4", t) // See https://github.com/arangodb/arangodb/issues/2365 + db := ensureDatabase(ctx, c, "vertex_test", nil, t) + g := ensureGraph(ctx, db, "create_vertex_return_new_est", nil, t) + vc := ensureVertexCollection(ctx, g, "users", t) + + doc := UserDoc{ + Name: "Fern", + Age: 31, + } + var newDoc UserDoc + meta, err := vc.CreateDocument(driver.WithReturnNew(ctx, &newDoc), doc) + if err != nil { + t.Fatalf("Failed to create new vertex: %s", describe(err)) + } + // NewDoc must equal doc + if !reflect.DeepEqual(doc, newDoc) { + t.Errorf("Got wrong ReturnNew document. 
Expected %+v, got %+v", doc, newDoc) + } + // Document must exists now + var readDoc UserDoc + if _, err := vc.ReadDocument(ctx, meta.Key, &readDoc); err != nil { + t.Fatalf("Failed to read document '%s': %s", meta.Key, describe(err)) + } + if !reflect.DeepEqual(doc, readDoc) { + t.Errorf("Got wrong document. Expected %+v, got %+v", doc, readDoc) + } +} + +// TestCreateVertexSilent creates a document with WithSilent. +func TestCreateVertexSilent(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "vertex_test", nil, t) + g := ensureGraph(ctx, db, "create_vertex_silent_test", nil, t) + vc := ensureVertexCollection(ctx, g, "users", t) + + doc := UserDoc{ + Name: "Fern", + Age: 31, + } + if meta, err := vc.CreateDocument(driver.WithSilent(ctx), doc); err != nil { + t.Fatalf("Failed to create new document: %s", describe(err)) + } else if meta.Key != "" { + t.Errorf("Expected empty meta, got %v", meta) + } +} + +// TestCreateVertexNil creates a document with a nil document. +func TestCreateVertexNil(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "vertex_test", nil, t) + g := ensureGraph(ctx, db, "create_vertex_nil_test", nil, t) + vc := ensureVertexCollection(ctx, g, "users", t) + + if _, err := vc.CreateDocument(nil, nil); !driver.IsInvalidArgument(err) { + t.Fatalf("Expected InvalidArgumentError, got %s", describe(err)) + } +} diff --git a/deps/github.com/arangodb/go-driver/test/vertex_remove_test.go b/deps/github.com/arangodb/go-driver/test/vertex_remove_test.go new file mode 100644 index 000000000..8c628737e --- /dev/null +++ b/deps/github.com/arangodb/go-driver/test/vertex_remove_test.go @@ -0,0 +1,172 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "context" + "testing" + + driver "github.com/arangodb/go-driver" +) + +// TestRemoveVertex creates a document, remove it and then checks the removal has succeeded. +func TestRemoveVertex(t *testing.T) { + var ctx context.Context + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "vertex_test", nil, t) + g := ensureGraph(ctx, db, "remove_vertex_test", nil, t) + vc := ensureVertexCollection(ctx, g, "users", t) + + doc := UserDoc{ + Name: "Jones", + Age: 65, + } + meta, err := vc.CreateDocument(ctx, doc) + if err != nil { + t.Fatalf("Failed to create new document: %s", describe(err)) + } + if _, err := vc.RemoveDocument(ctx, meta.Key); err != nil { + t.Fatalf("Failed to remove document '%s': %s", meta.Key, describe(err)) + } + // Should not longer exist + var readDoc UserDoc + if _, err := vc.ReadDocument(ctx, meta.Key, &readDoc); !driver.IsNotFound(err) { + t.Fatalf("Expected NotFoundError, got %s", describe(err)) + } + + // Document must not exist now + if found, err := vc.DocumentExists(nil, meta.Key); err != nil { + t.Fatalf("DocumentExists failed for '%s': %s", meta.Key, describe(err)) + } else if found { + t.Errorf("DocumentExists returned true for '%s', expected false", meta.Key) + } +} + +// TestRemoveVertexReturnOld creates a document, removes it checks the ReturnOld value. 
+func TestRemoveVertexReturnOld(t *testing.T) { + var ctx context.Context + c := createClientFromEnv(t, true) + skipBelowVersion(c, "3.4", t) // See https://github.com/arangodb/arangodb/issues/2365 + db := ensureDatabase(ctx, c, "vertex_test", nil, t) + g := ensureGraph(ctx, db, "remove_vertex_returnOld_test", nil, t) + vc := ensureVertexCollection(ctx, g, "books", t) + + doc := Book{ + Title: "Testing 101", + } + meta, err := vc.CreateDocument(ctx, doc) + if err != nil { + t.Fatalf("Failed to create new document: %s", describe(err)) + } + var old Book + ctx = driver.WithReturnOld(ctx, &old) + if _, err := vc.RemoveDocument(ctx, meta.Key); !driver.IsInvalidArgument(err) { + t.Errorf("Expected InvalidArgumentError, got %s", describe(err)) + } +} + +// TestRemoveVertexSilent creates a document, removes it with Silent() and then checks the meta is indeed empty. +func TestRemoveVertexSilent(t *testing.T) { + var ctx context.Context + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "vertex_test", nil, t) + g := ensureGraph(ctx, db, "remove_vertex_silent_test", nil, t) + vc := ensureVertexCollection(ctx, g, "books", t) + + doc := Book{ + Title: "Shhh...", + } + meta, err := vc.CreateDocument(ctx, doc) + if err != nil { + t.Fatalf("Failed to create new document: %s", describe(err)) + } + ctx = driver.WithSilent(ctx) + if rmeta, err := vc.RemoveDocument(ctx, meta.Key); err != nil { + t.Fatalf("Failed to remove document '%s': %s", meta.Key, describe(err)) + } else if rmeta.Key != "" { + t.Errorf("Expected empty meta, got %v", rmeta) + } + // Should not longer exist + var readDoc Book + if _, err := vc.ReadDocument(ctx, meta.Key, &readDoc); !driver.IsNotFound(err) { + t.Fatalf("Expected NotFoundError, got %s", describe(err)) + } +} + +// TestRemoveVertexRevision creates a document, removes it with an incorrect revision. 
+func TestRemoveVertexRevision(t *testing.T) { + var ctx context.Context + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "vertex_test", nil, t) + g := ensureGraph(ctx, db, "remove_vertex_revision_test", nil, t) + vc := ensureVertexCollection(ctx, g, "persons", t) + + doc := UserDoc{ + Name: "Dude", + Age: 12, + } + meta, err := vc.CreateDocument(ctx, doc) + if err != nil { + t.Fatalf("Failed to create new document: %s", describe(err)) + } + + // Replace the document to get another revision + replacement := Book{ + Title: "The only way is change", + } + meta2, err := vc.ReplaceDocument(ctx, meta.Key, replacement) + if err != nil { + t.Fatalf("Failed to replace document '%s': %s", meta.Key, describe(err)) + } + + // Try to remove document with initial revision (must fail) + initialRevCtx := driver.WithRevision(ctx, meta.Rev) + if _, err := vc.RemoveDocument(initialRevCtx, meta.Key); !driver.IsPreconditionFailed(err) { + t.Fatalf("Expected PreconditionFailedError, got %s", describe(err)) + } + + // Try to remove document with correct revision (must succeed) + replacedRevCtx := driver.WithRevision(ctx, meta2.Rev) + if _, err := vc.RemoveDocument(replacedRevCtx, meta.Key); err != nil { + t.Fatalf("Expected success, got %s", describe(err)) + } + + // Should not longer exist + var readDoc Book + if _, err := vc.ReadDocument(ctx, meta.Key, &readDoc); !driver.IsNotFound(err) { + t.Fatalf("Expected NotFoundError, got %s", describe(err)) + } +} + +// TestRemoveVertexKeyEmpty removes a document it with an empty key. 
+func TestRemoveVertexKeyEmpty(t *testing.T) { + var ctx context.Context + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "vertex_test", nil, t) + g := ensureGraph(ctx, db, "remove_vertex_nil_test", nil, t) + vc := ensureVertexCollection(ctx, g, "hobby", t) + + if _, err := vc.RemoveDocument(nil, ""); !driver.IsInvalidArgument(err) { + t.Errorf("Expected InvalidArgumentError, got %s", describe(err)) + } +} diff --git a/deps/github.com/arangodb/go-driver/test/vertex_replace_test.go b/deps/github.com/arangodb/go-driver/test/vertex_replace_test.go new file mode 100644 index 000000000..acd4bb243 --- /dev/null +++ b/deps/github.com/arangodb/go-driver/test/vertex_replace_test.go @@ -0,0 +1,231 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "context" + "reflect" + "testing" + + driver "github.com/arangodb/go-driver" +) + +// TestReplaceVertex creates a document, replaces it and then checks the replacement has succeeded. 
+func TestReplaceVertex(t *testing.T) { + var ctx context.Context + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "vertex_test", nil, t) + g := ensureGraph(ctx, db, "replace_vertex_test", nil, t) + vc := ensureVertexCollection(ctx, g, "friend", t) + + doc := UserDoc{ + Name: "Bunny", + Age: 82, + } + meta, err := vc.CreateDocument(ctx, doc) + if err != nil { + t.Fatalf("Failed to create new document: %s", describe(err)) + } + // Replacement doc + replacement := Book{ + Title: "Old is nice", + } + if _, err := vc.ReplaceDocument(ctx, meta.Key, replacement); err != nil { + t.Fatalf("Failed to replace document '%s': %s", meta.Key, describe(err)) + } + // Read replaces document + var readDoc Book + if _, err := vc.ReadDocument(ctx, meta.Key, &readDoc); err != nil { + t.Fatalf("Failed to read document '%s': %s", meta.Key, describe(err)) + } + if !reflect.DeepEqual(replacement, readDoc) { + t.Errorf("Got wrong document. Expected %+v, got %+v", replacement, readDoc) + } +} + +// TestReplaceVertexReturnOld creates a document, replaces it checks the ReturnOld value. 
+func TestReplaceVertexReturnOld(t *testing.T) { + var ctx context.Context + c := createClientFromEnv(t, true) + skipBelowVersion(c, "3.4", t) // See https://github.com/arangodb/arangodb/issues/2365 + db := ensureDatabase(ctx, c, "vertex_test", nil, t) + g := ensureGraph(ctx, db, "replace_vertex_returnOld_test", nil, t) + vc := ensureVertexCollection(ctx, g, "books", t) + + doc := Book{ + Title: "Who goes there", + } + meta, err := vc.CreateDocument(ctx, doc) + if err != nil { + t.Fatalf("Failed to create new document: %s", describe(err)) + } + // Replace document + replacement := UserDoc{ + Name: "Ghost", + Age: 1011, + } + var old Book + ctx = driver.WithReturnOld(ctx, &old) + if _, err := vc.ReplaceDocument(ctx, meta.Key, replacement); err != nil { + t.Fatalf("Failed to replace document '%s': %s", meta.Key, describe(err)) + } + // Check old document + if !reflect.DeepEqual(doc, old) { + t.Errorf("Got wrong document. Expected %+v, got %+v", doc, old) + } +} + +// TestReplaceVertexReturnNew creates a document, replaces it checks the ReturnNew value. 
+func TestReplaceVertexReturnNew(t *testing.T) { + var ctx context.Context + c := createClientFromEnv(t, true) + skipBelowVersion(c, "3.4", t) // See https://github.com/arangodb/arangodb/issues/2365 + db := ensureDatabase(ctx, c, "vertex_test", nil, t) + g := ensureGraph(ctx, db, "replace_vertex_returnNew_test", nil, t) + vc := ensureVertexCollection(ctx, g, "users", t) + + doc := UserDoc{ + Name: "Mark", + Age: 51, + } + meta, err := vc.CreateDocument(ctx, doc) + if err != nil { + t.Fatalf("Failed to create new document: %s", describe(err)) + } + // Update document + replacement := Book{ + Title: "How to win elections", + } + var newDoc Book + ctx = driver.WithReturnNew(ctx, &newDoc) + if _, err := vc.ReplaceDocument(ctx, meta.Key, replacement); err != nil { + t.Fatalf("Failed to replace document '%s': %s", meta.Key, describe(err)) + } + // Check new document + expected := replacement + if !reflect.DeepEqual(expected, newDoc) { + t.Errorf("Got wrong document. Expected %+v, got %+v", expected, newDoc) + } +} + +// TestReplaceVertexSilent creates a document, replaces it with Silent() and then checks the meta is indeed empty. 
+func TestReplaceVertexSilent(t *testing.T) {
+	var ctx context.Context
+	c := createClientFromEnv(t, true)
+	db := ensureDatabase(ctx, c, "vertex_test", nil, t)
+	// Use a dedicated graph name; the previous copy-pasted
+	// "replace_vertex_returnNew_test" collided with TestReplaceVertexReturnNew's graph.
+	g := ensureGraph(ctx, db, "replace_vertex_silent_test", nil, t)
+	vc := ensureVertexCollection(ctx, g, "person", t)
+
+	doc := UserDoc{
+		Name: "Janna",
+	}
+	meta, err := vc.CreateDocument(ctx, doc)
+	if err != nil {
+		t.Fatalf("Failed to create new document: %s", describe(err))
+	}
+	// Update document
+	replacement := UserDoc{
+		Name: "Boeda",
+	}
+	ctx = driver.WithSilent(ctx)
+	if meta, err := vc.ReplaceDocument(ctx, meta.Key, replacement); err != nil {
+		t.Fatalf("Failed to replace document '%s': %s", meta.Key, describe(err))
+	} else if meta.Key != "" {
+		t.Errorf("Expected empty meta, got %v", meta)
+	}
+}
+
+// TestReplaceVertexRevision creates a document, replaces it with a specific (correct) revision.
+// Then it attempts a replacement with an incorrect revision which must fail.
+func TestReplaceVertexRevision(t *testing.T) {
+	var ctx context.Context
+	c := createClientFromEnv(t, true)
+	db := ensureDatabase(ctx, c, "vertex_test", nil, t)
+	g := ensureGraph(ctx, db, "replace_vertex_revision_test", nil, t)
+	vc := ensureVertexCollection(ctx, g, "books", t)
+
+	doc := Book{
+		Title: "France in spring",
+	}
+	meta, err := vc.CreateDocument(ctx, doc)
+	if err != nil {
+		t.Fatalf("Failed to create new document: %s", describe(err))
+	}
+
+	// Replace document with correct revision
+	replacement := Book{
+		Title: "France in winter",
+	}
+	initialRevCtx := driver.WithRevision(ctx, meta.Rev)
+	var replacedRevCtx context.Context
+	if meta2, err := vc.ReplaceDocument(initialRevCtx, meta.Key, replacement); err != nil {
+		t.Fatalf("Failed to replace document '%s': %s", meta.Key, describe(err))
+	} else {
+		replacedRevCtx = driver.WithRevision(ctx, meta2.Rev)
+		if meta2.Rev == meta.Rev {
+			t.Errorf("Expected revision to change, got initial revision '%s', replaced revision '%s'", meta.Rev, meta2.Rev)
+		}
+ } + + // Replace document with incorrect revision + replacement.Title = "France in fall" + if _, err := vc.ReplaceDocument(initialRevCtx, meta.Key, replacement); !driver.IsPreconditionFailed(err) { + t.Errorf("Expected PreconditionFailedError, got %s", describe(err)) + } + + // Replace document once more with correct revision + replacement.Title = "France in autumn" + if _, err := vc.ReplaceDocument(replacedRevCtx, meta.Key, replacement); err != nil { + t.Errorf("Expected success, got %s", describe(err)) + } +} + +// TestReplaceVertexKeyEmpty replaces a document it with an empty key. +func TestReplaceVertexKeyEmpty(t *testing.T) { + var ctx context.Context + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "vertex_test", nil, t) + g := ensureGraph(ctx, db, "replace_vertex_keyEmpty_test", nil, t) + vc := ensureVertexCollection(ctx, g, "names", t) + + // Replace document + replacement := map[string]interface{}{ + "name": "Updated", + } + if _, err := vc.ReplaceDocument(nil, "", replacement); !driver.IsInvalidArgument(err) { + t.Errorf("Expected InvalidArgumentError, got %s", describe(err)) + } +} + +// TestReplaceVertexUpdateNil replaces a document it with a nil update. 
+func TestReplaceVertexUpdateNil(t *testing.T) { + var ctx context.Context + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "vertex_test", nil, t) + g := ensureGraph(ctx, db, "replace_vertex_updateNil_test", nil, t) + vc := ensureVertexCollection(ctx, g, "names", t) + + if _, err := vc.ReplaceDocument(nil, "validKey", nil); !driver.IsInvalidArgument(err) { + t.Errorf("Expected InvalidArgumentError, got %s", describe(err)) + } +} diff --git a/deps/github.com/arangodb/go-driver/test/vertex_update_test.go b/deps/github.com/arangodb/go-driver/test/vertex_update_test.go new file mode 100644 index 000000000..087424adc --- /dev/null +++ b/deps/github.com/arangodb/go-driver/test/vertex_update_test.go @@ -0,0 +1,326 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "context" + "reflect" + "testing" + + driver "github.com/arangodb/go-driver" +) + +// TestUpdateVertex creates a document, updates it and then checks the update has succeeded. 
+func TestUpdateVertex(t *testing.T) { + var ctx context.Context + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "vertex_test", nil, t) + g := ensureGraph(ctx, db, "update_vertex_test", nil, t) + vc := ensureVertexCollection(ctx, g, "user", t) + + doc := UserDoc{ + Name: "Francis", + Age: 51, + } + meta, err := vc.CreateDocument(ctx, doc) + if err != nil { + t.Fatalf("Failed to create new document: %s", describe(err)) + } + // Update document + update := map[string]interface{}{ + "age": 55, + } + if _, err := vc.UpdateDocument(ctx, meta.Key, update); err != nil { + t.Fatalf("Failed to update document '%s': %s", meta.Key, describe(err)) + } + // Read updated document + var readDoc UserDoc + if _, err := vc.ReadDocument(ctx, meta.Key, &readDoc); err != nil { + t.Fatalf("Failed to read document '%s': %s", meta.Key, describe(err)) + } + doc.Age = 55 + if !reflect.DeepEqual(doc, readDoc) { + t.Errorf("Got wrong document. Expected %+v, got %+v", doc, readDoc) + } +} + +// TestUpdateVertexReturnOld creates a document, updates it checks the ReturnOld value. 
+func TestUpdateVertexReturnOld(t *testing.T) { + var ctx context.Context + c := createClientFromEnv(t, true) + skipBelowVersion(c, "3.4", t) // See https://github.com/arangodb/arangodb/issues/2365 + db := ensureDatabase(ctx, c, "vertex_test", nil, t) + g := ensureGraph(ctx, db, "update_vertex_returnOld_test", nil, t) + vc := ensureVertexCollection(ctx, g, "books", t) + + doc := Book{ + Title: "Hello", + } + meta, err := vc.CreateDocument(ctx, doc) + if err != nil { + t.Fatalf("Failed to create new document: %s", describe(err)) + } + // Update document + update := map[string]interface{}{ + "Title": "Goodbye", + } + var old Book + ctx = driver.WithReturnOld(ctx, &old) + if _, err := vc.UpdateDocument(ctx, meta.Key, update); err != nil { + t.Fatalf("Failed to update document '%s': %s", meta.Key, describe(err)) + } + // Check old document + if !reflect.DeepEqual(doc, old) { + t.Errorf("Got wrong document. Expected %+v, got %+v", doc, old) + } +} + +// TestUpdateVertexReturnNew creates a document, updates it checks the ReturnNew value. 
+func TestUpdateVertexReturnNew(t *testing.T) { + var ctx context.Context + c := createClientFromEnv(t, true) + skipBelowVersion(c, "3.4", t) // See https://github.com/arangodb/arangodb/issues/2365 + db := ensureDatabase(ctx, c, "vertex_test", nil, t) + g := ensureGraph(ctx, db, "update_vertex_returnNew_test", nil, t) + vc := ensureVertexCollection(ctx, g, "person", t) + + doc := UserDoc{ + Name: "Bertha", + Age: 31, + } + meta, err := vc.CreateDocument(ctx, doc) + if err != nil { + t.Fatalf("Failed to create new document: %s", describe(err)) + } + // Update document + update := map[string]interface{}{ + "age": 45, + } + var newDoc UserDoc + ctx = driver.WithReturnNew(ctx, &newDoc) + if _, err := vc.UpdateDocument(ctx, meta.Key, update); err != nil { + t.Fatalf("Failed to update document '%s': %s", meta.Key, describe(err)) + } + // Check new document + expected := doc + expected.Age = 45 + if !reflect.DeepEqual(expected, newDoc) { + t.Errorf("Got wrong document. Expected %+v, got %+v", expected, newDoc) + } +} + +// TestUpdateVertexKeepNullTrue creates a document, updates it with KeepNull(true) and then checks the update has succeeded. 
+func TestUpdateVertexKeepNullTrue(t *testing.T) { + var ctx context.Context + var conn driver.Connection + c := createClientFromEnv(t, true, &conn) + db := ensureDatabase(ctx, c, "vertex_test", nil, t) + g := ensureGraph(ctx, db, "update_vertex_keepNullTrue_test", nil, t) + vc := ensureVertexCollection(ctx, g, "accounts", t) + + doc := Account{ + ID: "store1", + User: &UserDoc{ + "Mathilda", + 45, + }, + } + meta, err := vc.CreateDocument(ctx, doc) + if err != nil { + t.Fatalf("Failed to create new document: %s", describe(err)) + } + // Update document + update := map[string]interface{}{ + "id": "foo", + "user": nil, + } + if _, err := vc.UpdateDocument(driver.WithKeepNull(ctx, true), meta.Key, update); err != nil { + t.Fatalf("Failed to update document '%s': %s", meta.Key, describe(err)) + } + // Read updated document + var readDoc map[string]interface{} + var rawResponse []byte + ctx = driver.WithRawResponse(ctx, &rawResponse) + if _, err := vc.ReadDocument(ctx, meta.Key, &readDoc); err != nil { + t.Fatalf("Failed to read document '%s': %s", meta.Key, describe(err)) + } + // We parse to this type of map, since unmarshalling nil values to a map of type map[string]interface{} + // will cause the entry to be deleted. 
+ var jsonMap map[string]*driver.RawObject + if err := conn.Unmarshal(rawResponse, &jsonMap); err != nil { + t.Fatalf("Failed to parse raw response: %s", describe(err)) + } + // Get "vertex" field and unmarshal it + if raw, found := jsonMap["vertex"]; !found { + t.Errorf("Expected vertex to be found but got not found") + } else { + jsonMap = nil + if err := conn.Unmarshal(*raw, &jsonMap); err != nil { + t.Fatalf("Failed to parse raw vertex object: %s", describe(err)) + } + if raw, found := jsonMap["user"]; !found { + t.Errorf("Expected user to be found but got not found") + } else if raw != nil { + t.Errorf("Expected user to be found and nil, got %s", string(*raw)) + } + } +} + +// TestUpdateVertexKeepNullFalse creates a document, updates it with KeepNull(false) and then checks the update has succeeded. +func TestUpdateVertexKeepNullFalse(t *testing.T) { + var ctx context.Context + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "vertex_test", nil, t) + g := ensureGraph(ctx, db, "update_vertex_keepNullFalse_test", nil, t) + vc := ensureVertexCollection(ctx, g, "accounts", t) + + doc := Account{ + ID: "Nullify", + User: &UserDoc{ + "Mathilda", + 45, + }, + } + meta, err := vc.CreateDocument(ctx, doc) + if err != nil { + t.Fatalf("Failed to create new document: %s", describe(err)) + } + // Update document + update := map[string]interface{}{ + "id": "another", + "user": nil, + } + if _, err := vc.UpdateDocument(driver.WithKeepNull(ctx, false), meta.Key, update); err != nil { + t.Fatalf("Failed to update document '%s': %s", meta.Key, describe(err)) + } + // Read updated document + readDoc := doc + if _, err := vc.ReadDocument(ctx, meta.Key, &readDoc); err != nil { + t.Fatalf("Failed to read document '%s': %s", meta.Key, describe(err)) + } + if readDoc.User == nil { + t.Errorf("Expected user to be untouched, got %v", readDoc.User) + } +} + +// TestUpdateVertexSilent creates a document, updates it with Silent() and then checks the meta is indeed empty. 
+func TestUpdateVertexSilent(t *testing.T) { + var ctx context.Context + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "vertex_test", nil, t) + g := ensureGraph(ctx, db, "update_vertex_silent_test", nil, t) + vc := ensureVertexCollection(ctx, g, "moments", t) + + doc := Book{ + Title: "Enjoy the silence", + } + meta, err := vc.CreateDocument(ctx, doc) + if err != nil { + t.Fatalf("Failed to create new document: %s", describe(err)) + } + // Update document + update := map[string]interface{}{ + "Title": "No more noise", + } + ctx = driver.WithSilent(ctx) + if meta, err := vc.UpdateDocument(ctx, meta.Key, update); err != nil { + t.Fatalf("Failed to update document '%s': %s", meta.Key, describe(err)) + } else if meta.Key != "" { + t.Errorf("Expected empty meta, got %v", meta) + } +} + +// TestUpdateVertexRevision creates a document, updates it with a specific (correct) revision. +// Then it attempts an update with an incorrect revision which must fail. +func TestUpdateVertexRevision(t *testing.T) { + var ctx context.Context + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "vertex_test", nil, t) + g := ensureGraph(ctx, db, "update_vertex_revision_test", nil, t) + vc := ensureVertexCollection(ctx, g, "books", t) + + doc := Book{ + Title: "Rev1", + } + meta, err := vc.CreateDocument(ctx, doc) + if err != nil { + t.Fatalf("Failed to create new document: %s", describe(err)) + } + + // Update document with correct revision + update := map[string]interface{}{ + "Title": "Rev2", + } + initialRevCtx := driver.WithRevision(ctx, meta.Rev) + var updatedRevCtx context.Context + if meta2, err := vc.UpdateDocument(initialRevCtx, meta.Key, update); err != nil { + t.Fatalf("Failed to update document '%s': %s", meta.Key, describe(err)) + } else { + updatedRevCtx = driver.WithRevision(ctx, meta2.Rev) + if meta2.Rev == meta.Rev { + t.Errorf("Expected revision to change, got initial revision '%s', updated revision '%s'", meta.Rev, meta2.Rev) + } + } + + 
// Update document with incorrect revision + update["Title"] = "Rev3" + if _, err := vc.UpdateDocument(initialRevCtx, meta.Key, update); !driver.IsPreconditionFailed(err) { + t.Errorf("Expected PreconditionFailedError, got %s", describe(err)) + } + + // Update document once more with correct revision + update["Title"] = "Rev4" + if _, err := vc.UpdateDocument(updatedRevCtx, meta.Key, update); err != nil { + t.Errorf("Expected success, got %s", describe(err)) + } +} + +// TestUpdateVertexKeyEmpty updates a document it with an empty key. +func TestUpdateVertexKeyEmpty(t *testing.T) { + var ctx context.Context + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "vertex_test", nil, t) + g := ensureGraph(ctx, db, "update_vertex_keyEmpty_test", nil, t) + vc := ensureVertexCollection(ctx, g, "tests", t) + + // Update document + update := map[string]interface{}{ + "name": "Updated", + } + if _, err := vc.UpdateDocument(nil, "", update); !driver.IsInvalidArgument(err) { + t.Errorf("Expected InvalidArgumentError, got %s", describe(err)) + } +} + +// TestUpdateVertexUpdateNil updates a document it with a nil update. 
+func TestUpdateVertexUpdateNil(t *testing.T) { + var ctx context.Context + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "vertex_test", nil, t) + g := ensureGraph(ctx, db, "update_vertex_updateNil_test", nil, t) + vc := ensureVertexCollection(ctx, g, "errors", t) + + if _, err := vc.UpdateDocument(nil, "validKey", nil); !driver.IsInvalidArgument(err) { + t.Errorf("Expected InvalidArgumentError, got %s", describe(err)) + } +} diff --git a/deps/github.com/arangodb/go-driver/test/vertices_create_test.go b/deps/github.com/arangodb/go-driver/test/vertices_create_test.go new file mode 100644 index 000000000..db10b5745 --- /dev/null +++ b/deps/github.com/arangodb/go-driver/test/vertices_create_test.go @@ -0,0 +1,180 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "context" + "reflect" + "testing" + + driver "github.com/arangodb/go-driver" +) + +// TestCreateVertices creates documents and then checks that it exists. 
+func TestCreateVertices(t *testing.T) {
+	ctx := context.Background()
+	c := createClientFromEnv(t, true)
+	db := ensureDatabase(ctx, c, "vertices_test", nil, t)
+	g := ensureGraph(ctx, db, "create_vertices_test", nil, t)
+	vc := ensureVertexCollection(ctx, g, "books", t)
+
+	docs := []Book{
+		Book{
+			Title: "Book1",
+		},
+		Book{
+			Title: "Book2",
+		},
+		Book{
+			Title: "Book3",
+		},
+	}
+	metas, errs, err := vc.CreateDocuments(ctx, docs)
+	if err != nil {
+		t.Fatalf("Failed to create new documents: %s", describe(err))
+	} else if len(metas) != len(docs) {
+		t.Errorf("Expected %d metas, got %d", len(docs), len(metas))
+	} else {
+		for i := 0; i < len(docs); i++ {
+			if err := errs[i]; err != nil {
+				t.Errorf("Expected no error at index %d, got %s", i, describe(err))
+			}
+
+			// Document must exist now; read with ctx (nil ctx is reserved for invalid-argument probes in this suite)
+			var readDoc Book
+			if _, err := vc.ReadDocument(ctx, metas[i].Key, &readDoc); err != nil {
+				t.Fatalf("Failed to read document '%s': %s", metas[i].Key, describe(err))
+			}
+			if !reflect.DeepEqual(docs[i], readDoc) {
+				t.Errorf("Got wrong document. Expected %+v, got %+v", docs[i], readDoc)
+			}
+		}
+	}
+}
+
+// TestCreateVerticesReturnNew creates documents and checks the document returned in ReturnNew.
+func TestCreateVerticesReturnNew(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + skipBelowVersion(c, "3.4", t) // See https://github.com/arangodb/arangodb/issues/2365 + db := ensureDatabase(ctx, c, "vertices_test", nil, t) + g := ensureGraph(ctx, db, "create_vertices_returnNew_test", nil, t) + vc := ensureVertexCollection(ctx, g, "books", t) + + docs := []Book{ + Book{ + Title: "Book1", + }, + Book{ + Title: "Book2", + }, + Book{ + Title: "Book3", + }, + } + newDocs := make([]Book, len(docs)) + metas, errs, err := vc.CreateDocuments(driver.WithReturnNew(ctx, newDocs), docs) + if err != nil { + t.Fatalf("Failed to create new documents: %s", describe(err)) + } else if len(metas) != len(docs) { + t.Errorf("Expected %d metas, got %d", len(docs), len(metas)) + } else { + for i := 0; i < len(docs); i++ { + if err := errs[i]; err != nil { + t.Errorf("Expected no error at index %d, got %s", i, describe(err)) + } + // NewDoc must equal doc + if !reflect.DeepEqual(docs[i], newDocs[i]) { + t.Errorf("Got wrong ReturnNew document. Expected %+v, got %+v", docs[i], newDocs[i]) + } + // Document must exists now + var readDoc Book + if _, err := vc.ReadDocument(ctx, metas[i].Key, &readDoc); err != nil { + t.Fatalf("Failed to read document '%s': %s", metas[i].Key, describe(err)) + } + if !reflect.DeepEqual(docs[i], readDoc) { + t.Errorf("Got wrong document. Expected %+v, got %+v", docs[i], readDoc) + } + } + } +} + +// TestCreateVerticesSilent creates documents with WithSilent. 
+func TestCreateVerticesSilent(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "vertices_test", nil, t) + g := ensureGraph(ctx, db, "create_vertices_silent_test", nil, t) + vc := ensureVertexCollection(ctx, g, "users", t) + + docs := []UserDoc{ + UserDoc{ + Name: "Jan", + Age: 12, + }, + UserDoc{ + Name: "Piet", + Age: 2, + }, + } + if metas, errs, err := vc.CreateDocuments(driver.WithSilent(ctx), docs); err != nil { + t.Fatalf("Failed to create new documents: %s", describe(err)) + } else { + if len(metas) != 0 { + t.Errorf("Expected 0 metas, got %d", len(metas)) + } + if len(errs) != 0 { + t.Errorf("Expected 0 errors, got %d", len(errs)) + } + } +} + +// TestCreateVerticesNil creates multiple documents with a nil documents input. +func TestCreateVerticesNil(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "vertices_test", nil, t) + g := ensureGraph(ctx, db, "create_vertices_nil_test", nil, t) + vc := ensureVertexCollection(ctx, g, "rivers", t) + if _, _, err := vc.CreateDocuments(nil, nil); !driver.IsInvalidArgument(err) { + t.Errorf("Expected InvalidArgumentError, got %s", describe(err)) + } +} + +// TestCreateVerticesNonSlice creates multiple documents with a non-slice documents input. 
+func TestCreateVerticesNonSlice(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "vertices_test", nil, t) + g := ensureGraph(ctx, db, "create_vertices_nonSlice_test", nil, t) + vc := ensureVertexCollection(ctx, g, "failures", t) + + var obj UserDoc + if _, _, err := vc.CreateDocuments(nil, &obj); !driver.IsInvalidArgument(err) { + t.Errorf("Expected InvalidArgumentError, got %s", describe(err)) + } + var m map[string]interface{} + if _, _, err := vc.CreateDocuments(nil, &m); !driver.IsInvalidArgument(err) { + t.Errorf("Expected InvalidArgumentError, got %s", describe(err)) + } +} diff --git a/deps/github.com/arangodb/go-driver/test/vertices_import_test.go b/deps/github.com/arangodb/go-driver/test/vertices_import_test.go new file mode 100644 index 000000000..cc3edfb74 --- /dev/null +++ b/deps/github.com/arangodb/go-driver/test/vertices_import_test.go @@ -0,0 +1,586 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "context" + "testing" + + driver "github.com/arangodb/go-driver" +) + +// TestImportVerticesWithKeys imports documents and then checks that it exists. 
+func TestImportVerticesWithKeys(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "vertices_test", nil, t) + g := ensureGraph(ctx, db, "import_vertices_test", nil, t) + col := ensureVertexCollection(nil, g, "import_withKeys_test", t) + docs := []UserDocWithKey{ + UserDocWithKey{ + "jan", + "Jan", + 40, + }, + UserDocWithKey{ + "foo", + "Foo", + 41, + }, + UserDocWithKey{ + "frank", + "Frank", + 42, + }, + } + + var raw []byte + ctx = driver.WithRawResponse(ctx, &raw) + stats, err := col.ImportDocuments(ctx, docs, nil) + if err != nil { + t.Fatalf("Failed to import documents: %s", describe(err)) + } else { + if stats.Created != int64(len(docs)) { + t.Errorf("Expected %d created documents, got %d (json %s)", len(docs), stats.Created, formatRawResponse(raw)) + } + if stats.Errors != 0 { + t.Errorf("Expected %d error documents, got %d (json %s)", 0, stats.Errors, formatRawResponse(raw)) + } + if stats.Empty != 0 { + t.Errorf("Expected %d empty documents, got %d (json %s)", 0, stats.Empty, formatRawResponse(raw)) + } + } +} + +// TestImportVerticesWithoutKeys imports documents and then checks that it exists. 
+func TestImportVerticesWithoutKeys(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "vertices_test", nil, t) + g := ensureGraph(ctx, db, "import_vertices_test", nil, t) + col := ensureVertexCollection(nil, g, "import_withoutKeys_test", t) + docs := []UserDoc{ + UserDoc{ + "Jan", + 40, + }, + UserDoc{ + "Foo", + 41, + }, + UserDoc{ + "Frank", + 42, + }, + } + + var raw []byte + ctx = driver.WithRawResponse(ctx, &raw) + stats, err := col.ImportDocuments(ctx, docs, nil) + if err != nil { + t.Fatalf("Failed to import documents: %s", describe(err)) + } else { + if stats.Created != int64(len(docs)) { + t.Errorf("Expected %d created documents, got %d (json %s)", len(docs), stats.Created, formatRawResponse(raw)) + } + if stats.Errors != 0 { + t.Errorf("Expected %d error documents, got %d (json %s)", 0, stats.Errors, formatRawResponse(raw)) + } + if stats.Empty != 0 { + t.Errorf("Expected %d empty documents, got %d (json %s)", 0, stats.Empty, formatRawResponse(raw)) + } + } +} + +// TestImportVerticesEmptyEntries imports documents and then checks that it exists. 
+func TestImportVerticesEmptyEntries(t *testing.T) { + if getContentTypeFromEnv(t) == driver.ContentTypeVelocypack { + t.Skip("Not supported on vpack") + } + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "vertices_test", nil, t) + g := ensureGraph(ctx, db, "import_vertices_test", nil, t) + col := ensureVertexCollection(nil, g, "import_emptyEntries_test", t) + docs := []*UserDocWithKey{ + &UserDocWithKey{ + "jan", + "Jan", + 40, + }, + &UserDocWithKey{ + "foo", + "Foo", + 41, + }, + nil, + &UserDocWithKey{ + "frank", + "Frank", + 42, + }, + } + + var raw []byte + ctx = driver.WithRawResponse(ctx, &raw) + stats, err := col.ImportDocuments(ctx, docs, nil) + if err != nil { + t.Fatalf("Failed to import documents: %s", describe(err)) + } else { + if stats.Created != int64(len(docs))-1 { + t.Errorf("Expected %d created documents, got %d (json %s)", len(docs)-1, stats.Created, formatRawResponse(raw)) + } + if stats.Errors != 0 { + t.Errorf("Expected %d error documents, got %d (json %s)", 0, stats.Errors, formatRawResponse(raw)) + } + if stats.Empty != 1 { + t.Errorf("Expected %d empty documents, got %d (json %s)", 1, stats.Empty, formatRawResponse(raw)) + } + } +} + +// TestImportVerticesInvalidEntries imports documents and then checks that it exists. 
+func TestImportVerticesInvalidEntries(t *testing.T) { + if getContentTypeFromEnv(t) == driver.ContentTypeVelocypack { + t.Skip("Not supported on vpack") + } + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "vertices_test", nil, t) + g := ensureGraph(ctx, db, "import_vertices_test", nil, t) + col := ensureVertexCollection(nil, g, "import_invalidEntries_test", t) + docs := []interface{}{ + &UserDocWithKey{ + "jan", + "Jan", + 40, + }, + []string{"array", "is", "invalid"}, + &UserDocWithKey{ + "foo", + "Foo", + 41, + }, + "string is not valid", + nil, + &UserDocWithKey{ + "frank", + "Frank", + 42, + }, + } + + var raw []byte + ctx = driver.WithRawResponse(ctx, &raw) + stats, err := col.ImportDocuments(ctx, docs, nil) + if err != nil { + t.Fatalf("Failed to import documents: %s", describe(err)) + } else { + if stats.Created != int64(len(docs))-3 { + t.Errorf("Expected %d created documents, got %d (json %s)", len(docs)-3, stats.Created, formatRawResponse(raw)) + } + if stats.Errors != 2 { + t.Errorf("Expected %d error documents, got %d (json %s)", 2, stats.Errors, formatRawResponse(raw)) + } + if stats.Empty != 1 { + t.Errorf("Expected %d empty documents, got %d (json %s)", 1, stats.Empty, formatRawResponse(raw)) + } + } +} + +// TestImportVerticesDuplicateEntries imports documents and then checks that it exists. 
+func TestImportVerticesDuplicateEntries(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "vertices_test", nil, t) + g := ensureGraph(ctx, db, "import_vertices_test", nil, t) + col := ensureVertexCollection(nil, g, "import_duplicateEntries_test", t) + docs := []interface{}{ + &UserDocWithKey{ + "jan", + "Jan", + 40, + }, + &UserDocWithKey{ + "jan", + "Jan", + 40, + }, + } + + var raw []byte + ctx = driver.WithRawResponse(ctx, &raw) + stats, err := col.ImportDocuments(ctx, docs, nil) + if err != nil { + t.Fatalf("Failed to import documents: %s", describe(err)) + } else { + if stats.Created != 1 { + t.Errorf("Expected %d created documents, got %d (json %s)", 1, stats.Created, formatRawResponse(raw)) + } + if stats.Errors != 1 { + t.Errorf("Expected %d error documents, got %d (json %s)", 1, stats.Errors, formatRawResponse(raw)) + } + if stats.Empty != 0 { + t.Errorf("Expected %d empty documents, got %d (json %s)", 0, stats.Empty, formatRawResponse(raw)) + } + if stats.Updated != 0 { + t.Errorf("Expected %d updated documents, got %d (json %s)", 0, stats.Updated, formatRawResponse(raw)) + } + if stats.Ignored != 0 { + t.Errorf("Expected %d ignored documents, got %d (json %s)", 0, stats.Ignored, formatRawResponse(raw)) + } + } +} + +// TestImportVerticesDuplicateEntriesComplete imports documents and then checks that it exists. 
+func TestImportVerticesDuplicateEntriesComplete(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "vertices_test", nil, t) + g := ensureGraph(ctx, db, "import_vertices_test", nil, t) + col := ensureVertexCollection(nil, g, "import_duplicateEntriesComplete_test", t) + docs := []interface{}{ + &UserDocWithKey{ + "jan", + "Jan", + 40, + }, + &UserDocWithKey{ + "jan", + "Jan", + 40, + }, + } + + var raw []byte + ctx = driver.WithRawResponse(ctx, &raw) + if _, err := col.ImportDocuments(ctx, docs, &driver.ImportDocumentOptions{ + Complete: true, + }); !driver.IsConflict(err) { + t.Errorf("Expected ConflictError, got %s", describe(err)) + } +} + +// TestImportVerticesDuplicateEntriesUpdate imports documents and then checks that it exists. +func TestImportVerticesDuplicateEntriesUpdate(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "vertices_test", nil, t) + g := ensureGraph(ctx, db, "import_vertices_test", nil, t) + col := ensureVertexCollection(nil, g, "import_duplicateEntriesUpdate_test", t) + docs := []interface{}{ + &UserDocWithKey{ + "jan", + "Jan", + 40, + }, + map[string]interface{}{ + "_key": "jan", + "name": "Jan2", + }, + } + + var raw []byte + ctx = driver.WithRawResponse(ctx, &raw) + stats, err := col.ImportDocuments(ctx, docs, &driver.ImportDocumentOptions{ + OnDuplicate: driver.ImportOnDuplicateUpdate, + }) + if err != nil { + t.Fatalf("Failed to import documents: %s", describe(err)) + } else { + if stats.Created != 1 { + t.Errorf("Expected %d created documents, got %d (json %s)", 1, stats.Created, formatRawResponse(raw)) + } + if stats.Errors != 0 { + t.Errorf("Expected %d error documents, got %d (json %s)", 0, stats.Errors, formatRawResponse(raw)) + } + if stats.Empty != 0 { + t.Errorf("Expected %d empty documents, got %d (json %s)", 0, stats.Empty, formatRawResponse(raw)) + } + if stats.Updated != 1 { + t.Errorf("Expected %d 
updated documents, got %d (json %s)", 1, stats.Updated, formatRawResponse(raw)) + } + if stats.Ignored != 0 { + t.Errorf("Expected %d ignored documents, got %d (json %s)", 0, stats.Ignored, formatRawResponse(raw)) + } + + var user UserDocWithKey + if _, err := col.ReadDocument(nil, "jan", &user); err != nil { + t.Errorf("ReadDocument failed: %s", describe(err)) + } else { + if user.Name != "Jan2" { + t.Errorf("Expected Name to be 'Jan2', got '%s'", user.Name) + } + if user.Age != 40 { + t.Errorf("Expected Age to be 40, got %d", user.Age) + } + } + } +} + +// TestImportVerticesDuplicateEntriesReplace imports documents and then checks that it exists. +func TestImportVerticesDuplicateEntriesReplace(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "vertices_test", nil, t) + g := ensureGraph(ctx, db, "import_vertices_test", nil, t) + col := ensureVertexCollection(nil, g, "import_duplicateEntriesReplace_test", t) + docs := []interface{}{ + &UserDocWithKey{ + "jan", + "Jan", + 40, + }, + map[string]interface{}{ + "_key": "jan", + "name": "Jan2", + }, + } + + var raw []byte + ctx = driver.WithRawResponse(ctx, &raw) + stats, err := col.ImportDocuments(ctx, docs, &driver.ImportDocumentOptions{ + OnDuplicate: driver.ImportOnDuplicateReplace, + }) + if err != nil { + t.Fatalf("Failed to import documents: %s", describe(err)) + } else { + if stats.Created != 1 { + t.Errorf("Expected %d created documents, got %d (json %s)", 1, stats.Created, formatRawResponse(raw)) + } + if stats.Errors != 0 { + t.Errorf("Expected %d error documents, got %d (json %s)", 0, stats.Errors, formatRawResponse(raw)) + } + if stats.Empty != 0 { + t.Errorf("Expected %d empty documents, got %d (json %s)", 0, stats.Empty, formatRawResponse(raw)) + } + if stats.Updated != 1 { + t.Errorf("Expected %d updated documents, got %d (json %s)", 1, stats.Updated, formatRawResponse(raw)) + } + if stats.Ignored != 0 { + t.Errorf("Expected %d ignored 
documents, got %d (json %s)", 0, stats.Ignored, formatRawResponse(raw)) + } + + var user UserDocWithKey + if _, err := col.ReadDocument(nil, "jan", &user); err != nil { + t.Errorf("ReadDocument failed: %s", describe(err)) + } else { + if user.Name != "Jan2" { + t.Errorf("Expected Name to be 'Jan2', got '%s'", user.Name) + } + if user.Age != 0 { + t.Errorf("Expected Age to be 0, got %d", user.Age) + } + } + } +} + +// TestImportVerticesDuplicateEntriesIgnore imports documents and then checks that it exists. +func TestImportVerticesDuplicateEntriesIgnore(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "vertices_test", nil, t) + g := ensureGraph(ctx, db, "import_vertices_test", nil, t) + col := ensureVertexCollection(nil, g, "import_duplicateEntriesIgnore_test", t) + docs := []interface{}{ + &UserDocWithKey{ + "jan", + "Jan", + 40, + }, + map[string]interface{}{ + "_key": "jan", + "name": "Jan2", + }, + } + + var raw []byte + ctx = driver.WithRawResponse(ctx, &raw) + stats, err := col.ImportDocuments(ctx, docs, &driver.ImportDocumentOptions{ + OnDuplicate: driver.ImportOnDuplicateIgnore, + }) + if err != nil { + t.Fatalf("Failed to import documents: %s", describe(err)) + } else { + if stats.Created != 1 { + t.Errorf("Expected %d created documents, got %d (json %s)", 1, stats.Created, formatRawResponse(raw)) + } + if stats.Errors != 0 { + t.Errorf("Expected %d error documents, got %d (json %s)", 0, stats.Errors, formatRawResponse(raw)) + } + if stats.Empty != 0 { + t.Errorf("Expected %d empty documents, got %d (json %s)", 0, stats.Empty, formatRawResponse(raw)) + } + if stats.Updated != 0 { + t.Errorf("Expected %d updated documents, got %d (json %s)", 0, stats.Updated, formatRawResponse(raw)) + } + if stats.Ignored != 1 { + t.Errorf("Expected %d ignored documents, got %d (json %s)", 1, stats.Ignored, formatRawResponse(raw)) + } + + var user UserDocWithKey + if _, err := col.ReadDocument(nil, "jan", 
&user); err != nil { + t.Errorf("ReadDocument failed: %s", describe(err)) + } else { + if user.Name != "Jan" { + t.Errorf("Expected Name to be 'Jan', got '%s'", user.Name) + } + if user.Age != 40 { + t.Errorf("Expected Age to be 40, got %d", user.Age) + } + } + } +} + +// TestImportVerticesDetails imports documents and then checks that it exists. +func TestImportVerticesDetails(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "vertices_test", nil, t) + g := ensureGraph(ctx, db, "import_vertices_test", nil, t) + col := ensureVertexCollection(nil, g, "import_details_test", t) + docs := []interface{}{ + &UserDocWithKey{ + "jan", + "Jan", + 40, + }, + map[string]interface{}{ + "_key": "jan", + "name": "Jan2", + }, + } + + var raw []byte + var details []string + ctx = driver.WithImportDetails(driver.WithRawResponse(ctx, &raw), &details) + stats, err := col.ImportDocuments(ctx, docs, nil) + if err != nil { + t.Fatalf("Failed to import documents: %s", describe(err)) + } else { + if stats.Created != 1 { + t.Errorf("Expected %d created documents, got %d (json %s)", 1, stats.Created, formatRawResponse(raw)) + } + if stats.Errors != 1 { + t.Errorf("Expected %d error documents, got %d (json %s)", 1, stats.Errors, formatRawResponse(raw)) + } + if stats.Empty != 0 { + t.Errorf("Expected %d empty documents, got %d (json %s)", 0, stats.Empty, formatRawResponse(raw)) + } + if stats.Updated != 0 { + t.Errorf("Expected %d updated documents, got %d (json %s)", 0, stats.Updated, formatRawResponse(raw)) + } + if stats.Ignored != 0 { + t.Errorf("Expected %d ignored documents, got %d (json %s)", 0, stats.Ignored, formatRawResponse(raw)) + } + + detailsExpected := `at position 1: creating document failed with error 'unique constraint violated', offending document: {"_key":"jan","name":"Jan2"}` + if len(details) != 1 { + t.Errorf("Expected 1 details, to %d", len(details)) + } else if details[0] != detailsExpected { + 
t.Errorf("Expected details[0] to be '%s', got '%s'", detailsExpected, details[0]) + } + } +} + +// TestImportVerticesOverwriteYes imports documents and then checks that it exists. +func TestImportVerticesOverwriteYes(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "vertices_test", nil, t) + g := ensureGraph(ctx, db, "import_vertices_test", nil, t) + col := ensureVertexCollection(nil, g, "import_overwriteYes_test", t) + docs := []interface{}{ + &UserDoc{ + "Jan", + 40, + }, + map[string]interface{}{ + "name": "Jan2", + }, + } + + for i := 0; i < 3; i++ { + var raw []byte + var details []string + ctx := driver.WithImportDetails(driver.WithRawResponse(nil, &raw), &details) + stats, err := col.ImportDocuments(ctx, docs, &driver.ImportDocumentOptions{ + Overwrite: true, + }) + if err != nil { + t.Fatalf("Failed to import documents: %s", describe(err)) + } else { + if stats.Created != 2 { + t.Errorf("Expected %d created documents, got %d (json %s)", 2, stats.Created, formatRawResponse(raw)) + } + } + + countExpected := int64(2) + if count, err := col.Count(nil); err != nil { + t.Errorf("Failed to count documents: %s", describe(err)) + } else if count != countExpected { + t.Errorf("Expected count to be %d in round %d, got %d", countExpected, i, count) + } + } +} + +// TestImportVerticesOverwriteNo imports documents and then checks that it exists. 
+func TestImportVerticesOverwriteNo(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "vertices_test", nil, t) + g := ensureGraph(ctx, db, "import_vertices_test", nil, t) + col := ensureVertexCollection(nil, g, "import_overwriteNo_test", t) + docs := []interface{}{ + &UserDoc{ + "Jan", + 40, + }, + map[string]interface{}{ + "name": "Jan2", + }, + } + + for i := 0; i < 3; i++ { + var raw []byte + var details []string + ctx := driver.WithImportDetails(driver.WithRawResponse(nil, &raw), &details) + stats, err := col.ImportDocuments(ctx, docs, &driver.ImportDocumentOptions{ + Overwrite: false, + }) + if err != nil { + t.Fatalf("Failed to import documents: %s", describe(err)) + } else { + if stats.Created != 2 { + t.Errorf("Expected %d created documents, got %d (json %s)", 2, stats.Created, formatRawResponse(raw)) + } + } + + countExpected := int64(2 * (i + 1)) + if count, err := col.Count(nil); err != nil { + t.Errorf("Failed to count documents: %s", describe(err)) + } else if count != countExpected { + t.Errorf("Expected count to be %d in round %d, got %d", countExpected, i, count) + } + } +} diff --git a/deps/github.com/arangodb/go-driver/test/vertices_remove_test.go b/deps/github.com/arangodb/go-driver/test/vertices_remove_test.go new file mode 100644 index 000000000..a517d43b3 --- /dev/null +++ b/deps/github.com/arangodb/go-driver/test/vertices_remove_test.go @@ -0,0 +1,230 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Copyright holder is ArangoDB GmbH, Cologne, Germany
+//
+// Author Ewout Prangsma
+//
+
+package test
+
+import (
+	"context"
+	"testing"
+
+	driver "github.com/arangodb/go-driver"
+)
+
+// TestRemoveVertices creates documents, removes them and then checks the removal has succeeded.
+func TestRemoveVertices(t *testing.T) {
+	var ctx context.Context
+	c := createClientFromEnv(t, true)
+	db := ensureDatabase(ctx, c, "vertices_test", nil, t)
+	g := ensureGraph(ctx, db, "remove_vertices_test", nil, t)
+	vc := ensureVertexCollection(ctx, g, "places", t)
+
+	docs := []Book{
+		Book{
+			Title: "For reading",
+		},
+		Book{
+			Title: "For sleeping",
+		},
+		Book{
+			Title: "For carrying monitors",
+		},
+	}
+	metas, errs, err := vc.CreateDocuments(ctx, docs)
+	if err != nil {
+		t.Fatalf("Failed to create new documents: %s", describe(err))
+	} else if err := errs.FirstNonNil(); err != nil {
+		t.Fatalf("Expected no errors, got first: %s", describe(err))
+	}
+	if _, _, err := vc.RemoveDocuments(ctx, metas.Keys()); err != nil {
+		t.Fatalf("Failed to remove documents: %s", describe(err))
+	}
+	// Should no longer exist
+	for i, meta := range metas {
+		var readDoc Book
+		if _, err := vc.ReadDocument(ctx, meta.Key, &readDoc); !driver.IsNotFound(err) {
+			t.Fatalf("Expected NotFoundError at %d, got %s", i, describe(err))
+		}
+	}
+}
+
+// TestRemoveVerticesReturnOld creates documents, removes them and checks the ReturnOld value.
+func TestRemoveVerticesReturnOld(t *testing.T) { + var ctx context.Context + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "vertices_test", nil, t) + skipBelowVersion(c, "3.4", t) // See https://github.com/arangodb/arangodb/issues/2365 + g := ensureGraph(ctx, db, "remove_vertices_returnOld_test", nil, t) + vc := ensureVertexCollection(ctx, g, "books", t) + + docs := []Book{ + Book{ + Title: "For reading", + }, + Book{ + Title: "For sleeping", + }, + Book{ + Title: "For carrying monitors", + }, + } + metas, errs, err := vc.CreateDocuments(ctx, docs) + if err != nil { + t.Fatalf("Failed to create new document: %s", describe(err)) + } else if err := errs.FirstNonNil(); err != nil { + t.Fatalf("Expected no errors, got first: %s", describe(err)) + } + oldDocs := make([]Book, len(docs)) + ctx = driver.WithReturnOld(ctx, oldDocs) + _, errs, err = vc.RemoveDocuments(ctx, metas.Keys()) + if err != nil { + t.Fatalf("Failed to remove documents: %s", describe(err)) + } + // Check errors + for i, err := range errs { + if !driver.IsInvalidArgument(err) { + t.Fatalf("Expected InvalidArgumentError at %d, got %s", i, describe(err)) + } + } +} + +// TestRemoveVerticesSilent creates documents, removes them with Silent() and then checks the meta is indeed empty. 
+func TestRemoveVerticesSilent(t *testing.T) {
+	var ctx context.Context
+	c := createClientFromEnv(t, true)
+	db := ensureDatabase(ctx, c, "vertices_test", nil, t)
+	g := ensureGraph(ctx, db, "remove_vertices_silent_test", nil, t)
+	vc := ensureVertexCollection(ctx, g, "silence", t)
+
+	docs := []Book{
+		Book{
+			Title: "Sleepy",
+		},
+		Book{
+			Title: "Sleeping",
+		},
+	}
+	metas, errs, err := vc.CreateDocuments(ctx, docs)
+	if err != nil {
+		t.Fatalf("Failed to create new documents: %s", describe(err))
+	} else if err := errs.FirstNonNil(); err != nil {
+		t.Fatalf("Expected no errors, got first: %s", describe(err))
+	}
+	ctx = driver.WithSilent(ctx)
+	if rmetas, rerrs, err := vc.RemoveDocuments(ctx, metas.Keys()); err != nil {
+		t.Fatalf("Failed to remove documents: %s", describe(err))
+	} else {
+		if len(rmetas) > 0 {
+			t.Errorf("Expected empty metas, got %d", len(rmetas))
+		}
+		if len(rerrs) > 0 {
+			t.Errorf("Expected empty errors, got %d", len(rerrs))
+		}
+	}
+	// Should no longer exist
+	for i, meta := range metas {
+		var readDoc Book
+		if _, err := vc.ReadDocument(ctx, meta.Key, &readDoc); !driver.IsNotFound(err) {
+			t.Errorf("Expected NotFoundError at %d, got %s", i, describe(err))
+		}
+	}
+}
+
+// TestRemoveVerticesRevision creates documents, then removes them with incorrect revisions.
+func TestRemoveVerticesRevision(t *testing.T) {
+	var ctx context.Context
+	c := createClientFromEnv(t, true)
+	db := ensureDatabase(ctx, c, "vertices_test", nil, t)
+	g := ensureGraph(ctx, db, "remove_vertices_revision_test", nil, t)
+	vc := ensureVertexCollection(ctx, g, "books", t)
+
+	docs := []Book{
+		Book{
+			Title: "Old",
+		},
+		Book{
+			Title: "New",
+		},
+	}
+	metas, errs, err := vc.CreateDocuments(ctx, docs)
+	if err != nil {
+		t.Fatalf("Failed to create new documents: %s", describe(err))
+	} else if err := errs.FirstNonNil(); err != nil {
+		t.Fatalf("Expected no errors, got first: %s", describe(err))
+	}
+
+	// Replace the documents to get another revision
+	replacements := []UserDoc{
+		UserDoc{
+			Name: "Anna",
+		},
+		UserDoc{
+			Name: "Nicole",
+		},
+	}
+	metas2, errs2, err := vc.ReplaceDocuments(ctx, metas.Keys(), replacements)
+	if err != nil {
+		t.Fatalf("Failed to replace documents: %s", describe(err))
+	} else if err := errs2.FirstNonNil(); err != nil {
+		t.Fatalf("Expected no errors, got first: %s", describe(err))
+	}
+
+	// Try to remove documents with initial revision (must fail)
+	initialRevCtx := driver.WithRevisions(ctx, metas.Revs())
+	if _, errs, err := vc.RemoveDocuments(initialRevCtx, metas.Keys()); err != nil {
+		t.Fatalf("Expected success, got %s", describe(err))
+	} else {
+		for i, err := range errs {
+			if !driver.IsPreconditionFailed(err) {
+				t.Errorf("Expected PreconditionFailedError at %d, got %s", i, describe(err))
+			}
+		}
+	}
+
+	// Try to remove documents with correct revision (must succeed)
+	replacedRevCtx := driver.WithRevisions(ctx, metas2.Revs())
+	if _, errs, err := vc.RemoveDocuments(replacedRevCtx, metas.Keys()); err != nil {
+		t.Fatalf("Expected success, got %s", describe(err))
+	} else if err := errs.FirstNonNil(); err != nil {
+		t.Fatalf("Expected no errors, got first: %s", describe(err))
+	}
+
+	// Should no longer exist
+	for i, meta := range metas {
+		var readDoc Book
+		if _, err := vc.ReadDocument(ctx, meta.Key, &readDoc); !driver.IsNotFound(err) {
+			t.Errorf("Expected NotFoundError at %d, got %s", i, describe(err))
+		}
+	}
+}
+
+// TestRemoveVerticesKeyEmpty removes a document with an empty key.
+func TestRemoveVerticesKeyEmpty(t *testing.T) {
+	var ctx context.Context
+	c := createClientFromEnv(t, true)
+	db := ensureDatabase(ctx, c, "vertices_test", nil, t)
+	g := ensureGraph(ctx, db, "remove_vertices_keyEmpty_test", nil, t)
+	vc := ensureVertexCollection(ctx, g, "failures", t)
+
+	if _, _, err := vc.RemoveDocuments(nil, []string{""}); !driver.IsInvalidArgument(err) {
+		t.Errorf("Expected InvalidArgumentError, got %s", describe(err))
+	}
+}
diff --git a/deps/github.com/arangodb/go-driver/test/vertices_replace_test.go b/deps/github.com/arangodb/go-driver/test/vertices_replace_test.go
new file mode 100644
index 000000000..a618821a0
--- /dev/null
+++ b/deps/github.com/arangodb/go-driver/test/vertices_replace_test.go
@@ -0,0 +1,333 @@
+//
+// DISCLAIMER
+//
+// Copyright 2017 ArangoDB GmbH, Cologne, Germany
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Copyright holder is ArangoDB GmbH, Cologne, Germany
+//
+// Author Ewout Prangsma
+//
+
+package test
+
+import (
+	"context"
+	"reflect"
+	"strings"
+	"testing"
+
+	driver "github.com/arangodb/go-driver"
+)
+
+// TestReplaceVertices creates documents, replaces them and then checks the replacements have succeeded.
+func TestReplaceVertices(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "vertices_test", nil, t) + g := ensureGraph(ctx, db, "replace_vertices_test", nil, t) + vc := ensureVertexCollection(ctx, g, "male", t) + + docs := []UserDoc{ + UserDoc{ + Name: "Bob", + }, + UserDoc{ + Name: "Joe", + }, + } + metas, errs, err := vc.CreateDocuments(ctx, docs) + if err != nil { + t.Fatalf("Failed to create new document: %s", describe(err)) + } else if err := errs.FirstNonNil(); err != nil { + t.Fatalf("Expected no errors, got first: %s", describe(err)) + } + // Replacement docs + replacements := []Book{ + Book{ + Title: "For bob", + }, + Book{ + Title: "For joe", + }, + } + if _, _, err := vc.ReplaceDocuments(ctx, metas.Keys(), replacements); err != nil { + t.Fatalf("Failed to replace documents: %s", describe(err)) + } + // Read replaced documents + for i, meta := range metas { + var readDoc Book + if _, err := vc.ReadDocument(ctx, meta.Key, &readDoc); err != nil { + t.Fatalf("Failed to read document '%s': %s", meta.Key, describe(err)) + } + if !reflect.DeepEqual(replacements[i], readDoc) { + t.Errorf("Got wrong document %d. Expected %+v, got %+v", i, replacements[i], readDoc) + } + } +} + +// TestReplaceVerticesReturnOld creates documents, replaces them checks the ReturnOld values. 
+func TestReplaceVerticesReturnOld(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + skipBelowVersion(c, "3.4", t) // See https://github.com/arangodb/arangodb/issues/2365 + db := ensureDatabase(ctx, c, "vertices_test", nil, t) + g := ensureGraph(ctx, db, "replace_vertices_returnOld_test", nil, t) + vc := ensureVertexCollection(ctx, g, "pensions", t) + + docs := []UserDoc{ + UserDoc{ + Name: "Bob", + }, + UserDoc{ + Name: "Joe", + }, + } + metas, errs, err := vc.CreateDocuments(ctx, docs) + if err != nil { + t.Fatalf("Failed to create new documents: %s", describe(err)) + } else if err := errs.FirstNonNil(); err != nil { + t.Fatalf("Expected no errors, got first: %s", describe(err)) + } + // Replace documents + replacements := []Book{ + Book{ + Title: "For bob", + }, + Book{ + Title: "For joe", + }, + } + oldDocs := make([]UserDoc, len(docs)) + ctx = driver.WithReturnOld(ctx, oldDocs) + if _, _, err := vc.ReplaceDocuments(ctx, metas.Keys(), replacements); err != nil { + t.Fatalf("Failed to replace documents: %s", describe(err)) + } + // Check old document + for i, doc := range docs { + if !reflect.DeepEqual(doc, oldDocs[i]) { + t.Errorf("Got wrong document %d. Expected %+v, got %+v", i, doc, oldDocs[i]) + } + } +} + +// TestReplaceVerticesReturnNew creates documents, replaces them checks the ReturnNew values. 
+func TestReplaceVerticesReturnNew(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + skipBelowVersion(c, "3.4", t) // See https://github.com/arangodb/arangodb/issues/2365 + db := ensureDatabase(ctx, c, "vertices_test", nil, t) + g := ensureGraph(ctx, db, "replace_vertices_returnNew_test", nil, t) + vc := ensureVertexCollection(ctx, g, "books", t) + + docs := []Book{ + Book{ + Title: "For bob", + }, + Book{ + Title: "For joe", + }, + } + metas, errs, err := vc.CreateDocuments(ctx, docs) + if err != nil { + t.Fatalf("Failed to create new documents: %s", describe(err)) + } else if err := errs.FirstNonNil(); err != nil { + t.Fatalf("Expected no errors, got first: %s", describe(err)) + } + // Replace documents + replacements := []Book{ + Book{ + Title: "For the new bob", + }, + Book{ + Title: "For the new joe", + }, + } + newDocs := make([]Book, len(docs)) + ctx = driver.WithReturnNew(ctx, newDocs) + if _, _, err := vc.ReplaceDocuments(ctx, metas.Keys(), replacements); err != nil { + t.Fatalf("Failed to replace documents: %s", describe(err)) + } + // Check new documents + for i, replacement := range replacements { + expected := replacement + if !reflect.DeepEqual(expected, newDocs[i]) { + t.Errorf("Got wrong document %d. Expected %+v, got %+v", i, expected, newDocs[i]) + } + } +} + +// TestReplaceVerticesSilent creates documents, replaces them with Silent() and then checks the meta is indeed empty. 
+func TestReplaceVerticesSilent(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "vertices_test", nil, t) + g := ensureGraph(ctx, db, "replace_vertices_silent_test", nil, t) + vc := ensureVertexCollection(ctx, g, "moments", t) + + docs := []Book{ + Book{ + Title: "Fly me to the moon", + }, + Book{ + Title: "Fly me to the earth", + }, + } + metas, errs, err := vc.CreateDocuments(ctx, docs) + if err != nil { + t.Fatalf("Failed to create new documents: %s", describe(err)) + } else if err := errs.FirstNonNil(); err != nil { + t.Fatalf("Expected no errors, got first: %s", describe(err)) + } + // Replace documents + replacements := []UserDoc{ + UserDoc{ + Name: "Bob", + }, + UserDoc{ + Name: "Christal", + }, + } + ctx = driver.WithSilent(ctx) + if metas, errs, err := vc.ReplaceDocuments(ctx, metas.Keys(), replacements); err != nil { + t.Fatalf("Failed to replace documents: %s", describe(err)) + } else { + if len(errs) > 0 { + t.Errorf("Expected 0 errors, got %d", len(errs)) + } + if len(metas) > 0 { + t.Errorf("Expected 0 metas, got %d", len(metas)) + } + } +} + +// TestReplaceVerticesRevision creates documents, replaces then with a specific (correct) revisions. +// Then it attempts replacements with incorrect revisions which must fail. 
+func TestReplaceVerticesRevision(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "vertices_test", nil, t) + g := ensureGraph(ctx, db, "replace_vertices_revision_test", nil, t) + vc := ensureVertexCollection(ctx, g, "planets", t) + + docs := []Book{ + Book{ + Title: "Pluto", + }, + Book{ + Title: "Mars", + }, + } + metas, errs, err := vc.CreateDocuments(ctx, docs) + if err != nil { + t.Fatalf("Failed to create new documents: %s", describe(err)) + } else if err := errs.FirstNonNil(); err != nil { + t.Fatalf("Expected no errors, got first: %s", describe(err)) + } + + // Replace documents with correct revisions + replacements := []UserDoc{ + UserDoc{ + Name: "Bob", + }, + UserDoc{ + Name: "Christal", + }, + } + initialRevCtx := driver.WithRevisions(ctx, metas.Revs()) + var replacedRevCtx context.Context + if metas2, errs, err := vc.ReplaceDocuments(initialRevCtx, metas.Keys(), replacements); err != nil { + t.Fatalf("Failed to replace documents: %s", describe(err)) + } else if err := errs.FirstNonNil(); err != nil { + t.Fatalf("Expected no errors, got first: %s", describe(err)) + } else { + replacedRevCtx = driver.WithRevisions(ctx, metas2.Revs()) + if strings.Join(metas2.Revs(), ",") == strings.Join(metas.Revs(), ",") { + t.Errorf("Expected revisions to change, got initial revisions '%s', replaced revisions '%s'", strings.Join(metas.Revs(), ","), strings.Join(metas2.Revs(), ",")) + } + } + + // Replace documents with incorrect revision + replacements[0].Name = "Wrong deal 1" + replacements[1].Name = "Wrong deal 2" + if _, errs, err := vc.ReplaceDocuments(initialRevCtx, metas.Keys(), replacements); err != nil { + t.Errorf("Expected success, got %s", describe(err)) + } else { + for i, err := range errs { + if !driver.IsPreconditionFailed(err) { + t.Errorf("Expected PreconditionFailedError at %d, got %s", i, describe(err)) + } + } + } + + // Replace document once more with correct revision + 
replacements[0].Name = "Good deal 1" + replacements[1].Name = "Good deal 2" + if _, errs, err := vc.ReplaceDocuments(replacedRevCtx, metas.Keys(), replacements); err != nil { + t.Errorf("Expected success, got %s", describe(err)) + } else if err := errs.FirstNonNil(); err != nil { + t.Fatalf("Expected no errors, got first: %s", describe(err)) + } +} + +// TestReplaceVerticesKeyEmpty replaces a document it with an empty key. +func TestReplaceVerticesKeyEmpty(t *testing.T) { + ctx := context.TODO() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "vertices_test", nil, t) + g := ensureGraph(ctx, db, "replace_vertices_keyEmpty_test", nil, t) + vc := ensureVertexCollection(ctx, g, "planets", t) + + // Replacement document + replacement := map[string]interface{}{ + "name": "Updated", + } + if _, _, err := vc.ReplaceDocuments(nil, []string{""}, replacement); !driver.IsInvalidArgument(err) { + t.Errorf("Expected InvalidArgumentError, got %s", describe(err)) + } +} + +// TestReplaceVerticesUpdateNil replaces a document it with a nil update. +func TestReplaceVerticesUpdateNil(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "vertices_test", nil, t) + g := ensureGraph(ctx, db, "replace_vertices_updateNil_test", nil, t) + vc := ensureVertexCollection(ctx, g, "relations", t) + + if _, _, err := vc.ReplaceDocuments(nil, []string{"validKey"}, nil); !driver.IsInvalidArgument(err) { + t.Errorf("Expected InvalidArgumentError, got %s", describe(err)) + } +} + +// TestReplaceVerticesUpdateLenDiff replacements documents with a different number of documents, keys. 
+func TestReplaceVerticesUpdateLenDiff(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "vertices_test", nil, t) + g := ensureGraph(ctx, db, "replace_vertices_updateNil_test", nil, t) + vc := ensureVertexCollection(ctx, g, "failures", t) + + replacements := []map[string]interface{}{ + map[string]interface{}{ + "name": "name1", + }, + map[string]interface{}{ + "name": "name2", + }, + } + if _, _, err := vc.ReplaceDocuments(nil, []string{"only1"}, replacements); !driver.IsInvalidArgument(err) { + t.Errorf("Expected InvalidArgumentError, got %s", describe(err)) + } +} diff --git a/deps/github.com/arangodb/go-driver/test/vertices_update_test.go b/deps/github.com/arangodb/go-driver/test/vertices_update_test.go new file mode 100644 index 000000000..007647c5b --- /dev/null +++ b/deps/github.com/arangodb/go-driver/test/vertices_update_test.go @@ -0,0 +1,466 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "context" + "fmt" + "reflect" + "strings" + "testing" + + driver "github.com/arangodb/go-driver" +) + +// TestUpdateVertices creates documents, updates them and then checks the updates have succeeded. 
+func TestUpdateVertices(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "vertices_test", nil, t) + g := ensureGraph(ctx, db, "update_vertices_test", nil, t) + ec := ensureVertexCollection(ctx, g, "relations", t) + + docs := []UserDoc{ + UserDoc{ + Name: "Bob", + }, + UserDoc{ + Name: "Anna", + }, + } + metas, errs, err := ec.CreateDocuments(ctx, docs) + if err != nil { + t.Fatalf("Failed to create new documents: %s", describe(err)) + } else if err := errs.FirstNonNil(); err != nil { + t.Fatalf("Expected no errors, got first: %s", describe(err)) + } + // Update documents + updates := []map[string]interface{}{ + map[string]interface{}{ + "name": "Updated1", + }, + map[string]interface{}{ + "name": "Updated2", + }, + } + if _, _, err := ec.UpdateDocuments(ctx, metas.Keys(), updates); err != nil { + t.Fatalf("Failed to update documents: %s", describe(err)) + } + // Read updated documents + for i, meta := range metas { + var readDoc UserDoc + if _, err := ec.ReadDocument(ctx, meta.Key, &readDoc); err != nil { + t.Fatalf("Failed to read document '%s': %s", meta.Key, describe(err)) + } + doc := docs[i] + doc.Name = fmt.Sprintf("Updated%d", i+1) + if !reflect.DeepEqual(doc, readDoc) { + t.Errorf("Got wrong document %d. Expected %+v, got %+v", i, doc, readDoc) + } + } +} + +// TestUpdateVerticesReturnOld creates documents, updates them checks the ReturnOld values. 
+func TestUpdateVerticesReturnOld(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + skipBelowVersion(c, "3.4", t) // See https://github.com/arangodb/arangodb/issues/2365 + db := ensureDatabase(ctx, c, "vertices_test", nil, t) + g := ensureGraph(ctx, db, "update_vertices_returnOld_test", nil, t) + ec := ensureVertexCollection(ctx, g, "books", t) + + docs := []Book{ + Book{ + Title: "Pinkeltje op de maan", + }, + Book{ + Title: "Pinkeltje in het bos", + }, + } + metas, errs, err := ec.CreateDocuments(ctx, docs) + if err != nil { + t.Fatalf("Failed to create new documents: %s", describe(err)) + } else if err := errs.FirstNonNil(); err != nil { + t.Fatalf("Expected no errors, got first: %s", describe(err)) + } + // Update documents + updates := []map[string]interface{}{ + map[string]interface{}{ + "Title": "Updated1", + }, + map[string]interface{}{ + "Title": "Updated2", + }, + } + oldDocs := make([]Book, len(docs)) + ctx = driver.WithReturnOld(ctx, oldDocs) + if _, _, err := ec.UpdateDocuments(ctx, metas.Keys(), updates); err != nil { + t.Fatalf("Failed to update documents: %s", describe(err)) + } + // Check old documents + for i, doc := range docs { + if !reflect.DeepEqual(doc, oldDocs[i]) { + t.Errorf("Got wrong document %d. Expected %+v, got %+v", i, doc, oldDocs[i]) + } + } +} + +// TestUpdateVerticesReturnNew creates documents, updates them checks the ReturnNew values. 
+func TestUpdateVerticesReturnNew(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + skipBelowVersion(c, "3.4", t) // See https://github.com/arangodb/arangodb/issues/2365 + db := ensureDatabase(ctx, c, "vertices_test", nil, t) + g := ensureGraph(ctx, db, "update_vertices_returnOld_test", nil, t) + ec := ensureVertexCollection(ctx, g, "users", t) + + docs := []UserDoc{ + UserDoc{ + Name: "Tony", + }, + UserDoc{ + Name: "Parker", + }, + } + metas, errs, err := ec.CreateDocuments(ctx, docs) + if err != nil { + t.Fatalf("Failed to create new documents: %s", describe(err)) + } else if err := errs.FirstNonNil(); err != nil { + t.Fatalf("Expected no errors, got first: %s", describe(err)) + } + // Update documents + updates := []map[string]interface{}{ + map[string]interface{}{ + "name": "Updated1", + }, + map[string]interface{}{ + "name": "Updated2", + }, + } + newDocs := make([]UserDoc, len(docs)) + ctx = driver.WithReturnNew(ctx, newDocs) + if _, _, err := ec.UpdateDocuments(ctx, metas.Keys(), updates); err != nil { + t.Fatalf("Failed to update documents: %s", describe(err)) + } + // Check new documents + for i, doc := range docs { + expected := doc + expected.Name = fmt.Sprintf("Updated%d", i+1) + if !reflect.DeepEqual(expected, newDocs[i]) { + t.Errorf("Got wrong document %d. Expected %+v, got %+v", i, expected, newDocs[i]) + } + } +} + +// TestUpdateVerticesKeepNullTrue creates documents, updates them with KeepNull(true) and then checks the updates have succeeded. 
+func TestUpdateVerticesKeepNullTrue(t *testing.T) { + ctx := context.Background() + var conn driver.Connection + c := createClientFromEnv(t, true, &conn) + db := ensureDatabase(ctx, c, "vertices_test", nil, t) + g := ensureGraph(ctx, db, "update_vertices_keepNullTrue_test", nil, t) + ec := ensureVertexCollection(ctx, g, "keepers", t) + + docs := []Account{ + Account{ + ID: "123", + User: &UserDoc{ + "Greata", + 77, + }, + }, + Account{ + ID: "456", + User: &UserDoc{ + "Mathilda", + 45, + }, + }, + } + + metas, errs, err := ec.CreateDocuments(ctx, docs) + if err != nil { + t.Fatalf("Failed to create new documents: %s", describe(err)) + } else if err := errs.FirstNonNil(); err != nil { + t.Fatalf("Expected no errors, got first: %s", describe(err)) + } + + // Update documents + updates := []map[string]interface{}{ + map[string]interface{}{ + "id": "abc", + "user": nil, + }, + map[string]interface{}{ + "id": "def", + "user": nil, + }, + } + if _, _, err := ec.UpdateDocuments(driver.WithKeepNull(ctx, true), metas.Keys(), updates); err != nil { + t.Fatalf("Failed to update documents: %s", describe(err)) + } + // Read updated documents + for i, meta := range metas { + var readDoc map[string]interface{} + var rawResponse []byte + ctx = driver.WithRawResponse(ctx, &rawResponse) + if _, err := ec.ReadDocument(ctx, meta.Key, &readDoc); err != nil { + t.Fatalf("Failed to read document %d '%s': %s", i, meta.Key, describe(err)) + } + // We parse to this type of map, since unmarshalling nil values to a map of type map[string]interface{} + // will cause the entry to be deleted. 
+ var jsonMap map[string]*driver.RawObject + if err := conn.Unmarshal(rawResponse, &jsonMap); err != nil { + t.Fatalf("Failed to parse raw response: %s", describe(err)) + } + // Get "vertex" field and unmarshal it + if raw, found := jsonMap["vertex"]; !found { + t.Errorf("Expected vertex to be found but got not found") + } else { + jsonMap = nil + if err := conn.Unmarshal(*raw, &jsonMap); err != nil { + t.Fatalf("Failed to parse raw vertex object: %s", describe(err)) + } + if raw, found := jsonMap["user"]; !found { + t.Errorf("Expected user to be found but got not found") + } else if raw != nil { + t.Errorf("Expected user to be found and nil, got %s", string(*raw)) + } + } + } +} + +// TestUpdateVerticesKeepNullFalse creates documents, updates them with KeepNull(false) and then checks the updates have succeeded. +func TestUpdateVerticesKeepNullFalse(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "vertices_test", nil, t) + g := ensureGraph(ctx, db, "update_vertices_keepNullFalse_test", nil, t) + ec := ensureVertexCollection(ctx, g, "accounts", t) + + docs := []Account{ + Account{ + ID: "123", + User: &UserDoc{ + "Greata", + 77, + }, + }, + Account{ + ID: "456", + User: &UserDoc{ + "Mathilda", + 45, + }, + }, + } + + metas, errs, err := ec.CreateDocuments(ctx, docs) + if err != nil { + t.Fatalf("Failed to create new documents: %s", describe(err)) + } else if err := errs.FirstNonNil(); err != nil { + t.Fatalf("Expected no errors, got first: %s", describe(err)) + } + // Update document + updates := []map[string]interface{}{ + map[string]interface{}{ + "id": "abc", + "user": nil, + }, + map[string]interface{}{ + "id": "def", + "user": nil, + }, + } + if _, _, err := ec.UpdateDocuments(driver.WithKeepNull(ctx, false), metas.Keys(), updates); err != nil { + t.Fatalf("Failed to update documents: %s", describe(err)) + } + // Read updated documents + for i, meta := range metas { + readDoc := docs[i] + if _, 
err := ec.ReadDocument(ctx, meta.Key, &readDoc); err != nil { + t.Fatalf("Failed to read document '%s': %s", meta.Key, describe(err)) + } + if readDoc.User == nil { + t.Errorf("Expected user to be untouched, got %v", readDoc.User) + } + } +} + +// TestUpdateVerticesSilent creates documents, updates them with Silent() and then checks the metas are indeed empty. +func TestUpdateVerticesSilent(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "vertices_test", nil, t) + g := ensureGraph(ctx, db, "update_vertices_silent_test", nil, t) + ec := ensureVertexCollection(ctx, g, "moments", t) + + docs := []Book{ + Book{ + Title: "Foo", + }, + Book{ + Title: "Oops", + }, + } + metas, _, err := ec.CreateDocuments(ctx, docs) + if err != nil { + t.Fatalf("Failed to create new documents: %s", describe(err)) + } + // Update documents + updates := []map[string]interface{}{ + map[string]interface{}{ + "Title": 61, + }, + map[string]interface{}{ + "Title": 16, + }, + } + ctx = driver.WithSilent(ctx) + if metas, errs, err := ec.UpdateDocuments(ctx, metas.Keys(), updates); err != nil { + t.Fatalf("Failed to update documents: %s", describe(err)) + } else if err := errs.FirstNonNil(); err != nil { + t.Fatalf("Expected no errors, got first: %s", describe(err)) + } else if strings.Join(metas.Keys(), "") != "" { + t.Errorf("Expected empty meta, got %v", metas) + } +} + +// TestUpdateVerticesRevision creates documents, updates them with a specific (correct) revisions. +// Then it attempts an update with an incorrect revisions which must fail. 
+func TestUpdateVerticesRevision(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "vertices_test", nil, t) + g := ensureGraph(ctx, db, "update_vertices_revision_test", nil, t) + ec := ensureVertexCollection(ctx, g, "revisions", t) + + docs := []Book{ + Book{ + Title: "Roman age", + }, + Book{ + Title: "New age", + }, + } + metas, errs, err := ec.CreateDocuments(ctx, docs) + if err != nil { + t.Fatalf("Failed to create new document: %s", describe(err)) + } else if len(metas) != len(docs) { + t.Fatalf("Expected %d metas, got %d", len(docs), len(metas)) + } else if err := errs.FirstNonNil(); err != nil { + t.Fatalf("Expected no errors, got first: %s", describe(err)) + } + + // Update documents with correct revisions + updates := []map[string]interface{}{ + map[string]interface{}{ + "Title": 34, + }, + map[string]interface{}{ + "Title": 77, + }, + } + initialRevCtx := driver.WithRevisions(ctx, metas.Revs()) + var updatedRevCtx context.Context + if metas2, _, err := ec.UpdateDocuments(initialRevCtx, metas.Keys(), updates); err != nil { + t.Fatalf("Failed to update documents: %s", describe(err)) + } else { + updatedRevCtx = driver.WithRevisions(ctx, metas2.Revs()) + if strings.Join(metas2.Revs(), ",") == strings.Join(metas.Revs(), ",") { + t.Errorf("Expected revision to change, got initial revision '%s', updated revision '%s'", strings.Join(metas.Revs(), ","), strings.Join(metas2.Revs(), ",")) + } + } + + // Update documents with incorrect revisions + updates[0]["Title"] = 35 + var rawResponse []byte + if _, errs, err := ec.UpdateDocuments(driver.WithRawResponse(initialRevCtx, &rawResponse), metas.Keys(), updates); err != nil { + t.Errorf("Expected success, got %s", describe(err)) + } else { + for _, err := range errs { + if !driver.IsPreconditionFailed(err) { + t.Errorf("Expected PreconditionFailedError, got %s (resp: %s)", describe(err), string(rawResponse)) + } + } + } + + // Update documents once more 
with correct revisions + updates[0]["Title"] = 36 + if _, _, err := ec.UpdateDocuments(updatedRevCtx, metas.Keys(), updates); err != nil { + t.Errorf("Expected success, got %s", describe(err)) + } +} + +// TestUpdateVerticesKeyEmpty updates documents with an empty key. +func TestUpdateVerticesKeyEmpty(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "vertices_test", nil, t) + g := ensureGraph(ctx, db, "update_vertices_keyEmpty_test", nil, t) + ec := ensureVertexCollection(ctx, g, "lonely", t) + + // Update document + updates := []map[string]interface{}{ + map[string]interface{}{ + "name": "Updated", + }, + } + if _, _, err := ec.UpdateDocuments(nil, []string{""}, updates); !driver.IsInvalidArgument(err) { + t.Errorf("Expected InvalidArgumentError, got %s", describe(err)) + } +} + +// TestUpdateVerticesUpdateNil updates documents it with a nil update. +func TestUpdateVerticesUpdateNil(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "vertices_test", nil, t) + g := ensureGraph(ctx, db, "update_vertices_updateNil_test", nil, t) + ec := ensureVertexCollection(ctx, g, "nilAndSome", t) + + if _, _, err := ec.UpdateDocuments(nil, []string{"validKey"}, nil); !driver.IsInvalidArgument(err) { + t.Errorf("Expected InvalidArgumentError, got %s", describe(err)) + } +} + +// TestUpdateVerticesUpdateLenDiff updates documents with a different number of updates, keys. 
+func TestUpdateVerticesUpdateLenDiff(t *testing.T) { + ctx := context.Background() + c := createClientFromEnv(t, true) + db := ensureDatabase(ctx, c, "vertices_test", nil, t) + g := ensureGraph(ctx, db, "update_vertices_updateLenDiff_test", nil, t) + ec := ensureVertexCollection(ctx, g, "diffs", t) + + updates := []map[string]interface{}{ + map[string]interface{}{ + "name": "name1", + }, + map[string]interface{}{ + "name": "name2", + }, + } + if _, _, err := ec.UpdateDocuments(nil, []string{"only1"}, updates); !driver.IsInvalidArgument(err) { + t.Errorf("Expected InvalidArgumentError, got %s", describe(err)) + } +} diff --git a/deps/github.com/arangodb/go-driver/transaction.go b/deps/github.com/arangodb/go-driver/transaction.go new file mode 100644 index 000000000..c5448c13a --- /dev/null +++ b/deps/github.com/arangodb/go-driver/transaction.go @@ -0,0 +1,53 @@ +package driver + +// TransactionOptions contains options that customize the transaction. +type TransactionOptions struct { + // Transaction size limit in bytes. Honored by the RocksDB storage engine only. + MaxTransactionSize int + + // An optional numeric value that can be used to set a timeout for waiting on collection + // locks. If not specified, a default value will be used. + // Setting lockTimeout to 0 will make ArangoDB not time out waiting for a lock. + LockTimeout *int + + // An optional boolean flag that, if set, will force the transaction to write + // all data to disk before returning. + WaitForSync bool + + // Maximum number of operations after which an intermediate commit is performed + // automatically. Honored by the RocksDB storage engine only. + IntermediateCommitCount *int + + // Optional arguments passed to action. + Params []interface{} + + // Maximum total size of operations after which an intermediate commit is + // performed automatically. Honored by the RocksDB storage engine only. + IntermediateCommitSize *int + + // Collections that the transaction reads from. 
+ ReadCollections []string + + // Collections that the transaction writes to. + WriteCollections []string +} + +type transactionRequest struct { + MaxTransactionSize int `json:"maxTransactionSize"` + LockTimeout *int `json:"lockTimeout,omitempty"` + WaitForSync bool `json:"waitForSync"` + IntermediateCommitCount *int `json:"intermediateCommitCount,omitempty"` + Params []interface{} `json:"params"` + IntermediateCommitSize *int `json:"intermediateCommitSize,omitempty"` + Action string `json:"action"` + Collections transactionCollectionsRequest `json:"collections"` +} + +type transactionCollectionsRequest struct { + Read []string `json:"read,omitempty"` + Write []string `json:"write,omitempty"` +} + +type transactionResponse struct { + Result interface{} `json:"result"` +} diff --git a/deps/github.com/arangodb/go-driver/user.go b/deps/github.com/arangodb/go-driver/user.go new file mode 100644 index 000000000..987628a97 --- /dev/null +++ b/deps/github.com/arangodb/go-driver/user.go @@ -0,0 +1,128 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import "context" + +// User provides access to a single user of a single server / cluster of servers. +type User interface { + // Name returns the name of the user. + Name() string + + // Is this an active user? 
+ IsActive() bool + + // Is a password change for this user needed? + IsPasswordChangeNeeded() bool + + // Get extra information about this user that was passed during its creation/update/replacement + Extra(result interface{}) error + + // Remove removes the user. + // If the user does not exist, a NotFoundError is returned. + Remove(ctx context.Context) error + + // Update updates individual properties of the user. + // If the user does not exist, a NotFoundError is returned. + Update(ctx context.Context, options UserOptions) error + + // Replace replaces all properties of the user. + // If the user does not exist, a NotFoundError is returned. + Replace(ctx context.Context, options UserOptions) error + + // AccessibleDatabases returns a list of all databases that can be accessed (read/write or read-only) by this user. + AccessibleDatabases(ctx context.Context) ([]Database, error) + + // SetDatabaseAccess sets the access this user has to the given database. + // Pass a `nil` database to set the default access this user has to any new database. + // This function requires ArangoDB 3.2 and up for access value `GrantReadOnly`. + SetDatabaseAccess(ctx context.Context, db Database, access Grant) error + + // GetDatabaseAccess gets the access rights for this user to the given database. + // Pass a `nil` database to get the default access this user has to any new database. + // This function requires ArangoDB 3.2 and up. + // By default this function returns the "effective" grant. + // To return the "configured" grant, pass a context configured with `WithConfigured`. + // This distinction is only relevant in ArangoDB 3.3 in the context of a readonly database. + GetDatabaseAccess(ctx context.Context, db Database) (Grant, error) + + // RemoveDatabaseAccess removes the access this user has to the given database. + // As a result the users access falls back to its default access. 
+ // If you remove default access (db==`nil`) for a user (and there are no specific access + // rules for a database), the user's access falls back to no-access. + // Pass a `nil` database to set the default access this user has to any new database. + // This function requires ArangoDB 3.2 and up. + RemoveDatabaseAccess(ctx context.Context, db Database) error + + // SetCollectionAccess sets the access this user has to a collection. + // If you pass a `Collection`, it will set access for that collection. + // If you pass a `Database`, it will set the default collection access for that database. + // If you pass `nil`, it will set the default collection access for the default database. + // This function requires ArangoDB 3.2 and up. + SetCollectionAccess(ctx context.Context, col AccessTarget, access Grant) error + + // GetCollectionAccess gets the access rights for this user to the given collection. + // If you pass a `Collection`, it will get access for that collection. + // If you pass a `Database`, it will get the default collection access for that database. + // If you pass `nil`, it will get the default collection access for the default database. + // By default this function returns the "effective" grant. + // To return the "configured" grant, pass a context configured with `WithConfigured`. + // This distinction is only relevant in ArangoDB 3.3 in the context of a readonly database. + GetCollectionAccess(ctx context.Context, col AccessTarget) (Grant, error) + + // RemoveCollectionAccess removes the access this user has to a collection. + // If you pass a `Collection`, it will removes access for that collection. + // If you pass a `Database`, it will removes the default collection access for that database. + // If you pass `nil`, it will removes the default collection access for the default database. + // This function requires ArangoDB 3.2 and up. 
+ RemoveCollectionAccess(ctx context.Context, col AccessTarget) error + + // GrantReadWriteAccess grants this user read/write access to the given database. + // + // Deprecated: use GrantDatabaseReadWriteAccess instead. + GrantReadWriteAccess(ctx context.Context, db Database) error + + // RevokeAccess revokes this user access to the given database. + // + // Deprecated: use `SetDatabaseAccess(ctx, db, GrantNone)` instead. + RevokeAccess(ctx context.Context, db Database) error +} + +// Grant specifies access rights for an object +type Grant string + +const ( + // GrantReadWrite indicates read/write access to an object + GrantReadWrite Grant = "rw" + // GrantReadOnly indicates read-only access to an object + GrantReadOnly Grant = "ro" + // GrantNone indicates no access to an object + GrantNone Grant = "none" +) + +// AccessTarget is implemented by Database & Collection and it used to +// get/set/remove collection permissions. +type AccessTarget interface { + // Name returns the name of the database/collection. + Name() string +} diff --git a/deps/github.com/arangodb/go-driver/user_impl.go b/deps/github.com/arangodb/go-driver/user_impl.go new file mode 100644 index 000000000..9415aa4d5 --- /dev/null +++ b/deps/github.com/arangodb/go-driver/user_impl.go @@ -0,0 +1,400 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import ( + "context" + "path" +) + +// newUser creates a new User implementation. +func newUser(data userData, conn Connection) (User, error) { + if data.Name == "" { + return nil, WithStack(InvalidArgumentError{Message: "data.Name is empty"}) + } + if conn == nil { + return nil, WithStack(InvalidArgumentError{Message: "conn is nil"}) + } + return &user{ + data: data, + conn: conn, + }, nil +} + +type user struct { + data userData + conn Connection +} + +type userData struct { + Name string `json:"user,omitempty"` + Active bool `json:"active,omitempty"` + Extra *RawObject `json:"extra,omitempty"` + ChangePassword bool `json:"changePassword,omitempty"` +} + +// relPath creates the relative path to this index (`_api/user/`) +func (u *user) relPath() string { + escapedName := pathEscape(u.data.Name) + return path.Join("_api", "user", escapedName) +} + +// Name returns the name of the user. +func (u *user) Name() string { + return u.data.Name +} + +// Is this an active user? +func (u *user) IsActive() bool { + return u.data.Active +} + +// Is a password change for this user needed? +func (u *user) IsPasswordChangeNeeded() bool { + return u.data.ChangePassword +} + +// Get extra information about this user that was passed during its creation/update/replacement +func (u *user) Extra(result interface{}) error { + if u.data.Extra == nil { + return nil + } + if err := u.conn.Unmarshal(*u.data.Extra, result); err != nil { + return WithStack(err) + } + return nil +} + +// Remove removes the entire user. +// If the user does not exist, a NotFoundError is returned. 
+func (u *user) Remove(ctx context.Context) error { + req, err := u.conn.NewRequest("DELETE", u.relPath()) + if err != nil { + return WithStack(err) + } + resp, err := u.conn.Do(ctx, req) + if err != nil { + return WithStack(err) + } + if err := resp.CheckStatus(202); err != nil { + return WithStack(err) + } + return nil +} + +// Update updates individual properties of the user. +// If the user does not exist, a NotFoundError is returned. +func (u *user) Update(ctx context.Context, options UserOptions) error { + req, err := u.conn.NewRequest("PATCH", u.relPath()) + if err != nil { + return WithStack(err) + } + if _, err := req.SetBody(options); err != nil { + return WithStack(err) + } + resp, err := u.conn.Do(ctx, req) + if err != nil { + return WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return WithStack(err) + } + var data userData + if err := resp.ParseBody("", &data); err != nil { + return WithStack(err) + } + u.data = data + return nil +} + +// Replace replaces all properties of the user. +// If the user does not exist, a NotFoundError is returned. +func (u *user) Replace(ctx context.Context, options UserOptions) error { + req, err := u.conn.NewRequest("PUT", u.relPath()) + if err != nil { + return WithStack(err) + } + if _, err := req.SetBody(options); err != nil { + return WithStack(err) + } + resp, err := u.conn.Do(ctx, req) + if err != nil { + return WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return WithStack(err) + } + var data userData + if err := resp.ParseBody("", &data); err != nil { + return WithStack(err) + } + u.data = data + return nil +} + +type userAccessibleDatabasesResponse struct { + Result map[string]string `json:"result"` +} + +// AccessibleDatabases returns a list of all databases that can be accessed by this user. 
+func (u *user) AccessibleDatabases(ctx context.Context) ([]Database, error) { + req, err := u.conn.NewRequest("GET", path.Join(u.relPath(), "database")) + if err != nil { + return nil, WithStack(err) + } + resp, err := u.conn.Do(ctx, req) + if err != nil { + return nil, WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return nil, WithStack(err) + } + var data userAccessibleDatabasesResponse + if err := resp.ParseBody("", &data); err != nil { + return nil, WithStack(err) + } + result := make([]Database, 0, len(data.Result)) + for name := range data.Result { + db, err := newDatabase(name, u.conn) + if err != nil { + return nil, WithStack(err) + } + result = append(result, db) + } + return result, nil +} + +// SetDatabaseAccess sets the access this user has to the given database. +// Pass a `nil` database to set the default access this user has to any new database. +// This function requires ArangoDB 3.2 and up for access value `GrantReadOnly`. +func (u *user) SetDatabaseAccess(ctx context.Context, db Database, access Grant) error { + dbName, _, err := getDatabaseAndCollectionName(db) + if err != nil { + return WithStack(err) + } + escapedDbName := pathEscape(dbName) + req, err := u.conn.NewRequest("PUT", path.Join(u.relPath(), "database", escapedDbName)) + if err != nil { + return WithStack(err) + } + input := struct { + Grant Grant `json:"grant"` + }{ + Grant: access, + } + if _, err := req.SetBody(input); err != nil { + return WithStack(err) + } + resp, err := u.conn.Do(ctx, req) + if err != nil { + return WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return WithStack(err) + } + return nil +} + +type getAccessResponse struct { + Result string `json:"result"` +} + +// GetDatabaseAccess gets the access rights for this user to the given database. +// Pass a `nil` database to get the default access this user has to any new database. +// This function requires ArangoDB 3.2 and up. 
+func (u *user) GetDatabaseAccess(ctx context.Context, db Database) (Grant, error) { + dbName, _, err := getDatabaseAndCollectionName(db) + if err != nil { + return GrantNone, WithStack(err) + } + escapedDbName := pathEscape(dbName) + req, err := u.conn.NewRequest("GET", path.Join(u.relPath(), "database", escapedDbName)) + if err != nil { + return GrantNone, WithStack(err) + } + applyContextSettings(ctx, req) + resp, err := u.conn.Do(ctx, req) + if err != nil { + return GrantNone, WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return GrantNone, WithStack(err) + } + + var data getAccessResponse + if err := resp.ParseBody("", &data); err != nil { + return GrantNone, WithStack(err) + } + return Grant(data.Result), nil +} + +// RemoveDatabaseAccess removes the access this user has to the given database. +// As a result the users access falls back to its default access. +// If you remove default access (db==`nil`) for a user (and there are no specific access +// rules for a database), the user's access falls back to no-access. +// Pass a `nil` database to set the default access this user has to any new database. +// This function requires ArangoDB 3.2 and up. +func (u *user) RemoveDatabaseAccess(ctx context.Context, db Database) error { + dbName, _, err := getDatabaseAndCollectionName(db) + if err != nil { + return WithStack(err) + } + escapedDbName := pathEscape(dbName) + req, err := u.conn.NewRequest("DELETE", path.Join(u.relPath(), "database", escapedDbName)) + if err != nil { + return WithStack(err) + } + resp, err := u.conn.Do(ctx, req) + if err != nil { + return WithStack(err) + } + if err := resp.CheckStatus(200, 202); err != nil { + return WithStack(err) + } + return nil +} + +// SetCollectionAccess sets the access this user has to a collection. +// If you pass a `Collection`, it will set access for that collection. +// If you pass a `Database`, it will set the default collection access for that database. 
+// If you pass `nil`, it will set the default collection access for the default database. +// This function requires ArangoDB 3.2 and up. +func (u *user) SetCollectionAccess(ctx context.Context, col AccessTarget, access Grant) error { + dbName, colName, err := getDatabaseAndCollectionName(col) + if err != nil { + return WithStack(err) + } + escapedDbName := pathEscape(dbName) + escapedColName := pathEscape(colName) + req, err := u.conn.NewRequest("PUT", path.Join(u.relPath(), "database", escapedDbName, escapedColName)) + if err != nil { + return WithStack(err) + } + input := struct { + Grant Grant `json:"grant"` + }{ + Grant: access, + } + if _, err := req.SetBody(input); err != nil { + return WithStack(err) + } + resp, err := u.conn.Do(ctx, req) + if err != nil { + return WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return WithStack(err) + } + return nil +} + +// GetCollectionAccess gets the access rights for this user to the given collection. +// If you pass a `Collection`, it will get access for that collection. +// If you pass a `Database`, it will get the default collection access for that database. +// If you pass `nil`, it will get the default collection access for the default database. 
+func (u *user) GetCollectionAccess(ctx context.Context, col AccessTarget) (Grant, error) { + dbName, colName, err := getDatabaseAndCollectionName(col) + if err != nil { + return GrantNone, WithStack(err) + } + escapedDbName := pathEscape(dbName) + escapedColName := pathEscape(colName) + req, err := u.conn.NewRequest("GET", path.Join(u.relPath(), "database", escapedDbName, escapedColName)) + if err != nil { + return GrantNone, WithStack(err) + } + applyContextSettings(ctx, req) + resp, err := u.conn.Do(ctx, req) + if err != nil { + return GrantNone, WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return GrantNone, WithStack(err) + } + + var data getAccessResponse + if err := resp.ParseBody("", &data); err != nil { + return GrantNone, WithStack(err) + } + return Grant(data.Result), nil +} + +// RemoveCollectionAccess removes the access this user has to a collection. +// If you pass a `Collection`, it will removes access for that collection. +// If you pass a `Database`, it will removes the default collection access for that database. +// If you pass `nil`, it will removes the default collection access for the default database. +// This function requires ArangoDB 3.2 and up. +func (u *user) RemoveCollectionAccess(ctx context.Context, col AccessTarget) error { + dbName, colName, err := getDatabaseAndCollectionName(col) + if err != nil { + return WithStack(err) + } + escapedDbName := pathEscape(dbName) + escapedColName := pathEscape(colName) + req, err := u.conn.NewRequest("DELETE", path.Join(u.relPath(), "database", escapedDbName, escapedColName)) + if err != nil { + return WithStack(err) + } + resp, err := u.conn.Do(ctx, req) + if err != nil { + return WithStack(err) + } + if err := resp.CheckStatus(200, 202); err != nil { + return WithStack(err) + } + return nil +} + +// getDatabaseAndCollectionName returns database-name, collection-name from given access target. 
+func getDatabaseAndCollectionName(col AccessTarget) (string, string, error) { + if col == nil { + return "*", "*", nil + } + if x, ok := col.(Collection); ok { + return x.Database().Name(), x.Name(), nil + } + if x, ok := col.(Database); ok { + return x.Name(), "*", nil + } + return "", "", WithStack(InvalidArgumentError{"Need Collection or Database or nil"}) +} + +// GrantReadWriteAccess grants this user read/write access to the given database. +// +// Deprecated: use GrantDatabaseReadWriteAccess instead. +func (u *user) GrantReadWriteAccess(ctx context.Context, db Database) error { + if err := u.SetDatabaseAccess(ctx, db, GrantReadWrite); err != nil { + return WithStack(err) + } + return nil +} + +// RevokeAccess revokes this user access to the given database. +// +// Deprecated: use `SetDatabaseAccess(ctx, db, GrantNone)` instead. +func (u *user) RevokeAccess(ctx context.Context, db Database) error { + if err := u.SetDatabaseAccess(ctx, db, GrantNone); err != nil { + return WithStack(err) + } + return nil +} diff --git a/deps/github.com/arangodb/go-driver/util/endpoints.go b/deps/github.com/arangodb/go-driver/util/endpoints.go new file mode 100644 index 000000000..aa2263249 --- /dev/null +++ b/deps/github.com/arangodb/go-driver/util/endpoints.go @@ -0,0 +1,38 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package util + +import "strings" + +var ( + urlFixer = strings.NewReplacer( + "tcp://", "http://", + "ssl://", "https://", + ) +) + +// FixupEndpointURLScheme changes endpoint URL schemes used by arangod to ones used by go. +// E.g. "tcp://localhost:8529" -> "http://localhost:8529" +func FixupEndpointURLScheme(u string) string { + return urlFixer.Replace(u) +} diff --git a/deps/github.com/arangodb/go-driver/version.go b/deps/github.com/arangodb/go-driver/version.go new file mode 100644 index 000000000..f52e85dae --- /dev/null +++ b/deps/github.com/arangodb/go-driver/version.go @@ -0,0 +1,109 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import ( + "strconv" + "strings" +) + +// Version holds a server version string. The string has the format "major.minor.sub". +// Major and minor will be numeric, and sub may contain a number or a textual version. +type Version string + +// Major returns the major part of the version +// E.g. "3.1.7" -> 3 +func (v Version) Major() int { + parts := strings.Split(string(v), ".") + result, _ := strconv.Atoi(parts[0]) + return result +} + +// Minor returns the minor part of the version. +// E.g. 
"3.1.7" -> 1 +func (v Version) Minor() int { + parts := strings.Split(string(v), ".") + if len(parts) >= 2 { + result, _ := strconv.Atoi(parts[1]) + return result + } + return 0 +} + +// Sub returns the sub part of the version. +// E.g. "3.1.7" -> "7" +func (v Version) Sub() string { + parts := strings.SplitN(string(v), ".", 3) + if len(parts) == 3 { + return parts[2] + } + return "" +} + +// SubInt returns the sub part of the version as integer. +// The bool return value indicates if the sub part is indeed a number. +// E.g. "3.1.7" -> 7, true +// E.g. "3.1.foo" -> 0, false +func (v Version) SubInt() (int, bool) { + result, err := strconv.Atoi(v.Sub()) + return result, err == nil +} + +// CompareTo returns an integer comparing two version. +// The result will be 0 if v==other, -1 if v < other, and +1 if v > other. +// If major & minor parts are equal and sub part is not a number, +// the sub part will be compared using lexicographical string comparison. +func (v Version) CompareTo(other Version) int { + a := v.Major() + b := other.Major() + if a < b { + return -1 + } + if a > b { + return 1 + } + + a = v.Minor() + b = other.Minor() + if a < b { + return -1 + } + if a > b { + return 1 + } + + a, aIsInt := v.SubInt() + b, bIsInt := other.SubInt() + + if !aIsInt || !bIsInt { + // Do a string comparison + return strings.Compare(v.Sub(), other.Sub()) + } + if a < b { + return -1 + } + if a > b { + return 1 + } + return 0 +} diff --git a/deps/github.com/arangodb/go-driver/vertex_collection_documents_impl.go b/deps/github.com/arangodb/go-driver/vertex_collection_documents_impl.go new file mode 100644 index 000000000..233231dc1 --- /dev/null +++ b/deps/github.com/arangodb/go-driver/vertex_collection_documents_impl.go @@ -0,0 +1,503 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import ( + "context" + "fmt" + "path" + "reflect" +) + +// DocumentExists checks if a document with given key exists in the collection. +func (c *vertexCollection) DocumentExists(ctx context.Context, key string) (bool, error) { + if result, err := c.rawCollection().DocumentExists(ctx, key); err != nil { + return false, WithStack(err) + } else { + return result, nil + } +} + +// ReadDocument reads a single document with given key from the collection. +// The document data is stored into result, the document meta data is returned. +// If no document exists with given key, a NotFoundError is returned. 
+func (c *vertexCollection) ReadDocument(ctx context.Context, key string, result interface{}) (DocumentMeta, error) { + if err := validateKey(key); err != nil { + return DocumentMeta{}, WithStack(err) + } + escapedKey := pathEscape(key) + req, err := c.conn.NewRequest("GET", path.Join(c.relPath(), escapedKey)) + if err != nil { + return DocumentMeta{}, WithStack(err) + } + resp, err := c.conn.Do(ctx, req) + if err != nil { + return DocumentMeta{}, WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return DocumentMeta{}, WithStack(err) + } + // Parse metadata + var meta DocumentMeta + if err := resp.ParseBody("vertex", &meta); err != nil { + return DocumentMeta{}, WithStack(err) + } + // Parse result + if result != nil { + if err := resp.ParseBody("vertex", result); err != nil { + return meta, WithStack(err) + } + } + return meta, nil +} + +// CreateDocument creates a single document in the collection. +// The document data is loaded from the given document, the document meta data is returned. +// If the document data already contains a `_key` field, this will be used as key of the new document, +// otherwise a unique key is created. +// A ConflictError is returned when a `_key` field contains a duplicate key, other any other field violates an index constraint. +// To return the NEW document, prepare a context with `WithReturnNew`. +// To wait until document has been synced to disk, prepare a context with `WithWaitForSync`. 
+func (c *vertexCollection) CreateDocument(ctx context.Context, document interface{}) (DocumentMeta, error) { + meta, _, err := c.createDocument(ctx, document) + if err != nil { + return DocumentMeta{}, WithStack(err) + } + return meta, nil +} + +func (c *vertexCollection) createDocument(ctx context.Context, document interface{}) (DocumentMeta, contextSettings, error) { + if document == nil { + return DocumentMeta{}, contextSettings{}, WithStack(InvalidArgumentError{Message: "document nil"}) + } + req, err := c.conn.NewRequest("POST", c.relPath()) + if err != nil { + return DocumentMeta{}, contextSettings{}, WithStack(err) + } + if _, err := req.SetBody(document); err != nil { + return DocumentMeta{}, contextSettings{}, WithStack(err) + } + cs := applyContextSettings(ctx, req) + resp, err := c.conn.Do(ctx, req) + if err != nil { + return DocumentMeta{}, cs, WithStack(err) + } + if err := resp.CheckStatus(cs.okStatus(201, 202)); err != nil { + return DocumentMeta{}, cs, WithStack(err) + } + if cs.Silent { + // Empty response, we're done + return DocumentMeta{}, cs, nil + } + // Parse metadata + var meta DocumentMeta + if err := resp.ParseBody("vertex", &meta); err != nil { + return DocumentMeta{}, cs, WithStack(err) + } + // Parse returnNew (if needed) + if cs.ReturnNew != nil { + if err := resp.ParseBody("new", cs.ReturnNew); err != nil { + return meta, cs, WithStack(err) + } + } + return meta, cs, nil +} + +// CreateDocuments creates multiple documents in the collection. +// The document data is loaded from the given documents slice, the documents meta data is returned. +// If a documents element already contains a `_key` field, this will be used as key of the new document, +// otherwise a unique key is created. +// If a documents element contains a `_key` field with a duplicate key, other any other field violates an index constraint, +// a ConflictError is returned in its inded in the errors slice. 
+// To return the NEW documents, prepare a context with `WithReturnNew`. The data argument passed to `WithReturnNew` must be +// a slice with the same number of entries as the `documents` slice. +// To wait until document has been synced to disk, prepare a context with `WithWaitForSync`. +// If the create request itself fails or one of the arguments is invalid, an error is returned. +func (c *vertexCollection) CreateDocuments(ctx context.Context, documents interface{}) (DocumentMetaSlice, ErrorSlice, error) { + documentsVal := reflect.ValueOf(documents) + switch documentsVal.Kind() { + case reflect.Array, reflect.Slice: + // OK + default: + return nil, nil, WithStack(InvalidArgumentError{Message: fmt.Sprintf("documents data must be of kind Array, got %s", documentsVal.Kind())}) + } + documentCount := documentsVal.Len() + metas := make(DocumentMetaSlice, documentCount) + errs := make(ErrorSlice, documentCount) + silent := false + for i := 0; i < documentCount; i++ { + doc := documentsVal.Index(i) + ctx, err := withDocumentAt(ctx, i) + if err != nil { + return nil, nil, WithStack(err) + } + meta, cs, err := c.createDocument(ctx, doc.Interface()) + if cs.Silent { + silent = true + } else { + metas[i], errs[i] = meta, err + } + } + if silent { + return nil, nil, nil + } + return metas, errs, nil +} + +// UpdateDocument updates a single document with given key in the collection. +// The document meta data is returned. +// To return the NEW document, prepare a context with `WithReturnNew`. +// To return the OLD document, prepare a context with `WithReturnOld`. +// To wait until document has been synced to disk, prepare a context with `WithWaitForSync`. +// If no document exists with given key, a NotFoundError is returned. 
+func (c *vertexCollection) UpdateDocument(ctx context.Context, key string, update interface{}) (DocumentMeta, error) { + meta, _, err := c.updateDocument(ctx, key, update) + if err != nil { + return DocumentMeta{}, WithStack(err) + } + return meta, nil +} + +func (c *vertexCollection) updateDocument(ctx context.Context, key string, update interface{}) (DocumentMeta, contextSettings, error) { + if err := validateKey(key); err != nil { + return DocumentMeta{}, contextSettings{}, WithStack(err) + } + if update == nil { + return DocumentMeta{}, contextSettings{}, WithStack(InvalidArgumentError{Message: "update nil"}) + } + escapedKey := pathEscape(key) + req, err := c.conn.NewRequest("PATCH", path.Join(c.relPath(), escapedKey)) + if err != nil { + return DocumentMeta{}, contextSettings{}, WithStack(err) + } + if _, err := req.SetBody(update); err != nil { + return DocumentMeta{}, contextSettings{}, WithStack(err) + } + cs := applyContextSettings(ctx, req) + resp, err := c.conn.Do(ctx, req) + if err != nil { + return DocumentMeta{}, cs, WithStack(err) + } + if err := resp.CheckStatus(200, 201, 202); err != nil { + return DocumentMeta{}, cs, WithStack(err) + } + if cs.Silent { + // Empty response, we're done + return DocumentMeta{}, cs, nil + } + // Parse metadata + var meta DocumentMeta + if err := resp.ParseBody("vertex", &meta); err != nil { + return DocumentMeta{}, cs, WithStack(err) + } + // Parse returnOld (if needed) + if cs.ReturnOld != nil { + if err := resp.ParseBody("old", cs.ReturnOld); err != nil { + return meta, cs, WithStack(err) + } + } + // Parse returnNew (if needed) + if cs.ReturnNew != nil { + if err := resp.ParseBody("new", cs.ReturnNew); err != nil { + return meta, cs, WithStack(err) + } + } + return meta, cs, nil +} + +// UpdateDocuments updates multiple document with given keys in the collection. +// The updates are loaded from the given updates slice, the documents meta data are returned. 
+// To return the NEW documents, prepare a context with `WithReturnNew` with a slice of documents. +// To return the OLD documents, prepare a context with `WithReturnOld` with a slice of documents. +// To wait until documents has been synced to disk, prepare a context with `WithWaitForSync`. +// If no document exists with a given key, a NotFoundError is returned at its errors index. +func (c *vertexCollection) UpdateDocuments(ctx context.Context, keys []string, updates interface{}) (DocumentMetaSlice, ErrorSlice, error) { + updatesVal := reflect.ValueOf(updates) + switch updatesVal.Kind() { + case reflect.Array, reflect.Slice: + // OK + default: + return nil, nil, WithStack(InvalidArgumentError{Message: fmt.Sprintf("updates data must be of kind Array, got %s", updatesVal.Kind())}) + } + updateCount := updatesVal.Len() + if keys != nil { + if len(keys) != updateCount { + return nil, nil, WithStack(InvalidArgumentError{Message: fmt.Sprintf("expected %d keys, got %d", updateCount, len(keys))}) + } + for _, key := range keys { + if err := validateKey(key); err != nil { + return nil, nil, WithStack(err) + } + } + } + metas := make(DocumentMetaSlice, updateCount) + errs := make(ErrorSlice, updateCount) + silent := false + for i := 0; i < updateCount; i++ { + update := updatesVal.Index(i) + ctx, err := withDocumentAt(ctx, i) + if err != nil { + return nil, nil, WithStack(err) + } + var key string + if keys != nil { + key = keys[i] + } else { + var err error + key, err = getKeyFromDocument(update) + if err != nil { + errs[i] = err + continue + } + } + meta, cs, err := c.updateDocument(ctx, key, update.Interface()) + if cs.Silent { + silent = true + } else { + metas[i], errs[i] = meta, err + } + } + if silent { + return nil, nil, nil + } + return metas, errs, nil +} + +// ReplaceDocument replaces a single document with given key in the collection with the document given in the document argument. +// The document meta data is returned. 
+// To return the NEW document, prepare a context with `WithReturnNew`. +// To return the OLD document, prepare a context with `WithReturnOld`. +// To wait until document has been synced to disk, prepare a context with `WithWaitForSync`. +// If no document exists with given key, a NotFoundError is returned. +func (c *vertexCollection) ReplaceDocument(ctx context.Context, key string, document interface{}) (DocumentMeta, error) { + meta, _, err := c.replaceDocument(ctx, key, document) + if err != nil { + return DocumentMeta{}, WithStack(err) + } + return meta, nil +} + +func (c *vertexCollection) replaceDocument(ctx context.Context, key string, document interface{}) (DocumentMeta, contextSettings, error) { + if err := validateKey(key); err != nil { + return DocumentMeta{}, contextSettings{}, WithStack(err) + } + if document == nil { + return DocumentMeta{}, contextSettings{}, WithStack(InvalidArgumentError{Message: "document nil"}) + } + escapedKey := pathEscape(key) + req, err := c.conn.NewRequest("PUT", path.Join(c.relPath(), escapedKey)) + if err != nil { + return DocumentMeta{}, contextSettings{}, WithStack(err) + } + if _, err := req.SetBody(document); err != nil { + return DocumentMeta{}, contextSettings{}, WithStack(err) + } + cs := applyContextSettings(ctx, req) + resp, err := c.conn.Do(ctx, req) + if err != nil { + return DocumentMeta{}, cs, WithStack(err) + } + if err := resp.CheckStatus(cs.okStatus(201, 202)); err != nil { + return DocumentMeta{}, cs, WithStack(err) + } + if cs.Silent { + // Empty response, we're done + return DocumentMeta{}, cs, nil + } + // Parse metadata + var meta DocumentMeta + if err := resp.ParseBody("vertex", &meta); err != nil { + return DocumentMeta{}, cs, WithStack(err) + } + // Parse returnOld (if needed) + if cs.ReturnOld != nil { + if err := resp.ParseBody("old", cs.ReturnOld); err != nil { + return meta, cs, WithStack(err) + } + } + // Parse returnNew (if needed) + if cs.ReturnNew != nil { + if err := resp.ParseBody("new", 
cs.ReturnNew); err != nil { + return meta, cs, WithStack(err) + } + } + return meta, cs, nil +} + +// ReplaceDocuments replaces multiple documents with given keys in the collection with the documents given in the documents argument. +// The replacements are loaded from the given documents slice, the documents meta data are returned. +// To return the NEW documents, prepare a context with `WithReturnNew` with a slice of documents. +// To return the OLD documents, prepare a context with `WithReturnOld` with a slice of documents. +// To wait until documents has been synced to disk, prepare a context with `WithWaitForSync`. +// If no document exists with a given key, a NotFoundError is returned at its errors index. +func (c *vertexCollection) ReplaceDocuments(ctx context.Context, keys []string, documents interface{}) (DocumentMetaSlice, ErrorSlice, error) { + documentsVal := reflect.ValueOf(documents) + switch documentsVal.Kind() { + case reflect.Array, reflect.Slice: + // OK + default: + return nil, nil, WithStack(InvalidArgumentError{Message: fmt.Sprintf("documents data must be of kind Array, got %s", documentsVal.Kind())}) + } + documentCount := documentsVal.Len() + if keys != nil { + if len(keys) != documentCount { + return nil, nil, WithStack(InvalidArgumentError{Message: fmt.Sprintf("expected %d keys, got %d", documentCount, len(keys))}) + } + for _, key := range keys { + if err := validateKey(key); err != nil { + return nil, nil, WithStack(err) + } + } + } + metas := make(DocumentMetaSlice, documentCount) + errs := make(ErrorSlice, documentCount) + silent := false + for i := 0; i < documentCount; i++ { + doc := documentsVal.Index(i) + ctx, err := withDocumentAt(ctx, i) + if err != nil { + return nil, nil, WithStack(err) + } + var key string + if keys != nil { + key = keys[i] + } else { + var err error + key, err = getKeyFromDocument(doc) + if err != nil { + errs[i] = err + continue + } + } + meta, cs, err := c.replaceDocument(ctx, key, doc.Interface()) + if 
cs.Silent { + silent = true + } else { + metas[i], errs[i] = meta, err + } + } + if silent { + return nil, nil, nil + } + return metas, errs, nil +} + +// RemoveDocument removes a single document with given key from the collection. +// The document meta data is returned. +// To return the OLD document, prepare a context with `WithReturnOld`. +// To wait until removal has been synced to disk, prepare a context with `WithWaitForSync`. +// If no document exists with given key, a NotFoundError is returned. +func (c *vertexCollection) RemoveDocument(ctx context.Context, key string) (DocumentMeta, error) { + meta, _, err := c.removeDocument(ctx, key) + if err != nil { + return DocumentMeta{}, WithStack(err) + } + return meta, nil +} + +func (c *vertexCollection) removeDocument(ctx context.Context, key string) (DocumentMeta, contextSettings, error) { + if err := validateKey(key); err != nil { + return DocumentMeta{}, contextSettings{}, WithStack(err) + } + escapedKey := pathEscape(key) + req, err := c.conn.NewRequest("DELETE", path.Join(c.relPath(), escapedKey)) + if err != nil { + return DocumentMeta{}, contextSettings{}, WithStack(err) + } + cs := applyContextSettings(ctx, req) + if cs.ReturnOld != nil { + return DocumentMeta{}, contextSettings{}, WithStack(InvalidArgumentError{Message: "ReturnOld is not support when removing vertices"}) + } + resp, err := c.conn.Do(ctx, req) + if err != nil { + return DocumentMeta{}, cs, WithStack(err) + } + if err := resp.CheckStatus(cs.okStatus(200, 202)); err != nil { + return DocumentMeta{}, cs, WithStack(err) + } + if cs.Silent { + // Empty response, we're done + return DocumentMeta{}, cs, nil + } + // Parse metadata + var meta DocumentMeta + if err := resp.ParseBody("vertex", &meta); err != nil { + return DocumentMeta{}, cs, WithStack(err) + } + // Parse returnOld (if needed) + if cs.ReturnOld != nil { + if err := resp.ParseBody("old", cs.ReturnOld); err != nil { + return meta, cs, WithStack(err) + } + } + return meta, cs, nil +} 
+ +// RemoveDocuments removes multiple documents with given keys from the collection. +// The document meta data are returned. +// To return the OLD documents, prepare a context with `WithReturnOld` with a slice of documents. +// To wait until removal has been synced to disk, prepare a context with `WithWaitForSync`. +// If no document exists with a given key, a NotFoundError is returned at its errors index. +func (c *vertexCollection) RemoveDocuments(ctx context.Context, keys []string) (DocumentMetaSlice, ErrorSlice, error) { + keyCount := len(keys) + for _, key := range keys { + if err := validateKey(key); err != nil { + return nil, nil, WithStack(err) + } + } + metas := make(DocumentMetaSlice, keyCount) + errs := make(ErrorSlice, keyCount) + silent := false + for i := 0; i < keyCount; i++ { + key := keys[i] + ctx, err := withDocumentAt(ctx, i) + if err != nil { + return nil, nil, WithStack(err) + } + meta, cs, err := c.removeDocument(ctx, key) + if cs.Silent { + silent = true + } else { + metas[i], errs[i] = meta, err + } + } + if silent { + return nil, nil, nil + } + return metas, errs, nil +} + +// ImportDocuments imports one or more documents into the collection. +// The document data is loaded from the given documents argument, statistics are returned. +// The documents argument can be one of the following: +// - An array of structs: All structs will be imported as individual documents. +// - An array of maps: All maps will be imported as individual documents. +// To wait until all documents have been synced to disk, prepare a context with `WithWaitForSync`. +// To return details about documents that could not be imported, prepare a context with `WithImportDetails`. 
+func (c *vertexCollection) ImportDocuments(ctx context.Context, documents interface{}, options *ImportDocumentOptions) (ImportDocumentStatistics, error) { + stats, err := c.rawCollection().ImportDocuments(ctx, documents, options) + if err != nil { + return ImportDocumentStatistics{}, WithStack(err) + } + return stats, nil +} diff --git a/deps/github.com/arangodb/go-driver/vertex_collection_impl.go b/deps/github.com/arangodb/go-driver/vertex_collection_impl.go new file mode 100644 index 000000000..324302f84 --- /dev/null +++ b/deps/github.com/arangodb/go-driver/vertex_collection_impl.go @@ -0,0 +1,168 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import ( + "context" + "path" +) + +// newVertexCollection creates a new Vertex Collection implementation. 
+func newVertexCollection(name string, g *graph) (Collection, error) { + if name == "" { + return nil, WithStack(InvalidArgumentError{Message: "name is empty"}) + } + if g == nil { + return nil, WithStack(InvalidArgumentError{Message: "g is nil"}) + } + return &vertexCollection{ + name: name, + g: g, + conn: g.db.conn, + }, nil +} + +type vertexCollection struct { + name string + g *graph + conn Connection +} + +// relPath creates the relative path to this edge collection (`_db//_api/gharial//vertex/`) +func (c *vertexCollection) relPath() string { + escapedName := pathEscape(c.name) + return path.Join(c.g.relPath(), "vertex", escapedName) +} + +// Name returns the name of the edge collection. +func (c *vertexCollection) Name() string { + return c.name +} + +// Database returns the database containing the collection. +func (c *vertexCollection) Database() Database { + return c.g.db +} + +// rawCollection returns a standard document implementation of Collection +// for this vertex collection. +func (c *vertexCollection) rawCollection() Collection { + result, _ := newCollection(c.name, c.g.db) + return result +} + +// Status fetches the current status of the collection. +func (c *vertexCollection) Status(ctx context.Context) (CollectionStatus, error) { + result, err := c.rawCollection().Status(ctx) + if err != nil { + return CollectionStatus(0), WithStack(err) + } + return result, nil +} + +// Count fetches the number of document in the collection. +func (c *vertexCollection) Count(ctx context.Context) (int64, error) { + result, err := c.rawCollection().Count(ctx) + if err != nil { + return 0, WithStack(err) + } + return result, nil +} + +// Statistics returns the number of documents and additional statistical information about the collection. 
+func (c *vertexCollection) Statistics(ctx context.Context) (CollectionStatistics, error) { + result, err := c.rawCollection().Statistics(ctx) + if err != nil { + return CollectionStatistics{}, WithStack(err) + } + return result, nil +} + +// Revision fetches the revision ID of the collection. +// The revision ID is a server-generated string that clients can use to check whether data +// in a collection has changed since the last revision check. +func (c *vertexCollection) Revision(ctx context.Context) (string, error) { + result, err := c.rawCollection().Revision(ctx) + if err != nil { + return "", WithStack(err) + } + return result, nil +} + +// Properties fetches extended information about the collection. +func (c *vertexCollection) Properties(ctx context.Context) (CollectionProperties, error) { + result, err := c.rawCollection().Properties(ctx) + if err != nil { + return CollectionProperties{}, WithStack(err) + } + return result, nil +} + +// SetProperties changes properties of the collection. +func (c *vertexCollection) SetProperties(ctx context.Context, options SetCollectionPropertiesOptions) error { + if err := c.rawCollection().SetProperties(ctx, options); err != nil { + return WithStack(err) + } + return nil +} + +// Load the collection into memory. +func (c *vertexCollection) Load(ctx context.Context) error { + if err := c.rawCollection().Load(ctx); err != nil { + return WithStack(err) + } + return nil +} + +// UnLoad the collection from memory. +func (c *vertexCollection) Unload(ctx context.Context) error { + if err := c.rawCollection().Unload(ctx); err != nil { + return WithStack(err) + } + return nil +} + +// Remove removes the entire collection. +// If the collection does not exist, a NotFoundError is returned. 
+func (c *vertexCollection) Remove(ctx context.Context) error { + req, err := c.conn.NewRequest("DELETE", c.relPath()) + if err != nil { + return WithStack(err) + } + resp, err := c.conn.Do(ctx, req) + if err != nil { + return WithStack(err) + } + if err := resp.CheckStatus(201, 202); err != nil { + return WithStack(err) + } + return nil +} + +// Truncate removes all documents from the collection, but leaves the indexes intact. +func (c *vertexCollection) Truncate(ctx context.Context) error { + if err := c.rawCollection().Truncate(ctx); err != nil { + return WithStack(err) + } + return nil +} diff --git a/deps/github.com/arangodb/go-driver/vertex_collection_indexes_impl.go b/deps/github.com/arangodb/go-driver/vertex_collection_indexes_impl.go new file mode 100644 index 000000000..385c1038f --- /dev/null +++ b/deps/github.com/arangodb/go-driver/vertex_collection_indexes_impl.go @@ -0,0 +1,116 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package driver + +import "context" + +// Index opens a connection to an existing index within the collection. +// If no index with given name exists, an NotFoundError is returned. 
+func (c *vertexCollection) Index(ctx context.Context, name string) (Index, error) { + result, err := c.rawCollection().Index(ctx, name) + if err != nil { + return nil, WithStack(err) + } + return result, nil +} + +// IndexExists returns true if an index with given name exists within the collection. +func (c *vertexCollection) IndexExists(ctx context.Context, name string) (bool, error) { + result, err := c.rawCollection().IndexExists(ctx, name) + if err != nil { + return false, WithStack(err) + } + return result, nil +} + +// Indexes returns a list of all indexes in the collection. +func (c *vertexCollection) Indexes(ctx context.Context) ([]Index, error) { + result, err := c.rawCollection().Indexes(ctx) + if err != nil { + return nil, WithStack(err) + } + return result, nil +} + +// EnsureFullTextIndex creates a fulltext index in the collection, if it does not already exist. +// +// Fields is a slice of attribute names. Currently, the slice is limited to exactly one attribute. +// The index is returned, together with a boolean indicating if the index was newly created (true) or pre-existing (false). +func (c *vertexCollection) EnsureFullTextIndex(ctx context.Context, fields []string, options *EnsureFullTextIndexOptions) (Index, bool, error) { + result, created, err := c.rawCollection().EnsureFullTextIndex(ctx, fields, options) + if err != nil { + return nil, false, WithStack(err) + } + return result, created, nil +} + +// EnsureGeoIndex creates a hash index in the collection, if it does not already exist. +// +// Fields is a slice with one or two attribute paths. If it is a slice with one attribute path location, +// then a geo-spatial index on all documents is created using location as path to the coordinates. +// The value of the attribute must be a slice with at least two double values. The slice must contain the latitude (first value) +// and the longitude (second value). 
All documents, which do not have the attribute path or with value that are not suitable, are ignored. +// If it is a slice with two attribute paths latitude and longitude, then a geo-spatial index on all documents is created +// using latitude and longitude as paths the latitude and the longitude. The value of the attribute latitude and of the +// attribute longitude must a double. All documents, which do not have the attribute paths or which values are not suitable, are ignored. +// The index is returned, together with a boolean indicating if the index was newly created (true) or pre-existing (false). +func (c *vertexCollection) EnsureGeoIndex(ctx context.Context, fields []string, options *EnsureGeoIndexOptions) (Index, bool, error) { + result, created, err := c.rawCollection().EnsureGeoIndex(ctx, fields, options) + if err != nil { + return nil, false, WithStack(err) + } + return result, created, nil +} + +// EnsureHashIndex creates a hash index in the collection, if it does not already exist. +// Fields is a slice of attribute paths. +// The index is returned, together with a boolean indicating if the index was newly created (true) or pre-existing (false). +func (c *vertexCollection) EnsureHashIndex(ctx context.Context, fields []string, options *EnsureHashIndexOptions) (Index, bool, error) { + result, created, err := c.rawCollection().EnsureHashIndex(ctx, fields, options) + if err != nil { + return nil, false, WithStack(err) + } + return result, created, nil +} + +// EnsurePersistentIndex creates a persistent index in the collection, if it does not already exist. +// Fields is a slice of attribute paths. +// The index is returned, together with a boolean indicating if the index was newly created (true) or pre-existing (false). 
+func (c *vertexCollection) EnsurePersistentIndex(ctx context.Context, fields []string, options *EnsurePersistentIndexOptions) (Index, bool, error) { + result, created, err := c.rawCollection().EnsurePersistentIndex(ctx, fields, options) + if err != nil { + return nil, false, WithStack(err) + } + return result, created, nil +} + +// EnsureSkipListIndex creates a skiplist index in the collection, if it does not already exist. +// Fields is a slice of attribute paths. +// The index is returned, together with a boolean indicating if the index was newly created (true) or pre-existing (false). +func (c *vertexCollection) EnsureSkipListIndex(ctx context.Context, fields []string, options *EnsureSkipListIndexOptions) (Index, bool, error) { + result, created, err := c.rawCollection().EnsureSkipListIndex(ctx, fields, options) + if err != nil { + return nil, false, WithStack(err) + } + return result, created, nil +} diff --git a/deps/github.com/arangodb/go-driver/vst/authentication.go b/deps/github.com/arangodb/go-driver/vst/authentication.go new file mode 100644 index 000000000..97a66dab6 --- /dev/null +++ b/deps/github.com/arangodb/go-driver/vst/authentication.go @@ -0,0 +1,155 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package vst + +import ( + "context" + + driver "github.com/arangodb/go-driver" + "github.com/arangodb/go-driver/vst/protocol" + velocypack "github.com/arangodb/go-velocypack" +) + +// Authentication implements a kind of authentication. +type vstAuthentication interface { + // PrepareFunc is called when the given Connection has been created. + // The returned function is then called once. + PrepareFunc(c *vstConnection) func(ctx context.Context, conn *protocol.Connection) error +} + +// newBasicAuthentication creates an authentication implementation based on the given username & password. +func newBasicAuthentication(userName, password string) vstAuthentication { + return &vstAuthenticationImpl{ + encryption: "plain", + userName: userName, + password: password, + } +} + +// newJWTAuthentication creates a JWT token authentication implementation based on the given username & password. +func newJWTAuthentication(userName, password string) vstAuthentication { + return &vstAuthenticationImpl{ + encryption: "jwt", + userName: userName, + password: password, + } +} + +// vstAuthenticationImpl implements VST implementation for JWT & Plain. +type vstAuthenticationImpl struct { + encryption string + userName string + password string +} + +type jwtOpenRequest struct { + UserName string `json:"username"` + Password string `json:"password"` +} + +type jwtOpenResponse struct { + Token string `json:"jwt"` + MustChangePassword bool `json:"must_change_password,omitempty"` +} + +// Prepare is called before the first request of the given connection is made. 
+func (a *vstAuthenticationImpl) PrepareFunc(vstConn *vstConnection) func(ctx context.Context, conn *protocol.Connection) error { + return func(ctx context.Context, conn *protocol.Connection) error { + var authReq velocypack.Slice + var err error + + if a.encryption == "jwt" { + // Call _open/auth + // Prepare request + r, err := vstConn.NewRequest("POST", "/_open/auth") + if err != nil { + return driver.WithStack(err) + } + r.SetBody(jwtOpenRequest{ + UserName: a.userName, + Password: a.password, + }) + + // Perform request + resp, err := vstConn.do(ctx, r, conn) + if err != nil { + return driver.WithStack(err) + } + if err := resp.CheckStatus(200); err != nil { + return driver.WithStack(err) + } + + // Parse response + var data jwtOpenResponse + if err := resp.ParseBody("", &data); err != nil { + return driver.WithStack(err) + } + + // Create request + var b velocypack.Builder + b.OpenArray() + b.AddValue(velocypack.NewIntValue(1)) // Version + b.AddValue(velocypack.NewIntValue(1000)) // Type (1000=Auth) + b.AddValue(velocypack.NewStringValue("jwt")) // Encryption type + b.AddValue(velocypack.NewStringValue(data.Token)) // Token + b.Close() // request + authReq, err = b.Slice() + if err != nil { + return driver.WithStack(err) + } + } else { + // Create request + var b velocypack.Builder + b.OpenArray() + b.AddValue(velocypack.NewIntValue(1)) // Version + b.AddValue(velocypack.NewIntValue(1000)) // Type (1000=Auth) + b.AddValue(velocypack.NewStringValue(a.encryption)) // Encryption type + b.AddValue(velocypack.NewStringValue(a.userName)) // Username + b.AddValue(velocypack.NewStringValue(a.password)) // Password + b.Close() // request + authReq, err = b.Slice() + if err != nil { + return driver.WithStack(err) + } + } + + // Send request + respChan, err := conn.Send(ctx, authReq) + if err != nil { + return driver.WithStack(err) + } + + // Wait for response + m := <-respChan + resp, err := newResponse(m, "", nil) + if err != nil { + return driver.WithStack(err) + } 
+ if err := resp.CheckStatus(200); err != nil { + return driver.WithStack(err) + } + + // Ok + return nil + } +} diff --git a/deps/github.com/arangodb/go-driver/vst/connection.go b/deps/github.com/arangodb/go-driver/vst/connection.go new file mode 100644 index 000000000..1250bd2d1 --- /dev/null +++ b/deps/github.com/arangodb/go-driver/vst/connection.go @@ -0,0 +1,264 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package vst + +import ( + "context" + "crypto/tls" + "encoding/json" + "fmt" + "net/url" + "strings" + + driver "github.com/arangodb/go-driver" + "github.com/arangodb/go-driver/cluster" + "github.com/arangodb/go-driver/util" + "github.com/arangodb/go-driver/vst/protocol" + velocypack "github.com/arangodb/go-velocypack" +) + +const ( + keyRawResponse driver.ContextKey = "arangodb-rawResponse" + keyResponse driver.ContextKey = "arangodb-response" +) + +// ConnectionConfig provides all configuration options for a Velocypack connection. +type ConnectionConfig struct { + // Endpoints holds 1 or more URL's used to connect to the database. + // In case of a connection to an ArangoDB cluster, you must provide the URL's of all coordinators. + Endpoints []string + // TLSConfig holds settings used to configure a TLS (HTTPS) connection. 
+ // This is only used for endpoints using the HTTPS scheme. + TLSConfig *tls.Config + // Transport allows the use of a custom round tripper. + // If Transport is not of type `*http.Transport`, the `TLSConfig` property is not used. + // Otherwise a `TLSConfig` property other than `nil` will overwrite the `TLSClientConfig` + // property of `Transport`. + // Use the Version field in Transport to switch between Velocypack 1.0 / 1.1. + // Note that Velocypack 1.1 requires ArangoDB 3.2 or higher. + // Note that Velocypack 1.0 does not support JWT authentication. + Transport protocol.TransportConfig + // Cluster configuration settings + cluster.ConnectionConfig +} + +type messageTransport interface { + Send(ctx context.Context, messageParts ...[]byte) (<-chan protocol.Message, error) +} + +// NewConnection creates a new Velocystream connection based on the given configuration settings. +func NewConnection(config ConnectionConfig) (driver.Connection, error) { + c, err := cluster.NewConnection(config.ConnectionConfig, func(endpoint string) (driver.Connection, error) { + conn, err := newVSTConnection(endpoint, config) + if err != nil { + return nil, driver.WithStack(err) + } + return conn, nil + }, config.Endpoints) + if err != nil { + return nil, driver.WithStack(err) + } + return c, nil +} + +// newVSTConnection creates a new Velocystream connection for a single endpoint and the remainder of the given configuration settings. 
+func newVSTConnection(endpoint string, config ConnectionConfig) (driver.Connection, error) { + endpoint = util.FixupEndpointURLScheme(endpoint) + u, err := url.Parse(endpoint) + if err != nil { + return nil, driver.WithStack(err) + } + hostAddr := u.Host + tlsConfig := config.TLSConfig + switch strings.ToLower(u.Scheme) { + case "http": + tlsConfig = nil + case "https": + if tlsConfig == nil { + tlsConfig = &tls.Config{} + } + } + c := &vstConnection{ + endpoint: *u, + transport: protocol.NewTransport(hostAddr, tlsConfig, config.Transport), + } + return c, nil +} + +// vstConnection implements an Velocystream connection to an arangodb server. +type vstConnection struct { + endpoint url.URL + transport *protocol.Transport +} + +// String returns the endpoint as string +func (c *vstConnection) String() string { + return c.endpoint.String() +} + +// NewRequest creates a new request with given method and path. +func (c *vstConnection) NewRequest(method, path string) (driver.Request, error) { + switch method { + case "GET", "POST", "DELETE", "HEAD", "PATCH", "PUT", "OPTIONS": + // Ok + default: + return nil, driver.WithStack(driver.InvalidArgumentError{Message: fmt.Sprintf("Invalid method '%s'", method)}) + } + r := &vstRequest{ + method: method, + path: path, + } + return r, nil +} + +// Do performs a given request, returning its response. +func (c *vstConnection) Do(ctx context.Context, req driver.Request) (driver.Response, error) { + resp, err := c.do(ctx, req, c.transport) + if err != nil { + return nil, driver.WithStack(err) + } + return resp, nil +} + +// Do performs a given request, returning its response. 
+func (c *vstConnection) do(ctx context.Context, req driver.Request, transport messageTransport) (driver.Response, error) { + vstReq, ok := req.(*vstRequest) + if !ok { + return nil, driver.WithStack(driver.InvalidArgumentError{Message: "request is not a *vstRequest"}) + } + msgParts, err := vstReq.createMessageParts() + if err != nil { + return nil, driver.WithStack(err) + } + resp, err := transport.Send(ctx, msgParts...) + if err != nil { + return nil, driver.WithStack(err) + } + // All data was send now + vstReq.WroteRequest() + + // Wait for response + msg, ok := <-resp + if !ok { + // Message was cancelled / timeout + return nil, driver.WithStack(context.DeadlineExceeded) + } + + //fmt.Printf("Received msg: %d\n", msg.ID) + var rawResponse *[]byte + if ctx != nil { + if v := ctx.Value(keyRawResponse); v != nil { + if buf, ok := v.(*[]byte); ok { + rawResponse = buf + } + } + } + + vstResp, err := newResponse(msg, c.endpoint.String(), rawResponse) + if err != nil { + fmt.Printf("Cannot decode msg %d: %#v\n", msg.ID, err) + return nil, driver.WithStack(err) + } + if ctx != nil { + if v := ctx.Value(keyResponse); v != nil { + if respPtr, ok := v.(*driver.Response); ok { + *respPtr = vstResp + } + } + } + return vstResp, nil +} + +// Unmarshal unmarshals the given raw object into the given result interface. 
+func (c *vstConnection) Unmarshal(data driver.RawObject, result interface{}) error { + ct := driver.ContentTypeVelocypack + if len(data) >= 2 { + // Poor mans auto detection of json + l := len(data) + if (data[0] == '{' && data[l-1] == '}') || (data[0] == '[' && data[l-1] == ']') { + ct = driver.ContentTypeJSON + } + } + switch ct { + case driver.ContentTypeJSON: + if err := json.Unmarshal(data, result); err != nil { + return driver.WithStack(err) + } + case driver.ContentTypeVelocypack: + //panic(velocypack.Slice(data)) + if err := velocypack.Unmarshal(velocypack.Slice(data), result); err != nil { + return driver.WithStack(err) + } + default: + return driver.WithStack(fmt.Errorf("Unsupported content type %d", int(ct))) + } + return nil +} + +// Endpoints returns the endpoints used by this connection. +func (c *vstConnection) Endpoints() []string { + return []string{c.endpoint.String()} +} + +// UpdateEndpoints reconfigures the connection to use the given endpoints. +func (c *vstConnection) UpdateEndpoints(endpoints []string) error { + // Do nothing here. + // The real updating is done in cluster Connection. + return nil +} + +// Configure the authentication used for this connection. 
+func (c *vstConnection) SetAuthentication(auth driver.Authentication) (driver.Connection, error) { + var vstAuth vstAuthentication + switch auth.Type() { + case driver.AuthenticationTypeBasic: + userName := auth.Get("username") + password := auth.Get("password") + vstAuth = newBasicAuthentication(userName, password) + case driver.AuthenticationTypeJWT: + userName := auth.Get("username") + password := auth.Get("password") + vstAuth = newJWTAuthentication(userName, password) + default: + return nil, driver.WithStack(fmt.Errorf("Unsupported authentication type %d", int(auth.Type()))) + } + + // Set authentication callback + c.transport.SetOnConnectionCreated(vstAuth.PrepareFunc(c)) + // Close all existing connections + c.transport.CloseAllConnections() + + return c, nil +} + +// Protocols returns all protocols used by this connection. +func (c *vstConnection) Protocols() driver.ProtocolSet { + switch c.transport.Version { + case protocol.Version1_0: + return driver.ProtocolSet{driver.ProtocolVST1_0} + case protocol.Version1_1: + return driver.ProtocolSet{driver.ProtocolVST1_1} + default: + return driver.ProtocolSet{ /*unknown*/ } + } +} diff --git a/deps/github.com/arangodb/go-driver/vst/protocol/chunk.go b/deps/github.com/arangodb/go-driver/vst/protocol/chunk.go new file mode 100644 index 000000000..eb1e59bb4 --- /dev/null +++ b/deps/github.com/arangodb/go-driver/vst/protocol/chunk.go @@ -0,0 +1,123 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package protocol + +import ( + "fmt" + "io" +) + +// chunk is a part of a larger message. +type chunk struct { + chunkX uint32 + MessageID uint64 + MessageLength uint64 + Data []byte +} + +const ( + minChunkHeaderSize = 16 + maxChunkHeaderSize = 24 +) + +// buildChunks splits a message consisting of 1 or more parts into chunks. +func buildChunks(messageID uint64, maxChunkSize uint32, messageParts ...[]byte) ([]chunk, error) { + if maxChunkSize <= maxChunkHeaderSize { + return nil, fmt.Errorf("maxChunkSize is too small (%d)", maxChunkSize) + } + messageLength := uint64(0) + for _, m := range messageParts { + messageLength += uint64(len(m)) + } + minChunkCount := int(messageLength / uint64(maxChunkSize)) + maxDataLength := int(maxChunkSize - maxChunkHeaderSize) + chunks := make([]chunk, 0, minChunkCount+len(messageParts)) + chunkIndex := uint32(0) + for _, m := range messageParts { + offset := 0 + remaining := len(m) + for remaining > 0 { + dataLength := remaining + if dataLength > maxDataLength { + dataLength = maxDataLength + } + chunkX := chunkIndex << 1 + c := chunk{ + chunkX: chunkX, + MessageID: messageID, + MessageLength: messageLength, + Data: m[offset : offset+dataLength], + } + chunks = append(chunks, c) + remaining -= dataLength + offset += dataLength + chunkIndex++ + } + } + // Set chunkX of first chunk + if len(chunks) == 1 { + chunks[0].chunkX = 3 + } else { + chunks[0].chunkX = uint32((len(chunks) << 1) + 1) + } + return chunks, nil +} + +// readBytes tries to read len(dst) bytes into dst. 
+func readBytes(dst []byte, r io.Reader) error { + offset := 0 + remaining := len(dst) + for { + n, err := r.Read(dst[offset:]) + offset += n + remaining -= n + if remaining == 0 { + return nil + } + if err != nil { + return err + } + } +} + +// IsFirst returns true when the "first chunk" flag has been set. +func (c chunk) IsFirst() bool { + return (c.chunkX & 0x01) == 1 +} + +// Index return the index of this chunk in the message. +func (c chunk) Index() uint32 { + if (c.chunkX & 0x01) == 1 { + return 0 + } + return c.chunkX >> 1 +} + +// NumberOfChunks return the number of chunks that make up the entire message. +// This function is only valid for first chunks. +func (c chunk) NumberOfChunks() uint32 { + if (c.chunkX & 0x01) == 1 { + return c.chunkX >> 1 + } + return 0 // Not known +} diff --git a/deps/github.com/arangodb/go-driver/vst/protocol/chunk_1_0.go b/deps/github.com/arangodb/go-driver/vst/protocol/chunk_1_0.go new file mode 100644 index 000000000..974b1dbeb --- /dev/null +++ b/deps/github.com/arangodb/go-driver/vst/protocol/chunk_1_0.go @@ -0,0 +1,102 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package protocol + +import ( + "encoding/binary" + "fmt" + "io" + + driver "github.com/arangodb/go-driver" +) + +// readChunkVST1_0 reads an entire chunk from the given reader in VST 1.0 format. +func readChunkVST1_0(r io.Reader) (chunk, error) { + hdr := [maxChunkHeaderSize]byte{} + if err := readBytes(hdr[:minChunkHeaderSize], r); err != nil { + return chunk{}, driver.WithStack(err) + } + le := binary.LittleEndian + length := le.Uint32(hdr[0:]) + chunkX := le.Uint32(hdr[4:]) + messageID := le.Uint64(hdr[8:]) + var messageLength uint64 + contentLength := length - minChunkHeaderSize + + if (1 == (chunkX & 0x1)) && ((chunkX >> 1) > 1) { + // First chunk, numberOfChunks>1 -> read messageLength + fmt.Println("Reading maxHdr") + if err := readBytes(hdr[minChunkHeaderSize:], r); err != nil { + return chunk{}, driver.WithStack(err) + } + messageLength = le.Uint64(hdr[16:]) + contentLength = length - maxChunkHeaderSize + } + + data := make([]byte, contentLength) + if err := readBytes(data, r); err != nil { + return chunk{}, driver.WithStack(err) + } + //fmt.Printf("data: " + hex.EncodeToString(data) + "\n") + return chunk{ + chunkX: chunkX, + MessageID: messageID, + MessageLength: messageLength, + Data: data, + }, nil +} + +// WriteToVST1_0 write the chunk to the given writer in VST 1.0 format. +// An error is returned when less than the entire chunk was written. 
+func (c chunk) WriteToVST1_0(w io.Writer) (int64, error) { + le := binary.LittleEndian + hdrArr := [maxChunkHeaderSize]byte{} + var hdr []byte + if c.IsFirst() && c.NumberOfChunks() > 1 { + // Use extended header + hdr = hdrArr[:maxChunkHeaderSize] + le.PutUint64(hdr[16:], c.MessageLength) // message length + } else { + // Use minimal header + hdr = hdrArr[:minChunkHeaderSize] + } + + le.PutUint32(hdr[0:], uint32(len(c.Data)+len(hdr))) // length + le.PutUint32(hdr[4:], c.chunkX) // chunkX + le.PutUint64(hdr[8:], c.MessageID) // message ID + + // Write header + //fmt.Printf("Writing hdr: %s\n", hex.EncodeToString(hdr)) + if n, err := w.Write(hdr); err != nil { + return int64(n), driver.WithStack(err) + } + + // Write data + //fmt.Printf("Writing data: %s\n", hex.EncodeToString(c.Data)) + n, err := w.Write(c.Data) + result := int64(n) + int64(len(hdr)) + if err != nil { + return result, driver.WithStack(err) + } + return result, nil +} diff --git a/deps/github.com/arangodb/go-driver/vst/protocol/chunk_1_1.go b/deps/github.com/arangodb/go-driver/vst/protocol/chunk_1_1.go new file mode 100644 index 000000000..8264c106b --- /dev/null +++ b/deps/github.com/arangodb/go-driver/vst/protocol/chunk_1_1.go @@ -0,0 +1,83 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package protocol + +import ( + "encoding/binary" + "io" + + driver "github.com/arangodb/go-driver" +) + +// readChunkVST1_1 reads an entire chunk from the given reader in VST 1.1 format. +func readChunkVST1_1(r io.Reader) (chunk, error) { + hdr := [maxChunkHeaderSize]byte{} + if err := readBytes(hdr[:maxChunkHeaderSize], r); err != nil { + return chunk{}, driver.WithStack(err) + } + le := binary.LittleEndian + length := le.Uint32(hdr[0:]) + chunkX := le.Uint32(hdr[4:]) + messageID := le.Uint64(hdr[8:]) + messageLength := le.Uint64(hdr[16:]) + contentLength := length - maxChunkHeaderSize + + data := make([]byte, contentLength) + if err := readBytes(data, r); err != nil { + return chunk{}, driver.WithStack(err) + } + //fmt.Printf("data: " + hex.EncodeToString(data) + "\n") + return chunk{ + chunkX: chunkX, + MessageID: messageID, + MessageLength: messageLength, + Data: data, + }, nil +} + +// WriteToVST1_1 write the chunk to the given writer in VST 1.0 format. +// An error is returned when less than the entire chunk was written. 
+func (c chunk) WriteToVST1_1(w io.Writer) (int64, error) { + le := binary.LittleEndian + hdr := [maxChunkHeaderSize]byte{} + + le.PutUint32(hdr[0:], uint32(len(c.Data)+len(hdr))) // length + le.PutUint32(hdr[4:], c.chunkX) // chunkX + le.PutUint64(hdr[8:], c.MessageID) // message ID + le.PutUint64(hdr[16:], c.MessageLength) // message length + + // Write header + //fmt.Printf("Writing hdr: %s\n", hex.EncodeToString(hdr)) + if n, err := w.Write(hdr[:]); err != nil { + return int64(n), driver.WithStack(err) + } + + // Write data + //fmt.Printf("Writing data: %s\n", hex.EncodeToString(c.Data)) + n, err := w.Write(c.Data) + result := int64(n) + int64(len(hdr)) + if err != nil { + return result, driver.WithStack(err) + } + return result, nil +} diff --git a/deps/github.com/arangodb/go-driver/vst/protocol/chunk_test.go b/deps/github.com/arangodb/go-driver/vst/protocol/chunk_test.go new file mode 100644 index 000000000..06d3cfb09 --- /dev/null +++ b/deps/github.com/arangodb/go-driver/vst/protocol/chunk_test.go @@ -0,0 +1,158 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package protocol + +import ( + "bytes" + "encoding/hex" + "reflect" + "testing" +) + +type readChunksTest struct { + ChunkHex string + MessageID uint64 + MessageLength uint64 + IsFirst bool + Index uint32 + NumberOfChunks uint32 + Data []byte +} + +func TestReadChunk(t *testing.T) { + tests := []readChunksTest{ + readChunksTest{ + ChunkHex: "1b0000000900000037020000000000000c00000000000000010203", + MessageID: 567, + MessageLength: 12, + IsFirst: true, + Index: 0, + NumberOfChunks: 4, + Data: []byte{1, 2, 3}, + }, + readChunksTest{ + ChunkHex: "1b0000000200000037020000000000000c00000000000000040506", + MessageID: 567, + MessageLength: 12, + IsFirst: false, + Index: 1, + NumberOfChunks: 0, + Data: []byte{4, 5, 6}, + }, + } + + for _, test := range tests { + raw, err := hex.DecodeString(test.ChunkHex) + if err != nil { + t.Fatalf("Hex decode failed: %#v", err) + } + r := bytes.NewReader(raw) + c, err := readChunk(r) + if err != nil { + t.Errorf("ReadChunk for '%s' failed: %#v", test.ChunkHex, err) + } + if c.IsFirst() != test.IsFirst { + t.Errorf("IsFirst for '%s' is invalid. \nGot '%v'\nExpected '%v'", test.ChunkHex, c.IsFirst(), test.IsFirst) + } + if c.Index() != test.Index { + t.Errorf("Index for '%s' is invalid. \nGot '%v'\nExpected '%v'", test.ChunkHex, c.Index(), test.Index) + } + if c.NumberOfChunks() != test.NumberOfChunks { + t.Errorf("NumberOfChunks for '%s' is invalid. \nGot '%v'\nExpected '%v'", test.ChunkHex, c.NumberOfChunks(), test.NumberOfChunks) + } + if c.MessageID != test.MessageID { + t.Errorf("MessageID for '%s' is invalid. \nGot '%v'\nExpected '%v'", test.ChunkHex, c.MessageID, test.MessageID) + } + if c.MessageLength != test.MessageLength { + t.Errorf("MessageLength for '%s' is invalid. 
\nGot '%v'\nExpected '%v'", test.ChunkHex, c.MessageLength, test.MessageLength) + } + if !reflect.DeepEqual(c.Data, test.Data) { + t.Errorf("Data for '%s' is invalid. \nGot '%v'\nExpected '%v'", test.ChunkHex, c.Data, test.Data) + } + } +} + +type buildChunksTest struct { + MessageID uint64 + MaxChunkSize uint32 + MessageParts [][]byte + ExpectedChunksHex []string +} + +func TestBuildChunks(t *testing.T) { + tests := []buildChunksTest{ + buildChunksTest{ + MessageID: 567, + MaxChunkSize: 24 + 3, + MessageParts: [][]byte{ + []byte{1, 2, 3}, + []byte{4, 5, 6}, + []byte{7, 8, 9, 10, 11, 12}, + }, + ExpectedChunksHex: []string{ + "1b0000000900000037020000000000000c00000000000000010203", + "1b0000000200000037020000000000000c00000000000000040506", + "1b0000000400000037020000000000000c00000000000000070809", + "1b0000000600000037020000000000000c000000000000000a0b0c", + }, + }, + buildChunksTest{ + MessageID: 567, + MaxChunkSize: 24 + 6, + MessageParts: [][]byte{ + []byte{1, 2, 3}, + []byte{4, 5, 6}, + []byte{7, 8, 9, 10, 11, 12}, + }, + ExpectedChunksHex: []string{ + "1b0000000700000037020000000000000c00000000000000010203", + "1b0000000200000037020000000000000c00000000000000040506", + "1e0000000400000037020000000000000c000000000000000708090a0b0c", + }, + }, + } + + for _, test := range tests { + chunks, err := buildChunks(test.MessageID, test.MaxChunkSize, test.MessageParts...) + if err != nil { + t.Fatalf("BuildChunks failed: %#v", err) + } + + if len(chunks) != len(test.ExpectedChunksHex) { + t.Errorf("Expected %d chunks, got %d", len(test.ExpectedChunksHex), len(chunks)) + } + for i, expected := range test.ExpectedChunksHex { + if i >= len(chunks) { + continue + } + var buf bytes.Buffer + if _, err := chunks[i].WriteTo(&buf); err != nil { + t.Errorf("Failed to write chunk %d: %#v", i, err) + } + actual := hex.EncodeToString(buf.Bytes()) + if expected != actual { + t.Errorf("Chunk %d is invalid. 
\nGot '%s'\nExpected '%s'", i, actual, expected) + } + } + } +} diff --git a/deps/github.com/arangodb/go-driver/vst/protocol/connection.go b/deps/github.com/arangodb/go-driver/vst/protocol/connection.go new file mode 100644 index 000000000..927f1aae2 --- /dev/null +++ b/deps/github.com/arangodb/go-driver/vst/protocol/connection.go @@ -0,0 +1,270 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package protocol + +import ( + "context" + "crypto/tls" + "fmt" + "io" + "net" + "sync" + "sync/atomic" + "time" + + driver "github.com/arangodb/go-driver" +) + +// Connection is a single socket connection to a server. +type Connection struct { + version Version + lastMessageID uint64 + maxChunkSize uint32 + msgStore messageStore + conn net.Conn + writeMutex sync.Mutex + closing bool + lastActivity time.Time +} + +const ( + defaultMaxChunkSize = 30000 +) + +var ( + vstProtocolHeader1_0 = []byte("VST/1.0\r\n\r\n") + vstProtocolHeader1_1 = []byte("VST/1.1\r\n\r\n") +) + +// dial opens a new connection to the server on the given address. 
+func dial(version Version, addr string, tlsConfig *tls.Config) (*Connection, error) { + var conn net.Conn + var err error + if tlsConfig != nil { + conn, err = tls.Dial("tcp", addr, tlsConfig) + } else { + conn, err = net.Dial("tcp", addr) + } + if err != nil { + return nil, driver.WithStack(err) + } + + // Configure connection + if tcpConn, ok := conn.(*net.TCPConn); ok { + tcpConn.SetKeepAlive(true) + tcpConn.SetNoDelay(true) + } + + // Send protocol header + switch version { + case Version1_0: + if _, err := conn.Write(vstProtocolHeader1_0); err != nil { + return nil, driver.WithStack(err) + } + case Version1_1: + if _, err := conn.Write(vstProtocolHeader1_1); err != nil { + return nil, driver.WithStack(err) + } + default: + return nil, driver.WithStack(fmt.Errorf("Unknown protocol version %d", int(version))) + } + + // prepare connection + c := &Connection{ + version: version, + maxChunkSize: defaultMaxChunkSize, + conn: conn, + } + c.updateLastActivity() + + // Start reading responses + go c.readChunkLoop() + + return c, nil +} + +// Close the connection to the server +func (c *Connection) Close() error { + if !c.closing { + c.closing = true + if err := c.conn.Close(); err != nil { + return driver.WithStack(err) + } + c.msgStore.ForEach(func(m *Message) { + if m.response != nil { + close(m.response) + m.response = nil + } + }) + } + return nil +} + +// IsClosed returns true when the connection is closed, false otherwise. +func (c *Connection) IsClosed() bool { + return c.closing +} + +// Send sends a message (consisting of given parts) to the server and returns +// a channel on which the response will be delivered. +// When the connection is closed before a response was received, the returned +// channel will be closed. 
+func (c *Connection) Send(ctx context.Context, messageParts ...[]byte) (<-chan Message, error) { + if ctx == nil { + ctx = context.Background() + } + msgID := atomic.AddUint64(&c.lastMessageID, 1) + chunks, err := buildChunks(msgID, c.maxChunkSize, messageParts...) + if err != nil { + return nil, driver.WithStack(err) + } + // Prepare for receiving a response + m := c.msgStore.Add(msgID) + + //panic(fmt.Sprintf("chunks: %d, messageParts: %d, first: %s", len(chunks), len(messageParts), hex.EncodeToString(messageParts[0]))) + + // Send all chunks + sendErrors := make(chan error) + deadline, ok := ctx.Deadline() + if !ok { + deadline = time.Time{} + } + go func() { + defer close(sendErrors) + for _, chunk := range chunks { + if err := c.sendChunk(deadline, chunk); err != nil { + // Cancel response + c.msgStore.Remove(msgID) + // Return error + sendErrors <- driver.WithStack(err) + return + } + } + }() + + // Wait for sending to be ready, or context to be cancelled. + select { + case err := <-sendErrors: + if err != nil { + return nil, driver.WithStack(err) + } + return m.response, nil + case <-ctx.Done(): + return nil, ctx.Err() + } +} + +// sendChunk sends a single chunk to the server. +func (c *Connection) sendChunk(deadline time.Time, chunk chunk) error { + c.writeMutex.Lock() + defer c.writeMutex.Unlock() + + c.conn.SetWriteDeadline(deadline) + var err error + switch c.version { + case Version1_0: + _, err = chunk.WriteToVST1_0(c.conn) + case Version1_1: + _, err = chunk.WriteToVST1_1(c.conn) + default: + err = driver.WithStack(fmt.Errorf("Unknown protocol version %d", int(c.version))) + } + c.updateLastActivity() + if err != nil { + return driver.WithStack(err) + } + return nil +} + +// readChunkLoop reads chunks from the connection until it is closed. 
+func (c *Connection) readChunkLoop() { + for { + if c.closing { + // Closing, we're done + return + } + var chunk chunk + var err error + switch c.version { + case Version1_0: + chunk, err = readChunkVST1_0(c.conn) + case Version1_1: + chunk, err = readChunkVST1_1(c.conn) + default: + err = driver.WithStack(fmt.Errorf("Unknown protocol version %d", int(c.version))) + } + c.updateLastActivity() + if err != nil { + if !c.closing { + // Handle error + if err == io.EOF { + // Connection closed + c.Close() + } else { + fmt.Printf("readChunkLoop error: %#v\n", err) + } + } + } else { + // Process chunk + go c.processChunk(chunk) + } + } +} + +// processChunk adds the given chunk to its message and notifies the listener +// when the message is complete. +func (c *Connection) processChunk(chunk chunk) { + m := c.msgStore.Get(chunk.MessageID) + if m == nil { + // Unexpected chunk, ignore it + return + } + + // Add chunk to message + m.addChunk(chunk) + + // Try to assembly + if m.assemble() { + // Message is complete + // Remove message from store + c.msgStore.Remove(m.ID) + + //fmt.Println("Chunk: " + hex.EncodeToString(chunk.Data) + "\nMessage: " + hex.EncodeToString(m.Data)) + + // Notify listener + if m.response != nil { + m.response <- *m + close(m.response) + } + } +} + +// updateLastActivity sets the lastActivity field to the current time. +func (c *Connection) updateLastActivity() { + c.lastActivity = time.Now() +} + +// IsIdle returns true when the last activity was more than the given timeout ago. 
+func (c *Connection) IsIdle(idleTimeout time.Duration) bool { + return time.Since(c.lastActivity) > idleTimeout +} diff --git a/deps/github.com/arangodb/go-driver/vst/protocol/message.go b/deps/github.com/arangodb/go-driver/vst/protocol/message.go new file mode 100644 index 000000000..efbaccc8e --- /dev/null +++ b/deps/github.com/arangodb/go-driver/vst/protocol/message.go @@ -0,0 +1,98 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package protocol + +import "sort" + +// Message is what is send back to the client in response to a request. +type Message struct { + ID uint64 + Data []byte + + chunks []chunk + numberOfChunks uint32 + response chan Message +} + +// addChunk adds the given chunks to the list of chunks of the message. +// If the given chunk is the first chunk, the expected number of chunks is recorded. +func (m *Message) addChunk(c chunk) { + m.chunks = append(m.chunks, c) + if c.IsFirst() { + m.numberOfChunks = c.NumberOfChunks() + } +} + +// assemble tries to assemble the message data from all chunks. +// If not all chunks are available yet, nothing is done and false +// is returned. +// If all chunks are available, the Data field is build and set and true is returned. 
+func (m *Message) assemble() bool { + if m.Data != nil { + // Already assembled + return true + } + if m.numberOfChunks == 0 { + // We don't have the first chunk yet + return false + } + if len(m.chunks) < int(m.numberOfChunks) { + // Not all chunks have arrived yet + return false + } + + // Fast path, only 1 chunk + if m.numberOfChunks == 1 { + m.Data = m.chunks[0].Data + return true + } + + // Sort chunks by index + sort.Sort(chunkByIndex(m.chunks)) + + // Build data buffer and copy chunks into it + data := make([]byte, m.chunks[0].MessageLength) + offset := 0 + for _, c := range m.chunks { + copy(data[offset:], c.Data) + offset += len(c.Data) + } + m.Data = data + return true +} + +type chunkByIndex []chunk + +// Len is the number of elements in the collection. +func (l chunkByIndex) Len() int { return len(l) } + +// Less reports whether the element with +// index i should sort before the element with index j. +func (l chunkByIndex) Less(i, j int) bool { + ii := l[i].Index() + ij := l[j].Index() + return ii < ij +} + +// Swap swaps the elements with indexes i and j. +func (l chunkByIndex) Swap(i, j int) { l[i], l[j] = l[j], l[i] } diff --git a/deps/github.com/arangodb/go-driver/vst/protocol/message_store.go b/deps/github.com/arangodb/go-driver/vst/protocol/message_store.go new file mode 100644 index 000000000..5c60d4bf5 --- /dev/null +++ b/deps/github.com/arangodb/go-driver/vst/protocol/message_store.go @@ -0,0 +1,84 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package protocol + +import ( + "fmt" + "sync" +) + +type messageStore struct { + mutex sync.RWMutex + messages map[uint64]*Message +} + +// Get returns the message with given id, or nil if not found +func (s *messageStore) Get(id uint64) *Message { + s.mutex.RLock() + defer s.mutex.RUnlock() + + m, ok := s.messages[id] + if ok { + return m + } + return nil +} + +// Add adds a new message to the store with given ID. +// If the ID is not unique this function will panic. +func (s *messageStore) Add(id uint64) *Message { + s.mutex.Lock() + defer s.mutex.Unlock() + + if s.messages == nil { + s.messages = make(map[uint64]*Message) + } + if _, ok := s.messages[id]; ok { + panic(fmt.Sprintf("ID %v is not unique", id)) + } + + m := &Message{ + ID: id, + response: make(chan Message), + } + s.messages[id] = m + return m +} + +// Remove removes the message with given ID from the store. +func (s *messageStore) Remove(id uint64) { + s.mutex.Lock() + defer s.mutex.Unlock() + + delete(s.messages, id) +} + +// ForEach calls the given function for each message in the store. +func (s *messageStore) ForEach(cb func(*Message)) { + s.mutex.RLock() + defer s.mutex.RUnlock() + + for _, m := range s.messages { + cb(m) + } +} diff --git a/deps/github.com/arangodb/go-driver/vst/protocol/protocol_version.go b/deps/github.com/arangodb/go-driver/vst/protocol/protocol_version.go new file mode 100644 index 000000000..7268fd6ce --- /dev/null +++ b/deps/github.com/arangodb/go-driver/vst/protocol/protocol_version.go @@ -0,0 +1,31 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package protocol + +// Version indicates the version of the Velocystream protocol +type Version int + +const ( + Version1_0 Version = iota // VST 1.0 + Version1_1 // VST 1.1 +) diff --git a/deps/github.com/arangodb/go-driver/vst/protocol/transport.go b/deps/github.com/arangodb/go-driver/vst/protocol/transport.go new file mode 100644 index 000000000..a943bc60f --- /dev/null +++ b/deps/github.com/arangodb/go-driver/vst/protocol/transport.go @@ -0,0 +1,198 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package protocol + +import ( + "context" + "crypto/tls" + "sync" + "time" + + driver "github.com/arangodb/go-driver" +) + +const ( + DefaultIdleConnTimeout = time.Minute +) + +// TransportConfig contains configuration options for Transport. 
+type TransportConfig struct { + // IdleConnTimeout is the maximum amount of time an idle + // (keep-alive) connection will remain idle before closing + // itself. + // Zero means no limit. + IdleConnTimeout time.Duration + + // Version specifies the version of the Velocystream protocol + Version Version +} + +// Transport manages client-server connections using the VST protocol to a specific host. +type Transport struct { + TransportConfig + + hostAddr string + tlsConfig *tls.Config + connMutex sync.Mutex + connections []*Connection + onConnectionCreated func(context.Context, *Connection) error +} + +// NewTransport creates a new Transport for given address & tls settings. +func NewTransport(hostAddr string, tlsConfig *tls.Config, config TransportConfig) *Transport { + if config.IdleConnTimeout == 0 { + config.IdleConnTimeout = DefaultIdleConnTimeout + } + return &Transport{ + TransportConfig: config, + hostAddr: hostAddr, + tlsConfig: tlsConfig, + } +} + +// Send sends a message (consisting of given parts) to the server and returns +// a channel on which the response will be delivered. +// When the connection is closed before a response was received, the returned +// channel will be closed. +func (c *Transport) Send(ctx context.Context, messageParts ...[]byte) (<-chan Message, error) { + conn, err := c.getConnection(ctx) + if err != nil { + return nil, driver.WithStack(err) + } + result, err := conn.Send(ctx, messageParts...) + if err != nil { + return nil, driver.WithStack(err) + } + return result, nil +} + +// CloseIdleConnections closes all connections which are closed or have been idle for more than the configured idle timeout. +func (c *Transport) CloseIdleConnections() (closed, remaining int) { + c.connMutex.Lock() + defer c.connMutex.Unlock() + + for i, conn := range c.connections { + if conn.IsClosed() || conn.IsIdle(c.IdleConnTimeout) { + // Remove connection from list + c.connections = append(c.connections[:i], c.connections[i+1:]...) 
+ // Close connection + go conn.Close() + closed++ + } + } + + remaining = len(c.connections) + return closed, remaining +} + +// CloseAllConnections closes all connections. +func (c *Transport) CloseAllConnections() { + c.connMutex.Lock() + defer c.connMutex.Unlock() + + for _, conn := range c.connections { + // Close connection + go conn.Close() + } +} + +// SetOnConnectionCreated stores a callback function that is called every time a new connection has been created. +func (c *Transport) SetOnConnectionCreated(handler func(context.Context, *Connection) error) { + c.onConnectionCreated = handler +} + +// getConnection returns the first available connection, or when no such connection is available, +// is created a new connection. +func (c *Transport) getConnection(ctx context.Context) (*Connection, error) { + conn := c.getAvailableConnection() + if conn != nil { + return conn, nil + } + + // No connections available, make a new one + conn, err := c.createConnection() + if err != nil { + if conn != nil { + conn.Close() + } + return nil, driver.WithStack(err) + } + + // Invoke callback + if cb := c.onConnectionCreated; cb != nil { + if err := cb(ctx, conn); err != nil { + return nil, driver.WithStack(err) + } + } + + return conn, nil +} + +// getAvailableConnection returns the first available connection. +// If no such connection is available, nil is returned. +func (c *Transport) getAvailableConnection() *Connection { + c.connMutex.Lock() + defer c.connMutex.Unlock() + + for _, conn := range c.connections { + if !conn.IsClosed() { + conn.updateLastActivity() + return conn + } + } + + // No connections available + return nil +} + +// createConnection creates a new connection. 
+func (c *Transport) createConnection() (*Connection, error) { + conn, err := dial(c.Version, c.hostAddr, c.tlsConfig) + if err != nil { + return nil, driver.WithStack(err) + } + + // Record connection + c.connMutex.Lock() + c.connections = append(c.connections, conn) + startCleanup := len(c.connections) == 1 + c.connMutex.Unlock() + + if startCleanup { + // TODO enable cleanup + go c.cleanup() + } + + return conn, nil +} + +// cleanup keeps removing idle connections +func (c *Transport) cleanup() { + for { + time.Sleep(c.IdleConnTimeout / 10) + remaining, _ := c.CloseIdleConnections() + if remaining == 0 { + return + } + } +} diff --git a/deps/github.com/arangodb/go-driver/vst/request.go b/deps/github.com/arangodb/go-driver/vst/request.go new file mode 100644 index 000000000..c88d8d35c --- /dev/null +++ b/deps/github.com/arangodb/go-driver/vst/request.go @@ -0,0 +1,294 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package vst + +import ( + "bytes" + "fmt" + "net/url" + "reflect" + "strings" + + driver "github.com/arangodb/go-driver" + velocypack "github.com/arangodb/go-velocypack" +) + +// vstRequest implements driver.Request using Velocystream. 
+type vstRequest struct { + method string + path string + q url.Values + hdr map[string]string + body []byte + written bool +} + +// Clone creates a new request containing the same data as this request +func (r *vstRequest) Clone() driver.Request { + clone := *r + clone.q = url.Values{} + for k, v := range r.q { + for _, x := range v { + clone.q.Add(k, x) + } + } + if clone.hdr != nil { + clone.hdr = make(map[string]string) + for k, v := range r.hdr { + clone.hdr[k] = v + } + } + return &clone +} + +// SetQuery sets a single query argument of the request. +// Any existing query argument with the same key is overwritten. +func (r *vstRequest) SetQuery(key, value string) driver.Request { + if r.q == nil { + r.q = url.Values{} + } + r.q.Set(key, value) + return r +} + +// SetBody sets the content of the request. +// The protocol of the connection determines what kinds of marshalling is taking place. +// When multiple bodies are given, they are merged, with fields in the first document prevailing. +func (r *vstRequest) SetBody(body ...interface{}) (driver.Request, error) { + switch len(body) { + case 0: + return r, driver.WithStack(fmt.Errorf("Must provide at least 1 body")) + case 1: + if data, err := velocypack.Marshal(body[0]); err != nil { + return r, driver.WithStack(err) + } else { + r.body = data + } + return r, nil + default: + slices := make([]velocypack.Slice, len(body)) + for i, b := range body { + var err error + slices[i], err = velocypack.Marshal(b) + if err != nil { + return r, driver.WithStack(err) + } + } + merged, err := velocypack.Merge(slices...) + if err != nil { + return r, driver.WithStack(err) + } + r.body = merged + return r, nil + } +} + +// SetBodyArray sets the content of the request as an array. +// If the given mergeArray is not nil, its elements are merged with the elements in the body array (mergeArray data overrides bodyArray data). +// The protocol of the connection determines what kinds of marshalling is taking place. 
+func (r *vstRequest) SetBodyArray(bodyArray interface{}, mergeArray []map[string]interface{}) (driver.Request, error) { + bodyArrayVal := reflect.ValueOf(bodyArray) + switch bodyArrayVal.Kind() { + case reflect.Array, reflect.Slice: + // OK + default: + return nil, driver.WithStack(driver.InvalidArgumentError{Message: fmt.Sprintf("bodyArray must be slice, got %s", bodyArrayVal.Kind())}) + } + if mergeArray == nil { + // Simple case; just marshal bodyArray directly. + if data, err := velocypack.Marshal(bodyArray); err != nil { + return r, driver.WithStack(err) + } else { + r.body = data + } + return r, nil + } + + // Complex case, mergeArray is not nil + b := velocypack.Builder{} + // Start array + if err := b.OpenArray(); err != nil { + return nil, driver.WithStack(err) + } + + elementCount := bodyArrayVal.Len() + for i := 0; i < elementCount; i++ { + // Marshal body element + bodySlice, err := velocypack.Marshal(bodyArrayVal.Index(i).Interface()) + if err != nil { + return nil, driver.WithStack(err) + } + var sliceToAdd velocypack.Slice + if maElem := mergeArray[i]; maElem != nil { + // Marshal merge array element + elemSlice, err := velocypack.Marshal(maElem) + if err != nil { + return nil, driver.WithStack(err) + } + // Merge elemSlice with bodySlice + sliceToAdd, err = velocypack.Merge(elemSlice, bodySlice) + if err != nil { + return nil, driver.WithStack(err) + } + } else { + // Just use bodySlice + sliceToAdd = bodySlice + } + + // Add resulting slice + if err := b.AddValue(velocypack.NewSliceValue(sliceToAdd)); err != nil { + return nil, driver.WithStack(err) + } + } + + // Close array + if err := b.Close(); err != nil { + return nil, driver.WithStack(err) + } + + // Get resulting slice + arraySlice, err := b.Slice() + if err != nil { + return nil, driver.WithStack(err) + } + r.body = arraySlice + + return r, nil +} + +// SetBodyImportArray sets the content of the request as an array formatted for importing documents. 
+// The protocol of the connection determines what kinds of marshalling is taking place. +func (r *vstRequest) SetBodyImportArray(bodyArray interface{}) (driver.Request, error) { + bodyArrayVal := reflect.ValueOf(bodyArray) + switch bodyArrayVal.Kind() { + case reflect.Array, reflect.Slice: + // OK + default: + return nil, driver.WithStack(driver.InvalidArgumentError{Message: fmt.Sprintf("bodyArray must be slice, got %s", bodyArrayVal.Kind())}) + } + // Render elements + buf := &bytes.Buffer{} + encoder := velocypack.NewEncoder(buf) + if err := encoder.Encode(bodyArray); err != nil { + return nil, driver.WithStack(err) + } + r.body = buf.Bytes() + r.SetQuery("type", "list") + return r, nil +} + +// SetHeader sets a single header arguments of the request. +// Any existing header argument with the same key is overwritten. +func (r *vstRequest) SetHeader(key, value string) driver.Request { + if r.hdr == nil { + r.hdr = make(map[string]string) + } + r.hdr[key] = value + return r +} + +// Written returns true as soon as this request has been written completely to the network. +// This does not guarantee that the server has received or processed the request. +func (r *vstRequest) Written() bool { + return r.written +} + +// WroteRequest sets written to true. +func (r *vstRequest) WroteRequest() { + r.written = true +} + +// createHTTPRequest creates a golang http.Request based on the configured arguments. 
+func (r *vstRequest) createMessageParts() ([][]byte, error) { + r.written = false + + // Build path & database + path := strings.TrimPrefix(r.path, "/") + databaseValue := velocypack.NewStringValue("_system") + if strings.HasPrefix(path, "_db/") { + path = path[4:] // Remove '_db/' + parts := strings.SplitN(path, "/", 2) + if len(parts) == 1 { + databaseValue = velocypack.NewStringValue(parts[0]) + path = "" + } else { + databaseValue = velocypack.NewStringValue(parts[0]) + path = parts[1] + } + } + path = "/" + path + + // Create header + var b velocypack.Builder + b.OpenArray() + b.AddValue(velocypack.NewIntValue(1)) // Version + b.AddValue(velocypack.NewIntValue(1)) // Type (1=Req) + b.AddValue(databaseValue) // Database name + b.AddValue(velocypack.NewIntValue(r.requestType())) // Request type + b.AddValue(velocypack.NewStringValue(path)) // Request + b.OpenObject() // Parameters + for k, v := range r.q { + if len(v) > 0 { + b.AddKeyValue(k, velocypack.NewStringValue(v[0])) + } + } + b.Close() // Parameters + b.OpenObject() // Meta + for k, v := range r.hdr { + b.AddKeyValue(k, velocypack.NewStringValue(v)) + } + b.Close() // Meta + b.Close() // Header + + hdr, err := b.Bytes() + if err != nil { + return nil, driver.WithStack(err) + } + + if len(r.body) == 0 { + return [][]byte{hdr}, nil + } + return [][]byte{hdr, r.body}, nil +} + +// requestType converts method to request type. 
+func (r *vstRequest) requestType() int64 { + switch r.method { + case "DELETE": + return 0 + case "GET": + return 1 + case "POST": + return 2 + case "PUT": + return 3 + case "HEAD": + return 4 + case "PATCH": + return 5 + case "OPTIONS": + return 6 + default: + panic(fmt.Errorf("Unknown method '%s'", r.method)) + } +} diff --git a/deps/github.com/arangodb/go-driver/vst/response.go b/deps/github.com/arangodb/go-driver/vst/response.go new file mode 100644 index 000000000..e290e6c12 --- /dev/null +++ b/deps/github.com/arangodb/go-driver/vst/response.go @@ -0,0 +1,244 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package vst + +import ( + "fmt" + "strings" + "sync" + + driver "github.com/arangodb/go-driver" + "github.com/arangodb/go-driver/vst/protocol" + velocypack "github.com/arangodb/go-velocypack" +) + +// vstResponse implements driver.Response for Velocystream responses. +type vstResponse struct { + endpoint string + Version int + Type int + ResponseCode int + meta velocypack.Slice + metaMutex sync.Mutex + metaMap map[string]string + slice velocypack.Slice + bodyArray []driver.Response +} + +// newResponse builds a vstResponse from given message. 
+func newResponse(msg protocol.Message, endpoint string, rawResponse *[]byte) (*vstResponse, error) { + // Decode header + hdr := velocypack.Slice(msg.Data) + if err := hdr.AssertType(velocypack.Array); err != nil { + return nil, driver.WithStack(err) + } + //panic("hdr: " + hex.EncodeToString(hdr)) + var hdrLen velocypack.ValueLength + if l, err := hdr.Length(); err != nil { + return nil, driver.WithStack(err) + } else if l < 3 { + return nil, driver.WithStack(fmt.Errorf("Expected a header of 3 elements, got %d", l)) + } else { + hdrLen = l + } + + resp := &vstResponse{ + endpoint: endpoint, + } + // Decode version + if elem, err := hdr.At(0); err != nil { + return nil, driver.WithStack(err) + } else if version, err := elem.GetInt(); err != nil { + return nil, driver.WithStack(err) + } else { + resp.Version = int(version) + } + // Decode type + if elem, err := hdr.At(1); err != nil { + return nil, driver.WithStack(err) + } else if tp, err := elem.GetInt(); err != nil { + return nil, driver.WithStack(err) + } else { + resp.Type = int(tp) + } + // Decode responseCode + if elem, err := hdr.At(2); err != nil { + return nil, driver.WithStack(err) + } else if code, err := elem.GetInt(); err != nil { + return nil, driver.WithStack(err) + } else { + resp.ResponseCode = int(code) + } + // Decode meta + if hdrLen >= 4 { + if elem, err := hdr.At(3); err != nil { + return nil, driver.WithStack(err) + } else if !elem.IsObject() { + return nil, driver.WithStack(fmt.Errorf("Expected meta field to be of type Object, got %s", elem.Type())) + } else { + resp.meta = elem + } + } + + // Fetch body directly after hdr + if body, err := hdr.Next(); err != nil { + return nil, driver.WithStack(err) + } else { + resp.slice = body + if rawResponse != nil { + *rawResponse = body + } + } + //fmt.Printf("got response: code=%d, body=%s\n", resp.ResponseCode, hex.EncodeToString(resp.slice)) + return resp, nil +} + +// StatusCode returns an HTTP compatible status code of the response. 
+func (r *vstResponse) StatusCode() int { + return r.ResponseCode +} + +// Endpoint returns the endpoint that handled the request. +func (r *vstResponse) Endpoint() string { + return r.endpoint +} + +// CheckStatus checks if the status of the response equals to one of the given status codes. +// If so, nil is returned. +// If not, an attempt is made to parse an error response in the body and an error is returned. +func (r *vstResponse) CheckStatus(validStatusCodes ...int) error { + for _, x := range validStatusCodes { + if x == r.ResponseCode { + // Found valid status code + return nil + } + } + // Invalid status code, try to parse arango error response. + var aerr driver.ArangoError + if err := r.ParseBody("", &aerr); err == nil { + // Found correct arango error. + return aerr + } + + // We do not have a valid error code, so we can only create one based on the HTTP status code. + return driver.ArangoError{ + HasError: true, + Code: r.ResponseCode, + ErrorMessage: fmt.Sprintf("Unexpected status code %d", r.ResponseCode), + } +} + +// Header returns the value of a response header with given key. +// If no such header is found, an empty string is returned. 
+func (r *vstResponse) Header(key string) string { + r.metaMutex.Lock() + defer r.metaMutex.Unlock() + + if r.meta != nil { + if r.metaMap == nil { + // Read all headers + metaMap := make(map[string]string) + keyCount, err := r.meta.Length() + if err != nil { + return "" + } + for k := velocypack.ValueLength(0); k < keyCount; k++ { + key, err := r.meta.KeyAt(k) + if err != nil { + continue + } + value, err := r.meta.ValueAt(k) + if err != nil { + continue + } + keyStr, err := key.GetString() + if err != nil { + continue + } + valueStr, err := value.GetString() + if err != nil { + continue + } + metaMap[strings.ToLower(keyStr)] = valueStr + } + r.metaMap = metaMap + } + key = strings.ToLower(key) + if value, found := r.metaMap[key]; found { + return value + } + } + return "" +} + +// ParseBody performs protocol specific unmarshalling of the response data into the given result. +// If the given field is non-empty, the contents of that field will be parsed into the given result. +func (r *vstResponse) ParseBody(field string, result interface{}) error { + slice := r.slice + if field != "" { + var err error + slice, err = slice.Get(field) + if err != nil { + return driver.WithStack(err) + } + if slice.IsNone() { + // Field not found + return nil + } + } + if result != nil { + if err := velocypack.Unmarshal(slice, result); err != nil { + return driver.WithStack(err) + } + } + return nil +} + +// ParseArrayBody performs protocol specific unmarshalling of the response array data into individual response objects. +// This can only be used for requests that return an array of objects. 
+func (r *vstResponse) ParseArrayBody() ([]driver.Response, error) { + if r.bodyArray == nil { + slice := r.slice + l, err := slice.Length() + if err != nil { + return nil, driver.WithStack(err) + } + + bodyArray := make([]driver.Response, 0, l) + it, err := velocypack.NewArrayIterator(slice) + if err != nil { + return nil, driver.WithStack(err) + } + for it.IsValid() { + v, err := it.Value() + if err != nil { + return nil, driver.WithStack(err) + } + bodyArray = append(bodyArray, &vstResponseElement{slice: v}) + it.Next() + } + r.bodyArray = bodyArray + } + + return r.bodyArray, nil +} diff --git a/deps/github.com/arangodb/go-driver/vst/response_element.go b/deps/github.com/arangodb/go-driver/vst/response_element.go new file mode 100644 index 000000000..4120cf2df --- /dev/null +++ b/deps/github.com/arangodb/go-driver/vst/response_element.go @@ -0,0 +1,123 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package vst + +import ( + "fmt" + + driver "github.com/arangodb/go-driver" + velocypack "github.com/arangodb/go-velocypack" +) + +// vstResponseElement implements driver.Response for an entry of an array response. +type vstResponseElement struct { + statusCode *int + slice velocypack.Slice +} + +// StatusCode returns an HTTP compatible status code of the response. 
+func (r *vstResponseElement) StatusCode() int { + if r.statusCode == nil { + statusCode := 200 + // Look for "error" field + if errorFieldSlice, _ := r.slice.Get("error"); !errorFieldSlice.IsNone() { + if hasError, err := errorFieldSlice.GetBool(); err == nil && hasError { + // We have an error, look for code field + statusCode = 500 + if codeFieldSlice, _ := r.slice.Get("code"); !codeFieldSlice.IsNone() { + if code, err := codeFieldSlice.GetInt(); err == nil { + statusCode = int(code) + } + } + } + } + r.statusCode = &statusCode + } + return *r.statusCode +} + +// Endpoint returns the endpoint that handled the request. +func (r *vstResponseElement) Endpoint() string { + return "" +} + +// CheckStatus checks if the status of the response equals to one of the given status codes. +// If so, nil is returned. +// If not, an attempt is made to parse an error response in the body and an error is returned. +func (r *vstResponseElement) CheckStatus(validStatusCodes ...int) error { + statusCode := r.StatusCode() + for _, x := range validStatusCodes { + if x == statusCode { + // Found valid status code + return nil + } + } + // Invalid status code, try to parse arango error response. + var aerr driver.ArangoError + if err := r.ParseBody("", &aerr); err == nil { + // Found correct arango error. + return aerr + } + + // We do not have a valid error code, so we can only create one based on the HTTP status code. + return driver.ArangoError{ + HasError: true, + Code: statusCode, + ErrorMessage: fmt.Sprintf("Unexpected status code %d", statusCode), + } +} + +// Header returns the value of a response header with given key. +// If no such header is found, an empty string is returned. +func (r *vstResponseElement) Header(key string) string { + return "" +} + +// ParseBody performs protocol specific unmarshalling of the response data into the given result. +// If the given field is non-empty, the contents of that field will be parsed into the given result. 
+func (r *vstResponseElement) ParseBody(field string, result interface{}) error { + slice := r.slice + if field != "" { + var err error + slice, err = slice.Get(field) + if err != nil { + return driver.WithStack(err) + } + if slice.IsNone() { + // Field not found + return nil + } + } + if result != nil { + if err := velocypack.Unmarshal(slice, result); err != nil { + return driver.WithStack(err) + } + } + return nil +} + +// ParseArrayBody performs protocol specific unmarshalling of the response array data into individual response objects. +// This can only be used for requests that return an array of objects. +func (r *vstResponseElement) ParseArrayBody() ([]driver.Response, error) { + return nil, driver.WithStack(driver.InvalidArgumentError{Message: "ParseArrayBody not allowed"}) +} diff --git a/deps/github.com/arangodb/go-velocypack/.envrc b/deps/github.com/arangodb/go-velocypack/.envrc new file mode 100644 index 000000000..143b78233 --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/.envrc @@ -0,0 +1,8 @@ +export GOBUILDDIR=$(pwd)/.gobuild +export GOPATH=$GOBUILDDIR:$GOPATH +PATH_add $GOBUILDDIR/bin + +if [ ! -e ${GOBUILDDIR} ]; then + mkdir -p ${GOBUILDDIR}/src/github.com/arangodb/ + ln -s ../../../.. 
${GOBUILDDIR}/src/github.com/arangodb/go-velocypack +fi \ No newline at end of file diff --git a/deps/github.com/arangodb/go-velocypack/.gitignore b/deps/github.com/arangodb/go-velocypack/.gitignore new file mode 100644 index 000000000..7e3401533 --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/.gitignore @@ -0,0 +1,4 @@ +.gobuild +coverage.out +cpu.out +test.test diff --git a/deps/github.com/arangodb/go-velocypack/.travis.yml b/deps/github.com/arangodb/go-velocypack/.travis.yml new file mode 100644 index 000000000..60975a7e7 --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/.travis.yml @@ -0,0 +1,8 @@ +sudo: required + +services: + - docker + +language: go + +script: make run-tests diff --git a/deps/github.com/arangodb/go-velocypack/.vscode/settings.json b/deps/github.com/arangodb/go-velocypack/.vscode/settings.json new file mode 100644 index 000000000..92424beb3 --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/.vscode/settings.json @@ -0,0 +1,36 @@ +// Place your settings in this file to overwrite default and user settings. 
+{ + "fileHeaderComment.parameter": { + "*": { + "commentprefix": "//", + "company": "ArangoDB GmbH, Cologne, Germany", + "author": "Ewout Prangsma" + } + }, + "fileHeaderComment.template": { + "*": [ + "${commentprefix} ", + "${commentprefix} DISCLAIMER", + "${commentprefix} ", + "${commentprefix} Copyright ${year} ArangoDB GmbH, Cologne, Germany", + "${commentprefix} ", + "${commentprefix} Licensed under the Apache License, Version 2.0 (the \"License\");", + "${commentprefix} you may not use this file except in compliance with the License.", + "${commentprefix} You may obtain a copy of the License at", + "${commentprefix} ", + "${commentprefix} http://www.apache.org/licenses/LICENSE-2.0", + "${commentprefix} ", + "${commentprefix} Unless required by applicable law or agreed to in writing, software", + "${commentprefix} distributed under the License is distributed on an \"AS IS\" BASIS,", + "${commentprefix} WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", + "${commentprefix} See the License for the specific language governing permissions and", + "${commentprefix} limitations under the License.", + "${commentprefix} ", + "${commentprefix} Copyright holder is ArangoDB GmbH, Cologne, Germany", + "${commentprefix} ", + "${commentprefix} Author ${author}", + "${commentprefix} ", + "" + ] + } +} \ No newline at end of file diff --git a/deps/github.com/arangodb/go-velocypack/LICENSE b/deps/github.com/arangodb/go-velocypack/LICENSE new file mode 100644 index 000000000..b8ff39b5a --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2017 ArangoDB GmbH + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/deps/github.com/arangodb/go-velocypack/Makefile b/deps/github.com/arangodb/go-velocypack/Makefile new file mode 100644 index 000000000..bdc8bf2cf --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/Makefile @@ -0,0 +1,78 @@ +PROJECT := go-velocypack +SCRIPTDIR := $(shell pwd) +ROOTDIR := $(shell cd $(SCRIPTDIR) && pwd) + +GOBUILDDIR := $(SCRIPTDIR)/.gobuild +GOVERSION := 1.7.5-alpine + +TESTOPTIONS := +ifdef VERBOSE + TESTOPTIONS := -v +endif + +ORGPATH := github.com/arangodb +ORGDIR := $(GOBUILDDIR)/src/$(ORGPATH) +REPONAME := $(PROJECT) +REPODIR := $(ORGDIR)/$(REPONAME) +REPOPATH := $(ORGPATH)/$(REPONAME) + +SOURCES := $(shell find . 
-name '*.go') + +.PHONY: all build clean run-tests show-coverage + +all: build + +build: $(GOBUILDDIR) $(SOURCES) + GOPATH=$(GOBUILDDIR) go build -v github.com/arangodb/go-velocypack + +clean: + rm -Rf $(GOBUILDDIR) + +$(GOBUILDDIR): + @mkdir -p $(ORGDIR) + @rm -f $(REPODIR) && ln -s ../../../.. $(REPODIR) + +# All unit tests +run-tests: $(GOBUILDDIR) + @GOPATH=$(GOBUILDDIR) go get github.com/stretchr/testify/assert + @docker run \ + --rm \ + -v $(ROOTDIR):/usr/code \ + -e GOPATH=/usr/code/.gobuild \ + -w /usr/code/ \ + golang:$(GOVERSION) \ + sh -c "go test -v $(REPOPATH)/test/runtime && go test $(TESTOPTIONS) $(REPOPATH) && go test -cover -coverpkg $(REPOPATH) -coverprofile=coverage.out $(TESTOPTIONS) $(REPOPATH)/test" + +# All benchmarks +run-benchmarks: $(GOBUILDDIR) + @GOPATH=$(GOBUILDDIR) go get github.com/stretchr/testify/assert + @docker run \ + --rm \ + -v $(ROOTDIR):/usr/code \ + -e GOPATH=/usr/code/.gobuild \ + -w /usr/code/ \ + golang:$(GOVERSION) \ + go test $(TESTOPTIONS) -bench=. -run=notests -cpu=1,2,4 $(REPOPATH)/test + +# All benchmarks using local profiling +run-benchmarks-prof: $(GOBUILDDIR) + @GOPATH=$(GOBUILDDIR) go get github.com/stretchr/testify/assert + @GOPATH=$(GOBUILDDIR) go test $(TESTOPTIONS) -bench=. 
-run=notests -cpu=1,2,4 -cpuprofile=cpu.out $(REPOPATH)/test + @echo Now profile using: go tool pprof test.test cpu.out + +# All unit tests using local Go tools +run-tests-local: $(GOBUILDDIR) + @GOPATH=$(GOBUILDDIR) go get github.com/stretchr/testify/assert + @GOPATH=$(GOBUILDDIR) go test -v $(REPOPATH)/test/runtime + @GOPATH=$(GOBUILDDIR) go test $(TESTOPTIONS) $(REPOPATH) + @GOPATH=$(GOBUILDDIR) go test -cover -coverpkg $(REPOPATH) -coverprofile=coverage.out $(TESTOPTIONS) $(REPOPATH)/test + +# All (except large memory) unit tests using local Go tools +run-tests-local-nolarge: $(GOBUILDDIR) + @GOPATH=$(GOBUILDDIR) go get github.com/stretchr/testify/assert + @GOPATH=$(GOBUILDDIR) go test -tags nolarge -v $(REPOPATH)/test/runtime + @GOPATH=$(GOBUILDDIR) go test -tags nolarge $(TESTOPTIONS) $(REPOPATH) + @GOPATH=$(GOBUILDDIR) go test -tags nolarge -cover -coverpkg $(REPOPATH) -coverprofile=coverage.out $(TESTOPTIONS) $(REPOPATH)/test + +show-coverage: run-tests + go tool cover -html coverage.out diff --git a/deps/github.com/arangodb/go-velocypack/README.md b/deps/github.com/arangodb/go-velocypack/README.md new file mode 100644 index 000000000..208321c2a --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/README.md @@ -0,0 +1,7 @@ +# ArangoDB VelocyPack Go implementation. + + +[![Build Status](https://travis-ci.org/arangodb/go-velocypack.svg?branch=master)](https://travis-ci.org/arangodb/go-velocypack) +[![GoDoc](https://godoc.org/github.com/arangodb/go-velocypack?status.svg)](http://godoc.org/github.com/arangodb/go-velocypack) + +NOTE: THIS IS WORK IN PROGRESS. 
// ---- deps/github.com/arangodb/go-velocypack/array_iterator.go ----
//
// DISCLAIMER
//
// Copyright 2017 ArangoDB GmbH, Cologne, Germany
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Copyright holder is ArangoDB GmbH, Cologne, Germany
//
// Author Ewout Prangsma
//

package velocypack

// ArrayIterator is a forward-only iterator over the elements of an Array slice.
type ArrayIterator struct {
	s        Slice       // the Array slice being iterated over
	position ValueLength // index (0-based) of the current element
	size     ValueLength // total number of elements in s
	current  Slice       // sub-slice at position, or nil when it must be computed via s.At
}

// NewArrayIterator initializes an iterator at position 0 of the given array slice.
// Returns an InvalidTypeError when s is not of type Array.
func NewArrayIterator(s Slice) (*ArrayIterator, error) {
	if !s.IsArray() {
		return nil, InvalidTypeError{"Expected Array slice"}
	}
	size, err := s.Length()
	if err != nil {
		return nil, WithStack(err)
	}
	i := &ArrayIterator{
		s:        s,
		position: 0,
		size:     size,
	}
	if size > 0 {
		// Pre-compute the first element so Value/Next can step from slice to
		// slice instead of falling back to indexed s.At lookups.
		i.current, err = s.At(0)
		if err != nil {
			return nil, WithStack(err)
		}
	}
	return i, nil
}

// IsValid returns true if the given position of the iterator is valid.
func (i *ArrayIterator) IsValid() bool {
	return i.position < i.size
}

// IsFirst returns true if the current position is 0.
func (i *ArrayIterator) IsFirst() bool {
	return i.position == 0
}

// Value returns the value of the current position of the iterator.
// Returns IndexOutOfBoundsError when the iterator has moved past the last element.
func (i *ArrayIterator) Value() (Slice, error) {
	if i.position >= i.size {
		return nil, WithStack(IndexOutOfBoundsError)
	}
	if current := i.current; current != nil {
		// Fast path: element already cached by NewArrayIterator or Next.
		return current, nil
	}
	value, err := i.s.At(i.position)
	return value, WithStack(err)
}

// Next moves to the next position.
func (i *ArrayIterator) Next() error {
	i.position++
	if i.position < i.size && i.current != nil {
		var err error
		// skip over entry
		i.current, err = i.current.Next()
		if err != nil {
			return WithStack(err)
		}
	} else {
		// Past the end, or no cached element to step from: drop the cache
		// so Value falls back to an indexed lookup.
		i.current = nil
	}
	return nil
}

// ---- deps/github.com/arangodb/go-velocypack/attribute_translator.go ----
// (carries the same Apache 2.0 license header as array_iterator.go above)

package velocypack

import "strconv"

// attributeTranslator is the package-wide translator used to map small
// integer object keys to well-known ArangoDB system attribute names.
var attributeTranslator attributeIDTranslator = &arangoAttributeIDTranslator{}

// attributeIDTranslator is used to translate integer style object keys to strings.
type attributeIDTranslator interface {
	// IDToString returns the string representation of the given attribute key id.
	IDToString(id uint64) string
}

// arangoAttributeIDTranslator maps the attribute ids reserved by ArangoDB
// (1..5) to the corresponding system attribute names; any other id is
// rendered as its decimal string.
type arangoAttributeIDTranslator struct{}

func (t *arangoAttributeIDTranslator) IDToString(id uint64) string {
	switch id {
	case 1:
		return "_key"
	case 2:
		return "_rev"
	case 3:
		return "_id"
	case 4:
		return "_from"
	case 5:
		return "_to"
	default:
		return strconv.FormatUint(id, 10)
	}
}

// ---- deps/github.com/arangodb/go-velocypack/builder.go ----
// (carries the same Apache 2.0 license header as the files above)

package velocypack

import (
	"encoding/binary"
	"fmt"
	"io"
	"math"
	"reflect"
)

// BuilderOptions contains options that influence how Builder builds slices.
type BuilderOptions struct {
	// BuildUnindexedArrays causes arrays to be closed in the compact
	// (unindexed) format when possible.
	BuildUnindexedArrays bool
	// BuildUnindexedObjects causes objects to be closed in the compact
	// (unindexed) format when possible.
	BuildUnindexedObjects bool
	// CheckAttributeUniqueness enables a uniqueness check on object keys
	// when an object is closed.
	CheckAttributeUniqueness bool
}

// Builder is used to build VPack structures.
+type Builder struct { + BuilderOptions + buf builderBuffer + stack builderStack + index []indexVector + keyWritten bool +} + +func NewBuilder(capacity uint) *Builder { + b := &Builder{ + buf: make(builderBuffer, 0, capacity), + } + return b +} + +// Clear and start from scratch: +func (b *Builder) Clear() { + b.buf = nil + b.stack.Clear() + b.keyWritten = false +} + +// Bytes return the generated bytes. +// The returned slice is shared with the builder itself, so you must not modify it. +// When the builder is not closed, an error is returned. +func (b *Builder) Bytes() ([]byte, error) { + if !b.IsClosed() { + return nil, WithStack(BuilderNotClosedError) + } + return b.buf, nil +} + +// Slice returns a slice of the result. +func (b *Builder) Slice() (Slice, error) { + if b.buf.IsEmpty() { + return Slice{}, nil + } + bytes, err := b.Bytes() + return bytes, WithStack(err) +} + +// WriteTo writes the generated bytes to the given writer. +// When the builder is not closed, an error is returned. +func (b *Builder) WriteTo(w io.Writer) (int64, error) { + if !b.IsClosed() { + return 0, WithStack(BuilderNotClosedError) + } + if n, err := w.Write(b.buf); err != nil { + return 0, WithStack(err) + } else { + return int64(n), nil + } +} + +// Size returns the actual size of the generated slice. +// Returns an error when builder is not closed. +func (b *Builder) Size() (ValueLength, error) { + if !b.IsClosed() { + return 0, WithStack(BuilderNotClosedError) + } + return b.buf.Len(), nil +} + +// IsEmpty returns true when no bytes have been generated yet. +func (b *Builder) IsEmpty() bool { + return b.buf.IsEmpty() +} + +// IsOpenObject returns true when the builder has an open object at the top of the stack. +func (b *Builder) IsOpenObject() bool { + if b.stack.IsEmpty() { + return false + } + tos, _ := b.stack.Tos() + h := b.buf[tos] + return h == 0x0b || h == 0x014 +} + +// IsOpenArray returns true when the builder has an open array at the top of the stack. 
func (b *Builder) IsOpenArray() bool {
	if b.stack.IsEmpty() {
		return false
	}
	tos, _ := b.stack.Tos()
	h := b.buf[tos]
	// 0x06 = indexed array, 0x013 (== 0x13) = compact array.
	return h == 0x06 || h == 0x013
}

// OpenObject starts a new object.
// Pass true to request the compact (unindexed) object format.
// This must be closed using Close.
func (b *Builder) OpenObject(unindexed ...bool) error {
	var vType byte
	if optionalBool(unindexed, false) {
		vType = 0x14
	} else {
		vType = 0x0b
	}
	return WithStack(b.openCompoundValue(vType))
}

// OpenArray starts a new array.
// Pass true to request the compact (unindexed) array format.
// This must be closed using Close.
func (b *Builder) OpenArray(unindexed ...bool) error {
	var vType byte
	if optionalBool(unindexed, false) {
		vType = 0x13
	} else {
		vType = 0x06
	}
	return WithStack(b.openCompoundValue(vType))
}

// Close ends an open object or array.
// It rewrites the provisional header of the topmost open compound value into
// its final form (possibly moving data and appending an offset index table)
// and pops the compound off the stack.
func (b *Builder) Close() error {
	if b.IsClosed() {
		return WithStack(BuilderNeedOpenCompoundError)
	}
	tos, _ := b.stack.Tos()
	head := b.buf[tos]

	vpackAssert(head == 0x06 || head == 0x0b || head == 0x13 || head == 0x14)

	isArray := (head == 0x06 || head == 0x13)
	index := b.index[b.stack.Len()-1]

	if index.IsEmpty() {
		// No subvalues were added: emit the canonical empty array/object.
		b.closeEmptyArrayOrObject(tos, isArray)
		return nil
	}

	// From now on index.size() > 0
	vpackAssert(len(index) > 0)

	// check if we can use the compact Array / Object format
	if head == 0x13 || head == 0x14 ||
		(head == 0x06 && b.BuilderOptions.BuildUnindexedArrays) ||
		(head == 0x0b && (b.BuilderOptions.BuildUnindexedObjects || len(index) == 1)) {
		if b.closeCompactArrayOrObject(tos, isArray, index) {
			return nil
		}
		// This might fall through, if closeCompactArrayOrObject gave up!
	}

	if isArray {
		b.closeArray(tos, index)
		return nil
	}

	// From now on we're closing an object

	// fix head byte in case a compact Array / Object was originally requested
	b.buf[tos] = 0x0b

	// First determine byte length and its format:
	offsetSize := uint(8)
	// can be 1, 2, 4 or 8 for the byte width of the offsets,
	// the byte length and the number of subvalues:
	if b.buf.Len()-tos+ValueLength(len(index))-6 <= 0xff {
		// We have so far used _pos - tos bytes, including the reserved 8
		// bytes for byte length and number of subvalues. In the 1-byte number
		// case we would win back 6 bytes but would need one byte per subvalue
		// for the index table
		offsetSize = 1

		// Maybe we need to move down data:
		targetPos := ValueLength(3)
		if b.buf.Len() > (tos + 9) {
			_len := ValueLength(b.buf.Len() - (tos + 9))
			checkOverflow(_len)
			src := b.buf[tos+9:]
			copy(b.buf[tos+targetPos:], src[:_len])
		}
		diff := ValueLength(9 - targetPos)
		b.buf.Shrink(uint(diff))
		// All recorded subvalue offsets shift down with the moved data.
		n := len(index)
		for i := 0; i < n; i++ {
			index[i] -= diff
		}

		// One could move down things in the offsetSize == 2 case as well,
		// since we only need 4 bytes in the beginning. However, saving these
		// 4 bytes has been sacrificed on the Altar of Performance.
	} else if b.buf.Len()-tos+2*ValueLength(len(index)) <= 0xffff {
		offsetSize = 2
	} else if b.buf.Len()-tos+4*ValueLength(len(index)) <= 0xffffffff {
		offsetSize = 4
	}

	// Now build the table:
	extraSpace := offsetSize * uint(len(index))
	if offsetSize == 8 {
		// The 8-byte format stores the subvalue count at the end as well.
		extraSpace += 8
	}
	b.buf.ReserveSpace(extraSpace)
	tableBase := b.buf.Len()
	b.buf.Grow(offsetSize * uint(len(index)))
	// Object
	if len(index) >= 2 {
		// Object index tables must be sorted by attribute name.
		if err := b.sortObjectIndex(b.buf[tos:], index); err != nil {
			return WithStack(err)
		}
	}
	// Write each offset little-endian with offsetSize bytes.
	for i := uint(0); i < uint(len(index)); i++ {
		indexBase := tableBase + ValueLength(offsetSize*i)
		x := uint64(index[i])
		for j := uint(0); j < offsetSize; j++ {
			b.buf[indexBase+ValueLength(j)] = byte(x & 0xff)
			x >>= 8
		}
	}
	// Finally fix the byte width in the type byte:
	if offsetSize > 1 {
		if offsetSize == 2 {
			b.buf[tos] += 1
		} else if offsetSize == 4 {
			b.buf[tos] += 2
		} else { // offsetSize == 8
			b.buf[tos] += 3
			b.appendLength(ValueLength(len(index)), 8)
		}
	}

	// Fix the byte length in the beginning:
	x := ValueLength(b.buf.Len() - tos)
	for i := uint(1); i <= offsetSize; i++ {
		b.buf[tos+ValueLength(i)] = byte(x & 0xff)
		x >>= 8
	}

	if offsetSize < 8 {
		// For the narrow formats, the subvalue count follows the byte length.
		x := len(index)
		for i := uint(offsetSize + 1); i <= 2*offsetSize; i++ {
			b.buf[tos+ValueLength(i)] = byte(x & 0xff)
			x >>= 8
		}
	}

	// And, if desired, check attribute uniqueness:
	if b.BuilderOptions.CheckAttributeUniqueness && len(index) > 1 {
		// check uniqueness of attribute names
		if err := b.checkAttributeUniqueness(Slice(b.buf[tos:])); err != nil {
			return WithStack(err)
		}
	}

	// Now the array or object is complete, we pop a ValueLength off the _stack:
	b.stack.Pop()
	// Intentionally leave _index[depth] intact to avoid future allocs!
	return nil
}

// IsClosed returns true if there are no more open objects or arrays.
func (b *Builder) IsClosed() bool {
	return b.stack.IsEmpty()
}

// HasKey checks whether an Object value has a specific key attribute.
// Returns an error when the topmost open compound is not an object.
func (b *Builder) HasKey(key string) (bool, error) {
	if b.stack.IsEmpty() {
		return false, WithStack(BuilderNeedOpenObjectError)
	}
	tos, _ := b.stack.Tos()
	h := b.buf[tos]
	// 0x0b = indexed object, 0x14 = compact object.
	if h != 0x0b && h != 0x14 {
		return false, WithStack(BuilderNeedOpenObjectError)
	}
	index := b.index[b.stack.Len()-1]
	if index.IsEmpty() {
		return false, nil
	}
	// Linear scan over the offsets of all key/value pairs written so far.
	for _, idx := range index {
		s := Slice(b.buf[tos+idx:])
		k, err := s.makeKey()
		if err != nil {
			return false, WithStack(err)
		}
		if eq, err := k.IsEqualString(key); err != nil {
			return false, WithStack(err)
		} else if eq {
			return true, nil
		}
	}
	return false, nil
}

// GetKey returns the value for a specific key of an Object value.
// Returns Slice of type None when key is not found.
func (b *Builder) GetKey(key string) (Slice, error) {
	if b.stack.IsEmpty() {
		return nil, WithStack(BuilderNeedOpenObjectError)
	}
	tos, _ := b.stack.Tos()
	h := b.buf[tos]
	// 0x0b = indexed object, 0x14 = compact object.
	if h != 0x0b && h != 0x14 {
		return nil, WithStack(BuilderNeedOpenObjectError)
	}
	index := b.index[b.stack.Len()-1]
	if index.IsEmpty() {
		return nil, nil
	}
	for _, idx := range index {
		s := Slice(b.buf[tos+idx:])
		k, err := s.makeKey()
		if err != nil {
			return nil, WithStack(err)
		}
		if eq, err := k.IsEqualString(key); err != nil {
			return nil, WithStack(err)
		} else if eq {
			// The value slice immediately follows its key.
			value, err := s.Next()
			if err != nil {
				return nil, WithStack(err)
			}
			return value, nil
		}
	}
	return nil, nil
}

// RemoveLast removes last subvalue written to an (unclosed) object or array.
func (b *Builder) RemoveLast() error {
	if b.stack.IsEmpty() {
		return WithStack(BuilderNeedOpenCompoundError)
	}
	tos, _ := b.stack.Tos()
	index := &b.index[b.stack.Len()-1]
	if index.IsEmpty() {
		return WithStack(BuilderNeedSubValueError)
	}
	// Truncate the buffer back to the start of the last subvalue and drop
	// its offset from the index.
	newLength := tos + (*index)[len(*index)-1]
	lastSize := b.buf.Len() - newLength
	b.buf.Shrink(uint(lastSize))
	index.RemoveLast()
	return nil
}

// addNull adds a null value to the buffer (type byte 0x18).
func (b *Builder) addNull() {
	b.buf.WriteByte(0x18)
}

// addFalse adds a bool false value to the buffer (type byte 0x19).
func (b *Builder) addFalse() {
	b.buf.WriteByte(0x19)
}

// addTrue adds a bool true value to the buffer (type byte 0x1a).
func (b *Builder) addTrue() {
	b.buf.WriteByte(0x1a)
}

// addBool adds a bool value to the buffer.
func (b *Builder) addBool(v bool) {
	if v {
		b.addTrue()
	} else {
		b.addFalse()
	}
}

// addDouble adds a double value to the buffer:
// type byte 0x1b followed by the 8 little-endian bytes of the IEEE-754 representation.
func (b *Builder) addDouble(v float64) {
	bits := math.Float64bits(v)
	b.buf.ReserveSpace(9)
	b.buf.WriteByte(0x1b)
	binary.LittleEndian.PutUint64(b.buf.Grow(8), bits)
}

// addInt adds an int value to the buffer, using the single-byte small-int
// encodings (0x30..0x39 for 0..9, 0x3a..0x3f for -6..-1) when possible.
func (b *Builder) addInt(v int64) {
	if v >= 0 && v <= 9 {
		b.buf.WriteByte(0x30 + byte(v))
	} else if v < 0 && v >= -6 {
		b.buf.WriteByte(byte(0x40 + int(v)))
	} else {
		b.appendInt(v, 0x1f)
	}
}

// addUInt adds an uint value to the buffer, using the single-byte small-int
// encoding (0x30..0x39) for values 0..9.
func (b *Builder) addUInt(v uint64) {
	if v <= 9 {
		b.buf.WriteByte(0x30 + byte(v))
	} else {
		b.appendUInt(v, 0x27)
	}
}

// addUTCDate adds an UTC date value to the buffer:
// type byte 0x1c followed by an 8-byte integer timestamp.
func (b *Builder) addUTCDate(v int64) {
	x := toUInt64(v)
	dst := b.buf.Grow(9)
	dst[0] = 0x1c
	setLength(dst[1:], ValueLength(x), 8)
}

// addString adds a string value to the buffer.
+func (b *Builder) addString(v string) { + strLen := uint(len(v)) + if strLen > 126 { + // long string + dst := b.buf.Grow(1 + 8 + strLen) + dst[0] = 0xbf + setLength(dst[1:], ValueLength(strLen), 8) // string length + copy(dst[9:], v) // string data + } else { + dst := b.buf.Grow(1 + strLen) + dst[0] = byte(0x40 + strLen) // short string (with length) + copy(dst[1:], v) // string data + } +} + +// addBinary adds a binary value to the buffer. +func (b *Builder) addBinary(v []byte) { + l := uint(len(v)) + b.buf.ReserveSpace(1 + 8 + l) + b.appendUInt(uint64(l), 0xbf) // data length + b.buf.Write(v) // data +} + +// addIllegal adds an Illegal value to the buffer. +func (b *Builder) addIllegal() { + b.buf.WriteByte(0x17) +} + +// addMinKey adds a MinKey value to the buffer. +func (b *Builder) addMinKey() { + b.buf.WriteByte(0x1e) +} + +// addMaxKey adds a MaxKey value to the buffer. +func (b *Builder) addMaxKey() { + b.buf.WriteByte(0x1f) +} + +// Add adds a raw go value value to an array/raw value/object. +func (b *Builder) Add(v interface{}) error { + if it, ok := v.(*ObjectIterator); ok { + return WithStack(b.AddKeyValuesFromIterator(it)) + } + if it, ok := v.(*ArrayIterator); ok { + return WithStack(b.AddValuesFromIterator(it)) + } + value := NewValue(v) + if value.IsIllegal() { + return WithStack(BuilderUnexpectedTypeError{fmt.Sprintf("Cannot convert value of type %s", reflect.TypeOf(v).Name())}) + } + if err := b.addInternal(value); err != nil { + return WithStack(err) + } + return nil +} + +// AddValue adds a value to an array/raw value/object. +func (b *Builder) AddValue(v Value) error { + if err := b.addInternal(v); err != nil { + return WithStack(err) + } + return nil +} + +// AddKeyValue adds a key+value to an open object. +func (b *Builder) AddKeyValue(key string, v Value) error { + if err := b.addInternalKeyValue(key, v); err != nil { + return WithStack(err) + } + return nil +} + +// AddValuesFromIterator adds values to an array from the given iterator. 
+// The array must be opened before a call to this function and the array is left open Intentionally. +func (b *Builder) AddValuesFromIterator(it *ArrayIterator) error { + if b.stack.IsEmpty() { + return WithStack(BuilderNeedOpenArrayError) + } + tos, _ := b.stack.Tos() + h := b.buf[tos] + if h != 0x06 && h != 0x13 { + return WithStack(BuilderNeedOpenArrayError) + } + for it.IsValid() { + v, err := it.Value() + if err != nil { + return WithStack(err) + } + if err := b.addInternal(NewSliceValue(v)); err != nil { + return WithStack(err) + } + if err := it.Next(); err != nil { + return WithStack(err) + } + } + return nil +} + +// AddKeyValuesFromIterator adds values to an object from the given iterator. +// The object must be opened before a call to this function and the object is left open Intentionally. +func (b *Builder) AddKeyValuesFromIterator(it *ObjectIterator) error { + if b.stack.IsEmpty() { + return WithStack(BuilderNeedOpenObjectError) + } + tos, _ := b.stack.Tos() + h := b.buf[tos] + if h != 0x0b && h != 0x14 { + return WithStack(BuilderNeedOpenObjectError) + } + if b.keyWritten { + return WithStack(BuilderKeyAlreadyWrittenError) + } + for it.IsValid() { + k, err := it.Key(true) + if err != nil { + return WithStack(err) + } + key, err := k.GetString() + if err != nil { + return WithStack(err) + } + v, err := it.Value() + if err != nil { + return WithStack(err) + } + if err := b.addInternalKeyValue(key, NewSliceValue(v)); err != nil { + return WithStack(err) + } + if err := it.Next(); err != nil { + return WithStack(err) + } + } + return nil +} + +// returns number of bytes required to store the value in 2s-complement +func intLength(value int64) uint { + if value >= -0x80 && value <= 0x7f { + // shortcut for the common case + return 1 + } + var x uint64 + if value >= 0 { + x = uint64(value) + } else { + x = uint64(-(value + 1)) + } + xSize := uint(0) + for { + xSize++ + x >>= 8 + if x < 0x80 { + return xSize + 1 + } + } +} + +func (b *Builder) appendInt(v 
int64, base uint) { + vSize := intLength(v) + var x uint64 + if vSize == 8 { + x = toUInt64(v) + } else { + shift := int64(1) << (vSize*8 - 1) // will never overflow! + if v >= 0 { + x = uint64(v) + } else { + x = uint64(v+shift) + uint64(shift) + } + // x = v >= 0 ? static_cast(v) + // : static_cast(v + shift) + shift; + } + dst := b.buf.Grow(1 + vSize) + dst[0] = byte(base + vSize) + off := 1 + for ; vSize > 0; vSize-- { + dst[off] = byte(x & 0xff) + x >>= 8 + off++ + } +} + +func (b *Builder) appendUInt(v uint64, base uint) { + b.buf.ReserveSpace(9) + save := b.buf.Len() + b.buf.WriteByte(0) // Will be overwritten at end of function. + vSize := uint(0) + for { + vSize++ + b.buf.WriteByte(byte(v & 0xff)) + v >>= 8 + if v == 0 { + break + } + } + b.buf[save] = byte(base + vSize) +} + +func (b *Builder) appendLength(v ValueLength, n uint) { + dst := b.buf.Grow(n) + setLength(dst, v, n) +} + +func setLength(dst []byte, v ValueLength, n uint) { + for i := uint(0); i < n; i++ { + dst[i] = byte(v & 0xff) + v >>= 8 + } +} + +// openCompoundValue opens an array/object, checking the context. +func (b *Builder) openCompoundValue(vType byte) error { + //haveReported := false + tos, stackLen := b.stack.Tos() + if stackLen > 0 { + h := b.buf[tos] + if !b.keyWritten { + if h != 0x06 && h != 0x13 { + return WithStack(BuilderNeedOpenArrayError) + } + b.reportAdd() + //haveReported = true + } else { + b.keyWritten = false + } + } + b.addCompoundValue(vType) + // if err && haveReported { b.cleanupAdd() } + return nil +} + +// addCompoundValue adds the start of a component value to the stream & stack. 
+func (b *Builder) addCompoundValue(vType byte) { + pos := b.buf.Len() + b.stack.Push(pos) + stackLen := b.stack.Len() + toAdd := stackLen - len(b.index) + for toAdd > 0 { + newIndex := make(indexVector, 0, 16) // Pre-allocate 16 entries so we don't have to allocate memory for the first 16 entries + b.index = append(b.index, newIndex) + toAdd-- + } + b.index[stackLen-1].Clear() + dst := b.buf.Grow(9) + dst[0] = vType + //b.buf.WriteBytes(0, 8) // Will be filled later with bytelength and nr subs +} + +// closeEmptyArrayOrObject closes an empty array/object, removing the pre-allocated length space. +func (b *Builder) closeEmptyArrayOrObject(tos ValueLength, isArray bool) { + // empty Array or Object + if isArray { + b.buf[tos] = 0x01 + } else { + b.buf[tos] = 0x0a + } + vpackAssert(b.buf.Len() == tos+9) + b.buf.Shrink(8) + b.stack.Pop() +} + +// closeCompactArrayOrObject tries to close an array/object using compact notation. +// Returns true when a compact notation was possible, false otherwise. +func (b *Builder) closeCompactArrayOrObject(tos ValueLength, isArray bool, index indexVector) bool { + // use compact notation + nrItems := len(index) + nrItemsLen := getVariableValueLength(ValueLength(nrItems)) + vpackAssert(nrItemsLen > 0) + + byteSize := b.buf.Len() - (tos + 8) + nrItemsLen + vpackAssert(byteSize > 0) + + byteSizeLen := getVariableValueLength(byteSize) + byteSize += byteSizeLen + if getVariableValueLength(byteSize) != byteSizeLen { + byteSize++ + byteSizeLen++ + } + + if byteSizeLen < 9 { + // can only use compact notation if total byte length is at most 8 bytes long + if isArray { + b.buf[tos] = 0x13 + } else { + b.buf[tos] = 0x14 + } + + valuesLen := b.buf.Len() - (tos + 9) // Amount of bytes taken up by array/object values. + if valuesLen > 0 && byteSizeLen < 8 { + // We have array/object values and our byteSize needs less than the pre-allocated 8 bytes. + // So we move the array/object values back. 
+ checkOverflow(valuesLen) + src := b.buf[tos+9:] + copy(b.buf[tos+1+byteSizeLen:], src[:valuesLen]) + } + // Shrink buffer, removing unused space allocated for byteSize. + b.buf.Shrink(uint(8 - byteSizeLen)) + + // store byte length + vpackAssert(byteSize > 0) + storeVariableValueLength(b.buf, tos+1, byteSize, false) + + // store nrItems + b.buf.Grow(uint(nrItemsLen)) + storeVariableValueLength(b.buf, tos+byteSize-1, ValueLength(len(index)), true) + + b.stack.Pop() + return true + } + return false +} + +// checkAttributeUniqueness checks the given slice for duplicate keys. +// It returns an error when duplicate keys are found, nil otherwise. +func (b *Builder) checkAttributeUniqueness(obj Slice) error { + vpackAssert(b.BuilderOptions.CheckAttributeUniqueness) + n, err := obj.Length() + if err != nil { + return WithStack(err) + } + + if obj.IsSorted() { + // object attributes are sorted + previous, err := obj.KeyAt(0) + if err != nil { + return WithStack(err) + } + p, err := previous.GetString() + if err != nil { + return WithStack(err) + } + + // compare each two adjacent attribute names + for i := ValueLength(1); i < n; i++ { + current, err := obj.KeyAt(i) + if err != nil { + return WithStack(err) + } + // keyAt() guarantees a string as returned type + vpackAssert(current.IsString()) + + q, err := current.GetString() + if err != nil { + return WithStack(err) + } + + if p == q { + // identical key + return WithStack(DuplicateAttributeNameError) + } + // re-use already calculated values for next round + p = q + } + } else { + keys := make(map[string]struct{}) + + for i := ValueLength(0); i < n; i++ { + // note: keyAt() already translates integer attributes + key, err := obj.KeyAt(i) + if err != nil { + return WithStack(err) + } + // keyAt() guarantees a string as returned type + vpackAssert(key.IsString()) + + k, err := key.GetString() + if err != nil { + return WithStack(err) + } + if _, found := keys[k]; found { + return WithStack(DuplicateAttributeNameError) + } 
+ keys[k] = struct{}{} + } + } + return nil +} + +func findAttrName(base []byte) ([]byte, error) { + b := base[0] + if b >= 0x40 && b <= 0xbe { + // short UTF-8 string + l := b - 0x40 + return base[1 : 1+l], nil + } + if b == 0xbf { + // long UTF-8 string + l := uint(0) + // read string length + for i := 8; i >= 1; i-- { + l = (l << 8) + uint(base[i]) + } + return base[1+8 : 1+8+l], nil + } + + // translate attribute name + key, err := Slice(base).makeKey() + if err != nil { + return nil, WithStack(err) + } + return findAttrName(key) +} + +func (b *Builder) sortObjectIndex(objBase []byte, offsets []ValueLength) error { + list := make(sortEntries, len(offsets)) + for i, off := range offsets { + name, err := findAttrName(objBase[off:]) + if err != nil { + return WithStack(err) + } + list[i] = sortEntry{ + Offset: off, + Name: name, + } + } + list.Sort() + //sort.Sort(list) + for i, entry := range list { + offsets[i] = entry.Offset + } + return nil +} + +func (b *Builder) closeArray(tos ValueLength, index []ValueLength) { + // fix head byte in case a compact Array was originally requested: + b.buf[tos] = 0x06 + + needIndexTable := true + needNrSubs := true + if len(index) == 1 { + needIndexTable = false + needNrSubs = false + } else if (b.buf.Len()-tos)-index[0] == ValueLength(len(index))*(index[1]-index[0]) { + // In this case it could be that all entries have the same length + // and we do not need an offset table at all: + noTable := true + subLen := index[1] - index[0] + if (b.buf.Len()-tos)-index[len(index)-1] != subLen { + noTable = false + } else { + for i := 1; i < len(index)-1; i++ { + if index[i+1]-index[i] != subLen { + noTable = false + break + } + } + } + if noTable { + needIndexTable = false + needNrSubs = false + } + } + + // First determine byte length and its format: + var offsetSize uint + // can be 1, 2, 4 or 8 for the byte width of the offsets, + // the byte length and the number of subvalues: + var indexLenIfNeeded ValueLength + if needIndexTable 
{ + indexLenIfNeeded = ValueLength(len(index)) + } + nrSubsLenIfNeeded := ValueLength(7) + if needNrSubs { + nrSubsLenIfNeeded = 6 + } + if b.buf.Len()-tos+(indexLenIfNeeded)-(nrSubsLenIfNeeded) <= 0xff { + // We have so far used _pos - tos bytes, including the reserved 8 + // bytes for byte length and number of subvalues. In the 1-byte number + // case we would win back 6 bytes but would need one byte per subvalue + // for the index table + offsetSize = 1 + } else if b.buf.Len()-tos+(indexLenIfNeeded*2) <= 0xffff { + offsetSize = 2 + } else if b.buf.Len()-tos+(indexLenIfNeeded*4) <= 0xffffffff { + offsetSize = 4 + } else { + offsetSize = 8 + } + + // Maybe we need to move down data: + if offsetSize == 1 { + targetPos := ValueLength(3) + if !needIndexTable { + targetPos = 2 + } + if b.buf.Len() > (tos + 9) { + _len := ValueLength(b.buf.Len() - (tos + 9)) + checkOverflow(_len) + src := b.buf[tos+9:] + copy(b.buf[tos+targetPos:], src[:_len]) + } + diff := ValueLength(9 - targetPos) + b.buf.Shrink(uint(diff)) + if needIndexTable { + n := len(index) + for i := 0; i < n; i++ { + index[i] -= diff + } + } // Note: if !needIndexTable the index array is now wrong! + } + // One could move down things in the offsetSize == 2 case as well, + // since we only need 4 bytes in the beginning. However, saving these + // 4 bytes has been sacrificed on the Altar of Performance. 
+ + // Now build the table: + if needIndexTable { + extraSpaceNeeded := offsetSize * uint(len(index)) + if offsetSize == 8 { + extraSpaceNeeded += 8 + } + b.buf.ReserveSpace(extraSpaceNeeded) + tableBase := b.buf.Grow(offsetSize * uint(len(index))) + for i := uint(0); i < uint(len(index)); i++ { + x := uint64(index[i]) + for j := uint(0); j < offsetSize; j++ { + tableBase[offsetSize*i+j] = byte(x & 0xff) + x >>= 8 + } + } + } else { // no index table + b.buf[tos] = 0x02 + } + // Finally fix the byte width in the type byte: + if offsetSize > 1 { + if offsetSize == 2 { + b.buf[tos] += 1 + } else if offsetSize == 4 { + b.buf[tos] += 2 + } else { // offsetSize == 8 + b.buf[tos] += 3 + if needNrSubs { + b.appendLength(ValueLength(len(index)), 8) + } + } + } + + // Fix the byte length in the beginning: + x := ValueLength(b.buf.Len() - tos) + for i := uint(1); i <= offsetSize; i++ { + b.buf[tos+ValueLength(i)] = byte(x & 0xff) + x >>= 8 + } + + if offsetSize < 8 && needNrSubs { + x = ValueLength(len(index)) + for i := offsetSize + 1; i <= 2*offsetSize; i++ { + b.buf[tos+ValueLength(i)] = byte(x & 0xff) + x >>= 8 + } + } + + // Now the array or object is complete, we pop a ValueLength + // off the _stack: + b.stack.Pop() + // Intentionally leave _index[depth] intact to avoid future allocs! 
+} + +func (b *Builder) cleanupAdd() { + depth := b.stack.Len() - 1 + b.index[depth].RemoveLast() +} + +func (b *Builder) reportAdd() { + tos, stackLen := b.stack.Tos() + depth := stackLen - 1 + b.index[depth].Add(b.buf.Len() - tos) +} + +func (b *Builder) addArray(unindexed ...bool) { + h := byte(0x06) + if optionalBool(unindexed, false) { + h = 0x13 + } + b.addCompoundValue(h) +} + +func (b *Builder) addObject(unindexed ...bool) { + h := byte(0x0b) + if optionalBool(unindexed, false) { + h = 0x14 + } + b.addCompoundValue(h) +} + +func (b *Builder) addInternal(v Value) error { + haveReported := false + if !b.stack.IsEmpty() { + if !b.keyWritten { + b.reportAdd() + haveReported = true + } + } + if err := b.set(v); err != nil { + if haveReported { + b.cleanupAdd() + } + return WithStack(err) + } + return nil +} + +func (b *Builder) addInternalKeyValue(attrName string, v Value) error { + haveReported, err := b.addInternalKey(attrName) + if err != nil { + return WithStack(err) + } + if err := b.set(v); err != nil { + if haveReported { + b.cleanupAdd() + } + return WithStack(err) + } + return nil +} + +func (b *Builder) addInternalKey(attrName string) (haveReported bool, err error) { + haveReported = false + tos, stackLen := b.stack.Tos() + if stackLen > 0 { + h := b.buf[tos] + if h != 0x0b && h != 0x14 { + return haveReported, WithStack(BuilderNeedOpenObjectError) + } + if b.keyWritten { + return haveReported, WithStack(BuilderKeyAlreadyWrittenError) + } + b.reportAdd() + haveReported = true + } + + onError := func() { + if haveReported { + b.cleanupAdd() + haveReported = false + } + } + + if err := b.set(NewStringValue(attrName)); err != nil { + onError() + return haveReported, WithStack(err) + } + b.keyWritten = true + return haveReported, nil +} + +func (b *Builder) checkKeyIsString(isString bool) error { + tos, stackLen := b.stack.Tos() + if stackLen > 0 { + h := b.buf[tos] + if h == 0x0b || h == 0x14 { + if !b.keyWritten { + if isString { + b.keyWritten = true + 
} else { + return WithStack(BuilderKeyMustBeStringError) + } + } else { + b.keyWritten = false + } + } + } + return nil +} + +func (b *Builder) set(item Value) error { + //oldPos := b.buf.Len() + //ctype := item.vt + + if err := b.checkKeyIsString(item.vt == String); err != nil { + return WithStack(err) + } + + if item.IsSlice() { + switch item.vt { + case None: + return WithStack(BuilderUnexpectedTypeError{"Cannot set a ValueType::None"}) + case External: + return fmt.Errorf("External not supported") + case Custom: + return WithStack(fmt.Errorf("Cannot set a ValueType::Custom with this method")) + } + s := item.sliceValue() + // Determine length of slice + l, err := s.ByteSize() + if err != nil { + return WithStack(err) + } + b.buf.Write(s[:l]) + return nil + } + + // This method builds a single further VPack item at the current + // append position. If this is an array or object, then an index + // table is created and a new ValueLength is pushed onto the stack. + switch item.vt { + case None: + return WithStack(BuilderUnexpectedTypeError{"Cannot set a ValueType::None"}) + case Null: + b.addNull() + case Bool: + b.addBool(item.boolValue()) + case Double: + b.addDouble(item.doubleValue()) + case External: + return fmt.Errorf("External not supported") + /*if (options->disallowExternals) { + // External values explicitly disallowed as a security + // precaution + throw Exception(Exception::BuilderExternalsDisallowed); + } + if (ctype != Value::CType::VoidPtr) { + throw Exception(Exception::BuilderUnexpectedValue, + "Must give void pointer for ValueType::External"); + } + reserveSpace(1 + sizeof(void*)); + // store pointer. 
this doesn't need to be portable + _start[_pos++] = 0x1d; + void const* value = item.getExternal(); + memcpy(_start + _pos, &value, sizeof(void*)); + _pos += sizeof(void*); + break; + }*/ + case SmallInt: + b.addInt(item.intValue()) + case Int: + b.addInt(item.intValue()) + case UInt: + b.addUInt(item.uintValue()) + case UTCDate: + b.addUTCDate(item.utcDateValue()) + case String: + b.addString(item.stringValue()) + case Array: + b.addArray(item.unindexed) + case Object: + b.addObject(item.unindexed) + case Binary: + b.addBinary(item.binaryValue()) + case Illegal: + b.addIllegal() + case MinKey: + b.addMinKey() + case MaxKey: + b.addMaxKey() + case BCD: + return WithStack(fmt.Errorf("Not implemented")) + case Custom: + return WithStack(fmt.Errorf("Cannot set a ValueType::Custom with this method")) + } + return nil +} diff --git a/deps/github.com/arangodb/go-velocypack/builder_buffer.go b/deps/github.com/arangodb/go-velocypack/builder_buffer.go new file mode 100644 index 000000000..fe900f13b --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/builder_buffer.go @@ -0,0 +1,131 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package velocypack + +// builderBuffer is a byte slice used for building slices. 
+type builderBuffer []byte + +const ( + minGrowDelta = 128 // Minimum amount of extra bytes to add to a buffer when growing + maxGrowDelta = 1024 * 1024 // Maximum amount of extra bytes to add to a buffer when growing +) + +// IsEmpty returns 0 if there are no values in the buffer. +func (b builderBuffer) IsEmpty() bool { + l := len(b) + return l == 0 +} + +// Len returns the length of the buffer. +func (b builderBuffer) Len() ValueLength { + l := len(b) + return ValueLength(l) +} + +// Bytes returns the bytes written to the buffer. +// The returned slice is only valid until the next modification. +func (b *builderBuffer) Bytes() []byte { + return *b +} + +// WriteByte appends a single byte to the buffer. +func (b *builderBuffer) WriteByte(v byte) { + off := len(*b) + b.growCapacity(1) + *b = (*b)[:off+1] + (*b)[off] = v +} + +// WriteBytes appends a series of identical bytes to the buffer. +func (b *builderBuffer) WriteBytes(v byte, count uint) { + if count == 0 { + return + } + off := uint(len(*b)) + b.growCapacity(count) + *b = (*b)[:off+count] + for i := uint(0); i < count; i++ { + (*b)[off+i] = v + } +} + +// Write appends a series of bytes to the buffer. +func (b *builderBuffer) Write(v []byte) { + l := uint(len(v)) + if l > 0 { + off := uint(len(*b)) + b.growCapacity(l) + *b = (*b)[:off+l] + copy((*b)[off:], v) + } +} + +// ReserveSpace ensures that at least n bytes can be added to the buffer without allocating new memory. +func (b *builderBuffer) ReserveSpace(n uint) { + if n > 0 { + b.growCapacity(n) + } +} + +// Shrink reduces the length of the buffer by n elements (removing the last elements). +func (b *builderBuffer) Shrink(n uint) { + if n > 0 { + newLen := uint(len(*b)) - n + if newLen < 0 { + newLen = 0 + } + *b = (*b)[:newLen] + } +} + +// Grow adds n elements to the buffer, returning a slice where the added elements start. 
+func (b *builderBuffer) Grow(n uint) []byte { + l := uint(len(*b)) + if n > 0 { + b.growCapacity(n) + *b = (*b)[:l+n] + } + return (*b)[l:] +} + +// growCapacity ensures that there is enough capacity in the buffer to add n elements. +func (b *builderBuffer) growCapacity(n uint) { + _b := *b + curLen := uint(len(_b)) + curCap := uint(cap(_b)) + newCap := curLen + n + if newCap <= curCap { + // No need to do anything + return + } + // Increase the capacity + extra := newCap // Grow a bit more to avoid copying all the time + if extra < minGrowDelta { + extra = minGrowDelta + } else if extra > maxGrowDelta { + extra = maxGrowDelta + } + newBuffer := make(builderBuffer, curLen, newCap+extra) + copy(newBuffer, _b) + *b = newBuffer +} diff --git a/deps/github.com/arangodb/go-velocypack/builder_buffer_test.go b/deps/github.com/arangodb/go-velocypack/builder_buffer_test.go new file mode 100644 index 000000000..5cfe10570 --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/builder_buffer_test.go @@ -0,0 +1,101 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package velocypack + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestBuilderBufferEmpty(t *testing.T) { + var b builderBuffer + assert.Equal(t, ValueLength(0), b.Len()) + assert.True(t, b.IsEmpty()) +} + +func TestBuilderBufferWriteByte(t *testing.T) { + var b builderBuffer + b.WriteByte(5) + assert.Equal(t, ValueLength(1), b.Len()) + assert.False(t, b.IsEmpty()) + assert.Equal(t, []byte{5}, b.Bytes()) +} + +func TestBuilderBufferWriteBytes(t *testing.T) { + var b builderBuffer + b.WriteBytes(3, 7) + assert.Equal(t, ValueLength(7), b.Len()) + assert.False(t, b.IsEmpty()) + assert.Equal(t, []byte{3, 3, 3, 3, 3, 3, 3}, b.Bytes()) +} + +func TestBuilderBufferWrite(t *testing.T) { + var b builderBuffer + data := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9} + b.Write(data) + assert.Equal(t, ValueLength(len(data)), b.Len()) + assert.False(t, b.IsEmpty()) + assert.Equal(t, data, b.Bytes()) +} + +func TestBuilderBufferReserveSpace(t *testing.T) { + var b builderBuffer + b.ReserveSpace(32) + assert.Equal(t, ValueLength(0), b.Len()) + assert.True(t, b.IsEmpty()) + data := []byte{1, 2, 3, 4} + b.Write(data) + assert.Equal(t, ValueLength(len(data)), b.Len()) + assert.False(t, b.IsEmpty()) + assert.Equal(t, data, b.Bytes()) +} + +func TestBuilderBufferShrink(t *testing.T) { + var b builderBuffer + data := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9} + b.Write(data) + assert.Equal(t, ValueLength(len(data)), b.Len()) + assert.False(t, b.IsEmpty()) + assert.Equal(t, data, b.Bytes()) + b.Shrink(3) + assert.Equal(t, ValueLength(len(data)-3), b.Len()) + assert.False(t, b.IsEmpty()) + assert.Equal(t, data[:len(data)-3], b.Bytes()) +} + +func TestBuilderBufferGrow(t *testing.T) { + var b builderBuffer + data := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9} + b.Write(data) + assert.Equal(t, ValueLength(len(data)), b.Len()) + assert.False(t, b.IsEmpty()) + assert.Equal(t, data, b.Bytes()) + + 
data2 := []byte{5, 6, 7, 8} + dst := b.Grow(uint(len(data2))) + copy(dst, data2) + assert.Equal(t, ValueLength(len(data)+len(data2)), b.Len()) + assert.False(t, b.IsEmpty()) + assert.Equal(t, append(data, data2...), b.Bytes()) +} diff --git a/deps/github.com/arangodb/go-velocypack/builder_index_vector.go b/deps/github.com/arangodb/go-velocypack/builder_index_vector.go new file mode 100644 index 000000000..770d490ca --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/builder_index_vector.go @@ -0,0 +1,57 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package velocypack + +const ( + minIndexVectorGrowDelta = 32 + maxIndexVectorGrowDelta = 1024 +) + +// indexVector is a list of index of positions. +type indexVector []ValueLength + +// Add an index position to the end of the list. +func (iv *indexVector) Add(v ValueLength) { + *iv = append(*iv, v) +} + +// RemoveLast removes the last index position from the end of the list. +func (iv *indexVector) RemoveLast() { + l := len(*iv) + if l > 0 { + *iv = (*iv)[:l-1] + } +} + +// Clear removes all entries +func (iv *indexVector) Clear() { + if len(*iv) > 0 { + *iv = (*iv)[0:0] + } +} + +// IsEmpty returns true if there are no values on the vector. 
+func (iv indexVector) IsEmpty() bool { + l := len(iv) + return l == 0 +} diff --git a/deps/github.com/arangodb/go-velocypack/builder_sort_entry.go b/deps/github.com/arangodb/go-velocypack/builder_sort_entry.go new file mode 100644 index 000000000..fbf19454e --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/builder_sort_entry.go @@ -0,0 +1,82 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package velocypack + +import ( + "bytes" + "sort" +) + +type sortEntry struct { + Offset ValueLength + Name []byte +} + +type sortEntries []sortEntry + +// Len is the number of elements in the collection. +func (l sortEntries) Len() int { return len(l) } + +// Less reports whether the element with +// index i should sort before the element with index j. +func (l sortEntries) Less(i, j int) bool { return bytes.Compare(l[i].Name, l[j].Name) < 0 } + +// Swap swaps the elements with indexes i and j. +func (l sortEntries) Swap(i, j int) { l[i], l[j] = l[j], l[i] } + +// partition picks the last element as a pivot and reorders the array so that +// all elements with values less than the pivot come before the pivot and all +// elements with values greater than the pivot come after it. 
+func partition(s sortEntries) int { + hi := len(s) - 1 + pivot := s[hi] + i := 0 + for j := 0; j < hi; j++ { + r := bytes.Compare(s[j].Name, pivot.Name) + if r <= 0 { + s[i], s[j] = s[j], s[i] + i++ + } + } + s[i], s[hi] = s[hi], s[i] + return i +} + +// Sort sorts the slice in ascending order. +func (l sortEntries) qSort() { + if len(l) > 1 { + p := partition(l) + l[:p].qSort() + l[p+1:].qSort() + } +} + +// Sort sorts the slice in ascending order. +func (l sortEntries) Sort() { + x := len(l) + if x > 16 { + sort.Sort(l) + } else if len(l) > 1 { + l.qSort() + } +} diff --git a/deps/github.com/arangodb/go-velocypack/builder_stack.go b/deps/github.com/arangodb/go-velocypack/builder_stack.go new file mode 100644 index 000000000..2bdf9e6cb --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/builder_stack.go @@ -0,0 +1,73 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package velocypack + +// builderStack is a stack of positions. +type builderStack struct { + stack []ValueLength + bootstrap [4]ValueLength +} + +// Push the given value on top of the stack +func (s *builderStack) Push(v ValueLength) { + if s.stack == nil { + s.stack = s.bootstrap[0:1] + s.stack[0] = v + } else { + s.stack = append(s.stack, v) + } +} + +// Pop removes the top of the stack. 
+func (s *builderStack) Pop() { + l := len(s.stack) + if l > 0 { + s.stack = s.stack[:l-1] + } +} + +func (s *builderStack) Clear() { + s.stack = nil +} + +// Tos returns the value at the top of the stack. +// Returns , +func (s builderStack) Tos() (ValueLength, int) { + // _s := *s + l := len(s.stack) + if l > 0 { + return (s.stack)[l-1], l + } + return 0, 0 +} + +// IsEmpty returns true if there are no values on the stack. +func (s builderStack) IsEmpty() bool { + l := len(s.stack) + return l == 0 +} + +// Len returns the number of elements of the stack. +func (s builderStack) Len() int { + return len(s.stack) +} diff --git a/deps/github.com/arangodb/go-velocypack/builder_stack_test.go b/deps/github.com/arangodb/go-velocypack/builder_stack_test.go new file mode 100644 index 000000000..b333c7a42 --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/builder_stack_test.go @@ -0,0 +1,95 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package velocypack + +import "testing" + +func TestBuilderStack1(t *testing.T) { + var b builderStack + if empty := b.IsEmpty(); !empty { + t.Errorf("Expected empty, got %v", empty) + } + b.Push(1) + if tos, _ := b.Tos(); tos != 1 { + t.Errorf("Expected 1, got %d", tos) + } + if empty := b.IsEmpty(); empty { + t.Errorf("Expected not empty, got %v", empty) + } + b.Push(17) + if tos, _ := b.Tos(); tos != 17 { + t.Errorf("Expected 17, got %d", tos) + } + if empty := b.IsEmpty(); empty { + t.Errorf("Expected not empty, got %v", empty) + } + b.Pop() + if tos, _ := b.Tos(); tos != 1 { + t.Errorf("Expected 1, got %d", tos) + } + if empty := b.IsEmpty(); empty { + t.Errorf("Expected not empty, got %v", empty) + } + b.Push(77) + if tos, _ := b.Tos(); tos != 77 { + t.Errorf("Expected 77, got %d", tos) + } + if empty := b.IsEmpty(); empty { + t.Errorf("Expected not empty, got %v", empty) + } + b.Push(88) + if tos, _ := b.Tos(); tos != 88 { + t.Errorf("Expected 88, got %d", tos) + } + if empty := b.IsEmpty(); empty { + t.Errorf("Expected not empty, got %v", empty) + } + b.Pop() + if tos, _ := b.Tos(); tos != 77 { + t.Errorf("Expected 77, got %d", tos) + } + if empty := b.IsEmpty(); empty { + t.Errorf("Expected not empty, got %v", empty) + } + b.Pop() + if tos, _ := b.Tos(); tos != 1 { + t.Errorf("Expected 1, got %d", tos) + } + if empty := b.IsEmpty(); empty { + t.Errorf("Expected not empty, got %v", empty) + } + b.Pop() // Now empty + if tos, _ := b.Tos(); tos != 0 { + t.Errorf("Expected 0, got %d", tos) + } + if empty := b.IsEmpty(); !empty { + t.Errorf("Expected empty, got %v", empty) + } + b.Pop() // Already empty + if tos, _ := b.Tos(); tos != 0 { + t.Errorf("Expected 0, got %d", tos) + } + if empty := b.IsEmpty(); !empty { + t.Errorf("Expected empty, got %v", empty) + } +} diff --git a/deps/github.com/arangodb/go-velocypack/decoder.go 
b/deps/github.com/arangodb/go-velocypack/decoder.go new file mode 100644 index 000000000..3bc2fcc6f --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/decoder.go @@ -0,0 +1,1031 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +// This code is heavily inspired by the Go sources. +// See https://golang.org/src/encoding/json/ + +package velocypack + +import ( + "bytes" + "encoding" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "reflect" + "runtime" + "strconv" +) + +// A Decoder decodes velocypack values into Go structures. +type Decoder struct { + r io.Reader +} + +// Unmarshaler is implemented by types that can convert themselves from Velocypack. +type Unmarshaler interface { + UnmarshalVPack(Slice) error +} + +// NewDecoder creates a new Decoder that reads data from the given reader. +func NewDecoder(r io.Reader) *Decoder { + return &Decoder{ + r: r, + } +} + +// Unmarshal reads v from the given Velocypack encoded data slice. +// +// Unmarshal uses the inverse of the encodings that +// Marshal uses, allocating maps, slices, and pointers as necessary, +// with the following additional rules: +// +// To unmarshal VelocyPack into a pointer, Unmarshal first handles the case of +// the VelocyPack being the VelocyPack literal Null. 
In that case, Unmarshal sets +// the pointer to nil. Otherwise, Unmarshal unmarshals the VelocyPack into +// the value pointed at by the pointer. If the pointer is nil, Unmarshal +// allocates a new value for it to point to. +// +// To unmarshal VelocyPack into a value implementing the Unmarshaler interface, +// Unmarshal calls that value's UnmarshalVPack method, including +// when the input is a VelocyPack Null. +// Otherwise, if the value implements encoding.TextUnmarshaler +// and the input is a VelocyPack quoted string, Unmarshal calls that value's +// UnmarshalText method with the unquoted form of the string. +// +// To unmarshal VelocyPack into a struct, Unmarshal matches incoming object +// keys to the keys used by Marshal (either the struct field name or its tag), +// preferring an exact match but also accepting a case-insensitive match. +// Unmarshal will only set exported fields of the struct. +// +// To unmarshal VelocyPack into an interface value, +// Unmarshal stores one of these in the interface value: +// +// bool, for VelocyPack Bool's +// float64 for VelocyPack Double's +// uint64 for VelocyPack UInt's +// int64 for VelocyPack Int's +// string, for VelocyPack String's +// []interface{}, for VelocyPack Array's +// map[string]interface{}, for VelocyPack Object's +// nil for VelocyPack Null. +// []byte for VelocyPack Binary. +// +// To unmarshal a VelocyPack array into a slice, Unmarshal resets the slice length +// to zero and then appends each element to the slice. +// As a special case, to unmarshal an empty VelocyPack array into a slice, +// Unmarshal replaces the slice with a new empty slice. +// +// To unmarshal a VelocyPack array into a Go array, Unmarshal decodes +// VelocyPack array elements into corresponding Go array elements. +// If the Go array is smaller than the VelocyPack array, +// the additional VelocyPack array elements are discarded. 
+// If the VelocyPack array is smaller than the Go array, +// the additional Go array elements are set to zero values. +// +// To unmarshal a VelocyPack object into a map, Unmarshal first establishes a map to +// use. If the map is nil, Unmarshal allocates a new map. Otherwise Unmarshal +// reuses the existing map, keeping existing entries. Unmarshal then stores +// key-value pairs from the VelocyPack object into the map. The map's key type must +// either be a string, an integer, or implement encoding.TextUnmarshaler. +// +// If a VelocyPack value is not appropriate for a given target type, +// or if a VelocyPack number overflows the target type, Unmarshal +// skips that field and completes the unmarshaling as best it can. +// If no more serious errors are encountered, Unmarshal returns +// an UnmarshalTypeError describing the earliest such error. +// +// The VelocyPack Null value unmarshals into an interface, map, pointer, or slice +// by setting that Go value to nil. Because null is often used in VelocyPack to mean +// ``not present,'' unmarshaling a VelocyPack Null into any other Go type has no effect +// on the value and produces no error. +// +func Unmarshal(data Slice, v interface{}) error { + if err := unmarshalSlice(data, v); err != nil { + return WithStack(err) + } + return nil +} + +// Decode reads v from the decoder stream. +func (e *Decoder) Decode(v interface{}) error { + s, err := SliceFromReader(e.r) + if err != nil { + return WithStack(err) + } + if err := unmarshalSlice(s, v); err != nil { + return WithStack(err) + } + return nil +} + +// unmarshalSlice reads v from the given slice. 
+func unmarshalSlice(data Slice, v interface{}) (err error) { + defer func() { + if r := recover(); r != nil { + if _, ok := r.(runtime.Error); ok { + panic(r) + } + err = r.(error) + } + }() + + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Ptr || rv.IsNil() { + return &InvalidUnmarshalError{reflect.TypeOf(v)} + } + + d := &decodeState{} + // We decode rv not rv.Elem because the Unmarshaler interface + // test must be applied at the top level of the value. + d.unmarshalValue(data, rv) + return d.savedError +} + +var ( + textUnmarshalerType = reflect.TypeOf(new(encoding.TextUnmarshaler)).Elem() + numberType = reflect.TypeOf(json.Number("")) +) + +type decodeState struct { + useNumber bool + errorContext struct { // provides context for type errors + Struct string + Field string + } + savedError error +} + +// error aborts the decoding by panicking with err. +func (d *decodeState) error(err error) { + panic(d.addErrorContext(err)) +} + +// saveError saves the first err it is called with, +// for reporting at the end of the unmarshal. +func (d *decodeState) saveError(err error) { + if d.savedError == nil { + d.savedError = d.addErrorContext(err) + } +} + +// addErrorContext returns a new error enhanced with information from d.errorContext +func (d *decodeState) addErrorContext(err error) error { + if d.errorContext.Struct != "" || d.errorContext.Field != "" { + switch err := err.(type) { + case *UnmarshalTypeError: + err.Struct = d.errorContext.Struct + err.Field = d.errorContext.Field + return err + } + } + return err +} + +// unmarshalValue unmarshals any slice into given v. 
+func (d *decodeState) unmarshalValue(data Slice, v reflect.Value) { + if !v.IsValid() { + return + } + + switch data.Type() { + case Array: + d.unmarshalArray(data, v) + case Object: + d.unmarshalObject(data, v) + case Bool, Int, SmallInt, UInt, Double, Binary, BCD, String: + d.unmarshalLiteral(data, v) + } +} + +// indirect walks down v allocating pointers as needed, +// until it gets to a non-pointer. +// if it encounters an Unmarshaler, indirect stops and returns that. +// if decodingNull is true, indirect stops at the last pointer so it can be set to nil. +func (d *decodeState) indirect(v reflect.Value, decodingNull bool) (Unmarshaler, json.Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { + // If v is a named type and is addressable, + // start with its address, so that if the type has pointer methods, + // we find them. + if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { + v = v.Addr() + } + for { + // Load value from interface, but only if the result will be + // usefully addressable. + if v.Kind() == reflect.Interface && !v.IsNil() { + e := v.Elem() + if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { + v = e + continue + } + } + + if v.Kind() != reflect.Ptr { + break + } + + if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() { + break + } + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + if v.Type().NumMethod() > 0 { + if u, ok := v.Interface().(Unmarshaler); ok { + return u, nil, nil, reflect.Value{} + } + if u, ok := v.Interface().(json.Unmarshaler); ok { + return nil, u, nil, reflect.Value{} + } + if !decodingNull { + if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { + return nil, nil, u, reflect.Value{} + } + } + } + v = v.Elem() + } + return nil, nil, nil, v +} + +// unmarshalArray unmarshals an array slice into given v. +func (d *decodeState) unmarshalArray(data Slice, v reflect.Value) { + // Check for unmarshaler. 
+ u, ju, ut, pv := d.indirect(v, false) + if u != nil { + if err := u.UnmarshalVPack(data); err != nil { + d.error(err) + } + return + } + if ju != nil { + json, err := data.JSONString() + if err != nil { + d.error(err) + } else { + if err := ju.UnmarshalJSON([]byte(json)); err != nil { + d.error(err) + } + } + return + } + if ut != nil { + d.saveError(&UnmarshalTypeError{Value: "array", Type: v.Type()}) + return + } + + v = pv + + // Check type of target. + switch v.Kind() { + case reflect.Interface: + if v.NumMethod() == 0 { + // Decoding into nil interface? Switch to non-reflect code. + v.Set(reflect.ValueOf(d.arrayInterface(data))) + return + } + // Otherwise it's invalid. + fallthrough + default: + d.saveError(&UnmarshalTypeError{Value: "array", Type: v.Type()}) + return + case reflect.Array: + case reflect.Slice: + break + } + + i := 0 + it, err := NewArrayIterator(data) + if err != nil { + d.error(err) + } + for it.IsValid() { + value, err := it.Value() + if err != nil { + d.error(err) + } + + // Get element of array, growing if necessary. + if v.Kind() == reflect.Slice { + // Grow slice if necessary + if i >= v.Cap() { + newcap := v.Cap() + v.Cap()/2 + if newcap < 4 { + newcap = 4 + } + newv := reflect.MakeSlice(v.Type(), v.Len(), newcap) + reflect.Copy(newv, v) + v.Set(newv) + } + if i >= v.Len() { + v.SetLen(i + 1) + } + } + + if i < v.Len() { + // Decode into element. + d.unmarshalValue(value, v.Index(i)) + } + i++ + if err := it.Next(); err != nil { + d.error(err) + } + } + + if i < v.Len() { + if v.Kind() == reflect.Array { + // Array. Zero the rest. + z := reflect.Zero(v.Type().Elem()) + for ; i < v.Len(); i++ { + v.Index(i).Set(z) + } + } else { + v.SetLen(i) + } + } + if i == 0 && v.Kind() == reflect.Slice { + v.Set(reflect.MakeSlice(v.Type(), 0, 0)) + } +} + +// unmarshalObject unmarshals an object slice into given v. +func (d *decodeState) unmarshalObject(data Slice, v reflect.Value) { + // Check for unmarshaler. 
+ u, ju, ut, pv := d.indirect(v, false) + if u != nil { + if err := u.UnmarshalVPack(data); err != nil { + d.error(err) + } + return + } + if ju != nil { + json, err := data.JSONString() + if err != nil { + d.error(err) + } else { + if err := ju.UnmarshalJSON([]byte(json)); err != nil { + d.error(err) + } + } + return + } + if ut != nil { + d.saveError(&UnmarshalTypeError{Value: "object", Type: v.Type()}) + return + } + v = pv + + // Decoding into nil interface? Switch to non-reflect code. + if v.Kind() == reflect.Interface && v.NumMethod() == 0 { + v.Set(reflect.ValueOf(d.objectInterface(data))) + return + } + + // Check type of target: + // struct or + // map[T1]T2 where T1 is string, an integer type, + // or an encoding.TextUnmarshaler + switch v.Kind() { + case reflect.Map: + // Map key must either have string kind, have an integer kind, + // or be an encoding.TextUnmarshaler. + t := v.Type() + switch t.Key().Kind() { + case reflect.String, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + default: + if !reflect.PtrTo(t.Key()).Implements(textUnmarshalerType) { + d.saveError(&UnmarshalTypeError{Value: "object", Type: v.Type()}) + return + } + } + if v.IsNil() { + v.Set(reflect.MakeMap(t)) + } + case reflect.Struct: + // ok + default: + d.saveError(&UnmarshalTypeError{Value: "object", Type: v.Type()}) + return + } + + var mapElem reflect.Value + + it, err := NewObjectIterator(data) + if err != nil { + d.error(err) + } + for it.IsValid() { + key, err := it.Key(true) + if err != nil { + d.error(err) + } + keyUTF8, err := key.GetStringUTF8() + if err != nil { + d.error(err) + } + value, err := it.Value() + if err != nil { + d.error(err) + } + + // Figure out field corresponding to key. 
+ var subv reflect.Value + destring := false // whether the value is wrapped in a string to be decoded first + + if v.Kind() == reflect.Map { + elemType := v.Type().Elem() + if !mapElem.IsValid() { + mapElem = reflect.New(elemType).Elem() + } else { + mapElem.Set(reflect.Zero(elemType)) + } + subv = mapElem + } else { + var f *field + fields := cachedTypeFields(v.Type()) + for i := range fields { + ff := &fields[i] + if bytes.Equal(ff.nameBytes, key) { + f = ff + break + } + if f == nil && ff.equalFold(ff.nameBytes, keyUTF8) { + f = ff + } + } + if f != nil { + subv = v + destring = f.quoted + for _, i := range f.index { + if subv.Kind() == reflect.Ptr { + if subv.IsNil() { + subv.Set(reflect.New(subv.Type().Elem())) + } + subv = subv.Elem() + } + subv = subv.Field(i) + } + d.errorContext.Field = f.name + d.errorContext.Struct = v.Type().Name() + } + } + + if destring { + // Value should be a string that we'll decode as JSON + valueUTF8, err := value.GetStringUTF8() + if err != nil { + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, expected string, got %s in %v (%v)", value.Type(), subv.Type(), err)) + } + v, err := ParseJSONFromUTF8(valueUTF8) + if err != nil { + d.saveError(err) + } else { + d.unmarshalValue(v, subv) + } + } else { + d.unmarshalValue(value, subv) + } + + // Write value back to map; + // if using struct, subv points into struct already. 
+ if v.Kind() == reflect.Map { + kt := v.Type().Key() + var kv reflect.Value + switch { + case kt.Kind() == reflect.String: + kv = reflect.ValueOf(keyUTF8).Convert(kt) + case reflect.PtrTo(kt).Implements(textUnmarshalerType): + kv = reflect.New(v.Type().Key()) + d.literalStore(key, kv, true) + kv = kv.Elem() + default: + keyStr := string(keyUTF8) + switch kt.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + n, err := strconv.ParseInt(keyStr, 10, 64) + if err != nil || reflect.Zero(kt).OverflowInt(n) { + d.saveError(&UnmarshalTypeError{Value: "number " + keyStr, Type: kt}) + return + } + kv = reflect.ValueOf(n).Convert(kt) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + n, err := strconv.ParseUint(keyStr, 10, 64) + if err != nil || reflect.Zero(kt).OverflowUint(n) { + d.saveError(&UnmarshalTypeError{Value: "number " + keyStr, Type: kt}) + return + } + kv = reflect.ValueOf(n).Convert(kt) + default: + panic("json: Unexpected key type") // should never occur + } + } + v.SetMapIndex(kv, subv) + } + + d.errorContext.Struct = "" + d.errorContext.Field = "" + + if err := it.Next(); err != nil { + d.error(err) + } + } +} + +// unmarshalLiteral unmarshals a literal slice into given v. +func (d *decodeState) unmarshalLiteral(data Slice, v reflect.Value) { + d.literalStore(data, v, false) +} + +// The xxxInterface routines build up a value to be stored +// in an empty interface. They are not strictly necessary, +// but they avoid the weight of reflection in this common case. + +// valueInterface is like value but returns interface{} +func (d *decodeState) valueInterface(data Slice) interface{} { + switch data.Type() { + case Array: + return d.arrayInterface(data) + case Object: + return d.objectInterface(data) + default: + return d.literalInterface(data) + } +} + +// arrayInterface is like array but returns []interface{}. 
+func (d *decodeState) arrayInterface(data Slice) []interface{} { + l, err := data.Length() + if err != nil { + d.error(err) + } + v := make([]interface{}, 0, l) + it, err := NewArrayIterator(data) + if err != nil { + d.error(err) + } + for it.IsValid() { + value, err := it.Value() + if err != nil { + d.error(err) + } + + v = append(v, d.valueInterface(value)) + + // Move to next field + if err := it.Next(); err != nil { + d.error(err) + } + } + return v +} + +// objectInterface is like object but returns map[string]interface{}. +func (d *decodeState) objectInterface(data Slice) map[string]interface{} { + m := make(map[string]interface{}) + it, err := NewObjectIterator(data) + if err != nil { + d.error(err) + } + for it.IsValid() { + key, err := it.Key(true) + if err != nil { + d.error(err) + } + keyStr, err := key.GetString() + if err != nil { + d.error(err) + } + value, err := it.Value() + if err != nil { + d.error(err) + } + + // Read value. + m[keyStr] = d.valueInterface(value) + + // Move to next field + if err := it.Next(); err != nil { + d.error(err) + } + } + return m +} + +// literalInterface is like literal but returns an interface value. +func (d *decodeState) literalInterface(data Slice) interface{} { + switch data.Type() { + case Null: + return nil + + case Bool: + v, err := data.GetBool() + if err != nil { + d.error(err) + } + return v + + case String: + v, err := data.GetString() + if err != nil { + d.error(err) + } + return v + + case Double: + v, err := data.GetDouble() + if err != nil { + d.error(err) + } + return v + + case Int, SmallInt: + v, err := data.GetInt() + if err != nil { + d.error(err) + } + intV := int(v) + if int64(intV) == v { + // Value fits in int + return intV + } + return v + + case UInt: + v, err := data.GetUInt() + if err != nil { + d.error(err) + } + return v + + case Binary: + v, err := data.GetBinary() + if err != nil { + d.error(err) + } + return v + + default: // ?? 
+ d.error(fmt.Errorf("unknown literal type: %s", data.Type())) + return nil + } +} + +// literalStore decodes a literal stored in item into v. +// +// fromQuoted indicates whether this literal came from unwrapping a +// string from the ",string" struct tag option. this is used only to +// produce more helpful error messages. +func (d *decodeState) literalStore(item Slice, v reflect.Value, fromQuoted bool) { + // Check for unmarshaler. + if len(item) == 0 { + //Empty string given + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal empty slice into %v", v.Type())) + return + } + isNull := item.IsNull() // null + u, ju, ut, pv := d.indirect(v, isNull) + if u != nil { + if err := u.UnmarshalVPack(item); err != nil { + d.error(err) + } + return + } + if ju != nil { + json, err := item.JSONString() + if err != nil { + d.error(err) + } else { + if err := ju.UnmarshalJSON([]byte(json)); err != nil { + d.error(err) + } + } + return + } + if ut != nil { + if !item.IsString() { + //if item[0] != '"' { + if fromQuoted { + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal Slice of type %s into %v", item.Type(), v.Type())) + } else { + val := item.Type().String() + d.saveError(&UnmarshalTypeError{Value: val, Type: v.Type()}) + } + return + } + s, err := item.GetStringUTF8() + if err != nil { + if fromQuoted { + d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal slice of type %s into %v", item.Type(), v.Type())) + } else { + d.error(InternalError) // Out of sync + } + } + if err := ut.UnmarshalText(s); err != nil { + d.error(err) + } + return + } + + v = pv + + switch item.Type() { + case Null: // null + // The main parser checks that only true and false can reach here, + // but if this was a quoted string input, it could be anything. 
+ if fromQuoted /*&& string(item) != "null"*/ { + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + break + } + switch v.Kind() { + case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: + v.Set(reflect.Zero(v.Type())) + // otherwise, ignore null for primitives/string + } + case Bool: // true, false + value, err := item.GetBool() + if err != nil { + d.error(err) + } + // The main parser checks that only true and false can reach here, + // but if this was a quoted string input, it could be anything. + if fromQuoted /*&& string(item) != "true" && string(item) != "false"*/ { + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + break + } + switch v.Kind() { + default: + if fromQuoted { + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.saveError(&UnmarshalTypeError{Value: "bool", Type: v.Type()}) + } + case reflect.Bool: + v.SetBool(value) + case reflect.Interface: + if v.NumMethod() == 0 { + v.Set(reflect.ValueOf(value)) + } else { + d.saveError(&UnmarshalTypeError{Value: "bool", Type: v.Type()}) + } + } + + case String: // string + s, err := item.GetString() + if err != nil { + d.error(err) + } + switch v.Kind() { + default: + d.saveError(&UnmarshalTypeError{Value: "string", Type: v.Type()}) + case reflect.Slice: + if v.Type().Elem().Kind() != reflect.Uint8 { + d.saveError(&UnmarshalTypeError{Value: "string", Type: v.Type()}) + break + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + d.saveError(err) + break + } + v.SetBytes(b) + case reflect.String: + v.SetString(string(s)) + case reflect.Interface: + if v.NumMethod() == 0 { + v.Set(reflect.ValueOf(string(s))) + } else { + d.saveError(&UnmarshalTypeError{Value: "string", Type: v.Type()}) + } + } + + case Double: + value, err := item.GetDouble() + if err != nil { + d.error(err) 
+ } + switch v.Kind() { + default: + if v.Kind() == reflect.String && v.Type() == numberType { + s, err := item.JSONString() + if err != nil { + d.error(err) + } + v.SetString(s) + break + } + if fromQuoted { + d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.error(&UnmarshalTypeError{Value: "number", Type: v.Type()}) + } + case reflect.Interface: + n, err := d.convertNumber(value) + if err != nil { + d.saveError(err) + break + } + if v.NumMethod() != 0 { + d.saveError(&UnmarshalTypeError{Value: "number", Type: v.Type()}) + break + } + v.Set(reflect.ValueOf(n)) + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + n := int64(value) + if err != nil || v.OverflowInt(n) { + d.saveError(&UnmarshalTypeError{Value: fmt.Sprintf("number %v", value), Type: v.Type()}) + break + } + v.SetInt(n) + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + n := uint64(value) + if err != nil || v.OverflowUint(n) { + d.saveError(&UnmarshalTypeError{Value: fmt.Sprintf("number %v", value), Type: v.Type()}) + break + } + v.SetUint(n) + + case reflect.Float32, reflect.Float64: + n := value + v.SetFloat(n) + } + + case Int, SmallInt: + value, err := item.GetInt() + if err != nil { + d.error(err) + } + switch v.Kind() { + default: + if v.Kind() == reflect.String && v.Type() == numberType { + s, err := item.JSONString() + if err != nil { + d.error(err) + } + v.SetString(s) + break + } + if fromQuoted { + d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.error(&UnmarshalTypeError{Value: "number", Type: v.Type()}) + } + case reflect.Interface: + var n interface{} + intValue := int(value) + if int64(intValue) == value { + // When the value fits in an int, use int type. 
+ n, err = d.convertNumber(intValue) + } else { + n, err = d.convertNumber(value) + } + if err != nil { + d.saveError(err) + break + } + if v.NumMethod() != 0 { + d.saveError(&UnmarshalTypeError{Value: "number", Type: v.Type()}) + break + } + v.Set(reflect.ValueOf(n)) + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + n := value + if err != nil || v.OverflowInt(n) { + d.saveError(&UnmarshalTypeError{Value: fmt.Sprintf("number %v", value), Type: v.Type()}) + break + } + v.SetInt(n) + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + n := uint64(value) + if err != nil || v.OverflowUint(n) { + d.saveError(&UnmarshalTypeError{Value: fmt.Sprintf("number %v", value), Type: v.Type()}) + break + } + v.SetUint(n) + + case reflect.Float32, reflect.Float64: + n := float64(value) + if err != nil || v.OverflowFloat(n) { + d.saveError(&UnmarshalTypeError{Value: fmt.Sprintf("number %v", value), Type: v.Type()}) + break + } + v.SetFloat(n) + } + + case UInt: + value, err := item.GetUInt() + if err != nil { + d.error(err) + } + switch v.Kind() { + default: + if v.Kind() == reflect.String && v.Type() == numberType { + s, err := item.JSONString() + if err != nil { + d.error(err) + } + v.SetString(s) + break + } + if fromQuoted { + d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.error(&UnmarshalTypeError{Value: "number", Type: v.Type()}) + } + case reflect.Interface: + n, err := d.convertNumber(value) + if err != nil { + d.saveError(err) + break + } + if v.NumMethod() != 0 { + d.saveError(&UnmarshalTypeError{Value: "number", Type: v.Type()}) + break + } + v.Set(reflect.ValueOf(n)) + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + n := int64(value) + if err != nil || v.OverflowInt(n) { + d.saveError(&UnmarshalTypeError{Value: fmt.Sprintf("number %v", value), Type: v.Type()}) + break + } + 
v.SetInt(n) + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + n := value + if err != nil || v.OverflowUint(n) { + d.saveError(&UnmarshalTypeError{Value: fmt.Sprintf("number %v", value), Type: v.Type()}) + break + } + v.SetUint(n) + + case reflect.Float32, reflect.Float64: + n := float64(value) + if err != nil || v.OverflowFloat(n) { + d.saveError(&UnmarshalTypeError{Value: fmt.Sprintf("number %v", value), Type: v.Type()}) + break + } + v.SetFloat(n) + } + + case Binary: + value, err := item.GetBinary() + if err != nil { + d.error(err) + } + switch v.Kind() { + default: + d.saveError(&UnmarshalTypeError{Value: "string", Type: v.Type()}) + case reflect.Slice: + if v.Type().Elem().Kind() != reflect.Uint8 { + d.saveError(&UnmarshalTypeError{Value: "binary", Type: v.Type()}) + break + } + v.SetBytes(value) + case reflect.Interface: + if v.NumMethod() == 0 { + v.Set(reflect.ValueOf(value)) + } else { + d.saveError(&UnmarshalTypeError{Value: "binary", Type: v.Type()}) + } + } + + default: // number + d.error(fmt.Errorf("Unknown type %s", item.Type())) + } +} + +// convertNumber converts the number literal s to a float64 or a Number +// depending on the setting of d.useNumber. +func (d *decodeState) convertNumber(s interface{}) (interface{}, error) { + if d.useNumber { + return json.Number(fmt.Sprintf("%v", s)), nil + } + return s, nil +} diff --git a/deps/github.com/arangodb/go-velocypack/doc.go b/deps/github.com/arangodb/go-velocypack/doc.go new file mode 100644 index 000000000..2f7596f98 --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/doc.go @@ -0,0 +1,26 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +/* +Velocypack implementation for Go. +*/ +package velocypack diff --git a/deps/github.com/arangodb/go-velocypack/dumper.go b/deps/github.com/arangodb/go-velocypack/dumper.go new file mode 100644 index 000000000..51bd1c36d --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/dumper.go @@ -0,0 +1,381 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package velocypack + +import ( + "fmt" + "io" + "strconv" +) + +type DumperOptions struct { + // EscapeUnicode turns on escaping multi-byte Unicode characters when dumping them to JSON (creates \uxxxx sequences). + EscapeUnicode bool + // EscapeForwardSlashes turns on escaping forward slashes when serializing VPack values into JSON. 
+ EscapeForwardSlashes bool + UnsupportedTypeBehavior UnsupportedTypeBehavior +} + +type UnsupportedTypeBehavior int + +const ( + NullifyUnsupportedType UnsupportedTypeBehavior = iota + ConvertUnsupportedType + FailOnUnsupportedType +) + +type Dumper struct { + w io.Writer + indentation uint + options DumperOptions +} + +// NewDumper creates a new dumper around the given writer, with an optional options. +func NewDumper(w io.Writer, options *DumperOptions) *Dumper { + d := &Dumper{ + w: w, + } + if options != nil { + d.options = *options + } + return d +} + +func (d *Dumper) Append(s Slice) error { + w := d.w + switch s.Type() { + case Null: + if _, err := w.Write([]byte("null")); err != nil { + return WithStack(err) + } + return nil + case Bool: + if v, err := s.GetBool(); err != nil { + return WithStack(err) + } else if v { + if _, err := w.Write([]byte("true")); err != nil { + return WithStack(err) + } + } else { + if _, err := w.Write([]byte("false")); err != nil { + return WithStack(err) + } + } + return nil + case Double: + if v, err := s.GetDouble(); err != nil { + return WithStack(err) + } else if err := d.appendDouble(v); err != nil { + return WithStack(err) + } + return nil + case Int, SmallInt: + if v, err := s.GetInt(); err != nil { + return WithStack(err) + } else if err := d.appendInt(v); err != nil { + return WithStack(err) + } + return nil + case UInt: + if v, err := s.GetUInt(); err != nil { + return WithStack(err) + } else if err := d.appendUInt(v); err != nil { + return WithStack(err) + } + return nil + case String: + if v, err := s.GetString(); err != nil { + return WithStack(err) + } else if err := d.appendString(v); err != nil { + return WithStack(err) + } + return nil + case Array: + if err := d.appendArray(s); err != nil { + return WithStack(err) + } + return nil + case Object: + if err := d.appendObject(s); err != nil { + return WithStack(err) + } + return nil + default: + switch d.options.UnsupportedTypeBehavior { + case 
NullifyUnsupportedType: + if _, err := w.Write([]byte("null")); err != nil { + return WithStack(err) + } + case ConvertUnsupportedType: + msg := fmt.Sprintf("(non-representable type %s)", s.Type().String()) + if err := d.appendString(msg); err != nil { + return WithStack(err) + } + default: + return WithStack(NoJSONEquivalentError) + } + } + + return nil +} + +var ( + doubleQuoteSeq = []byte{'"'} + escapeTable = [256]byte{ + // 0 1 2 3 4 5 6 7 8 9 A B C D E + // F + 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'b', 't', 'n', 'u', 'f', 'r', + 'u', + 'u', // 00 + 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', + 'u', + 'u', // 10 + 0, 0, '"', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, + '/', // 20 + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, + 0, // 30~4F + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + '\\', 0, 0, 0, // 50 + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, + 0, // 60~FF + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0} +) + +func (d *Dumper) appendUInt(v uint64) error { + s := strconv.FormatUint(v, 10) + if _, err := d.w.Write([]byte(s)); err != nil { + return WithStack(err) + } + return nil +} + +func (d *Dumper) appendInt(v int64) error { + s := strconv.FormatInt(v, 10) + if _, err := d.w.Write([]byte(s)); err != nil { + return WithStack(err) + } + return nil +} + +func formatDouble(v float64) string { + return strconv.FormatFloat(v, 'g', -1, 64) +} + +func (d *Dumper) appendDouble(v float64) error { + s := formatDouble(v) + if _, err := d.w.Write([]byte(s)); err != nil { + return WithStack(err) + 
} + return nil +} + +func (d *Dumper) appendString(v string) error { + p := []byte(v) + e := len(p) + buf := make([]byte, 0, 16) + if _, err := d.w.Write(doubleQuoteSeq); err != nil { + return WithStack(err) + } + for i := 0; i < e; i++ { + buf = buf[0:0] + c := p[i] + if (c & 0x80) == 0 { + // check for control characters + esc := escapeTable[c] + + if esc != 0 { + if c != '/' || d.options.EscapeForwardSlashes { + // escape forward slashes only when requested + buf = append(buf, '\\') + } + buf = append(buf, esc) + + if esc == 'u' { + i1 := ((uint(c)) & 0xf0) >> 4 + i2 := ((uint(c)) & 0x0f) + + buf = append(buf, '0', '0', hexChar(i1), hexChar(i2)) + } + } else { + buf = append(buf, c) + } + } else if (c & 0xe0) == 0xc0 { + // two-byte sequence + if i+1 >= e { + return WithStack(InvalidUtf8SequenceError) + } + + if d.options.EscapeUnicode { + value := ((uint(p[i]) & 0x1f) << 6) | (uint(p[i+1]) & 0x3f) + buf = dumpUnicodeCharacter(buf, value) + } else { + buf = append(buf, p[i:i+2]...) + } + i++ + } else if (c & 0xf0) == 0xe0 { + // three-byte sequence + if i+2 >= e { + return WithStack(InvalidUtf8SequenceError) + } + + if d.options.EscapeUnicode { + value := (((uint(p[i]) & 0x0f) << 12) | ((uint(p[i+1]) & 0x3f) << 6) | (uint(p[i + +2]) & 0x3f)) + buf = dumpUnicodeCharacter(buf, value) + } else { + buf = append(buf, p[i:i+3]...) + } + i += 2 + } else if (c & 0xf8) == 0xf0 { + // four-byte sequence + if i+3 >= e { + return WithStack(InvalidUtf8SequenceError) + } + + if d.options.EscapeUnicode { + value := (((uint(p[i]) & 0x0f) << 18) | ((uint(p[i+1]) & 0x3f) << 12) | ((uint(p[i+2]) & 0x3f) << 6) | (uint(p[i+3]) & 0x3f)) + // construct the surrogate pairs + value -= 0x10000 + high := (((value & 0xffc00) >> 10) + 0xd800) + buf = dumpUnicodeCharacter(buf, high) + low := (value & 0x3ff) + 0xdc00 + buf = dumpUnicodeCharacter(buf, low) + } else { + buf = append(buf, p[i:i+4]...) 
+ } + i += 3 + } + if _, err := d.w.Write(buf); err != nil { + return WithStack(err) + } + } + if _, err := d.w.Write(doubleQuoteSeq); err != nil { + return WithStack(err) + } + return nil +} + +func (d *Dumper) appendArray(v Slice) error { + w := d.w + it, err := NewArrayIterator(v) + if err != nil { + return WithStack(err) + } + if _, err := w.Write([]byte{'['}); err != nil { + return WithStack(err) + } + for it.IsValid() { + if !it.IsFirst() { + if _, err := w.Write([]byte{','}); err != nil { + return WithStack(err) + } + } + if value, err := it.Value(); err != nil { + return WithStack(err) + } else if err := d.Append(value); err != nil { + return WithStack(err) + } + if err := it.Next(); err != nil { + return WithStack(err) + } + } + if _, err := w.Write([]byte{']'}); err != nil { + return WithStack(err) + } + return nil +} + +func (d *Dumper) appendObject(v Slice) error { + w := d.w + it, err := NewObjectIterator(v) + if err != nil { + return WithStack(err) + } + if _, err := w.Write([]byte{'{'}); err != nil { + return WithStack(err) + } + for it.IsValid() { + if !it.IsFirst() { + if _, err := w.Write([]byte{','}); err != nil { + return WithStack(err) + } + } + if key, err := it.Key(true); err != nil { + return WithStack(err) + } else if err := d.Append(key); err != nil { + return WithStack(err) + } + if _, err := w.Write([]byte{':'}); err != nil { + return WithStack(err) + } + if value, err := it.Value(); err != nil { + return WithStack(err) + } else if err := d.Append(value); err != nil { + return WithStack(err) + } + if err := it.Next(); err != nil { + return WithStack(err) + } + } + if _, err := w.Write([]byte{'}'}); err != nil { + return WithStack(err) + } + return nil +} + +func dumpUnicodeCharacter(dst []byte, value uint) []byte { + dst = append(dst, '\\', 'u') + + mask := uint(0xf000) + shift := uint(12) + for i := 3; i >= 0; i-- { + p := (value & mask) >> shift + dst = append(dst, hexChar(p)) + if i > 0 { + mask = mask >> 4 + shift -= 4 + } + } + 
return dst +} + +func hexChar(v uint) byte { + v = v & uint(0x0f) + if v < 10 { + return byte('0' + v) + } + return byte('A' + v - 10) +} diff --git a/deps/github.com/arangodb/go-velocypack/encoder.go b/deps/github.com/arangodb/go-velocypack/encoder.go new file mode 100644 index 000000000..0b8bd7fe2 --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/encoder.go @@ -0,0 +1,669 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +// This code is heavily inspired by the Go sources. +// See https://golang.org/src/encoding/json/ + +package velocypack + +import ( + "bytes" + "encoding" + "encoding/json" + "io" + "reflect" + "runtime" + "sort" + "strconv" + "sync" +) + +// An Encoder encodes Go structures into velocypack values written to an output stream. +type Encoder struct { + b Builder + w io.Writer +} + +// Marshaler is implemented by types that can convert themselves into Velocypack. +type Marshaler interface { + MarshalVPack() (Slice, error) +} + +// NewEncoder creates a new Encoder that writes output to the given writer. +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{ + w: w, + } +} + +// Marshal writes the Velocypack encoding of v to a buffer and returns that buffer. +// +// Marshal traverses the value v recursively. 
+// If an encountered value implements the Marshaler interface +// and is not a nil pointer, Marshal calls its MarshalVPack method +// to produce Velocypack. +// If an encountered value implements the json.Marshaler interface +// and is not a nil pointer, Marshal calls its MarshalJSON method +// to produce JSON and converts the resulting JSON to VelocyPack. +// If no MarshalVPack or MarshalJSON method is present but the +// value implements encoding.TextMarshaler instead, Marshal calls +// its MarshalText method and encodes the result as a Velocypack string. +// The nil pointer exception is not strictly necessary +// but mimics a similar, necessary exception in the behavior of +// UnmarshalVPack. +// +// Otherwise, Marshal uses the following type-dependent default encodings: +// +// Boolean values encode as Velocypack booleans. +// +// Floating point, integer, and Number values encode as Velocypack Int's, UInt's and Double's. +// +// String values encode as Velocypack strings. +// +// Array and slice values encode as Velocypack arrays, except that +// []byte encodes as Velocypack Binary data, and a nil slice +// encodes as the Null Velocypack value. +// +// Struct values encode as Velocypack objects. +// The encoding follows the same rules as specified for json.Marshal. +// This means that all `json` tags are fully supported. +// +// Map values encode as Velocypack objects. +// The encoding follows the same rules as specified for json.Marshal. +// +// Pointer values encode as the value pointed to. +// A nil pointer encodes as the Null Velocypack value. +// +// Interface values encode as the value contained in the interface. +// A nil interface value encodes as the Null Velocypack value. +// +// Channel, complex, and function values cannot be encoded in Velocypack. +// Attempting to encode such a value causes Marshal to return +// an UnsupportedTypeError. +// +// Velocypack cannot represent cyclic data structures and Marshal does not +// handle them. 
Passing cyclic structures to Marshal will result in +// an infinite recursion. +// +func Marshal(v interface{}) (result Slice, err error) { + defer func() { + if r := recover(); r != nil { + if _, ok := r.(runtime.Error); ok { + panic(r) + } + if s, ok := r.(string); ok { + panic(s) + } + err = r.(error) + } + }() + var b Builder + reflectValue(&b, reflect.ValueOf(v), encoderOptions{}) + return b.Slice() +} + +// Encode writes the Velocypack encoding of v to the stream. +func (e *Encoder) Encode(v interface{}) (err error) { + defer func() { + if r := recover(); r != nil { + if _, ok := r.(runtime.Error); ok { + panic(r) + } + if s, ok := r.(string); ok { + panic(s) + } + err = r.(error) + } + }() + e.b.Clear() + reflectValue(&e.b, reflect.ValueOf(v), encoderOptions{}) + if _, err := e.b.WriteTo(e.w); err != nil { + return WithStack(err) + } + return nil +} + +// Builder returns a reference to the builder used in the given encoder. +func (e *Encoder) Builder() *Builder { + return &e.b +} + +func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + return false +} + +func reflectValue(b *Builder, v reflect.Value, options encoderOptions) { + valueEncoder(v)(b, v, options) +} + +type encoderOptions struct { + quoted bool +} + +type encoderFunc func(b *Builder, v reflect.Value, options encoderOptions) + +var encoderCache struct { + sync.RWMutex + m map[reflect.Type]encoderFunc +} + +func valueEncoder(v reflect.Value) encoderFunc { + if !v.IsValid() { + return invalidValueEncoder + } + return 
typeEncoder(v.Type()) +} + +var ( + marshalerType = reflect.TypeOf(new(Marshaler)).Elem() + jsonMarshalerType = reflect.TypeOf(new(json.Marshaler)).Elem() + textMarshalerType = reflect.TypeOf(new(encoding.TextMarshaler)).Elem() + nullValue = NewNullValue() +) + +func typeEncoder(t reflect.Type) encoderFunc { + encoderCache.RLock() + f := encoderCache.m[t] + encoderCache.RUnlock() + if f != nil { + return f + } + + // To deal with recursive types, populate the map with an + // indirect func before we build it. This type waits on the + // real func (f) to be ready and then calls it. This indirect + // func is only used for recursive types. + encoderCache.Lock() + if encoderCache.m == nil { + encoderCache.m = make(map[reflect.Type]encoderFunc) + } + var wg sync.WaitGroup + wg.Add(1) + encoderCache.m[t] = func(b *Builder, v reflect.Value, options encoderOptions) { + wg.Wait() + f(b, v, options) + } + encoderCache.Unlock() + + // Compute fields without lock. + // Might duplicate effort but won't hold other computations back. + f = newTypeEncoder(t, true) + wg.Done() + encoderCache.Lock() + encoderCache.m[t] = f + encoderCache.Unlock() + return f +} + +// newTypeEncoder constructs an encoderFunc for a type. +// The returned encoder only checks CanAddr when allowAddr is true. 
+func newTypeEncoder(t reflect.Type, allowAddr bool) encoderFunc { + if t.Implements(marshalerType) { + return marshalerEncoder + } + if t.Implements(jsonMarshalerType) { + return jsonMarshalerEncoder + } + if t.Kind() != reflect.Ptr && allowAddr { + if reflect.PtrTo(t).Implements(marshalerType) { + return newCondAddrEncoder(addrMarshalerEncoder, newTypeEncoder(t, false)) + } + if reflect.PtrTo(t).Implements(jsonMarshalerType) { + return newCondAddrEncoder(addrJSONMarshalerEncoder, newTypeEncoder(t, false)) + } + } + + if t.Implements(textMarshalerType) { + return textMarshalerEncoder + } + if t.Kind() != reflect.Ptr && allowAddr { + if reflect.PtrTo(t).Implements(textMarshalerType) { + return newCondAddrEncoder(addrTextMarshalerEncoder, newTypeEncoder(t, false)) + } + } + + switch t.Kind() { + case reflect.Bool: + return boolEncoder + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return intEncoder + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return uintEncoder + case reflect.Float32, reflect.Float64: + return doubleEncoder + case reflect.String: + return stringEncoder + case reflect.Interface: + return interfaceEncoder + case reflect.Struct: + return newStructEncoder(t) + case reflect.Map: + return newMapEncoder(t) + case reflect.Slice: + return newSliceEncoder(t) + case reflect.Array: + return newArrayEncoder(t) + case reflect.Ptr: + return newPtrEncoder(t) + default: + return unsupportedTypeEncoder + } +} + +func invalidValueEncoder(b *Builder, v reflect.Value, options encoderOptions) { + b.addInternal(nullValue) +} + +func marshalerEncoder(b *Builder, v reflect.Value, options encoderOptions) { + if v.Kind() == reflect.Ptr && v.IsNil() { + b.addInternal(nullValue) + return + } + m, ok := v.Interface().(Marshaler) + if !ok { + b.addInternal(nullValue) + return + } + if vpack, err := m.MarshalVPack(); err != nil { + panic(&MarshalerError{v.Type(), err}) + } else { + 
b.addInternal(NewSliceValue(vpack)) + } +} + +func jsonMarshalerEncoder(b *Builder, v reflect.Value, options encoderOptions) { + if v.Kind() == reflect.Ptr && v.IsNil() { + b.addInternal(nullValue) + return + } + m, ok := v.Interface().(json.Marshaler) + if !ok { + b.addInternal(nullValue) + return + } + if json, err := m.MarshalJSON(); err != nil { + panic(&MarshalerError{v.Type(), err}) + } else { + // Convert JSON to vpack + if slice, err := ParseJSON(bytes.NewReader(json)); err != nil { + panic(&MarshalerError{v.Type(), err}) + } else { + b.addInternal(NewSliceValue(slice)) + } + } +} + +func addrMarshalerEncoder(b *Builder, v reflect.Value, options encoderOptions) { + va := v.Addr() + if va.IsNil() { + b.addInternal(nullValue) + return + } + m := va.Interface().(Marshaler) + if vpack, err := m.MarshalVPack(); err != nil { + panic(&MarshalerError{Type: v.Type(), Err: err}) + } else { + // copy VPack into buffer, checking validity. + b.buf.Write(vpack) + } +} + +func addrJSONMarshalerEncoder(b *Builder, v reflect.Value, options encoderOptions) { + va := v.Addr() + if va.IsNil() { + b.addInternal(nullValue) + return + } + m := va.Interface().(json.Marshaler) + if json, err := m.MarshalJSON(); err != nil { + panic(&MarshalerError{Type: v.Type(), Err: err}) + } else { + if slice, err := ParseJSON(bytes.NewReader(json)); err != nil { + panic(&MarshalerError{v.Type(), err}) + } else { + // copy VPack into buffer, checking validity. 
+ b.buf.Write(slice) + } + } +} + +func textMarshalerEncoder(b *Builder, v reflect.Value, options encoderOptions) { + if v.Kind() == reflect.Ptr && v.IsNil() { + b.addInternal(nullValue) + return + } + m := v.Interface().(encoding.TextMarshaler) + text, err := m.MarshalText() + if err != nil { + panic(&MarshalerError{v.Type(), err}) + } + b.addInternal(NewStringValue(string(text))) +} + +func addrTextMarshalerEncoder(b *Builder, v reflect.Value, options encoderOptions) { + va := v.Addr() + if va.IsNil() { + b.addInternal(nullValue) + return + } + m := va.Interface().(encoding.TextMarshaler) + text, err := m.MarshalText() + if err != nil { + panic(&MarshalerError{v.Type(), err}) + } + b.addInternal(NewStringValue(string(text))) +} + +func boolEncoder(b *Builder, v reflect.Value, options encoderOptions) { + if options.quoted { + b.addInternal(NewStringValue(strconv.FormatBool(v.Bool()))) + } else { + b.addInternal(NewBoolValue(v.Bool())) + } +} + +func intEncoder(b *Builder, v reflect.Value, options encoderOptions) { + if options.quoted { + b.addInternal(NewStringValue(strconv.FormatInt(v.Int(), 10))) + } else { + b.addInternal(NewIntValue(v.Int())) + } +} + +func uintEncoder(b *Builder, v reflect.Value, options encoderOptions) { + if options.quoted { + b.addInternal(NewStringValue(strconv.FormatUint(v.Uint(), 10))) + } else { + b.addInternal(NewUIntValue(v.Uint())) + } +} + +func doubleEncoder(b *Builder, v reflect.Value, options encoderOptions) { + if options.quoted { + b.addInternal(NewStringValue(formatDouble(v.Float()))) + } else { + b.addInternal(NewDoubleValue(v.Float())) + } +} + +func stringEncoder(b *Builder, v reflect.Value, options encoderOptions) { + s := v.String() + if options.quoted { + raw, _ := json.Marshal(s) + s = string(raw) + } + b.addInternal(NewStringValue(s)) +} + +func interfaceEncoder(b *Builder, v reflect.Value, options encoderOptions) { + if v.IsNil() { + b.addInternal(nullValue) + return + } + vElem := v.Elem() + valueEncoder(vElem)(b, 
vElem, options) +} + +func unsupportedTypeEncoder(b *Builder, v reflect.Value, options encoderOptions) { + panic(&UnsupportedTypeError{v.Type()}) +} + +type structEncoder struct { + fields []field + fieldEncs []encoderFunc +} + +func (se *structEncoder) encode(b *Builder, v reflect.Value, options encoderOptions) { + if err := b.OpenObject(); err != nil { + panic(err) + } + for i, f := range se.fields { + fv := fieldByIndex(v, f.index) + if !fv.IsValid() || f.omitEmpty && isEmptyValue(fv) { + continue + } + // Key + _, err := b.addInternalKey(f.name) + if err != nil { + panic(err) + } + // Value + options.quoted = f.quoted + se.fieldEncs[i](b, fv, options) + } + if err := b.Close(); err != nil { + panic(err) + } +} + +func newStructEncoder(t reflect.Type) encoderFunc { + fields := cachedTypeFields(t) + se := &structEncoder{ + fields: fields, + fieldEncs: make([]encoderFunc, len(fields)), + } + for i, f := range fields { + se.fieldEncs[i] = typeEncoder(typeByIndex(t, f.index)) + } + return se.encode +} + +type mapEncoder struct { + elemEnc encoderFunc +} + +func (e *mapEncoder) encode(b *Builder, v reflect.Value, options encoderOptions) { + if v.IsNil() { + b.addInternal(nullValue) + return + } + if err := b.OpenObject(); err != nil { + panic(err) + } + + // Extract and sort the keys. 
+ keys := v.MapKeys() + sv := make(reflectWithStringSlice, len(keys)) + for i, v := range keys { + sv[i].v = v + if err := sv[i].resolve(); err != nil { + panic(&MarshalerError{v.Type(), err}) + } + } + sort.Sort(sv) + + for _, kv := range sv { + // Key + _, err := b.addInternalKey(kv.s) + if err != nil { + panic(err) + } + // Value + e.elemEnc(b, v.MapIndex(kv.v), options) + } + if err := b.Close(); err != nil { + panic(err) + } +} + +func newMapEncoder(t reflect.Type) encoderFunc { + switch t.Key().Kind() { + case reflect.String, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + default: + if !t.Key().Implements(textMarshalerType) { + return unsupportedTypeEncoder + } + } + me := &mapEncoder{typeEncoder(t.Elem())} + return me.encode +} + +func encodeByteSlice(b *Builder, v reflect.Value, options encoderOptions) { + if v.IsNil() { + b.addInternal(nullValue) + return + } + b.addInternal(NewBinaryValue(v.Bytes())) +} + +// sliceEncoder just wraps an arrayEncoder, checking to make sure the value isn't nil. +type sliceEncoder struct { + arrayEnc encoderFunc +} + +func (se *sliceEncoder) encode(b *Builder, v reflect.Value, options encoderOptions) { + if v.IsNil() { + b.addInternal(nullValue) + return + } + se.arrayEnc(b, v, options) +} + +func newSliceEncoder(t reflect.Type) encoderFunc { + // Byte slices get special treatment; arrays don't. 
+ if t.Elem().Kind() == reflect.Uint8 { + p := reflect.PtrTo(t.Elem()) + if !p.Implements(marshalerType) && !p.Implements(jsonMarshalerType) && !p.Implements(textMarshalerType) { + return encodeByteSlice + } + } + enc := &sliceEncoder{newArrayEncoder(t)} + return enc.encode +} + +type arrayEncoder struct { + elemEnc encoderFunc +} + +func (ae *arrayEncoder) encode(b *Builder, v reflect.Value, options encoderOptions) { + if err := b.OpenArray(); err != nil { + panic(err) + } + n := v.Len() + for i := 0; i < n; i++ { + ae.elemEnc(b, v.Index(i), options) + } + if err := b.Close(); err != nil { + panic(err) + } +} + +func newArrayEncoder(t reflect.Type) encoderFunc { + enc := &arrayEncoder{typeEncoder(t.Elem())} + return enc.encode +} + +type ptrEncoder struct { + elemEnc encoderFunc +} + +func (pe *ptrEncoder) encode(b *Builder, v reflect.Value, options encoderOptions) { + if v.IsNil() { + b.addInternal(nullValue) + return + } + pe.elemEnc(b, v.Elem(), options) +} + +func newPtrEncoder(t reflect.Type) encoderFunc { + enc := &ptrEncoder{typeEncoder(t.Elem())} + return enc.encode +} + +type condAddrEncoder struct { + canAddrEnc, elseEnc encoderFunc +} + +func (ce *condAddrEncoder) encode(b *Builder, v reflect.Value, options encoderOptions) { + if v.CanAddr() { + ce.canAddrEnc(b, v, options) + } else { + ce.elseEnc(b, v, options) + } +} + +// newCondAddrEncoder returns an encoder that checks whether its value +// CanAddr and delegates to canAddrEnc if so, else to elseEnc. 
+func newCondAddrEncoder(canAddrEnc, elseEnc encoderFunc) encoderFunc { + enc := &condAddrEncoder{canAddrEnc: canAddrEnc, elseEnc: elseEnc} + return enc.encode +} + +type reflectWithString struct { + v reflect.Value + s string +} + +func (w *reflectWithString) resolve() error { + if w.v.Kind() == reflect.String { + w.s = w.v.String() + return nil + } + if tm, ok := w.v.Interface().(encoding.TextMarshaler); ok { + buf, err := tm.MarshalText() + w.s = string(buf) + return err + } + switch w.v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + w.s = strconv.FormatInt(w.v.Int(), 10) + return nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + w.s = strconv.FormatUint(w.v.Uint(), 10) + return nil + } + panic("unexpected map key type") +} + +type reflectWithStringSlice []reflectWithString + +// Len is the number of elements in the collection. +func (l reflectWithStringSlice) Len() int { + return len(l) +} + +// Less reports whether the element with +// index i should sort before the element with index j. +func (l reflectWithStringSlice) Less(i, j int) bool { + return l[i].s < l[j].s +} + +// Swap swaps the elements with indexes i and j. +func (l reflectWithStringSlice) Swap(i, j int) { + l[i], l[j] = l[j], l[i] +} diff --git a/deps/github.com/arangodb/go-velocypack/encoder_field.go b/deps/github.com/arangodb/go-velocypack/encoder_field.go new file mode 100644 index 000000000..4a72fd710 --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/encoder_field.go @@ -0,0 +1,324 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +// This code is (mostly) taken for the Go sources. +// See https://golang.org/src/encoding/json/ +// +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package velocypack + +import ( + "reflect" + "sort" + "sync" + "sync/atomic" +) + +// A field represents a single field found in a struct. +type field struct { + name string + nameBytes []byte // []byte(name) + equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent + + tag bool + index []int + typ reflect.Type + omitEmpty bool + quoted bool +} + +func fieldByIndex(v reflect.Value, index []int) reflect.Value { + for _, i := range index { + if v.Kind() == reflect.Ptr { + if v.IsNil() { + return reflect.Value{} + } + v = v.Elem() + } + v = v.Field(i) + } + return v +} + +func typeByIndex(t reflect.Type, index []int) reflect.Type { + for _, i := range index { + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + t = t.Field(i).Type + } + return t +} + +func fillField(f field) field { + f.nameBytes = []byte(f.name) + f.equalFold = foldFunc(f.nameBytes) + return f +} + +// byIndex sorts field by index sequence. 
+type byIndex []field + +func (x byIndex) Len() int { return len(x) } + +func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byIndex) Less(i, j int) bool { + for k, xik := range x[i].index { + if k >= len(x[j].index) { + return false + } + if xik != x[j].index[k] { + return xik < x[j].index[k] + } + } + return len(x[i].index) < len(x[j].index) +} + +// sort field by name, breaking ties with depth, then +// breaking ties with "name came from json tag", then +// breaking ties with index sequence. +type byNameIndexlenTag []field + +func (x byNameIndexlenTag) Len() int { return len(x) } + +func (x byNameIndexlenTag) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byNameIndexlenTag) Less(i, j int) bool { + if x[i].name != x[j].name { + return x[i].name < x[j].name + } + if len(x[i].index) != len(x[j].index) { + return len(x[i].index) < len(x[j].index) + } + if x[i].tag != x[j].tag { + return x[i].tag + } + return byIndex(x).Less(i, j) +} + +// typeFields returns a list of fields that JSON should recognize for the given type. +// The algorithm is breadth-first search over the set of structs to include - the top struct +// and then any reachable anonymous structs. +func typeFields(t reflect.Type) []field { + // Anonymous fields to explore at the current level and the next. + current := []field{} + next := []field{{typ: t}} + + // Count of queued names for current level and the next. + count := map[reflect.Type]int{} + nextCount := map[reflect.Type]int{} + + // Types already visited at an earlier level. + visited := map[reflect.Type]bool{} + + // Fields found. + var fields []field + + for len(next) > 0 { + current, next = next, current[:0] + count, nextCount = nextCount, map[reflect.Type]int{} + + for _, f := range current { + if visited[f.typ] { + continue + } + visited[f.typ] = true + + // Scan f.typ for fields to include. 
+ for i := 0; i < f.typ.NumField(); i++ { + sf := f.typ.Field(i) + if sf.PkgPath != "" && !sf.Anonymous { // unexported + continue + } + tag := sf.Tag.Get("json") + if tag == "-" { + continue + } + name, opts := parseTag(tag) + if !isValidTag(name) { + name = "" + } + index := make([]int, len(f.index)+1) + copy(index, f.index) + index[len(f.index)] = i + + ft := sf.Type + if ft.Name() == "" && ft.Kind() == reflect.Ptr { + // Follow pointer. + ft = ft.Elem() + } + + // Only strings, floats, integers, and booleans can be quoted. + quoted := false + if opts.Contains("string") { + switch ft.Kind() { + case reflect.Bool, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, + reflect.Float32, reflect.Float64, + reflect.String: + quoted = true + } + } + + // Record found field and index sequence. + if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { + tagged := name != "" + if name == "" { + name = sf.Name + } + fields = append(fields, fillField(field{ + name: name, + tag: tagged, + index: index, + typ: ft, + omitEmpty: opts.Contains("omitempty"), + quoted: quoted, + })) + if count[f.typ] > 1 { + // If there were multiple instances, add a second, + // so that the annihilation code will see a duplicate. + // It only cares about the distinction between 1 or 2, + // so don't bother generating any more copies. + fields = append(fields, fields[len(fields)-1]) + } + continue + } + + // Record new anonymous struct to explore in next round. + nextCount[ft]++ + if nextCount[ft] == 1 { + next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft})) + } + } + } + } + + sort.Sort(byNameIndexlenTag(fields)) + + // Delete all fields that are hidden by the Go rules for embedded fields, + // except that fields with JSON tags are promoted. + + // The fields are sorted in primary order of name, secondary order + // of field index length. 
Loop over names; for each name, delete + // hidden fields by choosing the one dominant field that survives. + out := fields[:0] + for advance, i := 0, 0; i < len(fields); i += advance { + // One iteration per name. + // Find the sequence of fields with the name of this first field. + fi := fields[i] + name := fi.name + for advance = 1; i+advance < len(fields); advance++ { + fj := fields[i+advance] + if fj.name != name { + break + } + } + if advance == 1 { // Only one field with this name + out = append(out, fi) + continue + } + dominant, ok := dominantField(fields[i : i+advance]) + if ok { + out = append(out, dominant) + } + } + + fields = out + sort.Sort(byIndex(fields)) + + return fields +} + +// dominantField looks through the fields, all of which are known to +// have the same name, to find the single field that dominates the +// others using Go's embedding rules, modified by the presence of +// JSON tags. If there are multiple top-level fields, the boolean +// will be false: This condition is an error in Go and we skip all +// the fields. +func dominantField(fields []field) (field, bool) { + // The fields are sorted in increasing index-length order. The winner + // must therefore be one with the shortest index length. Drop all + // longer entries, which is easy: just truncate the slice. + length := len(fields[0].index) + tagged := -1 // Index of first tagged field. + for i, f := range fields { + if len(f.index) > length { + fields = fields[:i] + break + } + if f.tag { + if tagged >= 0 { + // Multiple tagged fields at the same level: conflict. + // Return no field. + return field{}, false + } + tagged = i + } + } + if tagged >= 0 { + return fields[tagged], true + } + // All remaining fields have the same length. If there's more than one, + // we have a conflict (two fields named "X" at the same level) and we + // return no field. 
+ if len(fields) > 1 { + return field{}, false + } + return fields[0], true +} + +var fieldCache struct { + value atomic.Value // map[reflect.Type][]field + mu sync.Mutex // used only by writers +} + +// cachedTypeFields is like typeFields but uses a cache to avoid repeated work. +func cachedTypeFields(t reflect.Type) []field { + m, _ := fieldCache.value.Load().(map[reflect.Type][]field) + f := m[t] + if f != nil { + return f + } + + // Compute fields without lock. + // Might duplicate effort but won't hold other computations back. + f = typeFields(t) + if f == nil { + f = []field{} + } + + fieldCache.mu.Lock() + m, _ = fieldCache.value.Load().(map[reflect.Type][]field) + newM := make(map[reflect.Type][]field, len(m)+1) + for k, v := range m { + newM[k] = v + } + newM[t] = f + fieldCache.value.Store(newM) + fieldCache.mu.Unlock() + return f +} diff --git a/deps/github.com/arangodb/go-velocypack/encoder_fold.go b/deps/github.com/arangodb/go-velocypack/encoder_fold.go new file mode 100644 index 000000000..a32f5f2e3 --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/encoder_fold.go @@ -0,0 +1,168 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +// This code is (mostly) taken for the Go sources. +// See https://golang.org/src/encoding/json/ +// +// Copyright 2013 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package velocypack + +import ( + "bytes" + "unicode/utf8" +) + +const ( + caseMask = ^byte(0x20) // Mask to ignore case in ASCII. + kelvin = '\u212a' + smallLongEss = '\u017f' +) + +// foldFunc returns one of four different case folding equivalence +// functions, from most general (and slow) to fastest: +// +// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8 +// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S') +// 3) asciiEqualFold, no special, but includes non-letters (including _) +// 4) simpleLetterEqualFold, no specials, no non-letters. +// +// The letters S and K are special because they map to 3 runes, not just 2: +// * S maps to s and to U+017F 'ſ' Latin small letter long s +// * k maps to K and to U+212A 'K' Kelvin sign +// See https://play.golang.org/p/tTxjOc0OGo +// +// The returned function is specialized for matching against s and +// should only be given s. It's not curried for performance reasons. +func foldFunc(s []byte) func(s, t []byte) bool { + nonLetter := false + special := false // special letter + for _, b := range s { + if b >= utf8.RuneSelf { + return bytes.EqualFold + } + upper := b & caseMask + if upper < 'A' || upper > 'Z' { + nonLetter = true + } else if upper == 'K' || upper == 'S' { + // See above for why these letters are special. + special = true + } + } + if special { + return equalFoldRight + } + if nonLetter { + return asciiEqualFold + } + return simpleLetterEqualFold +} + +// equalFoldRight is a specialization of bytes.EqualFold when s is +// known to be all ASCII (including punctuation), but contains an 's', +// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t. +// See comments on foldFunc. 
+func equalFoldRight(s, t []byte) bool { + for _, sb := range s { + if len(t) == 0 { + return false + } + tb := t[0] + if tb < utf8.RuneSelf { + if sb != tb { + sbUpper := sb & caseMask + if 'A' <= sbUpper && sbUpper <= 'Z' { + if sbUpper != tb&caseMask { + return false + } + } else { + return false + } + } + t = t[1:] + continue + } + // sb is ASCII and t is not. t must be either kelvin + // sign or long s; sb must be s, S, k, or K. + tr, size := utf8.DecodeRune(t) + switch sb { + case 's', 'S': + if tr != smallLongEss { + return false + } + case 'k', 'K': + if tr != kelvin { + return false + } + default: + return false + } + t = t[size:] + + } + if len(t) > 0 { + return false + } + return true +} + +// asciiEqualFold is a specialization of bytes.EqualFold for use when +// s is all ASCII (but may contain non-letters) and contains no +// special-folding letters. +// See comments on foldFunc. +func asciiEqualFold(s, t []byte) bool { + if len(s) != len(t) { + return false + } + for i, sb := range s { + tb := t[i] + if sb == tb { + continue + } + if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') { + if sb&caseMask != tb&caseMask { + return false + } + } else { + return false + } + } + return true +} + +// simpleLetterEqualFold is a specialization of bytes.EqualFold for +// use when s is all ASCII letters (no underscores, etc) and also +// doesn't contain 'k', 'K', 's', or 'S'. +// See comments on foldFunc. 
+func simpleLetterEqualFold(s, t []byte) bool { + if len(s) != len(t) { + return false + } + for i, b := range s { + if b&caseMask != t[i]&caseMask { + return false + } + } + return true +} diff --git a/deps/github.com/arangodb/go-velocypack/encoder_tags.go b/deps/github.com/arangodb/go-velocypack/encoder_tags.go new file mode 100644 index 000000000..0efeb6a63 --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/encoder_tags.go @@ -0,0 +1,89 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +// This code is (mostly) taken for the Go sources. +// See https://golang.org/src/encoding/json/ +// +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package velocypack + +import ( + "strings" + "unicode" +) + +func isValidTag(s string) bool { + if s == "" { + return false + } + for _, c := range s { + switch { + case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c): + // Backslash and quote chars are reserved, but + // otherwise any punctuation chars are allowed + // in a tag name. 
+ default: + if !unicode.IsLetter(c) && !unicode.IsDigit(c) { + return false + } + } + } + return true +} + +// tagOptions is the string following a comma in a struct field's "json" +// tag, or the empty string. It does not include the leading comma. +type tagOptions string + +// parseTag splits a struct field's json tag into its name and +// comma-separated options. +func parseTag(tag string) (string, tagOptions) { + if idx := strings.Index(tag, ","); idx != -1 { + return tag[:idx], tagOptions(tag[idx+1:]) + } + return tag, tagOptions("") +} + +// Contains reports whether a comma-separated list of options +// contains a particular substr flag. substr must be surrounded by a +// string boundary or commas. +func (o tagOptions) Contains(optionName string) bool { + if len(o) == 0 { + return false + } + s := string(o) + for s != "" { + var next string + i := strings.Index(s, ",") + if i >= 0 { + s, next = s[:i], s[i+1:] + } + if s == optionName { + return true + } + s = next + } + return false +} diff --git a/deps/github.com/arangodb/go-velocypack/error.go b/deps/github.com/arangodb/go-velocypack/error.go new file mode 100644 index 000000000..51b6c6e2a --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/error.go @@ -0,0 +1,231 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package velocypack + +import ( + "errors" + "reflect" +) + +// InvalidTypeError is returned when a Slice getter is called on a slice of a different type. +type InvalidTypeError struct { + Message string +} + +// Error implements the error interface for InvalidTypeError. +func (e InvalidTypeError) Error() string { + return e.Message +} + +// IsInvalidType returns true if the given error is an InvalidTypeError. +func IsInvalidType(err error) bool { + _, ok := Cause(err).(InvalidTypeError) + return ok +} + +var ( + // NumberOutOfRangeError indicates an out of range error. + NumberOutOfRangeError = errors.New("number out of range") + // IsNumberOutOfRange returns true if the given error is an NumberOutOfRangeError. + IsNumberOutOfRange = isCausedByFunc(NumberOutOfRangeError) + // IndexOutOfBoundsError indicates an index outside of array/object bounds. + IndexOutOfBoundsError = errors.New("index out of range") + // IsIndexOutOfBounds returns true if the given error is an IndexOutOfBoundsError. + IsIndexOutOfBounds = isCausedByFunc(IndexOutOfBoundsError) + // NeedAttributeTranslatorError indicates a lack of object key translator (smallint|uint -> string). + NeedAttributeTranslatorError = errors.New("need attribute translator") + // IsNeedAttributeTranslator returns true if the given error is an NeedAttributeTranslatorError. + IsNeedAttributeTranslator = isCausedByFunc(NeedAttributeTranslatorError) + // InternalError indicates an error that the client cannot prevent. + InternalError = errors.New("internal") + // IsInternal returns true if the given error is an InternalError. + IsInternal = isCausedByFunc(InternalError) + // BuilderNeedOpenArrayError indicates an (invalid) attempt to open an array/object when that is not allowed. 
+ BuilderNeedOpenArrayError = errors.New("builder need open array") + // IsBuilderNeedOpenArray returns true if the given error is an BuilderNeedOpenArrayError. + IsBuilderNeedOpenArray = isCausedByFunc(BuilderNeedOpenArrayError) + // BuilderNeedOpenObjectError indicates an (invalid) attempt to open an array/object when that is not allowed. + BuilderNeedOpenObjectError = errors.New("builder need open object") + // IsBuilderNeedOpenObject returns true if the given error is an BuilderNeedOpenObjectError. + IsBuilderNeedOpenObject = isCausedByFunc(BuilderNeedOpenObjectError) + // BuilderNeedOpenCompoundError indicates an (invalid) attempt to close an array/object that is already closed. + BuilderNeedOpenCompoundError = errors.New("builder need open array or object") + // IsBuilderNeedOpenCompound returns true if the given error is an BuilderNeedOpenCompoundError. + IsBuilderNeedOpenCompound = isCausedByFunc(BuilderNeedOpenCompoundError) + DuplicateAttributeNameError = errors.New("duplicate key name") + // IsDuplicateAttributeName returns true if the given error is an DuplicateAttributeNameError. + IsDuplicateAttributeName = isCausedByFunc(DuplicateAttributeNameError) + // BuilderNotClosedError is returned when a call is made to Builder.Bytes without being closed. + BuilderNotClosedError = errors.New("builder not closed") + // IsBuilderNotClosed returns true if the given error is an BuilderNotClosedError. + IsBuilderNotClosed = isCausedByFunc(BuilderNotClosedError) + // BuilderKeyAlreadyWrittenError is returned when a call is made to Builder.Bytes without being closed. + BuilderKeyAlreadyWrittenError = errors.New("builder key already written") + // IsBuilderKeyAlreadyWritten returns true if the given error is an BuilderKeyAlreadyWrittenError. + IsBuilderKeyAlreadyWritten = isCausedByFunc(BuilderKeyAlreadyWrittenError) + // BuilderKeyMustBeStringError is returned when a key is not of type string. 
+ BuilderKeyMustBeStringError = errors.New("builder key must be string") + // IsBuilderKeyMustBeString returns true if the given error is an BuilderKeyMustBeStringError. + IsBuilderKeyMustBeString = isCausedByFunc(BuilderKeyMustBeStringError) + // BuilderNeedSubValueError is returned when a RemoveLast is called without any value in an object/array. + BuilderNeedSubValueError = errors.New("builder need sub value") + // IsBuilderNeedSubValue returns true if the given error is an BuilderNeedSubValueError. + IsBuilderNeedSubValue = isCausedByFunc(BuilderNeedSubValueError) + // InvalidUtf8SequenceError indicates an invalid UTF8 (string) sequence. + InvalidUtf8SequenceError = errors.New("invalid utf8 sequence") + // IsInvalidUtf8Sequence returns true if the given error is an InvalidUtf8SequenceError. + IsInvalidUtf8Sequence = isCausedByFunc(InvalidUtf8SequenceError) + // NoJSONEquivalentError is returned when a Velocypack type cannot be converted to JSON. + NoJSONEquivalentError = errors.New("no JSON equivalent") + // IsNoJSONEquivalent returns true if the given error is an NoJSONEquivalentError. + IsNoJSONEquivalent = isCausedByFunc(NoJSONEquivalentError) +) + +// isCausedByFunc creates an error test function. +func isCausedByFunc(cause error) func(err error) bool { + return func(err error) bool { + return Cause(err) == cause + } +} + +// BuilderUnexpectedTypeError is returned when a Builder function received an invalid type. +type BuilderUnexpectedTypeError struct { + Message string +} + +// Error implements the error interface for BuilderUnexpectedTypeError. +func (e BuilderUnexpectedTypeError) Error() string { + return e.Message +} + +// IsBuilderUnexpectedType returns true if the given error is an BuilderUnexpectedTypeError. +func IsBuilderUnexpectedType(err error) bool { + _, ok := Cause(err).(BuilderUnexpectedTypeError) + return ok +} + +// MarshalerError is returned when a custom VPack Marshaler returns an error. 
+type MarshalerError struct { + Type reflect.Type + Err error +} + +// Error implements the error interface for MarshalerError. +func (e MarshalerError) Error() string { + return "error calling MarshalVPack for type " + e.Type.String() + ": " + e.Err.Error() +} + +// IsMarshaler returns true if the given error is an MarshalerError. +func IsMarshaler(err error) bool { + _, ok := Cause(err).(MarshalerError) + return ok +} + +// UnsupportedTypeError is returned when a type is marshaled that cannot be marshaled. +type UnsupportedTypeError struct { + Type reflect.Type +} + +// Error implements the error interface for UnsupportedTypeError. +func (e UnsupportedTypeError) Error() string { + return "unsupported type " + e.Type.String() +} + +// IsUnsupportedType returns true if the given error is an UnsupportedTypeError. +func IsUnsupportedType(err error) bool { + _, ok := Cause(err).(UnsupportedTypeError) + return ok +} + +// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal. +// (The argument to Unmarshal must be a non-nil pointer.) +type InvalidUnmarshalError struct { + Type reflect.Type +} + +func (e *InvalidUnmarshalError) Error() string { + if e.Type == nil { + return "json: Unmarshal(nil)" + } + + if e.Type.Kind() != reflect.Ptr { + return "json: Unmarshal(non-pointer " + e.Type.String() + ")" + } + return "json: Unmarshal(nil " + e.Type.String() + ")" +} + +// IsInvalidUnmarshal returns true if the given error is an InvalidUnmarshalError. +func IsInvalidUnmarshal(err error) bool { + _, ok := Cause(err).(*InvalidUnmarshalError) + return ok +} + +// An UnmarshalTypeError describes a JSON value that was +// not appropriate for a value of a specific Go type. 
+type UnmarshalTypeError struct { + Value string // description of JSON value - "bool", "array", "number -5" + Type reflect.Type // type of Go value it could not be assigned to + Struct string // name of the struct type containing the field + Field string // name of the field holding the Go value +} + +func (e *UnmarshalTypeError) Error() string { + if e.Struct != "" || e.Field != "" { + return "json: cannot unmarshal " + e.Value + " into Go struct field " + e.Struct + "." + e.Field + " of type " + e.Type.String() + } + return "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String() +} + +// IsUnmarshalType returns true if the given error is an UnmarshalTypeError. +func IsUnmarshalType(err error) bool { + _, ok := Cause(err).(*UnmarshalTypeError) + return ok +} + +// An ParseError is returned when JSON cannot be parsed correctly. +type ParseError struct { + msg string + Offset int64 +} + +func (e *ParseError) Error() string { + return e.msg +} + +// IsParse returns true if the given error is a ParseError. +func IsParse(err error) bool { + _, ok := Cause(err).(*ParseError) + return ok +} + +var ( + // WithStack is called on every return of an error to add stacktrace information to the error. + // When setting this function, also set the Cause function. + // The interface of this function is compatible with functions in github.com/pkg/errors. + // WithStack(nil) must return nil. + WithStack = func(err error) error { return err } + // Cause is used to get the root cause of the given error. + // The interface of this function is compatible with functions in github.com/pkg/errors. + // Cause(nil) must return nil. 
+ Cause = func(err error) error { return err } +) diff --git a/deps/github.com/arangodb/go-velocypack/object_iterator.go b/deps/github.com/arangodb/go-velocypack/object_iterator.go new file mode 100644 index 000000000..187e6379f --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/object_iterator.go @@ -0,0 +1,114 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package velocypack + +type ObjectIterator struct { + s Slice + position ValueLength + size ValueLength + current Slice +} + +// NewObjectIterator initializes an iterator at position 0 of the given object slice. +func NewObjectIterator(s Slice, allowRandomIteration ...bool) (*ObjectIterator, error) { + if !s.IsObject() { + return nil, InvalidTypeError{"Expected Object slice"} + } + size, err := s.Length() + if err != nil { + return nil, WithStack(err) + } + i := &ObjectIterator{ + s: s, + position: 0, + size: size, + } + if size > 0 { + if h := s.head(); h == 0x14 { + i.current, err = s.KeyAt(0, false) + } else if optionalBool(allowRandomIteration, false) { + i.current = s[s.findDataOffset(h):] + } + } + return i, nil +} + +// IsValid returns true if the given position of the iterator is valid. +func (i *ObjectIterator) IsValid() bool { + return i.position < i.size +} + +// IsFirst returns true if the current position is 0. 
+func (i *ObjectIterator) IsFirst() bool { + return i.position == 0 +} + +// Key returns the key of the current position of the iterator +func (i *ObjectIterator) Key(translate bool) (Slice, error) { + if i.position >= i.size { + return nil, WithStack(IndexOutOfBoundsError) + } + if current := i.current; current != nil { + if translate { + key, err := current.makeKey() + return key, WithStack(err) + } + return current, nil + } + key, err := i.s.getNthKey(i.position, translate) + return key, WithStack(err) +} + +// Value returns the value of the current position of the iterator +func (i *ObjectIterator) Value() (Slice, error) { + if i.position >= i.size { + return nil, WithStack(IndexOutOfBoundsError) + } + if current := i.current; current != nil { + value, err := current.Next() + return value, WithStack(err) + } + value, err := i.s.getNthValue(i.position) + return value, WithStack(err) +} + +// Next moves to the next position. +func (i *ObjectIterator) Next() error { + i.position++ + if i.position < i.size && i.current != nil { + var err error + // skip over key + i.current, err = i.current.Next() + if err != nil { + return WithStack(err) + } + // skip over value + i.current, err = i.current.Next() + if err != nil { + return WithStack(err) + } + } else { + i.current = nil + } + return nil +} diff --git a/deps/github.com/arangodb/go-velocypack/parser.go b/deps/github.com/arangodb/go-velocypack/parser.go new file mode 100644 index 000000000..55b454144 --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/parser.go @@ -0,0 +1,151 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package velocypack + +import ( + "bytes" + "encoding/json" + "io" + "strconv" + "strings" +) + +// ParserOptions controls how the Parser builds Velocypack. +type ParserOptions struct { + // If set, all Array's will be unindexed. + BuildUnindexedArrays bool + // If set, all Objects's will be unindexed. + BuildUnindexedObjects bool +} + +// Parser is used to build VPack structures from JSON. +type Parser struct { + options ParserOptions + decoder *json.Decoder + builder *Builder +} + +// ParseJSON parses JSON from the given reader and returns the +// VPack equivalent. +func ParseJSON(r io.Reader, options ...ParserOptions) (Slice, error) { + builder := &Builder{} + p := NewParser(r, builder, options...) + if err := p.Parse(); err != nil { + return nil, WithStack(err) + } + slice, err := builder.Slice() + if err != nil { + return nil, WithStack(err) + } + return slice, nil +} + +// ParseJSONFromString parses the given JSON string and returns the +// VPack equivalent. +func ParseJSONFromString(json string, options ...ParserOptions) (Slice, error) { + return ParseJSON(strings.NewReader(json), options...) +} + +// ParseJSONFromUTF8 parses the given JSON string and returns the +// VPack equivalent. +func ParseJSONFromUTF8(json []byte, options ...ParserOptions) (Slice, error) { + return ParseJSON(bytes.NewReader(json), options...) 
+} + +// NewParser initializes a new Parser with JSON from the given reader and +// it will store the parsers output in the given builder. +func NewParser(r io.Reader, builder *Builder, options ...ParserOptions) *Parser { + d := json.NewDecoder(r) + d.UseNumber() + p := &Parser{ + decoder: d, + builder: builder, + } + if len(options) > 0 { + p.options = options[0] + } + return p +} + +// Parse JSON from the parsers reader and build VPack structures in the +// parsers builder. +func (p *Parser) Parse() error { + for { + t, err := p.decoder.Token() + if err == io.EOF { + break + } else if serr, ok := err.(*json.SyntaxError); ok { + return WithStack(&ParseError{msg: err.Error(), Offset: serr.Offset}) + } else if err != nil { + return WithStack(&ParseError{msg: err.Error()}) + } + switch x := t.(type) { + case nil: + if err := p.builder.AddValue(NewNullValue()); err != nil { + return WithStack(err) + } + case bool: + if err := p.builder.AddValue(NewBoolValue(x)); err != nil { + return WithStack(err) + } + case json.Number: + if xu, err := strconv.ParseUint(string(x), 10, 64); err == nil { + if err := p.builder.AddValue(NewUIntValue(xu)); err != nil { + return WithStack(err) + } + } else if xi, err := x.Int64(); err == nil { + if err := p.builder.AddValue(NewIntValue(xi)); err != nil { + return WithStack(err) + } + } else { + if xf, err := x.Float64(); err == nil { + if err := p.builder.AddValue(NewDoubleValue(xf)); err != nil { + return WithStack(err) + } + } else { + return WithStack(&ParseError{msg: err.Error()}) + } + } + case string: + if err := p.builder.AddValue(NewStringValue(x)); err != nil { + return WithStack(err) + } + case json.Delim: + switch x { + case '[': + if err := p.builder.OpenArray(p.options.BuildUnindexedArrays); err != nil { + return WithStack(err) + } + case '{': + if err := p.builder.OpenObject(p.options.BuildUnindexedObjects); err != nil { + return WithStack(err) + } + case ']', '}': + if err := p.builder.Close(); err != nil { + return 
WithStack(err) + } + } + } + } + return nil +} diff --git a/deps/github.com/arangodb/go-velocypack/raw_slice.go b/deps/github.com/arangodb/go-velocypack/raw_slice.go new file mode 100644 index 000000000..6f7b37c4e --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/raw_slice.go @@ -0,0 +1,50 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package velocypack + +import "errors" + +// RawSlice is a raw encoded Velocypack value. +// It implements Marshaler and Unmarshaler and can +// be used to delay Velocypack decoding or precompute a Velocypack encoding. +type RawSlice []byte + +// MarshalVPack returns m as the Velocypack encoding of m. +func (m RawSlice) MarshalVPack() (Slice, error) { + if m == nil { + return NullSlice(), nil + } + return Slice(m), nil +} + +// UnmarshalVPack sets *m to a copy of data. +func (m *RawSlice) UnmarshalVPack(data Slice) error { + if m == nil { + return errors.New("velocypack.RawSlice: UnmarshalVPack on nil pointer") + } + *m = append((*m)[0:0], data...) 
+ return nil +} + +var _ Marshaler = (*RawSlice)(nil) +var _ Unmarshaler = (*RawSlice)(nil) diff --git a/deps/github.com/arangodb/go-velocypack/slice.go b/deps/github.com/arangodb/go-velocypack/slice.go new file mode 100644 index 000000000..0814bb043 --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/slice.go @@ -0,0 +1,927 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package velocypack + +import ( + "bytes" + "encoding/binary" + "encoding/hex" + "math" + "time" +) + +// Slice provides read only access to a VPack value +type Slice []byte + +// SliceFromHex creates a Slice by decoding the given hex string into a Slice. +// If decoding fails, nil is returned. +func SliceFromHex(v string) Slice { + if bytes, err := hex.DecodeString(v); err != nil { + return nil + } else { + return Slice(bytes) + } +} + +// String returns a HEX representation of the slice. +func (s Slice) String() string { + return hex.EncodeToString(s) +} + +// JSONString converts the contents of the slice to JSON. 
+func (s Slice) JSONString(options ...DumperOptions) (string, error) { + buf := &bytes.Buffer{} + var opt *DumperOptions + if len(options) > 0 { + opt = &options[0] + } + d := NewDumper(buf, opt) + if err := d.Append(s); err != nil { + return "", WithStack(err) + } + return buf.String(), nil +} + +// head returns the first element of the slice or 0 if the slice is empty. +func (s Slice) head() byte { + if len(s) > 0 { + return s[0] + } + return 0 +} + +// ByteSize returns the total byte size for the slice, including the head byte +func (s Slice) ByteSize() (ValueLength, error) { + h := s.head() + // check if the type has a fixed length first + l := fixedTypeLengths[h] + if l != 0 { + // return fixed length + return ValueLength(l), nil + } + + // types with dynamic lengths need special treatment: + switch s.Type() { + case Array, Object: + if h == 0x13 || h == 0x14 { + // compact Array or Object + return readVariableValueLength(s, 1, false), nil + } + + vpackAssert(h > 0x00 && h <= 0x0e) + return ValueLength(readIntegerNonEmpty(s[1:], widthMap[h])), nil + + case String: + vpackAssert(h == 0xbf) + // long UTF-8 String + return ValueLength(1 + 8 + readIntegerFixed(s[1:], 8)), nil + + case Binary: + vpackAssert(h >= 0xc0 && h <= 0xc7) + return ValueLength(1 + ValueLength(h) - 0xbf + ValueLength(readIntegerNonEmpty(s[1:], uint(h)-0xbf))), nil + + case BCD: + if h <= 0xcf { + // positive BCD + vpackAssert(h >= 0xc8 && h < 0xcf) + return ValueLength(1 + ValueLength(h) - 0xc7 + ValueLength(readIntegerNonEmpty(s[1:], uint(h)-0xc7))), nil + } + + // negative BCD + vpackAssert(h >= 0xd0 && h < 0xd7) + return ValueLength(1 + ValueLength(h) - 0xcf + ValueLength(readIntegerNonEmpty(s[1:], uint(h)-0xcf))), nil + + case Custom: + vpackAssert(h >= 0xf4) + switch h { + case 0xf4, 0xf5, 0xf6: + return ValueLength(2 + readIntegerFixed(s[1:], 1)), nil + case 0xf7, 0xf8, 0xf9: + return ValueLength(3 + readIntegerFixed(s[1:], 2)), nil + case 0xfa, 0xfb, 0xfc: + return ValueLength(5 + 
readIntegerFixed(s[1:], 4)), nil + case 0xfd, 0xfe, 0xff: + return ValueLength(9 + readIntegerFixed(s[1:], 8)), nil + } + } + + return 0, WithStack(InternalError) +} + +// Next returns the Slice that directly follows the given slice. +// Same as s[s.ByteSize:] +func (s Slice) Next() (Slice, error) { + size, err := s.ByteSize() + if err != nil { + return nil, WithStack(err) + } + return Slice(s[size:]), nil +} + +// GetBool returns a boolean value from the slice. +// Returns an error if slice is not of type Bool. +func (s Slice) GetBool() (bool, error) { + if err := s.AssertType(Bool); err != nil { + return false, WithStack(err) + } + return s.IsTrue(), nil +} + +// GetDouble returns a Double value from the slice. +// Returns an error if slice is not of type Double. +func (s Slice) GetDouble() (float64, error) { + if err := s.AssertType(Double); err != nil { + return 0.0, WithStack(err) + } + bits := binary.LittleEndian.Uint64(s[1:]) + return math.Float64frombits(bits), nil +} + +// GetInt returns a Int value from the slice. +// Returns an error if slice is not of type Int. +func (s Slice) GetInt() (int64, error) { + h := s.head() + + if h >= 0x20 && h <= 0x27 { + // Int T + v := readIntegerNonEmpty(s[1:], uint(h)-0x1f) + if h == 0x27 { + return toInt64(v), nil + } else { + vv := int64(v) + shift := int64(1) << ((h-0x1f)*8 - 1) + if vv < shift { + return vv, nil + } else { + return vv - (shift << 1), nil + } + } + } + + if h >= 0x28 && h <= 0x2f { + // UInt + v, err := s.GetUInt() + if err != nil { + return 0, WithStack(err) + } + if v > math.MaxInt64 { + return 0, WithStack(NumberOutOfRangeError) + } + return int64(v), nil + } + + if h >= 0x30 && h <= 0x3f { + // SmallInt + return s.GetSmallInt() + } + + return 0, WithStack(InvalidTypeError{"Expecting type Int"}) +} + +// GetUInt returns a UInt value from the slice. +// Returns an error if slice is not of type UInt. 
+func (s Slice) GetUInt() (uint64, error) { + h := s.head() + + if h == 0x28 { + // single byte integer + return uint64(s[1]), nil + } + + if h >= 0x29 && h <= 0x2f { + // UInt + return readIntegerNonEmpty(s[1:], uint(h)-0x27), nil + } + + if h >= 0x20 && h <= 0x27 { + // Int + v, err := s.GetInt() + if err != nil { + return 0, WithStack(err) + } + if v < 0 { + return 0, WithStack(NumberOutOfRangeError) + } + return uint64(v), nil + } + + if h >= 0x30 && h <= 0x39 { + // Smallint >= 0 + return uint64(h - 0x30), nil + } + + if h >= 0x3a && h <= 0x3f { + // Smallint < 0 + return 0, WithStack(NumberOutOfRangeError) + } + + return 0, WithStack(InvalidTypeError{"Expecting type UInt"}) +} + +// GetSmallInt returns a SmallInt value from the slice. +// Returns an error if slice is not of type SmallInt. +func (s Slice) GetSmallInt() (int64, error) { + h := s.head() + + if h >= 0x30 && h <= 0x39 { + // Smallint >= 0 + return int64(h - 0x30), nil + } + + if h >= 0x3a && h <= 0x3f { + // Smallint < 0 + return int64(h-0x3a) - 6, nil + } + + if (h >= 0x20 && h <= 0x27) || (h >= 0x28 && h <= 0x2f) { + // Int and UInt + // we'll leave it to the compiler to detect the two ranges above are + // adjacent + return s.GetInt() + } + + return 0, InvalidTypeError{"Expecting type SmallInt"} +} + +// GetUTCDate return the value for an UTCDate object +func (s Slice) GetUTCDate() (time.Time, error) { + if !s.IsUTCDate() { + return time.Time{}, InvalidTypeError{"Expecting type UTCDate"} + } + v := toInt64(readIntegerFixed(s[1:], 8)) // milliseconds since epoch + sec := v / 1000 + nsec := (v % 1000) * 1000000 + return time.Unix(sec, nsec).UTC(), nil +} + +// GetStringUTF8 return the value for a String object as a []byte with UTF-8 values. +// This function is a bit faster than GetString, since the conversion from +// []byte to string needs a memory allocation. 
+func (s Slice) GetStringUTF8() ([]byte, error) { + h := s.head() + if h >= 0x40 && h <= 0xbe { + // short UTF-8 String + length := h - 0x40 + result := s[1 : 1+length] + return result, nil + } + + if h == 0xbf { + // long UTF-8 String + length := readIntegerFixed(s[1:], 8) + if err := checkOverflow(ValueLength(length)); err != nil { + return nil, WithStack(err) + } + result := s[1+8 : 1+8+length] + return result, nil + } + + return nil, InvalidTypeError{"Expecting type String"} +} + +// GetString return the value for a String object +// This function is a bit slower than GetStringUTF8, since the conversion from +// []byte to string needs a memory allocation. +func (s Slice) GetString() (string, error) { + bytes, err := s.GetStringUTF8() + if err != nil { + return "", WithStack(err) + } + return string(bytes), nil +} + +// GetStringLength return the length for a String object +func (s Slice) GetStringLength() (ValueLength, error) { + h := s.head() + if h >= 0x40 && h <= 0xbe { + // short UTF-8 String + length := h - 0x40 + return ValueLength(length), nil + } + + if h == 0xbf { + // long UTF-8 String + length := readIntegerFixed(s[1:], 8) + if err := checkOverflow(ValueLength(length)); err != nil { + return 0, WithStack(err) + } + return ValueLength(length), nil + } + + return 0, InvalidTypeError{"Expecting type String"} +} + +// CompareString compares the string value in the slice with the given string. +// s == value -> 0 +// s < value -> -1 +// s > value -> 1 +func (s Slice) CompareString(value string) (int, error) { + k, err := s.GetStringUTF8() + if err != nil { + return 0, WithStack(err) + } + return bytes.Compare(k, []byte(value)), nil +} + +// IsEqualString compares the string value in the slice with the given string for equivalence. 
+func (s Slice) IsEqualString(value string) (bool, error) { + k, err := s.GetStringUTF8() + if err != nil { + return false, WithStack(err) + } + rc := bytes.Compare(k, []byte(value)) + return rc == 0, nil +} + +// GetBinary return the value for a Binary object +func (s Slice) GetBinary() ([]byte, error) { + if !s.IsBinary() { + return nil, InvalidTypeError{"Expecting type Binary"} + } + + h := s.head() + vpackAssert(h >= 0xc0 && h <= 0xc7) + + lengthSize := uint(h - 0xbf) + length := readIntegerNonEmpty(s[1:], lengthSize) + checkOverflow(ValueLength(length)) + return s[1+lengthSize : 1+uint64(lengthSize)+length], nil +} + +// GetBinaryLength return the length for a Binary object +func (s Slice) GetBinaryLength() (ValueLength, error) { + if !s.IsBinary() { + return 0, InvalidTypeError{"Expecting type Binary"} + } + + h := s.head() + vpackAssert(h >= 0xc0 && h <= 0xc7) + + lengthSize := uint(h - 0xbf) + length := readIntegerNonEmpty(s[1:], lengthSize) + return ValueLength(length), nil +} + +// Length return the number of members for an Array or Object object +func (s Slice) Length() (ValueLength, error) { + if !s.IsArray() && !s.IsObject() { + return 0, InvalidTypeError{"Expecting type Array or Object"} + } + + h := s.head() + if h == 0x01 || h == 0x0a { + // special case: empty! 
+ return 0, nil + } + + if h == 0x13 || h == 0x14 { + // compact Array or Object + end := readVariableValueLength(s, 1, false) + return readVariableValueLength(s, end-1, true), nil + } + + offsetSize := indexEntrySize(h) + vpackAssert(offsetSize > 0) + end := readIntegerNonEmpty(s[1:], offsetSize) + + // find number of items + if h <= 0x05 { // No offset table or length, need to compute: + firstSubOffset := s.findDataOffset(h) + first := s[firstSubOffset:] + s, err := first.ByteSize() + if err != nil { + return 0, WithStack(err) + } + if s == 0 { + return 0, WithStack(InternalError) + } + return (ValueLength(end) - firstSubOffset) / s, nil + } else if offsetSize < 8 { + return ValueLength(readIntegerNonEmpty(s[offsetSize+1:], offsetSize)), nil + } + + return ValueLength(readIntegerNonEmpty(s[end-uint64(offsetSize):], offsetSize)), nil +} + +// At extracts the array value at the specified index. +func (s Slice) At(index ValueLength) (Slice, error) { + if !s.IsArray() { + return nil, InvalidTypeError{"Expecting type Array"} + } + + if result, err := s.getNth(index); err != nil { + return nil, WithStack(err) + } else { + return result, nil + } +} + +// KeyAt extracts a key from an Object at the specified index. 
// KeyAt extracts a key from an Object at the specified index.
// When translate is true (the default), integer keys are translated to
// strings via the attribute translator.
func (s Slice) KeyAt(index ValueLength, translate ...bool) (Slice, error) {
	if !s.IsObject() {
		return nil, InvalidTypeError{"Expecting type Object"}
	}

	return s.getNthKey(index, optionalBool(translate, true))
}

// ValueAt extracts a value from an Object at the specified index.
// The value is located by skipping past the key slice at that index.
func (s Slice) ValueAt(index ValueLength) (Slice, error) {
	if !s.IsObject() {
		return nil, InvalidTypeError{"Expecting type Object"}
	}

	key, err := s.getNthKey(index, false)
	if err != nil {
		return nil, WithStack(err)
	}
	byteSize, err := key.ByteSize()
	if err != nil {
		return nil, WithStack(err)
	}
	// the value immediately follows the (untranslated) key
	return Slice(key[byteSize:]), nil
}

// indexEntrySize returns the width in bytes (1, 2, 4 or 8) of the offset
// table entries for the given Array/Object head byte.
func indexEntrySize(head byte) uint {
	vpackAssert(head > 0x00 && head <= 0x12)
	return widthMap[head]
}

// Get looks for the specified attribute path inside an Object
// returns a Slice(ValueType::None) if not found
func (s Slice) Get(attributePath ...string) (Slice, error) {
	result := s
	parent := s
	// walk the path one component at a time, descending into nested objects
	for _, a := range attributePath {
		var err error
		result, err = parent.get(a)
		if err != nil {
			return nil, WithStack(err)
		}
		if result.IsNone() {
			// path component missing: stop early and report None
			return result, nil
		}
		parent = result
	}
	return result, nil
}

// get looks for the specified attribute inside an Object
// returns a Slice(ValueType::None) if not found.
// Depending on head byte and entry count this dispatches to the compact
// iterator, a direct single-entry check, binary search (sorted index) or
// linear search.
func (s Slice) get(attribute string) (Slice, error) {
	if !s.IsObject() {
		return nil, InvalidTypeError{"Expecting Object"}
	}

	h := s.head()
	if h == 0x0a {
		// special case, empty object
		return nil, nil
	}

	if h == 0x14 {
		// compact Object: no index table, must iterate
		value, err := s.getFromCompactObject(attribute)
		return value, WithStack(err)
	}

	offsetSize := indexEntrySize(h)
	vpackAssert(offsetSize > 0)
	// total byte size of the object
	end := ValueLength(readIntegerNonEmpty(s[1:], offsetSize))

	// read number of items
	var n ValueLength
	var ieBase ValueLength
	if offsetSize < 8 {
		// item count follows the byte-size field; index table sits at the end
		n = ValueLength(readIntegerNonEmpty(s[1+offsetSize:], offsetSize))
		ieBase = end - n*ValueLength(offsetSize)
	} else {
		// 8-byte offsets: item count is stored after the index table, at the end
		n = ValueLength(readIntegerNonEmpty(s[end-ValueLength(offsetSize):], offsetSize))
		ieBase = end - n*ValueLength(offsetSize) - ValueLength(offsetSize)
	}

	if n == 1 {
		// Just one attribute, there is no index table!
		key := Slice(s[s.findDataOffset(h):])

		if key.IsString() {
			if eq, err := key.IsEqualString(attribute); err != nil {
				return nil, WithStack(err)
			} else if eq {
				value, err := key.Next()
				return value, WithStack(err)
			}
			// fall through to returning None Slice below
		} else if key.IsSmallInt() || key.IsUInt() {
			// translate key
			if attributeTranslator == nil {
				return nil, WithStack(NeedAttributeTranslatorError)
			}
			if eq, err := key.translateUnchecked().IsEqualString(attribute); err != nil {
				return nil, WithStack(err)
			} else if eq {
				value, err := key.Next()
				return value, WithStack(err)
			}
		}

		// no match or invalid key type
		return nil, nil
	}

	// only use binary search for attributes if we have at least this many entries
	// otherwise we'll always use the linear search
	const SortedSearchEntriesThreshold = ValueLength(4)

	// bool const isSorted = (h >= 0x0b && h <= 0x0e);
	if n >= SortedSearchEntriesThreshold && (h >= 0x0b && h <= 0x0e) {
		// This means, we have to handle the special case n == 1 only
		// in the linear search!
		switch offsetSize {
		case 1:
			result, err := s.searchObjectKeyBinary(attribute, ieBase, n, 1)
			return result, WithStack(err)
		case 2:
			result, err := s.searchObjectKeyBinary(attribute, ieBase, n, 2)
			return result, WithStack(err)
		case 4:
			result, err := s.searchObjectKeyBinary(attribute, ieBase, n, 4)
			return result, WithStack(err)
		case 8:
			result, err := s.searchObjectKeyBinary(attribute, ieBase, n, 8)
			return result, WithStack(err)
		}
	}

	result, err := s.searchObjectKeyLinear(attribute, ieBase, ValueLength(offsetSize), n)
	return result, WithStack(err)
}

// HasKey returns true if the slice is an object that has a given key path.
// HasKey returns true if the slice is an object that has a given key path.
func (s Slice) HasKey(keyPath ...string) (bool, error) {
	if result, err := s.Get(keyPath...); err != nil {
		return false, WithStack(err)
	} else {
		return !result.IsNone(), nil
	}
}

// getFromCompactObject performs a linear scan over a compact (0x14) Object,
// returning the value for the given attribute, or nil if not found.
func (s Slice) getFromCompactObject(attribute string) (Slice, error) {
	it, err := NewObjectIterator(s)
	if err != nil {
		return nil, WithStack(err)
	}
	for it.IsValid() {
		key, err := it.Key(false)
		if err != nil {
			return nil, WithStack(err)
		}
		// makeKey translates integer keys to strings where needed
		k, err := key.makeKey()
		if err != nil {
			return nil, WithStack(err)
		}
		if eq, err := k.IsEqualString(attribute); err != nil {
			return nil, WithStack(err)
		} else if eq {
			// value follows the raw (untranslated) key
			value, err := key.Next()
			return value, WithStack(err)
		}

		if err := it.Next(); err != nil {
			return nil, WithStack(err)
		}
	}
	// not found
	return nil, nil
}

// findDataOffset returns the offset of the first member of a nonempty
// Array/Object, probing for zero padding bytes after the header.
func (s Slice) findDataOffset(head byte) ValueLength {
	// Must be called for a nonempty array or object at start():
	vpackAssert(head <= 0x12)
	fsm := firstSubMap[head]
	if fsm <= 2 && s[2] != 0 {
		return 2
	}
	if fsm <= 3 && s[3] != 0 {
		return 3
	}
	if fsm <= 5 && s[5] != 0 {
		return 5
	}
	return 9
}

// get the offset for the nth member from an Array or Object type
func (s Slice) getNthOffset(index ValueLength) (ValueLength, error) {
	vpackAssert(s.IsArray() || s.IsObject())

	h := s.head()

	if h == 0x13 || h == 0x14 {
		// compact Array or Object
		l, err := s.getNthOffsetFromCompact(index)
		if err != nil {
			return 0, WithStack(err)
		}
		return l, nil
	}

	if h == 0x01 || h == 0x0a {
		// special case: empty Array or empty Object
		return 0, WithStack(IndexOutOfBoundsError)
	}

	offsetSize := indexEntrySize(h)
	end := ValueLength(readIntegerNonEmpty(s[1:], offsetSize))

	dataOffset := ValueLength(0)

	// find the number of items
	var n ValueLength
	if h <= 0x05 { // No offset table or length, need to compute:
		dataOffset = s.findDataOffset(h)
		first := Slice(s[dataOffset:])
		// NOTE(review): `s` below shadows the receiver; it holds the byte
		// size of the first element (all elements are equally sized here).
		s, err := first.ByteSize()
		if err != nil {
			return 0, WithStack(err)
		}
		if s == 0 {
			return 0, WithStack(InternalError)
		}
		n = (end - dataOffset) / s
	} else if offsetSize < 8 {
		n = ValueLength(readIntegerNonEmpty(s[1+offsetSize:], offsetSize))
	} else {
		n = ValueLength(readIntegerNonEmpty(s[end-ValueLength(offsetSize):], offsetSize))
	}

	if index >= n {
		return 0, WithStack(IndexOutOfBoundsError)
	}

	// empty array case was already covered
	vpackAssert(n > 0)

	if h <= 0x05 || n == 1 {
		// no index table, but all array items have the same length
		// now fetch first item and determine its length
		if dataOffset == 0 {
			dataOffset = s.findDataOffset(h)
		}
		sliceAtDataOffset := Slice(s[dataOffset:])
		sliceAtDataOffsetByteSize, err := sliceAtDataOffset.ByteSize()
		if err != nil {
			return 0, WithStack(err)
		}
		return dataOffset + index*sliceAtDataOffsetByteSize, nil
	}

	// with 8-byte offsets the item count trails the index table; skip it
	offsetSize8Or0 := ValueLength(0)
	if offsetSize == 8 {
		offsetSize8Or0 = 8
	}
	ieBase := end - n*ValueLength(offsetSize) + index*ValueLength(offsetSize) - (offsetSize8Or0)
	return ValueLength(readIntegerNonEmpty(s[ieBase:], offsetSize)), nil
}

// get the offset for the nth member from a compact Array or Object type.
// Compact values have no index table, so this walks the members one by one.
func (s Slice) getNthOffsetFromCompact(index ValueLength) (ValueLength, error) {
	end := ValueLength(readVariableValueLength(s, 1, false))
	n := ValueLength(readVariableValueLength(s, end-1, true))
	if index >= n {
		return 0, WithStack(IndexOutOfBoundsError)
	}

	h := s.head()
	offset := ValueLength(1 + getVariableValueLength(end))
	current := ValueLength(0)
	for current != index {
		sliceAtOffset := Slice(s[offset:])
		sliceAtOffsetByteSize, err := sliceAtOffset.ByteSize()
		if err != nil {
			return 0, WithStack(err)
		}
		offset += sliceAtOffsetByteSize
		if h == 0x14 {
			// compact Object: skip the value that follows each key
			sliceAtOffset := Slice(s[offset:])
			sliceAtOffsetByteSize, err := sliceAtOffset.ByteSize()
			if err != nil {
				return 0, WithStack(err)
			}
			offset += sliceAtOffsetByteSize
		}
		current++
	}
	return offset, nil
}

// extract the nth member from an Array
func (s Slice) getNth(index ValueLength) (Slice, error) {
	vpackAssert(s.IsArray())

	offset, err := s.getNthOffset(index)
	if err != nil {
		return nil, WithStack(err)
	}
	return Slice(s[offset:]), nil
}

// getNthKey extract the nth member from an Object.
// When translate is true, integer keys are converted to strings.
func (s Slice) getNthKey(index ValueLength, translate bool) (Slice, error) {
	vpackAssert(s.Type() == Object)

	offset, err := s.getNthOffset(index)
	if err != nil {
		return nil, WithStack(err)
	}
	result := Slice(s[offset:])
	if translate {
		result, err = result.makeKey()
		if err != nil {
			return nil, WithStack(err)
		}
	}
	return result, nil
}

// getNthValue extract the nth value from an Object
// (the slice that directly follows the nth key).
func (s Slice) getNthValue(index ValueLength) (Slice, error) {
	key, err := s.getNthKey(index, false)
	if err != nil {
		return nil, WithStack(err)
	}
	value, err := key.Next()
	return value, WithStack(err)
}

// makeKey returns the slice unchanged for string keys and translates
// SmallInt/UInt keys via the attribute translator.
func (s Slice) makeKey() (Slice, error) {
	if s.IsString() {
		return s, nil
	}
	if s.IsSmallInt() || s.IsUInt() {
		if attributeTranslator == nil {
			return nil, WithStack(NeedAttributeTranslatorError)
		}
		return s.translateUnchecked(), nil
	}

	return nil, InvalidTypeError{"Cannot translate key of this type"}
}

// perform a linear search for the specified attribute inside an Object
func (s Slice) searchObjectKeyLinear(attribute string, ieBase, offsetSize, n ValueLength) (Slice, error) {
	useTranslator := attributeTranslator != nil

	for index := ValueLength(0); index < n; index++ {
		offset := ValueLength(ieBase + index*offsetSize)
		key := Slice(s[readIntegerNonEmpty(s[offset:], uint(offsetSize)):])

		if key.IsString() {
			if eq, err := key.IsEqualString(attribute); err != nil {
				return nil, WithStack(err)
			} else if !eq {
				continue
			}
		} else if key.IsSmallInt() || key.IsUInt() {
			// translate key
			if !useTranslator {
				// no attribute translator
				return nil, WithStack(NeedAttributeTranslatorError)
			}
			if eq, err := key.translateUnchecked().IsEqualString(attribute); err != nil {
				return nil, WithStack(err)
			} else if !eq {
				continue
			}
		} else {
			// invalid key type
			return nil, nil
		}

		// key is identical. now return value
		value, err := key.Next()
		return value, WithStack(err)
	}

	// nothing found
	return nil, nil
}

// perform a binary search for the specified attribute inside a sorted Object.
// NOTE(review): this reads table entries with readIntegerFixed while the
// linear variant uses readIntegerNonEmpty — presumably equivalent for the
// 1/2/4/8 widths used here; confirm against the reader helpers.
//template
func (s Slice) searchObjectKeyBinary(attribute string, ieBase ValueLength, n ValueLength, offsetSize ValueLength) (Slice, error) {
	useTranslator := attributeTranslator != nil
	vpackAssert(n > 0)

	l := ValueLength(0)
	r := ValueLength(n - 1)
	index := ValueLength(r / 2)

	for {
		offset := ValueLength(ieBase + index*offsetSize)
		key := Slice(s[readIntegerFixed(s[offset:], uint(offsetSize)):])

		var res int
		var err error
		if key.IsString() {
			res, err = key.CompareString(attribute)
			if err != nil {
				return nil, WithStack(err)
			}
		} else if key.IsSmallInt() || key.IsUInt() {
			// translate key
			if !useTranslator {
				// no attribute translator
				return nil, WithStack(NeedAttributeTranslatorError)
			}
			res, err = key.translateUnchecked().CompareString(attribute)
			if err != nil {
				return nil, WithStack(err)
			}
		} else {
			// invalid key
			return nil, nil
		}

		if res == 0 {
			// found. now return a Slice pointing at the value
			keySize, err := key.ByteSize()
			if err != nil {
				return nil, WithStack(err)
			}
			return Slice(key[keySize:]), nil
		}

		if res > 0 {
			if index == 0 {
				return nil, nil
			}
			r = index - 1
		} else {
			l = index + 1
		}
		if r < l {
			return nil, nil
		}

		// determine new midpoint
		index = l + ((r - l) / 2)
	}
}

// translates an integer key into a string
func (s Slice) translate() (Slice, error) {
	if !s.IsSmallInt() && !s.IsUInt() {
		return nil, WithStack(InvalidTypeError{"Cannot translate key of this type"})
	}
	if attributeTranslator == nil {
		return nil, WithStack(NeedAttributeTranslatorError)
	}
	return s.translateUnchecked(), nil
}

// return the value for a UInt object, without checks!
// returns 0 for invalid values/types
func (s Slice) getUIntUnchecked() uint64 {
	h := s.head()
	if h >= 0x28 && h <= 0x2f {
		// UInt
		return readIntegerNonEmpty(s[1:], uint(h-0x27))
	}

	if h >= 0x30 && h <= 0x39 {
		// Smallint >= 0
		return uint64(h - 0x30)
	}
	return 0
}

// translates an integer key into a string, without checks
func (s Slice) translateUnchecked() Slice {
	id := s.getUIntUnchecked()
	key := attributeTranslator.IDToString(id)
	if key == "" {
		return nil
	}
	return StringSlice(key)
}

// (start of deps/github.com/arangodb/go-velocypack/slice_factory.go)
//
// DISCLAIMER
//
// Copyright 2017 ArangoDB GmbH, Cologne, Germany
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package velocypack + +import "encoding/binary" + +// NoneSlice creates a slice of type None +func NoneSlice() Slice { return Slice{0x00} } + +// IllegalSlice creates a slice of type Illegal +func IllegalSlice() Slice { return Slice{0x17} } + +// NullSlice creates a slice of type Null +func NullSlice() Slice { return Slice{0x18} } + +// FalseSlice creates a slice of type Boolean with false value +func FalseSlice() Slice { return Slice{0x19} } + +// TrueSlice creates a slice of type Boolean with true value +func TrueSlice() Slice { return Slice{0x1a} } + +// ZeroSlice creates a slice of type Smallint(0) +func ZeroSlice() Slice { return Slice{0x30} } + +// EmptyArraySlice creates a slice of type Array, empty +func EmptyArraySlice() Slice { return Slice{0x01} } + +// EmptyObjectSlice creates a slice of type Object, empty +func EmptyObjectSlice() Slice { return Slice{0x0a} } + +// MinKeySlice creates a slice of type MinKey +func MinKeySlice() Slice { return Slice{0x1e} } + +// MaxKeySlice creates a slice of type MaxKey +func MaxKeySlice() Slice { return Slice{0x1f} } + +// StringSlice creates a slice of type String with given string value +func StringSlice(s string) Slice { + raw := []byte(s) + l := len(raw) + if l <= 126 { + return Slice(append([]byte{byte(0x40 + l)}, raw...)) + } + buf := make([]byte, 1+8+l) + buf[0] = 0xbf + binary.LittleEndian.PutUint64(buf[1:], uint64(l)) + copy(buf[1+8:], raw) + return buf +} diff --git 
a/deps/github.com/arangodb/go-velocypack/slice_merge.go b/deps/github.com/arangodb/go-velocypack/slice_merge.go new file mode 100644 index 000000000..be9a77578 --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/slice_merge.go @@ -0,0 +1,99 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package velocypack + +// Merge creates a slice that contains all fields from all given slices. +// When a field exists (with same name) in an earlier slice, it is ignored. +// All slices must be objects. +func Merge(slices ...Slice) (Slice, error) { + // Calculate overall length + l := ValueLength(0) + for _, s := range slices { + if err := s.AssertType(Object); err != nil { + return nil, WithStack(err) + } + byteSize, err := s.ByteSize() + if err != nil { + return nil, WithStack(err) + } + l += byteSize + } + + if len(slices) == 1 { + // Fast path, only 1 slice + return slices[0], nil + } + + // Create a buffer to hold all slices. 
+ b := NewBuilder(uint(l)) + keys := make(map[string]struct{}) + if err := b.OpenObject(); err != nil { + return nil, WithStack(err) + } + for _, s := range slices { + it, err := NewObjectIterator(s, true) + if err != nil { + return nil, WithStack(err) + } + for it.IsValid() { + keySlice, err := it.Key(true) + if err != nil { + return nil, WithStack(err) + } + key, err := keySlice.GetString() + if err != nil { + return nil, WithStack(err) + } + if _, found := keys[key]; !found { + // Record key + keys[key] = struct{}{} + + // Fetch value + value, err := it.Value() + if err != nil { + return nil, WithStack(err) + } + + // Add key,value + if err := b.addInternalKeyValue(key, NewSliceValue(value)); err != nil { + return nil, WithStack(err) + } + } + + // Move to next field + if err := it.Next(); err != nil { + return nil, WithStack(err) + } + } + } + if err := b.Close(); err != nil { + return nil, WithStack(err) + } + + // Return slice + result, err := b.Slice() + if err != nil { + return nil, WithStack(err) + } + return result, nil +} diff --git a/deps/github.com/arangodb/go-velocypack/slice_reader.go b/deps/github.com/arangodb/go-velocypack/slice_reader.go new file mode 100644 index 000000000..7baa365c9 --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/slice_reader.go @@ -0,0 +1,197 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
//
// Copyright holder is ArangoDB GmbH, Cologne, Germany
//
// Author Ewout Prangsma
//

package velocypack

import (
	"bufio"
	"io"
)

const (
	// maxByteSizeBytes is the maximum number of bytes needed to determine
	// the total byte size of any slice (head + largest length field).
	maxByteSizeBytes = 16
)

// SliceFromReader reads a slice from the given reader.
// It first reads the head byte, then — depending on the value type —
// reads the length field and finally the remaining payload.
func SliceFromReader(r io.Reader) (Slice, error) {
	if r, ok := r.(*bufio.Reader); ok {
		// Buffered reader can use faster path.
		return sliceFromBufReader(r)
	}
	hdr := make(Slice, 1, maxByteSizeBytes)
	// Read first byte
	if err := readBytes(hdr, r); err != nil {
		if Cause(err) == io.EOF {
			// Empty slice
			return nil, nil
		}
		return nil, WithStack(err)
	}
	// Lookup first size
	// check if the type has a fixed length first
	l := fixedTypeLengths[hdr[0]]
	if l != 0 {
		// Found fixed length, read it (minus byte already read)
		s := make(Slice, l)
		s[0] = hdr[0]
		if err := readBytes(s[1:], r); err != nil {
			return nil, WithStack(err)
		}
		return s, nil
	}

	// readRemaining allocates the full slice, copies the bytes read so far
	// and reads the rest from r.
	readRemaining := func(prefix Slice, l ValueLength) (Slice, error) {
		s := make(Slice, l)
		copy(s, prefix)
		if err := readBytes(s[len(prefix):], r); err != nil {
			return nil, WithStack(err)
		}
		return s, nil
	}

	// types with dynamic lengths need special treatment:
	h := hdr[0]
	switch hdr.Type() {
	case Array, Object:
		if h == 0x13 || h == 0x14 {
			// compact Array or Object: total size is a variable-length integer
			l, bytes, err := readVariableValueLengthFromReader(r, false)
			if err != nil {
				return nil, WithStack(err)
			}
			return readRemaining(append(hdr, bytes...), l)
		}

		vpackAssert(h > 0x00 && h <= 0x0e)
		l, bytes, err := readIntegerNonEmptyFromReader(r, widthMap[h])
		if err != nil {
			return nil, WithStack(err)
		}
		return readRemaining(append(hdr, bytes...), ValueLength(l))

	case String:
		vpackAssert(h == 0xbf)

		// long UTF-8 String
		l, bytes, err := readIntegerFixedFromReader(r, 8)
		if err != nil {
			return nil, WithStack(err)
		}
		return readRemaining(append(hdr, bytes...), ValueLength(l+1+8))

	case Binary:
		vpackAssert(h >= 0xc0 && h <= 0xc7)
		x, bytes, err := readIntegerNonEmptyFromReader(r, uint(h)-0xbf)
		if err != nil {
			return nil, WithStack(err)
		}
		// total = head byte + length-field bytes + payload bytes
		l := ValueLength(1 + ValueLength(h) - 0xbf + ValueLength(x))
		return readRemaining(append(hdr, bytes...), l)

	case BCD:
		if h <= 0xcf {
			// positive BCD
			vpackAssert(h >= 0xc8 && h < 0xcf)
			x, bytes, err := readIntegerNonEmptyFromReader(r, uint(h)-0xc7)
			if err != nil {
				return nil, WithStack(err)
			}
			l := ValueLength(1 + ValueLength(h) - 0xc7 + ValueLength(x))
			return readRemaining(append(hdr, bytes...), l)
		}

		// negative BCD
		vpackAssert(h >= 0xd0 && h < 0xd7)
		x, bytes, err := readIntegerNonEmptyFromReader(r, uint(h)-0xcf)
		if err != nil {
			return nil, WithStack(err)
		}
		l := ValueLength(1 + ValueLength(h) - 0xcf + ValueLength(x))
		return readRemaining(append(hdr, bytes...), l)

	case Custom:
		vpackAssert(h >= 0xf4)
		// custom types carry a 1/2/4/8-byte payload length after the head
		switch h {
		case 0xf4, 0xf5, 0xf6:
			x, bytes, err := readIntegerFixedFromReader(r, 1)
			if err != nil {
				return nil, WithStack(err)
			}
			l := ValueLength(2 + x)
			return readRemaining(append(hdr, bytes...), l)
		case 0xf7, 0xf8, 0xf9:
			x, bytes, err := readIntegerFixedFromReader(r, 2)
			if err != nil {
				return nil, WithStack(err)
			}
			l := ValueLength(3 + x)
			return readRemaining(append(hdr, bytes...), l)
		case 0xfa, 0xfb, 0xfc:
			x, bytes, err := readIntegerFixedFromReader(r, 4)
			if err != nil {
				return nil, WithStack(err)
			}
			l := ValueLength(5 + x)
			return readRemaining(append(hdr, bytes...), l)
		case 0xfd, 0xfe, 0xff:
			x, bytes, err := readIntegerFixedFromReader(r, 8)
			if err != nil {
				return nil, WithStack(err)
			}
			l := ValueLength(9 + x)
			return readRemaining(append(hdr, bytes...), l)
		}
	}

	return nil, WithStack(InternalError)
}

// sliceFromBufReader reads a slice from the given buffered reader.
+func sliceFromBufReader(r *bufio.Reader) (Slice, error) { + // ByteSize is always found within first 16 bytes + hdr, err := r.Peek(maxByteSizeBytes) + if len(hdr) == 0 && err != nil { + if Cause(err) == io.EOF { + // Empty slice + return nil, nil + } + return nil, WithStack(err) + } + s := Slice(hdr) + size, err := s.ByteSize() + if err != nil { + return nil, WithStack(err) + } + // Now that we know the size, read the entire slice + buf := make(Slice, size) + offset := 0 + bytesRead := 0 + for ValueLength(bytesRead) < size { + n, err := r.Read(buf[offset:]) + bytesRead += n + offset += n + if err != nil && ValueLength(bytesRead) < size { + return nil, WithStack(err) + } + } + return buf, nil +} diff --git a/deps/github.com/arangodb/go-velocypack/slice_type.go b/deps/github.com/arangodb/go-velocypack/slice_type.go new file mode 100644 index 000000000..a49b2fc08 --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/slice_type.go @@ -0,0 +1,135 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package velocypack + +import "fmt" + +// Type returns the vpack type of the slice +func (s Slice) Type() ValueType { + return typeMap[s.head()] +} + +// IsType returns true when the vpack type of the slice is equal to the given type. +// Returns false otherwise. 
+func (s Slice) IsType(t ValueType) bool { + return typeMap[s.head()] == t +} + +// AssertType returns an error when the vpack type of the slice different from the given type. +// Returns nil otherwise. +func (s Slice) AssertType(t ValueType) error { + if found := typeMap[s.head()]; found != t { + return WithStack(InvalidTypeError{Message: fmt.Sprintf("expected type '%s', got '%s'", t, found)}) + } + return nil +} + +// AssertTypeAny returns an error when the vpack type of the slice different from all of the given types. +// Returns nil otherwise. +func (s Slice) AssertTypeAny(t ...ValueType) error { + found := typeMap[s.head()] + for _, x := range t { + if x == found { + return nil + } + } + return WithStack(InvalidTypeError{Message: fmt.Sprintf("expected types '%q', got '%s'", t, found)}) +} + +// IsNone returns true if slice is a None object +func (s Slice) IsNone() bool { return s.IsType(None) } + +// IsIllegal returns true if slice is an Illegal object +func (s Slice) IsIllegal() bool { return s.IsType(Illegal) } + +// IsNull returns true if slice is a Null object +func (s Slice) IsNull() bool { return s.IsType(Null) } + +// IsBool returns true if slice is a Bool object +func (s Slice) IsBool() bool { return s.IsType(Bool) } + +// IsTrue returns true if slice is the Boolean value true +func (s Slice) IsTrue() bool { return s.head() == 0x1a } + +// IsFalse returns true if slice is the Boolean value false +func (s Slice) IsFalse() bool { return s.head() == 0x19 } + +// IsArray returns true if slice is an Array object +func (s Slice) IsArray() bool { return s.IsType(Array) } + +// IsEmptyArray tests whether the Slice is an empty array +func (s Slice) IsEmptyArray() bool { return s.head() == 0x01 } + +// IsObject returns true if slice is an Object object +func (s Slice) IsObject() bool { return s.IsType(Object) } + +// IsEmptyObject tests whether the Slice is an empty object +func (s Slice) IsEmptyObject() bool { return s.head() == 0x0a } + +// IsDouble returns 
true if slice is a Double object +func (s Slice) IsDouble() bool { return s.IsType(Double) } + +// IsUTCDate returns true if slice is a UTCDate object +func (s Slice) IsUTCDate() bool { return s.IsType(UTCDate) } + +// IsExternal returns true if slice is an External object +func (s Slice) IsExternal() bool { return s.IsType(External) } + +// IsMinKey returns true if slice is a MinKey object +func (s Slice) IsMinKey() bool { return s.IsType(MinKey) } + +// IsMaxKey returns true if slice is a MaxKey object +func (s Slice) IsMaxKey() bool { return s.IsType(MaxKey) } + +// IsInt returns true if slice is an Int object +func (s Slice) IsInt() bool { return s.IsType(Int) } + +// IsUInt returns true if slice is a UInt object +func (s Slice) IsUInt() bool { return s.IsType(UInt) } + +// IsSmallInt returns true if slice is a SmallInt object +func (s Slice) IsSmallInt() bool { return s.IsType(SmallInt) } + +// IsString returns true if slice is a String object +func (s Slice) IsString() bool { return s.IsType(String) } + +// IsBinary returns true if slice is a Binary object +func (s Slice) IsBinary() bool { return s.IsType(Binary) } + +// IsBCD returns true if slice is a BCD +func (s Slice) IsBCD() bool { return s.IsType(BCD) } + +// IsCustom returns true if slice is a Custom type +func (s Slice) IsCustom() bool { return s.IsType(Custom) } + +// IsInteger returns true if a slice is any decimal number type +func (s Slice) IsInteger() bool { return s.IsInt() || s.IsUInt() || s.IsSmallInt() } + +// IsNumber returns true if slice is any Number-type object +func (s Slice) IsNumber() bool { return s.IsInteger() || s.IsDouble() } + +// IsSorted returns true if slice is an object with table offsets, sorted by attribute name +func (s Slice) IsSorted() bool { + h := s.head() + return (h >= 0x0b && h <= 0x0e) +} diff --git a/deps/github.com/arangodb/go-velocypack/test/array_iterator_test.go b/deps/github.com/arangodb/go-velocypack/test/array_iterator_test.go new file mode 100644 index 
000000000..d39c4727d --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/test/array_iterator_test.go @@ -0,0 +1,73 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "strings" + "testing" + + velocypack "github.com/arangodb/go-velocypack" +) + +func TestArrayIteratorInvalidSlice(t *testing.T) { + tests := []velocypack.Slice{ + velocypack.NullSlice(), + velocypack.TrueSlice(), + velocypack.FalseSlice(), + mustSlice(velocypack.ParseJSONFromString("1")), + mustSlice(velocypack.ParseJSONFromString("7.7")), + mustSlice(velocypack.ParseJSONFromString("\"foo\"")), + mustSlice(velocypack.ParseJSONFromString("{}")), + mustSlice(velocypack.ParseJSONFromString("{}", velocypack.ParserOptions{BuildUnindexedObjects: true})), + } + for _, test := range tests { + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsInvalidType, t)(velocypack.NewArrayIterator(test)) + } +} + +func TestArrayIteratorValues(t *testing.T) { + tests := [][]string{ + []string{}, + []string{"1", "2", "true", "null", "false", "{}"}, + } + for _, unindexed := range []bool{true, false} { + for _, test := range tests { + json := "[" + strings.Join(test, ",") + "]" + s := mustSlice(velocypack.ParseJSONFromString(json, velocypack.ParserOptions{BuildUnindexedArrays: unindexed})) + it, err := 
velocypack.NewArrayIterator(s) + if err != nil { + t.Errorf("Failed to create ArrayIterator for '%s': %v", json, err) + } else { + i := 0 + for it.IsValid() { + v := mustSlice(it.Value()) + if mustString(v.JSONString()) != test[i] { + t.Errorf("Element %d is invalid; got '%s', expected '%s'", i, mustString(v.JSONString()), test[i]) + } + must(it.Next()) + i++ + } + } + } + } +} diff --git a/deps/github.com/arangodb/go-velocypack/test/assert.go b/deps/github.com/arangodb/go-velocypack/test/assert.go new file mode 100644 index 000000000..8ad4c0eb8 --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/test/assert.go @@ -0,0 +1,92 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "fmt" + "reflect" + "runtime" + "strings" + "testing" +) + +func ASSERT_EQ(a, b interface{}, t *testing.T) { + as, asOk := a.(string) + bs, bsOk := b.(string) + if asOk && bsOk { + if strings.Compare(as, bs) != 0 { + t.Errorf("Expected '%s', '%s' to be equal\nat %s", as, bs, callerInfo(2)) + } + } else if !reflect.DeepEqual(a, b) { + t.Errorf("Expected %v, %v to be equal\nat %s", a, b, callerInfo(2)) + } +} + +func ASSERT_DOUBLE_EQ(a, b float64, t *testing.T) { + if a != b { + t.Errorf("Expected %v, %v to be equal\nat %s", a, b, callerInfo(2)) + } +} + +func ASSERT_TRUE(a bool, t *testing.T) { + if !a { + t.Errorf("Expected true\nat %s", callerInfo(2)) + } +} + +func ASSERT_FALSE(a bool, t *testing.T) { + if a { + t.Errorf("Expected false\nat %s", callerInfo(2)) + } +} + +func ASSERT_NIL(a interface{}, t *testing.T) { + if a != nil { + t.Errorf("Expected nil, got %v\nat %s", a, callerInfo(2)) + } +} + +func ASSERT_VELOCYPACK_EXCEPTION(errorType func(error) bool, t *testing.T) func(args ...interface{}) { + return func(args ...interface{}) { + l := len(args) + if l == 0 { + t.Fatalf("Expected at least 1 error argument\nat %s", callerInfo(2)) + } + last := args[l-1] + if last == nil { + t.Errorf("Expected error, got nil\nat %s", callerInfo(2)) + } else if err, ok := last.(error); !ok { + t.Fatalf("Expected last argument to be of type error, got %v\nat %s", args[l-1], callerInfo(2)) + } else if !errorType(err) { + t.Errorf("Expected error, got %s\nat %s", err, callerInfo(2)) + } + } +} + +func callerInfo(depth int) string { + _, file, line, ok := runtime.Caller(depth) + if !ok { + return "?" 
+ } + return fmt.Sprintf("%s (%d)", file, line) +} diff --git a/deps/github.com/arangodb/go-velocypack/test/attribute_translator_test.go b/deps/github.com/arangodb/go-velocypack/test/attribute_translator_test.go new file mode 100644 index 000000000..709861e63 --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/test/attribute_translator_test.go @@ -0,0 +1,199 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "testing" + + velocypack "github.com/arangodb/go-velocypack" +) + +func TestAttributeTranslator1(t *testing.T) { + tests := map[uint8]string{ + 1: "_key", + 2: "_rev", + 3: "_id", + 4: "_from", + 5: "_to", + 6: "6", + } + for id, name := range tests { + // Simple object with only 1 field + slice := velocypack.Slice{0x0b, + 0x07, // Bytesize + 0x01, // NoItems + 0x28, id, 0x1a, // "_xyz": true + 0x03, // Index of "_xyz" + } + + a := mustSlice(slice.Get(name)) + ASSERT_EQ(velocypack.Bool, a.Type(), t) + ASSERT_TRUE(mustBool(a.GetBool()), t) + } +} + +func TestAttributeTranslatorObject(t *testing.T) { + // Normal object with multiple fields + slice := velocypack.Slice{0x0b, + 0x00, // Bytesize + 0x05, // NoItems + 0x28, 0x01, 0x1a, // "_key": true + 0x28, 0x02, 0x19, // "_rev": false + 0x28, 0x03, 0x01, // "_id": [] + 0x28, 0x04, 0x18, // "_from": null + 0x28, 0x05, 0x0a, // "_to": {} + 12, 9, 3, 6, 15, // Index of "_from", "_id", "_key", "_rev", "_to" + } + slice[1] = byte(len(slice)) + + ASSERT_EQ("_key", mustString(mustSlice(slice.KeyAt(2, true)).GetString()), t) + key := mustSlice(slice.Get("_key")) + ASSERT_EQ(velocypack.Bool, key.Type(), t) + ASSERT_TRUE(mustBool(key.GetBool()), t) + + ASSERT_EQ("_rev", mustString(mustSlice(slice.KeyAt(3, true)).GetString()), t) + rev := mustSlice(slice.Get("_rev")) + ASSERT_EQ(velocypack.Bool, rev.Type(), t) + ASSERT_FALSE(mustBool(rev.GetBool()), t) + + ASSERT_EQ("_id", mustString(mustSlice(slice.KeyAt(1, true)).GetString()), t) + id := mustSlice(slice.Get("_id")) + ASSERT_EQ(velocypack.Array, id.Type(), t) + + ASSERT_EQ("_from", mustString(mustSlice(slice.KeyAt(0, true)).GetString()), t) + from := mustSlice(slice.Get("_from")) + ASSERT_EQ(velocypack.Null, from.Type(), t) + + ASSERT_EQ("_to", mustString(mustSlice(slice.KeyAt(4, true)).GetString()), t) + to := mustSlice(slice.Get("_to")) + 
ASSERT_EQ(velocypack.Object, to.Type(), t) +} + +func TestAttributeTranslatorObjectSmallInt(t *testing.T) { + // Normal object with multiple fields + slice := velocypack.Slice{0x0b, + 0x00, // Bytesize + 0x05, // NoItems + 0x31, 0x1a, // "_key": true + 0x32, 0x19, // "_rev": false + 0x33, 0x01, // "_id": [] + 0x34, 0x18, // "_from": null + 0x35, 0x0a, // "_to": {} + 9, 7, 3, 5, 11, // Index of "_from", "_id", "_key", "_rev", "_to" + } + slice[1] = byte(len(slice)) + + ASSERT_EQ("_key", mustString(mustSlice(slice.KeyAt(2, true)).GetString()), t) + key := mustSlice(slice.Get("_key")) + ASSERT_EQ(velocypack.Bool, key.Type(), t) + ASSERT_TRUE(mustBool(key.GetBool()), t) + + ASSERT_EQ("_rev", mustString(mustSlice(slice.KeyAt(3, true)).GetString()), t) + rev := mustSlice(slice.Get("_rev")) + ASSERT_EQ(velocypack.Bool, rev.Type(), t) + ASSERT_FALSE(mustBool(rev.GetBool()), t) + + ASSERT_EQ("_id", mustString(mustSlice(slice.KeyAt(1, true)).GetString()), t) + id := mustSlice(slice.Get("_id")) + ASSERT_EQ(velocypack.Array, id.Type(), t) + + ASSERT_EQ("_from", mustString(mustSlice(slice.KeyAt(0, true)).GetString()), t) + from := mustSlice(slice.Get("_from")) + ASSERT_EQ(velocypack.Null, from.Type(), t) + + ASSERT_EQ("_to", mustString(mustSlice(slice.KeyAt(4, true)).GetString()), t) + to := mustSlice(slice.Get("_to")) + ASSERT_EQ(velocypack.Object, to.Type(), t) +} + +func TestAttributeTranslatorCompactObject(t *testing.T) { + // Compact object with multiple fields + slice := velocypack.Slice{0x14, + 0x00, // Bytesize + 0x28, 0x01, 0x1a, // "_key": true + 0x28, 0x02, 0x19, // "_rev": false + 0x28, 0x03, 0x01, // "_id": [] + 0x28, 0x04, 0x18, // "_from": null + 0x28, 0x05, 0x0a, // "_to": {} + 0x05, // NoItems + } + slice[1] = byte(len(slice)) + + ASSERT_EQ("_key", mustString(mustSlice(slice.KeyAt(0, true)).GetString()), t) + key := mustSlice(slice.Get("_key")) + ASSERT_EQ(velocypack.Bool, key.Type(), t) + ASSERT_TRUE(mustBool(key.GetBool()), t) + + ASSERT_EQ("_rev", 
mustString(mustSlice(slice.KeyAt(1, true)).GetString()), t) + rev := mustSlice(slice.Get("_rev")) + ASSERT_EQ(velocypack.Bool, rev.Type(), t) + ASSERT_FALSE(mustBool(rev.GetBool()), t) + + ASSERT_EQ("_id", mustString(mustSlice(slice.KeyAt(2, true)).GetString()), t) + id := mustSlice(slice.Get("_id")) + ASSERT_EQ(velocypack.Array, id.Type(), t) + + ASSERT_EQ("_from", mustString(mustSlice(slice.KeyAt(3, true)).GetString()), t) + from := mustSlice(slice.Get("_from")) + ASSERT_EQ(velocypack.Null, from.Type(), t) + + ASSERT_EQ("_to", mustString(mustSlice(slice.KeyAt(4, true)).GetString()), t) + to := mustSlice(slice.Get("_to")) + ASSERT_EQ(velocypack.Object, to.Type(), t) +} + +func TestAttributeTranslatorCompactObjectSmallInt(t *testing.T) { + // Compact object with multiple fields + slice := velocypack.Slice{0x14, + 0x00, // Bytesize + 0x31, 0x1a, // "_key": true + 0x32, 0x19, // "_rev": false + 0x33, 0x01, // "_id": [] + 0x34, 0x18, // "_from": null + 0x35, 0x0a, // "_to": {} + 0x05, // NoItems + } + slice[1] = byte(len(slice)) + + ASSERT_EQ("_key", mustString(mustSlice(slice.KeyAt(0, true)).GetString()), t) + key := mustSlice(slice.Get("_key")) + ASSERT_EQ(velocypack.Bool, key.Type(), t) + ASSERT_TRUE(mustBool(key.GetBool()), t) + + ASSERT_EQ("_rev", mustString(mustSlice(slice.KeyAt(1, true)).GetString()), t) + rev := mustSlice(slice.Get("_rev")) + ASSERT_EQ(velocypack.Bool, rev.Type(), t) + ASSERT_FALSE(mustBool(rev.GetBool()), t) + + ASSERT_EQ("_id", mustString(mustSlice(slice.KeyAt(2, true)).GetString()), t) + id := mustSlice(slice.Get("_id")) + ASSERT_EQ(velocypack.Array, id.Type(), t) + + ASSERT_EQ("_from", mustString(mustSlice(slice.KeyAt(3, true)).GetString()), t) + from := mustSlice(slice.Get("_from")) + ASSERT_EQ(velocypack.Null, from.Type(), t) + + ASSERT_EQ("_to", mustString(mustSlice(slice.KeyAt(4, true)).GetString()), t) + to := mustSlice(slice.Get("_to")) + ASSERT_EQ(velocypack.Object, to.Type(), t) +} diff --git 
a/deps/github.com/arangodb/go-velocypack/test/benchmark_builder_test.go b/deps/github.com/arangodb/go-velocypack/test/benchmark_builder_test.go new file mode 100644 index 000000000..476c91f13 --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/test/benchmark_builder_test.go @@ -0,0 +1,73 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "testing" + + velocypack "github.com/arangodb/go-velocypack" +) + +func BenchmarkBuilderString(b *testing.B) { + for i := 0; i < b.N; i++ { + builder := velocypack.NewBuilder(64) + builder.AddValue(velocypack.NewStringValue("Some string")) + if _, err := builder.Slice(); err != nil { + b.Errorf("Slice failed: %v", err) + } + } +} + +func BenchmarkBuilderObject1(b *testing.B) { + for i := 0; i < b.N; i++ { + builder := velocypack.Builder{} + builder.OpenObject() + builder.AddKeyValue("Name", velocypack.NewStringValue("John Doe")) + builder.AddKeyValue("Age", velocypack.NewIntValue(42)) + builder.Close() + if _, err := builder.Slice(); err != nil { + b.Errorf("Slice failed: %v", err) + } + } +} + +func BenchmarkBuilderObject2(b *testing.B) { + for i := 0; i < b.N; i++ { + builder := velocypack.Builder{} + builder.OpenObject() + builder.AddKeyValue("Name", velocypack.NewStringValue("John Doe")) + 
builder.AddKeyValue("FirstName", velocypack.NewStringValue("John")) + builder.AddKeyValue("LastName", velocypack.NewStringValue("Doe")) + builder.AddKeyValue("Age", velocypack.NewIntValue(42)) + builder.AddValue(velocypack.NewStringValue("Address")) + builder.OpenArray() + builder.AddValue(velocypack.NewStringValue("Some street")) + builder.AddValue(velocypack.NewStringValue("Block 123")) + builder.AddValue(velocypack.NewStringValue("South")) + builder.Close() + builder.Close() + if _, err := builder.Slice(); err != nil { + b.Errorf("Slice failed: %v", err) + } + } +} diff --git a/deps/github.com/arangodb/go-velocypack/test/benchmark_decoder_test.go b/deps/github.com/arangodb/go-velocypack/test/benchmark_decoder_test.go new file mode 100644 index 000000000..486c5d280 --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/test/benchmark_decoder_test.go @@ -0,0 +1,62 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "encoding/json" + "testing" + + velocypack "github.com/arangodb/go-velocypack" +) + +func BenchmarkVPackDecoderObject(b *testing.B) { + b.StopTimer() + slice, err := velocypack.Marshal(benchmarkObjectInput) + if err != nil { + b.Errorf("Marshal failed: %v", err) + } + b.StartTimer() + + for i := 0; i < b.N; i++ { + var result benchmarkObjectType + if err := velocypack.Unmarshal(slice, &result); err != nil { + b.Errorf("Unmarshal failed: %v", err) + } + } +} + +func BenchmarkJSONDecoderObject(b *testing.B) { + b.StopTimer() + data, err := json.Marshal(benchmarkObjectInput) + if err != nil { + b.Errorf("Marshal failed: %v", err) + } + b.StartTimer() + + for i := 0; i < b.N; i++ { + var result benchmarkObjectType + if err := json.Unmarshal(data, &result); err != nil { + b.Errorf("Unmarshal failed: %v", err) + } + } +} diff --git a/deps/github.com/arangodb/go-velocypack/test/benchmark_encoder_test.go b/deps/github.com/arangodb/go-velocypack/test/benchmark_encoder_test.go new file mode 100644 index 000000000..b9acbe0ea --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/test/benchmark_encoder_test.go @@ -0,0 +1,70 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "encoding/json" + "testing" + + velocypack "github.com/arangodb/go-velocypack" +) + +type ( + benchmarkObjectType struct { + Name string + FirstName string + LastName string + Age int + Address []string + } +) + +var ( + benchmarkObjectInput = benchmarkObjectType{ + Name: "John Doe", + FirstName: "John", + LastName: "Doe", + Age: 42, + Address: []string{ + "Some street", + "Block 123", + "South", + }, + } +) + +func BenchmarkVPackEncoderObject(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := velocypack.Marshal(benchmarkObjectInput); err != nil { + b.Errorf("Marshal failed: %v", err) + } + } +} + +func BenchmarkJSONEncoderObject(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := json.Marshal(benchmarkObjectInput); err != nil { + b.Errorf("Marshal failed: %v", err) + } + } +} diff --git a/deps/github.com/arangodb/go-velocypack/test/builder_array_large_test.go b/deps/github.com/arangodb/go-velocypack/test/builder_array_large_test.go new file mode 100644 index 000000000..828f0c416 --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/test/builder_array_large_test.go @@ -0,0 +1,50 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +// +build !nolarge + +package test + +import ( + "math" + "strconv" + "strings" + "testing" + + velocypack "github.com/arangodb/go-velocypack" +) + +func TestBuilderArrayLarge(t *testing.T) { + var obj velocypack.Builder + max := math.MaxInt16 * 2 + expected := make([]string, max) + must(obj.OpenArray()) + for i := 0; i < max; i++ { + must(obj.AddValue(velocypack.NewIntValue(int64(i)))) + expected[i] = strconv.Itoa(i) + } + must(obj.Close()) + objSlice := mustSlice(obj.Slice()) + + expectedJSON := "[" + strings.Join(expected, ",") + "]" + ASSERT_EQ(expectedJSON, mustString(objSlice.JSONString()), t) +} diff --git a/deps/github.com/arangodb/go-velocypack/test/builder_array_test.go b/deps/github.com/arangodb/go-velocypack/test/builder_array_test.go new file mode 100644 index 000000000..dd31abab2 --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/test/builder_array_test.go @@ -0,0 +1,454 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "bytes" + "encoding/binary" + "math" + "testing" + + velocypack "github.com/arangodb/go-velocypack" +) + +func TestBuilderEmptyArray(t *testing.T) { + var b velocypack.Builder + b.OpenArray() + b.Close() + + s := mustSlice(b.Slice()) + ASSERT_TRUE(s.IsArray(), t) + ASSERT_EQ(velocypack.ValueLength(0), mustLength(s.Length()), t) +} + +func TestBuilderArrayEmpty(t *testing.T) { + var b velocypack.Builder + must(b.AddValue(velocypack.NewArrayValue())) + must(b.Close()) + l := mustLength(b.Size()) + result := mustBytes(b.Bytes()) + + correctResult := []byte{0x01} + + ASSERT_EQ(velocypack.ValueLength(len(correctResult)), l, t) + ASSERT_EQ(result, correctResult, t) +} + +func TestBuilderArraySingleEntry(t *testing.T) { + var b velocypack.Builder + must(b.AddValue(velocypack.NewArrayValue())) + must(b.AddValue(velocypack.NewIntValue(1))) + must(b.Close()) + l := mustLength(b.Size()) + result := mustBytes(b.Bytes()) + + correctResult := []byte{0x02, 0x03, 0x31} + + ASSERT_EQ(velocypack.ValueLength(len(correctResult)), l, t) + ASSERT_EQ(result, correctResult, t) +} + +func TestBuilderArraySingleEntryLong(t *testing.T) { + value := "ngdddddljjjjjjjjjjjjjjjjjjjjjjjjjjjjjjjsdddffffffffffffmmmmmmmmmmmmmmmsf" + + "dlllllllllllllllllllllllllllllllllllllllllllllllllrjjjjjjsdddddddddddddd" + + "ddddhhhhhhkkkkkkkksssssssssssssssssssssssssssssssssdddddddddddddddddkkkk" + + "kkkkkkkkksddddddddddddssssssssssfvvvvvvvvvvvvvvvvvvvvvvvvvvvfvgfff" + var b velocypack.Builder + must(b.AddValue(velocypack.NewArrayValue())) + must(b.AddValue(velocypack.NewStringValue(value))) + must(b.Close()) + l := mustLength(b.Size()) + result := mustBytes(b.Bytes()) + + correctResult := []byte{ + 0x03, 0x2c, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xbf, 0x1a, 0x01, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6e, 0x67, 0x64, 0x64, 0x64, 0x64, + 0x64, 0x6c, 0x6a, 0x6a, 0x6a, 0x6a, 0x6a, 
0x6a, 0x6a, 0x6a, 0x6a, 0x6a, + 0x6a, 0x6a, 0x6a, 0x6a, 0x6a, 0x6a, 0x6a, 0x6a, 0x6a, 0x6a, 0x6a, 0x6a, + 0x6a, 0x6a, 0x6a, 0x6a, 0x6a, 0x6a, 0x6a, 0x6a, 0x6a, 0x73, 0x64, 0x64, + 0x64, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, + 0x66, 0x6d, 0x6d, 0x6d, 0x6d, 0x6d, 0x6d, 0x6d, 0x6d, 0x6d, 0x6d, 0x6d, + 0x6d, 0x6d, 0x6d, 0x6d, 0x73, 0x66, 0x64, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, + 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, + 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, + 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, + 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x72, 0x6a, 0x6a, 0x6a, + 0x6a, 0x6a, 0x6a, 0x73, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, + 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x68, 0x68, + 0x68, 0x68, 0x68, 0x68, 0x6b, 0x6b, 0x6b, 0x6b, 0x6b, 0x6b, 0x6b, 0x6b, + 0x73, 0x73, 0x73, 0x73, 0x73, 0x73, 0x73, 0x73, 0x73, 0x73, 0x73, 0x73, + 0x73, 0x73, 0x73, 0x73, 0x73, 0x73, 0x73, 0x73, 0x73, 0x73, 0x73, 0x73, + 0x73, 0x73, 0x73, 0x73, 0x73, 0x73, 0x73, 0x73, 0x73, 0x64, 0x64, 0x64, + 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, + 0x64, 0x64, 0x6b, 0x6b, 0x6b, 0x6b, 0x6b, 0x6b, 0x6b, 0x6b, 0x6b, 0x6b, + 0x6b, 0x6b, 0x6b, 0x73, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, + 0x64, 0x64, 0x64, 0x64, 0x73, 0x73, 0x73, 0x73, 0x73, 0x73, 0x73, 0x73, + 0x73, 0x73, 0x66, 0x76, 0x76, 0x76, 0x76, 0x76, 0x76, 0x76, 0x76, 0x76, + 0x76, 0x76, 0x76, 0x76, 0x76, 0x76, 0x76, 0x76, 0x76, 0x76, 0x76, 0x76, + 0x76, 0x76, 0x76, 0x76, 0x76, 0x76, 0x66, 0x76, 0x67, 0x66, 0x66, 0x66} + + ASSERT_EQ(velocypack.ValueLength(len(correctResult)), l, t) + ASSERT_EQ(result, correctResult, t) +} + +func TestBuilderArraySameSizeEntries(t *testing.T) { + var b velocypack.Builder + must(b.AddValue(velocypack.NewArrayValue())) + must(b.AddValue(velocypack.NewUIntValue(1))) + must(b.AddValue(velocypack.NewUIntValue(2))) + 
must(b.AddValue(velocypack.NewUIntValue(3))) + must(b.Close()) + l := mustLength(b.Size()) + result := mustBytes(b.Bytes()) + + correctResult := []byte{0x02, 0x05, 0x31, 0x32, 0x33} + + ASSERT_EQ(velocypack.ValueLength(len(correctResult)), l, t) + ASSERT_EQ(result, correctResult, t) +} + +func TestBuilderArraySomeEntries(t *testing.T) { + var b velocypack.Builder + value := 2.3 + must(b.AddValue(velocypack.NewArrayValue())) + must(b.AddValue(velocypack.NewUIntValue(1200))) + must(b.AddValue(velocypack.NewDoubleValue(value))) + must(b.AddValue(velocypack.NewStringValue("abc"))) + must(b.AddValue(velocypack.NewBoolValue(true))) + must(b.Close()) + l := mustLength(b.Size()) + result := mustBytes(b.Bytes()) + + correctResult := []byte{ + 0x06, 0x18, 0x04, 0x29, 0xb0, 0x04, // uint(1200) = 0x4b0 + 0x1b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // double(2.3) + 0x43, 0x61, 0x62, 0x63, 0x1a, 0x03, 0x06, 0x0f, 0x13} + binary.LittleEndian.PutUint64(correctResult[7:], math.Float64bits(value)) + + ASSERT_EQ(velocypack.ValueLength(len(correctResult)), l, t) + ASSERT_EQ(result, correctResult, t) +} + +func TestBuilderArrayCompact(t *testing.T) { + var b velocypack.Builder + value := 2.3 + must(b.AddValue(velocypack.NewArrayValue(true))) + must(b.AddValue(velocypack.NewUIntValue(1200))) + must(b.AddValue(velocypack.NewDoubleValue(value))) + must(b.AddValue(velocypack.NewStringValue("abc"))) + must(b.AddValue(velocypack.NewBoolValue(true))) + must(b.Close()) + l := mustLength(b.Size()) + result := mustBytes(b.Bytes()) + + correctResult := []byte{ + 0x13, 0x14, 0x29, 0xb0, 0x04, 0x1b, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, // double + 0x43, 0x61, 0x62, 0x63, 0x1a, 0x04} + binary.LittleEndian.PutUint64(correctResult[6:], math.Float64bits(value)) + + ASSERT_EQ(velocypack.ValueLength(len(correctResult)), l, t) + ASSERT_EQ(result, correctResult, t) +} + +func TestBuilderArrayCompactBytesizeBelowThreshold(t *testing.T) { + var b velocypack.Builder + 
must(b.AddValue(velocypack.NewArrayValue(true))) + for i := uint64(0); i < 124; i++ { + must(b.AddValue(velocypack.NewUIntValue(i % 10))) + } + must(b.Close()) + l := mustLength(b.Size()) + result := mustBytes(b.Bytes()) + + ASSERT_EQ(velocypack.ValueLength(127), l, t) + ASSERT_EQ(byte(0x13), result[0], t) + ASSERT_EQ(byte(0x7f), result[1], t) + for i := uint64(0); i < 124; i++ { + ASSERT_EQ(byte(0x30+(i%10)), result[2+i], t) + } + ASSERT_EQ(byte(0x7c), result[126], t) +} + +func TestBuilderArrayCompactBytesizeAboveThreshold(t *testing.T) { + var b velocypack.Builder + must(b.AddValue(velocypack.NewArrayValue(true))) + for i := uint64(0); i < 125; i++ { + must(b.AddValue(velocypack.NewUIntValue(i % 10))) + } + must(b.Close()) + l := mustLength(b.Size()) + result := mustBytes(b.Bytes()) + + ASSERT_EQ(velocypack.ValueLength(129), l, t) + ASSERT_EQ(byte(0x13), result[0], t) + ASSERT_EQ(byte(0x81), result[1], t) + ASSERT_EQ(byte(0x01), result[2], t) + for i := uint64(0); i < 125; i++ { + ASSERT_EQ(byte(0x30+(i%10)), result[3+i], t) + } + ASSERT_EQ(byte(0x7d), result[128], t) +} + +func TestBuilderArrayCompactLengthBelowThreshold(t *testing.T) { + var b velocypack.Builder + must(b.AddValue(velocypack.NewArrayValue(true))) + for i := uint64(0); i < 127; i++ { + must(b.AddValue(velocypack.NewStringValue("aaa"))) + } + must(b.Close()) + l := mustLength(b.Size()) + result := mustBytes(b.Bytes()) + + ASSERT_EQ(velocypack.ValueLength(512), l, t) + ASSERT_EQ(byte(0x13), result[0], t) + ASSERT_EQ(byte(0x80), result[1], t) + ASSERT_EQ(byte(0x04), result[2], t) + for i := uint64(0); i < 127; i++ { + ASSERT_EQ(byte(0x43), result[3+i*4], t) + } + ASSERT_EQ(byte(0x7f), result[511], t) +} + +func TestBuilderArrayCompactLengthAboveThreshold(t *testing.T) { + var b velocypack.Builder + must(b.AddValue(velocypack.NewArrayValue(true))) + for i := uint64(0); i < 128; i++ { + must(b.AddValue(velocypack.NewStringValue("aaa"))) + } + must(b.Close()) + l := mustLength(b.Size()) + result := 
mustBytes(b.Bytes()) + + ASSERT_EQ(velocypack.ValueLength(517), l, t) + ASSERT_EQ(byte(0x13), result[0], t) + ASSERT_EQ(byte(0x85), result[1], t) + ASSERT_EQ(byte(0x04), result[2], t) + for i := uint64(0); i < 128; i++ { + ASSERT_EQ(byte(0x43), result[3+i*4], t) + } + ASSERT_EQ(byte(0x01), result[515], t) + ASSERT_EQ(byte(0x80), result[516], t) +} + +func TestBuilderAddObjectInArray(t *testing.T) { + var b velocypack.Builder + b.OpenArray() + b.OpenObject() + b.Close() + b.Close() + + s := mustSlice(b.Slice()) + ASSERT_TRUE(s.IsArray(), t) + ASSERT_EQ(velocypack.ValueLength(1), mustLength(s.Length()), t) + ss := mustSlice(s.At(0)) + ASSERT_TRUE(ss.IsObject(), t) + ASSERT_EQ(velocypack.ValueLength(0), mustLength(ss.Length()), t) +} + +func TestBuilderAddNonEmptyObjectsInArray(t *testing.T) { + var b velocypack.Builder + must(b.OpenArray()) + for i := 0; i < 5; i++ { + must(b.OpenObject()) + must(b.AddKeyValue("Field1", velocypack.NewIntValue(int64(i+1)))) + must(b.Close()) + } + must(b.Close()) + + s := mustSlice(b.Slice()) + ASSERT_TRUE(s.IsArray(), t) + ASSERT_EQ(velocypack.ValueLength(5), mustLength(s.Length()), t) + ss := mustSlice(s.At(0)) + ASSERT_TRUE(ss.IsObject(), t) + ASSERT_EQ(velocypack.ValueLength(1), mustLength(ss.Length()), t) + ASSERT_EQ(int64(1), mustInt(mustSlice(ss.Get("Field1")).GetInt()), t) + + it := mustArrayIterator(velocypack.NewArrayIterator(s)) + i := 1 + for it.IsValid() { + ss := mustSlice(it.Value()) + ASSERT_TRUE(ss.IsObject(), t) + ASSERT_EQ(velocypack.ValueLength(1), mustLength(ss.Length()), t) + ASSERT_EQ(int64(i), mustInt(mustSlice(ss.Get("Field1")).GetInt()), t) + it.Next() + i++ + } +} + +func TestBuilderAddArrayIteratorEmpty(t *testing.T) { + var obj velocypack.Builder + must(obj.OpenArray()) + must(obj.AddValue(velocypack.NewIntValue(1))) + must(obj.AddValue(velocypack.NewIntValue(2))) + must(obj.AddValue(velocypack.NewIntValue(3))) + must(obj.Close()) + objSlice := mustSlice(obj.Slice()) + + var b velocypack.Builder + 
ASSERT_TRUE(b.IsClosed(), t) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsBuilderNeedOpenArray, t)(b.AddValuesFromIterator(mustArrayIterator(velocypack.NewArrayIterator(objSlice)))) + ASSERT_TRUE(b.IsClosed(), t) +} + +func TestBuilderAddArrayIteratorNonArray(t *testing.T) { + var obj velocypack.Builder + must(obj.OpenArray()) + must(obj.AddValue(velocypack.NewIntValue(1))) + must(obj.AddValue(velocypack.NewIntValue(2))) + must(obj.AddValue(velocypack.NewIntValue(3))) + must(obj.Close()) + objSlice := mustSlice(obj.Slice()) + + var b velocypack.Builder + must(b.OpenObject()) + ASSERT_FALSE(b.IsClosed(), t) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsBuilderNeedOpenArray, t)(b.AddValuesFromIterator(mustArrayIterator(velocypack.NewArrayIterator(objSlice)))) + ASSERT_FALSE(b.IsClosed(), t) +} + +func TestBuilderAddArrayIteratorTop(t *testing.T) { + var obj velocypack.Builder + must(obj.OpenArray()) + must(obj.AddValue(velocypack.NewIntValue(1))) + must(obj.AddValue(velocypack.NewIntValue(2))) + must(obj.AddValue(velocypack.NewIntValue(3))) + must(obj.Close()) + objSlice := mustSlice(obj.Slice()) + + var b velocypack.Builder + must(b.OpenArray()) + ASSERT_FALSE(b.IsClosed(), t) + must(b.AddValuesFromIterator(mustArrayIterator(velocypack.NewArrayIterator(objSlice)))) + ASSERT_FALSE(b.IsClosed(), t) + must(b.Close()) + result := mustSlice(b.Slice()) + + ASSERT_EQ("[1,2,3]", mustString(result.JSONString()), t) +} + +func TestBuilderAddArrayIteratorReference(t *testing.T) { + var obj velocypack.Builder + must(obj.OpenArray()) + must(obj.AddValue(velocypack.NewIntValue(1))) + must(obj.AddValue(velocypack.NewIntValue(2))) + must(obj.AddValue(velocypack.NewIntValue(3))) + must(obj.Close()) + objSlice := mustSlice(obj.Slice()) + + var b velocypack.Builder + must(b.OpenArray()) + ASSERT_FALSE(b.IsClosed(), t) + must(b.Add(mustArrayIterator(velocypack.NewArrayIterator(objSlice)))) + ASSERT_FALSE(b.IsClosed(), t) + must(b.Close()) + result := mustSlice(b.Slice()) + + 
ASSERT_EQ("[1,2,3]", mustString(result.JSONString()), t) +} + +func TestBuilderAddArrayIteratorSub(t *testing.T) { + var obj velocypack.Builder + must(obj.OpenArray()) + must(obj.AddValue(velocypack.NewIntValue(1))) + must(obj.AddValue(velocypack.NewIntValue(2))) + must(obj.AddValue(velocypack.NewIntValue(3))) + must(obj.Close()) + objSlice := mustSlice(obj.Slice()) + + var b velocypack.Builder + must(b.OpenArray()) + must(b.AddValue(velocypack.NewStringValue("tennis"))) + must(b.OpenArray()) + must(b.Add(mustArrayIterator(velocypack.NewArrayIterator(objSlice)))) + ASSERT_FALSE(b.IsClosed(), t) + must(b.Close()) // close one level + must(b.AddValue(velocypack.NewStringValue("qux"))) + ASSERT_FALSE(b.IsClosed(), t) + must(b.Close()) + result := mustSlice(b.Slice()) + ASSERT_TRUE(b.IsClosed(), t) + + ASSERT_EQ("[\"tennis\",[1,2,3],\"qux\"]", mustString(result.JSONString()), t) +} + +func TestBuilderAddAndOpenArray(t *testing.T) { + var b1 velocypack.Builder + ASSERT_TRUE(b1.IsClosed(), t) + must(b1.OpenArray()) + ASSERT_FALSE(b1.IsClosed(), t) + must(b1.AddValue(velocypack.NewStringValue("bar"))) + must(b1.Close()) + ASSERT_TRUE(b1.IsClosed(), t) + ASSERT_EQ(byte(0x02), mustSlice(b1.Slice())[0], t) + + var b2 velocypack.Builder + ASSERT_TRUE(b2.IsClosed(), t) + must(b2.OpenArray()) + ASSERT_FALSE(b2.IsClosed(), t) + must(b2.AddValue(velocypack.NewStringValue("bar"))) + must(b2.Close()) + ASSERT_TRUE(b2.IsClosed(), t) + ASSERT_EQ(byte(0x02), mustSlice(b2.Slice())[0], t) +} + +func TestBuilderAddOnNonArray(t *testing.T) { + var b velocypack.Builder + must(b.AddValue(velocypack.NewObjectValue())) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsBuilderKeyMustBeString, t)(b.AddValue(velocypack.NewBoolValue(true))) +} + +func TestBuilderIsOpenArray(t *testing.T) { + var b velocypack.Builder + ASSERT_FALSE(b.IsOpenArray(), t) + must(b.OpenArray()) + ASSERT_TRUE(b.IsOpenArray(), t) + must(b.Close()) + ASSERT_FALSE(b.IsOpenArray(), t) +} + +func TestBuilderWriteTo(t *testing.T) { 
+ var b velocypack.Builder + must(b.OpenArray()) + must(b.Close()) + var buf bytes.Buffer + _, err := b.WriteTo(&buf) + ASSERT_NIL(err, t) +} + +func TestBuilderWriteToNotClosed(t *testing.T) { + var b velocypack.Builder + must(b.OpenArray()) + var buf bytes.Buffer + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsBuilderNotClosed, t)(b.WriteTo(&buf)) +} + +func TestBuilderClear(t *testing.T) { + var b velocypack.Builder + must(b.OpenArray()) + ASSERT_FALSE(b.IsClosed(), t) + b.Clear() + ASSERT_TRUE(b.IsClosed(), t) + ASSERT_EQ(0, len(mustBytes(b.Bytes())), t) +} diff --git a/deps/github.com/arangodb/go-velocypack/test/builder_object_large_test.go b/deps/github.com/arangodb/go-velocypack/test/builder_object_large_test.go new file mode 100644 index 000000000..7dfcb1e44 --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/test/builder_object_large_test.go @@ -0,0 +1,51 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +// +build !nolarge + +package test + +import ( + "fmt" + "math" + "strings" + "testing" + + velocypack "github.com/arangodb/go-velocypack" +) + +func TestBuilderObjectLarge(t *testing.T) { + var obj velocypack.Builder + max := math.MaxInt16 * 2 + expected := make([]string, max) + must(obj.OpenObject()) + for i := 0; i < max; i++ { + must(obj.AddValue(velocypack.NewStringValue(fmt.Sprintf("x%06d", i)))) + must(obj.AddValue(velocypack.NewIntValue(int64(i)))) + expected[i] = fmt.Sprintf(`"x%06d":%d`, i, i) + } + must(obj.Close()) + objSlice := mustSlice(obj.Slice()) + + expectedJSON := "{" + strings.Join(expected, ",") + "}" + ASSERT_EQ(expectedJSON, mustString(objSlice.JSONString()), t) +} diff --git a/deps/github.com/arangodb/go-velocypack/test/builder_object_test.go b/deps/github.com/arangodb/go-velocypack/test/builder_object_test.go new file mode 100644 index 000000000..e75e6c275 --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/test/builder_object_test.go @@ -0,0 +1,471 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "encoding/binary" + "math" + "testing" + + velocypack "github.com/arangodb/go-velocypack" +) + +func TestBuilderEmptyObject(t *testing.T) { + var b velocypack.Builder + b.OpenObject() + b.Close() + + s := mustSlice(b.Slice()) + ASSERT_TRUE(s.IsObject(), t) + ASSERT_EQ(velocypack.ValueLength(0), mustLength(s.Length()), t) +} + +func TestBuilderObjectEmpty(t *testing.T) { + var b velocypack.Builder + must(b.AddValue(velocypack.NewObjectValue())) + must(b.Close()) + l := mustLength(b.Size()) + result := mustBytes(b.Bytes()) + + correctResult := []byte{0x0a} + + ASSERT_EQ(velocypack.ValueLength(len(correctResult)), l, t) + ASSERT_EQ(result, correctResult, t) +} + +func TestBuilderObjectEmptyCompact(t *testing.T) { + var b velocypack.Builder + must(b.AddValue(velocypack.NewObjectValue(true))) + must(b.Close()) + l := mustLength(b.Size()) + result := mustBytes(b.Bytes()) + + correctResult := []byte{0x0a} + + ASSERT_EQ(velocypack.ValueLength(len(correctResult)), l, t) + ASSERT_EQ(result, correctResult, t) +} + +func TestBuilderObjectSorted(t *testing.T) { + var b velocypack.Builder + value := 2.3 + must(b.AddValue(velocypack.NewObjectValue())) + must(b.AddKeyValue("d", velocypack.NewUIntValue(1200))) + must(b.AddKeyValue("c", velocypack.NewDoubleValue(value))) + must(b.AddKeyValue("b", velocypack.NewStringValue("abc"))) + must(b.AddKeyValue("a", velocypack.NewBoolValue(true))) + must(b.Close()) + l := mustLength(b.Size()) + result := mustBytes(b.Bytes()) + + correctResult := []byte{ + 0x0b, 0x20, 0x04, 0x41, 0x64, 0x29, 0xb0, 0x04, // "d": uint(1200) = + // 0x4b0 + 0x41, 0x63, 0x1b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // "c": double(2.3) + 0x41, 0x62, 0x43, 0x61, 0x62, 0x63, // "b": "abc" + 0x41, 0x61, 0x1a, // "a": true + 0x19, 0x13, 0x08, 0x03} + binary.LittleEndian.PutUint64(correctResult[11:], math.Float64bits(value)) + + 
ASSERT_EQ(velocypack.ValueLength(len(correctResult)), l, t) + ASSERT_EQ(result, correctResult, t) +} + +func TestBuilderObjectCompact(t *testing.T) { + var b velocypack.Builder + value := 2.3 + must(b.AddValue(velocypack.NewObjectValue(true))) + must(b.AddKeyValue("d", velocypack.NewUIntValue(1200))) + must(b.AddKeyValue("c", velocypack.NewDoubleValue(value))) + must(b.AddKeyValue("b", velocypack.NewStringValue("abc"))) + must(b.AddKeyValue("a", velocypack.NewBoolValue(true))) + must(b.Close()) + l := mustLength(b.Size()) + result := mustBytes(b.Bytes()) + + correctResult := []byte{ + 0x14, 0x1c, 0x41, 0x64, 0x29, 0xb0, 0x04, 0x41, 0x63, 0x1b, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // double + 0x41, 0x62, 0x43, 0x61, 0x62, 0x63, 0x41, 0x61, 0x1a, 0x04} + binary.LittleEndian.PutUint64(correctResult[10:], math.Float64bits(value)) + + ASSERT_EQ(velocypack.ValueLength(len(correctResult)), l, t) + ASSERT_EQ(result, correctResult, t) +} + +func TestBuilderObjectValue1(t *testing.T) { + var b velocypack.Builder + u := uint64(77) + b.OpenObject() + b.AddKeyValue("test", velocypack.NewUIntValue(u)) + b.Close() + + s := mustSlice(b.Slice()) + ASSERT_TRUE(s.IsObject(), t) + ASSERT_EQ(velocypack.ValueLength(1), mustLength(s.Length()), t) + ASSERT_EQ(u, mustUInt(mustSlice(s.Get("test")).GetUInt()), t) +} + +func TestBuilderObjectValue2(t *testing.T) { + var b velocypack.Builder + u := uint64(77) + b.OpenObject() + b.AddKeyValue("test", velocypack.NewUIntValue(u)) + b.AddKeyValue("soup", velocypack.NewUIntValue(u*2)) + b.Close() + + s := mustSlice(b.Slice()) + ASSERT_TRUE(s.IsObject(), t) + ASSERT_EQ(velocypack.ValueLength(2), mustLength(s.Length()), t) + ASSERT_EQ(u, mustUInt(mustSlice(s.Get("test")).GetUInt()), t) + ASSERT_EQ(u*2, mustUInt(mustSlice(s.Get("soup")).GetUInt()), t) +} + +func TestBuilderAddObjectIteratorEmpty(t *testing.T) { + var obj velocypack.Builder + obj.OpenObject() + obj.AddKeyValue("1-one", velocypack.NewIntValue(1)) + obj.AddKeyValue("2-two", 
velocypack.NewIntValue(2)) + obj.AddKeyValue("3-three", velocypack.NewIntValue(3)) + obj.Close() + objSlice := mustSlice(obj.Slice()) + + var b velocypack.Builder + ASSERT_TRUE(b.IsClosed(), t) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsBuilderNeedOpenObject, t)(b.AddKeyValuesFromIterator(mustObjectIterator(velocypack.NewObjectIterator(objSlice)))) + ASSERT_TRUE(b.IsClosed(), t) +} + +func TestBuilderAddObjectIteratorKeyAlreadyWritten(t *testing.T) { + var obj velocypack.Builder + obj.OpenObject() + obj.AddKeyValue("1-one", velocypack.NewIntValue(1)) + obj.AddKeyValue("2-two", velocypack.NewIntValue(2)) + obj.AddKeyValue("3-three", velocypack.NewIntValue(3)) + obj.Close() + objSlice := mustSlice(obj.Slice()) + + var b velocypack.Builder + ASSERT_TRUE(b.IsClosed(), t) + must(b.OpenObject()) + must(b.AddValue(velocypack.NewStringValue("foo"))) + ASSERT_FALSE(b.IsClosed(), t) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsBuilderKeyAlreadyWritten, t)(b.AddKeyValuesFromIterator(mustObjectIterator(velocypack.NewObjectIterator(objSlice)))) + ASSERT_FALSE(b.IsClosed(), t) +} + +func TestBuilderAddObjectIteratorNonObject(t *testing.T) { + var obj velocypack.Builder + obj.OpenObject() + obj.AddKeyValue("1-one", velocypack.NewIntValue(1)) + obj.AddKeyValue("2-two", velocypack.NewIntValue(2)) + obj.AddKeyValue("3-three", velocypack.NewIntValue(3)) + obj.Close() + objSlice := mustSlice(obj.Slice()) + + var b velocypack.Builder + must(b.OpenArray()) + ASSERT_FALSE(b.IsClosed(), t) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsBuilderNeedOpenObject, t)(b.AddKeyValuesFromIterator(mustObjectIterator(velocypack.NewObjectIterator(objSlice)))) + ASSERT_FALSE(b.IsClosed(), t) +} + +func TestBuilderAddObjectIteratorTop(t *testing.T) { + var obj velocypack.Builder + obj.OpenObject() + obj.AddKeyValue("1-one", velocypack.NewIntValue(1)) + obj.AddKeyValue("2-two", velocypack.NewIntValue(2)) + obj.AddKeyValue("3-three", velocypack.NewIntValue(3)) + obj.Close() + objSlice := mustSlice(obj.Slice()) 
+ + var b velocypack.Builder + must(b.OpenObject()) + ASSERT_FALSE(b.IsClosed(), t) + must(b.AddKeyValuesFromIterator(mustObjectIterator(velocypack.NewObjectIterator(objSlice)))) + ASSERT_FALSE(b.IsClosed(), t) + must(b.Close()) + result := mustSlice(b.Slice()) + ASSERT_TRUE(b.IsClosed(), t) + + ASSERT_EQ("{\"1-one\":1,\"2-two\":2,\"3-three\":3}", mustString(result.JSONString()), t) +} + +func TestBuilderAddObjectIteratorReference(t *testing.T) { + var obj velocypack.Builder + obj.OpenObject() + obj.AddKeyValue("1-one", velocypack.NewIntValue(1)) + obj.AddKeyValue("2-two", velocypack.NewIntValue(2)) + obj.AddKeyValue("3-three", velocypack.NewIntValue(3)) + obj.Close() + objSlice := mustSlice(obj.Slice()) + + var b velocypack.Builder + must(b.OpenObject()) + ASSERT_FALSE(b.IsClosed(), t) + must(b.Add(mustObjectIterator(velocypack.NewObjectIterator(objSlice)))) + ASSERT_FALSE(b.IsClosed(), t) + must(b.Close()) + result := mustSlice(b.Slice()) + ASSERT_TRUE(b.IsClosed(), t) + + ASSERT_EQ("{\"1-one\":1,\"2-two\":2,\"3-three\":3}", mustString(result.JSONString()), t) +} + +func TestBuilderAddObjectIteratorSub(t *testing.T) { + var obj velocypack.Builder + obj.OpenObject() + obj.AddKeyValue("1-one", velocypack.NewIntValue(1)) + obj.AddKeyValue("2-two", velocypack.NewIntValue(2)) + obj.AddKeyValue("3-three", velocypack.NewIntValue(3)) + obj.Close() + objSlice := mustSlice(obj.Slice()) + + var b velocypack.Builder + must(b.OpenObject()) + must(b.AddKeyValue("1-something", velocypack.NewStringValue("tennis"))) + must(b.AddValue(velocypack.NewStringValue("2-values"))) + must(b.OpenObject()) + must(b.Add(mustObjectIterator(velocypack.NewObjectIterator(objSlice)))) + ASSERT_FALSE(b.IsClosed(), t) + must(b.Close()) // close one level + must(b.AddKeyValue("3-bark", velocypack.NewStringValue("qux"))) + ASSERT_FALSE(b.IsClosed(), t) + must(b.Close()) + result := mustSlice(b.Slice()) + ASSERT_TRUE(b.IsClosed(), t) + + 
ASSERT_EQ("{\"1-something\":\"tennis\",\"2-values\":{\"1-one\":1,\"2-two\":2,\"3-three\":3},\"3-bark\":\"qux\"}", mustString(result.JSONString()), t) +} + +func TestBuilderAddAndOpenObject(t *testing.T) { + var b1 velocypack.Builder + ASSERT_TRUE(b1.IsClosed(), t) + must(b1.OpenObject()) + ASSERT_FALSE(b1.IsClosed(), t) + must(b1.AddKeyValue("foo", velocypack.NewStringValue("bar"))) + must(b1.Close()) + ASSERT_TRUE(b1.IsClosed(), t) + ASSERT_EQ(byte(0x14), mustSlice(b1.Slice())[0], t) + ASSERT_EQ(velocypack.ValueLength(1), mustLength(mustSlice(b1.Slice()).Length()), t) + + var b2 velocypack.Builder + ASSERT_TRUE(b2.IsClosed(), t) + must(b2.OpenObject()) + ASSERT_FALSE(b2.IsClosed(), t) + must(b2.AddKeyValue("foo", velocypack.NewStringValue("bar"))) + must(b2.Close()) + ASSERT_TRUE(b2.IsClosed(), t) + ASSERT_EQ(byte(0x14), mustSlice(b2.Slice())[0], t) + ASSERT_EQ(velocypack.ValueLength(1), mustLength(mustSlice(b2.Slice()).Length()), t) +} + +func TestBuilderAddOnNonObject(t *testing.T) { + var b velocypack.Builder + must(b.AddValue(velocypack.NewArrayValue())) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsBuilderNeedOpenObject, t)(b.AddKeyValue("foo", velocypack.NewBoolValue(true))) +} + +func TestBuilderIsOpenObject(t *testing.T) { + var b velocypack.Builder + ASSERT_FALSE(b.IsOpenObject(), t) + must(b.OpenObject()) + ASSERT_TRUE(b.IsOpenObject(), t) + must(b.Close()) + ASSERT_FALSE(b.IsOpenObject(), t) +} + +func TestBuilderHasKeyNonObject(t *testing.T) { + var b velocypack.Builder + b.AddValue(velocypack.NewIntValue(1)) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsBuilderNeedOpenObject, t)(b.HasKey("foo")) +} + +func TestBuilderHasKeyArray(t *testing.T) { + var b velocypack.Builder + b.AddValue(velocypack.NewArrayValue()) + b.AddValue(velocypack.NewIntValue(1)) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsBuilderNeedOpenObject, t)(b.HasKey("foo")) +} + +func TestBuilderHasKeyEmptyObject(t *testing.T) { + var b velocypack.Builder + b.AddValue(velocypack.NewObjectValue()) 
+ ASSERT_FALSE(mustBool(b.HasKey("foo")), t) + ASSERT_FALSE(mustBool(b.HasKey("bar")), t) + ASSERT_FALSE(mustBool(b.HasKey("baz")), t) + ASSERT_FALSE(mustBool(b.HasKey("quetzalcoatl")), t) + b.Close() +} + +func TestBuilderHasKeySubObject(t *testing.T) { + var b velocypack.Builder + b.AddValue(velocypack.NewObjectValue()) + must(b.AddKeyValue("foo", velocypack.NewIntValue(1))) + must(b.AddKeyValue("bar", velocypack.NewBoolValue(true))) + ASSERT_TRUE(mustBool(b.HasKey("foo")), t) + ASSERT_TRUE(mustBool(b.HasKey("bar")), t) + ASSERT_FALSE(mustBool(b.HasKey("baz")), t) + + must(b.AddKeyValue("bark", velocypack.NewObjectValue())) + ASSERT_FALSE(mustBool(b.HasKey("bark")), t) + ASSERT_FALSE(mustBool(b.HasKey("foo")), t) + ASSERT_FALSE(mustBool(b.HasKey("bar")), t) + ASSERT_FALSE(mustBool(b.HasKey("baz")), t) + must(b.Close()) + + ASSERT_TRUE(mustBool(b.HasKey("foo")), t) + ASSERT_TRUE(mustBool(b.HasKey("bar")), t) + ASSERT_TRUE(mustBool(b.HasKey("bark")), t) + ASSERT_FALSE(mustBool(b.HasKey("baz")), t) + + must(b.AddKeyValue("baz", velocypack.NewIntValue(42))) + ASSERT_TRUE(mustBool(b.HasKey("foo")), t) + ASSERT_TRUE(mustBool(b.HasKey("bar")), t) + ASSERT_TRUE(mustBool(b.HasKey("bark")), t) + ASSERT_TRUE(mustBool(b.HasKey("baz")), t) + b.Close() +} + +func TestBuilderHasKeyCompact(t *testing.T) { + var b velocypack.Builder + b.AddValue(velocypack.NewObjectValue(true)) + must(b.AddKeyValue("foo", velocypack.NewIntValue(1))) + must(b.AddKeyValue("bar", velocypack.NewBoolValue(true))) + ASSERT_TRUE(mustBool(b.HasKey("foo")), t) + ASSERT_TRUE(mustBool(b.HasKey("bar")), t) + ASSERT_FALSE(mustBool(b.HasKey("baz")), t) + + must(b.AddKeyValue("bark", velocypack.NewObjectValue())) + ASSERT_FALSE(mustBool(b.HasKey("bark")), t) + ASSERT_FALSE(mustBool(b.HasKey("foo")), t) + ASSERT_FALSE(mustBool(b.HasKey("bar")), t) + ASSERT_FALSE(mustBool(b.HasKey("baz")), t) + must(b.Close()) + + ASSERT_TRUE(mustBool(b.HasKey("foo")), t) + ASSERT_TRUE(mustBool(b.HasKey("bar")), t) + 
ASSERT_TRUE(mustBool(b.HasKey("bark")), t) + ASSERT_FALSE(mustBool(b.HasKey("baz")), t) + + must(b.AddKeyValue("baz", velocypack.NewIntValue(42))) + ASSERT_TRUE(mustBool(b.HasKey("foo")), t) + ASSERT_TRUE(mustBool(b.HasKey("bar")), t) + ASSERT_TRUE(mustBool(b.HasKey("bark")), t) + ASSERT_TRUE(mustBool(b.HasKey("baz")), t) + b.Close() +} + +func TestBuilderGetKeyNonObject(t *testing.T) { + var b velocypack.Builder + b.AddValue(velocypack.NewIntValue(1)) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsBuilderNeedOpenObject, t)(b.GetKey("foo")) +} + +func TestBuilderGetKeyArray(t *testing.T) { + var b velocypack.Builder + b.AddValue(velocypack.NewArrayValue()) + b.AddValue(velocypack.NewIntValue(1)) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsBuilderNeedOpenObject, t)(b.GetKey("foo")) +} + +func TestBuilderGetKeyEmptyObject(t *testing.T) { + var b velocypack.Builder + b.AddValue(velocypack.NewObjectValue()) + ASSERT_TRUE(mustSlice(b.GetKey("foo")).IsNone(), t) + ASSERT_TRUE(mustSlice(b.GetKey("bar")).IsNone(), t) + ASSERT_TRUE(mustSlice(b.GetKey("baz")).IsNone(), t) + ASSERT_TRUE(mustSlice(b.GetKey("quetzalcoatl")).IsNone(), t) + b.Close() +} + +func TestBuilderGetKeySubObject(t *testing.T) { + var b velocypack.Builder + b.AddValue(velocypack.NewObjectValue()) + must(b.AddKeyValue("foo", velocypack.NewIntValue(1))) + must(b.AddKeyValue("bar", velocypack.NewBoolValue(true))) + ASSERT_EQ(uint64(1), mustUInt(mustSlice(b.GetKey("foo")).GetUInt()), t) + ASSERT_TRUE(mustSlice(b.GetKey("bar")).IsBool(), t) + ASSERT_TRUE(mustSlice(b.GetKey("baz")).IsNone(), t) + + must(b.AddKeyValue("bark", velocypack.NewObjectValue())) + ASSERT_TRUE(mustSlice(b.GetKey("bark")).IsNone(), t) + ASSERT_TRUE(mustSlice(b.GetKey("foo")).IsNone(), t) + ASSERT_TRUE(mustSlice(b.GetKey("bar")).IsNone(), t) + ASSERT_TRUE(mustSlice(b.GetKey("baz")).IsNone(), t) + must(b.Close()) + + ASSERT_EQ(uint64(1), mustUInt(mustSlice(b.GetKey("foo")).GetUInt()), t) + ASSERT_TRUE(mustSlice(b.GetKey("bar")).IsBool(), t) + 
ASSERT_TRUE(mustSlice(b.GetKey("baz")).IsNone(), t) + ASSERT_TRUE(mustSlice(b.GetKey("bark")).IsObject(), t) + + must(b.AddKeyValue("baz", velocypack.NewIntValue(42))) + ASSERT_EQ(uint64(1), mustUInt(mustSlice(b.GetKey("foo")).GetUInt()), t) + ASSERT_TRUE(mustSlice(b.GetKey("bar")).IsBool(), t) + ASSERT_EQ(uint64(42), mustUInt(mustSlice(b.GetKey("baz")).GetUInt()), t) + ASSERT_TRUE(mustSlice(b.GetKey("bark")).IsObject(), t) + b.Close() +} + +func TestBuilderGetKeyCompact(t *testing.T) { + var b velocypack.Builder + b.AddValue(velocypack.NewObjectValue(true)) + must(b.AddKeyValue("foo", velocypack.NewIntValue(1))) + must(b.AddKeyValue("bar", velocypack.NewBoolValue(true))) + ASSERT_EQ(uint64(1), mustUInt(mustSlice(b.GetKey("foo")).GetUInt()), t) + ASSERT_TRUE(mustSlice(b.GetKey("bar")).IsBool(), t) + ASSERT_TRUE(mustSlice(b.GetKey("baz")).IsNone(), t) + + must(b.AddKeyValue("bark", velocypack.NewObjectValue())) + ASSERT_TRUE(mustSlice(b.GetKey("bark")).IsNone(), t) + ASSERT_TRUE(mustSlice(b.GetKey("foo")).IsNone(), t) + ASSERT_TRUE(mustSlice(b.GetKey("bar")).IsNone(), t) + ASSERT_TRUE(mustSlice(b.GetKey("baz")).IsNone(), t) + must(b.Close()) + + ASSERT_EQ(uint64(1), mustUInt(mustSlice(b.GetKey("foo")).GetUInt()), t) + ASSERT_TRUE(mustSlice(b.GetKey("bar")).IsBool(), t) + ASSERT_TRUE(mustSlice(b.GetKey("baz")).IsNone(), t) + ASSERT_TRUE(mustSlice(b.GetKey("bark")).IsObject(), t) + + must(b.AddKeyValue("baz", velocypack.NewIntValue(42))) + ASSERT_EQ(uint64(1), mustUInt(mustSlice(b.GetKey("foo")).GetUInt()), t) + ASSERT_TRUE(mustSlice(b.GetKey("bar")).IsBool(), t) + ASSERT_EQ(uint64(42), mustUInt(mustSlice(b.GetKey("baz")).GetUInt()), t) + ASSERT_TRUE(mustSlice(b.GetKey("bark")).IsObject(), t) + b.Close() +} + +func TestBuilderAddKeysSeparately1(t *testing.T) { + var b velocypack.Builder + must(b.OpenObject()) + must(b.AddValue(velocypack.NewStringValue("name"))) + must(b.AddValue(velocypack.NewStringValue("Neunhoeffer"))) + 
must(b.AddValue(velocypack.NewStringValue("firstName"))) + must(b.AddValue(velocypack.NewStringValue("Max"))) + must(b.Close()) + + ASSERT_EQ(`{"firstName":"Max","name":"Neunhoeffer"}`, mustString(mustSlice(b.Slice()).JSONString()), t) +} diff --git a/deps/github.com/arangodb/go-velocypack/test/builder_primitive_test.go b/deps/github.com/arangodb/go-velocypack/test/builder_primitive_test.go new file mode 100644 index 000000000..9476d64c9 --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/test/builder_primitive_test.go @@ -0,0 +1,277 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "math" + "testing" + "time" + + velocypack "github.com/arangodb/go-velocypack" +) + +func TestBuilderPrimitiveAddNone(t *testing.T) { + var b velocypack.Builder + s := velocypack.NoneSlice() + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsBuilderUnexpectedType, t)(b.Add(s)) +} + +func TestBuilderPrimitiveAddNull(t *testing.T) { + var b velocypack.Builder + must(b.AddValue(velocypack.NewNullValue())) + s := mustSlice(b.Slice()) + ASSERT_TRUE(s.IsNull(), t) +} + +func TestBuilderPrimitiveAddMinKey(t *testing.T) { + var b velocypack.Builder + must(b.AddValue(velocypack.NewMinKeyValue())) + s := mustSlice(b.Slice()) + ASSERT_TRUE(s.IsMinKey(), t) +} + +func TestBuilderPrimitiveAddMaxKey(t *testing.T) { + var b velocypack.Builder + must(b.AddValue(velocypack.NewMaxKeyValue())) + s := mustSlice(b.Slice()) + ASSERT_TRUE(s.IsMaxKey(), t) +} + +func TestBuilderPrimitiveAddBool(t *testing.T) { + tests := []bool{true, false} + for _, test := range tests { + var b velocypack.Builder + b.Add(test) + + s := mustSlice(b.Slice()) + ASSERT_TRUE(s.IsBool(), t) + if test { + ASSERT_TRUE(s.IsTrue(), t) + ASSERT_FALSE(s.IsFalse(), t) + } else { + ASSERT_FALSE(s.IsTrue(), t) + ASSERT_TRUE(s.IsFalse(), t) + } + } +} + +func TestBuilderPrimitiveAddDoubleFloat32(t *testing.T) { + tests := []float32{10.4, -6, 0.0, -999999999, 24643783456252.4545345, math.MaxFloat32, -math.MaxFloat32} + for _, test := range tests { + var b velocypack.Builder + b.Add(test) + + s := mustSlice(b.Slice()) + ASSERT_TRUE(s.IsDouble(), t) + ASSERT_DOUBLE_EQ(float64(test), mustDouble(s.GetDouble()), t) + } +} + +func TestBuilderPrimitiveAddDoubleFloat64(t *testing.T) { + tests := []float64{10.4, -6, 0.0, -999999999, 24643783456252.4545345, math.MaxFloat64, -math.MaxFloat64} + for _, test := range tests { + var b velocypack.Builder + b.Add(test) + + s := mustSlice(b.Slice()) + 
ASSERT_TRUE(s.IsDouble(), t) + ASSERT_DOUBLE_EQ(test, mustDouble(s.GetDouble()), t) + } +} + +func TestBuilderPrimitiveAddInt(t *testing.T) { + tests := []int{10, -7, -34, 344366, math.MaxInt32, 233224, math.MinInt32} + for _, test := range tests { + var b velocypack.Builder + b.Add(test) + + s := mustSlice(b.Slice()) + ASSERT_TRUE(s.IsInt(), t) + ASSERT_EQ(int64(test), mustInt(s.GetInt()), t) + } +} + +func TestBuilderPrimitiveAddInt8(t *testing.T) { + tests := []int8{10, -7, -34, math.MinInt8, math.MaxInt8} + for _, test := range tests { + var b velocypack.Builder + b.Add(test) + + s := mustSlice(b.Slice()) + ASSERT_TRUE(s.IsInt(), t) + ASSERT_EQ(int64(test), mustInt(s.GetInt()), t) + } +} + +func TestBuilderPrimitiveAddInt16(t *testing.T) { + tests := []int16{10, -7, -34, math.MinInt16, math.MaxInt16} + for _, test := range tests { + var b velocypack.Builder + b.Add(test) + + s := mustSlice(b.Slice()) + ASSERT_TRUE(s.IsInt(), t) + ASSERT_EQ(int64(test), mustInt(s.GetInt()), t) + } +} + +func TestBuilderPrimitiveAddInt32(t *testing.T) { + tests := []int32{10, -7, -34, math.MinInt32, math.MaxInt32} + for _, test := range tests { + var b velocypack.Builder + b.Add(test) + + s := mustSlice(b.Slice()) + ASSERT_TRUE(s.IsInt(), t) + ASSERT_EQ(int64(test), mustInt(s.GetInt()), t) + } +} + +func TestBuilderPrimitiveAddInt64(t *testing.T) { + tests := []int64{10, -7, -34, math.MinInt64, math.MaxInt64} + for _, test := range tests { + var b velocypack.Builder + b.Add(test) + + s := mustSlice(b.Slice()) + ASSERT_TRUE(s.IsInt(), t) + ASSERT_EQ(int64(test), mustInt(s.GetInt()), t) + } +} + +func TestBuilderPrimitiveAddUInt(t *testing.T) { + tests := []uint{10, 34, math.MaxUint32} + for _, test := range tests { + var b velocypack.Builder + b.Add(test) + + s := mustSlice(b.Slice()) + ASSERT_TRUE(s.IsUInt(), t) + ASSERT_EQ(uint64(test), mustUInt(s.GetUInt()), t) + } +} + +func TestBuilderPrimitiveAddUInt8(t *testing.T) { + tests := []uint8{10, 34, math.MaxUint8} + for _, test := 
range tests { + var b velocypack.Builder + b.Add(test) + + s := mustSlice(b.Slice()) + ASSERT_TRUE(s.IsUInt(), t) + ASSERT_EQ(uint64(test), mustUInt(s.GetUInt()), t) + } +} + +func TestBuilderPrimitiveAddUInt16(t *testing.T) { + tests := []uint16{10, 34, math.MaxUint16} + for _, test := range tests { + var b velocypack.Builder + b.Add(test) + + s := mustSlice(b.Slice()) + ASSERT_TRUE(s.IsUInt(), t) + ASSERT_EQ(uint64(test), mustUInt(s.GetUInt()), t) + } +} + +func TestBuilderPrimitiveAddUInt32(t *testing.T) { + tests := []uint32{10, 34, 56345344, math.MaxUint32} + for _, test := range tests { + var b velocypack.Builder + b.Add(test) + + s := mustSlice(b.Slice()) + ASSERT_TRUE(s.IsUInt(), t) + ASSERT_EQ(uint64(test), mustUInt(s.GetUInt()), t) + } +} + +func TestBuilderPrimitiveAddUInt64(t *testing.T) { + tests := []uint64{10, 34, 636346346345342355, math.MaxUint64} + for _, test := range tests { + var b velocypack.Builder + b.Add(test) + + s := mustSlice(b.Slice()) + ASSERT_TRUE(s.IsUInt(), t) + ASSERT_EQ(uint64(test), mustUInt(s.GetUInt()), t) + } +} + +func TestBuilderPrimitiveAddSmallInt(t *testing.T) { + tests := []int{-6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 9} + for _, test := range tests { + var b velocypack.Builder + b.Add(test) + + s := mustSlice(b.Slice()) + ASSERT_TRUE(s.IsSmallInt(), t) + ASSERT_EQ(int64(test), mustInt(s.GetInt()), t) + } +} + +func TestBuilderPrimitiveAddString(t *testing.T) { + tests := []string{"", "foo", "你好,世界", "\t\n\x00", "Some space and stuff"} + for _, test := range tests { + var b velocypack.Builder + b.Add(test) + + s := mustSlice(b.Slice()) + ASSERT_TRUE(s.IsString(), t) + ASSERT_EQ(test, mustString(s.GetString()), t) + } +} + +func TestBuilderPrimitiveAddUTCDate(t *testing.T) { + cet, err := time.LoadLocation("CET") + ASSERT_NIL(err, t) + tests := []time.Time{ + time.Date(2585, time.January, 12, 1, 2, 3, 0, time.UTC), + time.Date(2123, time.October, 9, 1, 2, 3, 0, time.UTC), + time.Date(2001, time.September, 11, 1, 2, 
3, 0, time.UTC), + time.Date(1985, time.July, 4, 10, 22, 0, 0, cet), + time.Date(1950, time.December, 5, 20, 10, 50, 0, cet), + } + for _, test := range tests { + var b velocypack.Builder + b.Add(test) + + s := mustSlice(b.Slice()) + ASSERT_TRUE(s.IsUTCDate(), t) + ASSERT_EQ(test.UTC(), mustTime(s.GetUTCDate()), t) + } +} + +func TestBuilderPrimitiveAddBinary(t *testing.T) { + tests := [][]byte{[]byte{1, 2, 3}, []byte{}, []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 12, 13, 14, 15, 16, 17, 18, 19, 20}} + for _, test := range tests { + var b velocypack.Builder + b.Add(test) + + s := mustSlice(b.Slice()) + ASSERT_EQ(s.Type(), velocypack.Binary, t) + ASSERT_TRUE(s.IsBinary(), t) + ASSERT_EQ(test, mustBytes(s.GetBinary()), t) + } +} diff --git a/deps/github.com/arangodb/go-velocypack/test/builder_test.go b/deps/github.com/arangodb/go-velocypack/test/builder_test.go new file mode 100644 index 000000000..ce052ada1 --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/test/builder_test.go @@ -0,0 +1,175 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "testing" + + velocypack "github.com/arangodb/go-velocypack" +) + +func TestBuilderBytesWithOpenObject(t *testing.T) { + var b velocypack.Builder + ASSERT_EQ(0, len(mustBytes(b.Bytes())), t) + must(b.OpenObject()) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsBuilderNotClosed, t)(b.Bytes()) + must(b.Close()) + ASSERT_EQ(1, len(mustBytes(b.Bytes())), t) +} + +func TestBuilderSliceWithOpenObject(t *testing.T) { + var b velocypack.Builder + ASSERT_EQ(0, len(mustSlice(b.Slice())), t) + must(b.OpenObject()) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsBuilderNotClosed, t)(b.Slice()) + must(b.Close()) + ASSERT_EQ(1, len(mustSlice(b.Slice())), t) +} + +func TestBuilderSizeWithOpenObject(t *testing.T) { + var b velocypack.Builder + ASSERT_EQ(velocypack.ValueLength(0), mustLength(b.Size()), t) + must(b.OpenObject()) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsBuilderNotClosed, t)(b.Size()) + must(b.Close()) + ASSERT_EQ(velocypack.ValueLength(1), mustLength(b.Size()), t) +} + +func TestBuilderIsEmpty(t *testing.T) { + var b velocypack.Builder + ASSERT_TRUE(b.IsEmpty(), t) + must(b.OpenObject()) + ASSERT_FALSE(b.IsEmpty(), t) +} + +func TestBuilderIsClosedMixed(t *testing.T) { + var b velocypack.Builder + ASSERT_TRUE(b.IsClosed(), t) + b.AddValue(velocypack.NewNullValue()) + ASSERT_TRUE(b.IsClosed(), t) + b.AddValue(velocypack.NewBoolValue(true)) + ASSERT_TRUE(b.IsClosed(), t) + + b.AddValue(velocypack.NewArrayValue()) + ASSERT_FALSE(b.IsClosed(), t) + + b.AddValue(velocypack.NewBoolValue(true)) + ASSERT_FALSE(b.IsClosed(), t) + b.AddValue(velocypack.NewBoolValue(true)) + ASSERT_FALSE(b.IsClosed(), t) + + must(b.Close()) + ASSERT_TRUE(b.IsClosed(), t) + + b.AddValue(velocypack.NewObjectValue()) + ASSERT_FALSE(b.IsClosed(), t) + + b.AddKeyValue("foo", velocypack.NewBoolValue(true)) + ASSERT_FALSE(b.IsClosed(), t) + + b.AddKeyValue("bar", 
velocypack.NewBoolValue(true)) + ASSERT_FALSE(b.IsClosed(), t) + + b.AddKeyValue("baz", velocypack.NewArrayValue()) + ASSERT_FALSE(b.IsClosed(), t) + + must(b.Close()) + ASSERT_FALSE(b.IsClosed(), t) + + must(b.Close()) + ASSERT_TRUE(b.IsClosed(), t) +} + +func TestBuilderIsClosedObject(t *testing.T) { + var b velocypack.Builder + ASSERT_TRUE(b.IsClosed(), t) + must(b.AddValue(velocypack.NewObjectValue())) + ASSERT_FALSE(b.IsClosed(), t) + + must(b.AddKeyValue("foo", velocypack.NewBoolValue(true))) + ASSERT_FALSE(b.IsClosed(), t) + + must(b.AddKeyValue("bar", velocypack.NewBoolValue(true))) + ASSERT_FALSE(b.IsClosed(), t) + + must(b.AddKeyValue("baz", velocypack.NewObjectValue())) + ASSERT_FALSE(b.IsClosed(), t) + + must(b.Close()) + ASSERT_FALSE(b.IsClosed(), t) + + must(b.Close()) + ASSERT_TRUE(b.IsClosed(), t) +} + +func TestBuilderCloseClosed(t *testing.T) { + var b velocypack.Builder + ASSERT_TRUE(b.IsClosed(), t) + must(b.AddValue(velocypack.NewObjectValue())) + ASSERT_FALSE(b.IsClosed(), t) + must(b.Close()) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsBuilderNeedOpenCompound, t)(b.Close()) +} + +func TestBuilderRemoveLastNonObject(t *testing.T) { + var b velocypack.Builder + must(b.AddValue(velocypack.NewBoolValue(true))) + must(b.AddValue(velocypack.NewBoolValue(false))) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsBuilderNeedOpenCompound, t)(b.RemoveLast()) +} + +func TestBuilderRemoveLastSealed(t *testing.T) { + var b velocypack.Builder + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsBuilderNeedOpenCompound, t)(b.RemoveLast()) +} + +func TestBuilderRemoveLastEmptyObject(t *testing.T) { + var b velocypack.Builder + must(b.AddValue(velocypack.NewObjectValue())) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsBuilderNeedSubValue, t)(b.RemoveLast()) +} + +func TestBuilderRemoveLastObjectInvalid(t *testing.T) { + var b velocypack.Builder + must(b.AddValue(velocypack.NewObjectValue())) + must(b.AddKeyValue("foo", velocypack.NewBoolValue(true))) + must(b.RemoveLast()) + 
ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsBuilderNeedSubValue, t)(b.RemoveLast()) +} + +func TestBuilderRemoveLastObject(t *testing.T) { + var b velocypack.Builder + must(b.AddValue(velocypack.NewObjectValue())) + must(b.AddKeyValue("foo", velocypack.NewBoolValue(true))) + must(b.AddKeyValue("bar", velocypack.NewBoolValue(false))) + + must(b.RemoveLast()) + must(b.Close()) + + s := mustSlice(b.Slice()) + ASSERT_TRUE(s.IsObject(), t) + ASSERT_EQ(velocypack.ValueLength(1), mustLength(s.Length()), t) + ASSERT_TRUE(mustBool(s.HasKey("foo")), t) + ASSERT_TRUE(mustBool(mustSlice(s.Get("foo")).GetBool()), t) + ASSERT_FALSE(mustBool(s.HasKey("bar")), t) +} diff --git a/deps/github.com/arangodb/go-velocypack/test/decoder_array_test.go b/deps/github.com/arangodb/go-velocypack/test/decoder_array_test.go new file mode 100644 index 000000000..8744b02ed --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/test/decoder_array_test.go @@ -0,0 +1,219 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "testing" + + velocypack "github.com/arangodb/go-velocypack" +) + +func TestDecoderArrayEmpty(t *testing.T) { + b := velocypack.Builder{} + must(b.OpenArray()) + must(b.Close()) + s := mustSlice(b.Slice()) + + var v []struct{} + err := velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(0, len(v), t) +} + +func TestDecoderArrayByteSlice(t *testing.T) { + expected := []byte{1, 2, 3, 4, 5} + b := velocypack.Builder{} + must(b.AddValue(velocypack.NewBinaryValue(expected))) + s := mustSlice(b.Slice()) + + var v []byte + err := velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, expected, t) +} + +func TestDecoderArrayBoolSlice(t *testing.T) { + expected := []bool{true, false, false, true} + bytes, err := velocypack.Marshal(expected) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + var v []bool + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, expected, t) +} + +func TestDecoderArrayIntSlice(t *testing.T) { + expected := []int{1, 2, 3, -4, 5, 6, 100000} + bytes, err := velocypack.Marshal(expected) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + var v []int + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, expected, t) +} + +func TestDecoderArrayUIntSlice(t *testing.T) { + expected := []uint{1, 2, 3, 4, 5, 6, 100000} + bytes, err := velocypack.Marshal(expected) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + var v []uint + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, expected, t) +} + +func TestDecoderArrayFloat32Slice(t *testing.T) { + expected := []float32{0.0, -1.5, 66, 45} + bytes, err := velocypack.Marshal(expected) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + var v []float32 + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, expected, t) +} + +func TestDecoderArrayFloat64Slice(t *testing.T) { + 
expected := []float64{0.0, -1.5, 6.23, 45e+10} + bytes, err := velocypack.Marshal(expected) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + var v []float64 + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, expected, t) +} + +func TestDecoderArrayStructSlice(t *testing.T) { + input := []Struct1{ + Struct1{Field1: 1, field2: 2}, + Struct1{Field1: 10, field2: 200}, + Struct1{Field1: 100, field2: 200}, + } + bytes, err := velocypack.Marshal(input) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + expected := input + expected[0].field2 = 0 + expected[1].field2 = 0 + expected[2].field2 = 0 + + var v []Struct1 + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, expected, t) +} + +func TestDecoderArrayStructPtrSlice(t *testing.T) { + input := []*Struct1{ + &Struct1{Field1: 1, field2: 2}, + nil, + &Struct1{Field1: 10, field2: 200}, + &Struct1{Field1: 100, field2: 200}, + nil, + } + bytes, err := velocypack.Marshal(input) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + expected := input + expected[0].field2 = 0 + expected[2].field2 = 0 + expected[3].field2 = 0 + + var v []*Struct1 + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, expected, t) +} + +func TestDecoderArrayNestedArray(t *testing.T) { + input := [][]Struct1{ + []Struct1{Struct1{Field1: 1, field2: 2}, Struct1{Field1: 3, field2: 4}}, + []Struct1{Struct1{Field1: 10, field2: 200}}, + []Struct1{Struct1{Field1: 100, field2: 200}}, + } + bytes, err := velocypack.Marshal(input) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + expected := input + expected[0][0].field2 = 0 + expected[0][1].field2 = 0 + expected[1][0].field2 = 0 + expected[2][0].field2 = 0 + + var v [][]Struct1 + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, expected, t) +} + +func TestDecoderArrayExtraLengthInSlice(t *testing.T) { + input := []int{1, 2, 3, 4, 5, 6, 7, 8} + bytes, err := velocypack.Marshal(input) + ASSERT_NIL(err, t) + s := 
velocypack.Slice(bytes) + expected := input + + v := make([]int, 16) + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, expected, t) +} + +func TestDecoderArrayExtraLengthInArray(t *testing.T) { + input := []int{1, 2, 3, 4, 5, 6, 7, 8} + bytes, err := velocypack.Marshal(input) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + expected := [16]int{} + copy(expected[:], input) + + v := [16]int{} + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, expected, t) +} + +func TestDecoderArrayInterface(t *testing.T) { + input := []interface{}{1, false, Struct1{}, "foo", []byte{1, 2, 3, 4, 5}} + bytes, err := velocypack.Marshal(input) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + expected := input + expected[2] = map[string]interface{}{ + "Field1": 0, + } + + var v []interface{} + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, expected, t) +} diff --git a/deps/github.com/arangodb/go-velocypack/test/decoder_convert_test.go b/deps/github.com/arangodb/go-velocypack/test/decoder_convert_test.go new file mode 100644 index 000000000..87ae3023f --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/test/decoder_convert_test.go @@ -0,0 +1,411 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "encoding/base64" + "encoding/json" + "testing" + + velocypack "github.com/arangodb/go-velocypack" +) + +func TestDecoderConvertFloat32Int(t *testing.T) { + input := struct { + A float32 + }{ + A: -345.0, + } + bytes, err := velocypack.Marshal(input) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + var expected, v struct { + A int + } + expected.A = -345 + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, expected, t) +} + +func TestDecoderConvertFloat32UInt(t *testing.T) { + input := struct { + A float32 + }{ + A: 333.0, + } + bytes, err := velocypack.Marshal(input) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + var expected, v struct { + A uint + } + expected.A = 333 + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, expected, t) +} + +func TestDecoderConvertFloat32Number(t *testing.T) { + input := struct { + A float32 + }{ + A: 333.5, + } + bytes, err := velocypack.Marshal(input) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + var expected, v struct { + A json.Number + } + expected.A = "333.5" + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, expected, t) +} + +func TestDecoderConvertFloat32Interface(t *testing.T) { + input := struct { + A float32 + }{ + A: 333.5, + } + bytes, err := velocypack.Marshal(input) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + var expected, v struct { + A interface{} + } + expected.A = 333.5 + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, expected, t) +} + +func TestDecoderConvertFloat64Int(t *testing.T) { + input := struct { + A float64 + }{ + A: -345.0, + } + bytes, err := velocypack.Marshal(input) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + var expected, v struct { + A int + } + expected.A = -345 + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, expected, t) 
+} + +func TestDecoderConvertFloat64UInt(t *testing.T) { + input := struct { + A float64 + }{ + A: 333.0, + } + bytes, err := velocypack.Marshal(input) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + var expected, v struct { + A uint + } + expected.A = 333 + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, expected, t) +} + +func TestDecoderConvertFloat64Number(t *testing.T) { + input := struct { + A float64 + }{ + A: 333.7, + } + bytes, err := velocypack.Marshal(input) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + var expected, v struct { + A json.Number + } + expected.A = "333.7" + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, expected, t) +} + +func TestDecoderConvertFloat64Interface(t *testing.T) { + input := struct { + A float64 + }{ + A: 333.7, + } + bytes, err := velocypack.Marshal(input) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + var expected, v struct { + A interface{} + } + expected.A = 333.7 + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, expected, t) +} + +func TestDecoderConvertIntFloat32(t *testing.T) { + input := struct { + A int + }{ + A: -123, + } + bytes, err := velocypack.Marshal(input) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + var expected, v struct { + A float32 + } + expected.A = -123.0 + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, expected, t) +} + +func TestDecoderConvertIntFloat64(t *testing.T) { + input := struct { + A int + }{ + A: -12345, + } + bytes, err := velocypack.Marshal(input) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + var expected, v struct { + A float64 + } + expected.A = -12345.0 + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, expected, t) +} + +func TestDecoderConvertIntUInt(t *testing.T) { + input := struct { + A int + }{ + A: 12345, + } + bytes, err := velocypack.Marshal(input) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + var expected, v 
struct { + A uint + } + expected.A = 12345 + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, expected, t) +} + +func TestDecoderConvertIntNumber(t *testing.T) { + input := struct { + A int + }{ + A: -12345, + } + bytes, err := velocypack.Marshal(input) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + var expected, v struct { + A json.Number + } + expected.A = "-12345" + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, expected, t) +} + +func TestDecoderConvertIntInterface(t *testing.T) { + input := struct { + A int + }{ + A: -12345, + } + bytes, err := velocypack.Marshal(input) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + var expected, v struct { + A interface{} + } + expected.A = -12345 + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, expected, t) +} + +func TestDecoderConvertUIntFloat32(t *testing.T) { + input := struct { + A uint + }{ + A: 123, + } + bytes, err := velocypack.Marshal(input) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + var expected, v struct { + A float32 + } + expected.A = 123.0 + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, expected, t) +} + +func TestDecoderConvertUIntFloat64(t *testing.T) { + input := struct { + A uint + }{ + A: 12345, + } + bytes, err := velocypack.Marshal(input) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + var expected, v struct { + A float64 + } + expected.A = 12345.0 + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, expected, t) +} + +func TestDecoderConvertUIntInt(t *testing.T) { + input := struct { + A uint + }{ + A: 12345, + } + bytes, err := velocypack.Marshal(input) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + var expected, v struct { + A int + } + expected.A = 12345 + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, expected, t) +} + +func TestDecoderConvertUIntNumber(t *testing.T) { + input := struct { + A uint + }{ + A: 12345, + } + 
bytes, err := velocypack.Marshal(input) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + var expected, v struct { + A json.Number + } + expected.A = "12345" + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, expected, t) +} + +func TestDecoderConvertUIntInterface(t *testing.T) { + input := struct { + A uint + }{ + A: 12345, + } + bytes, err := velocypack.Marshal(input) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + var expected, v struct { + A interface{} + } + expected.A = uint64(12345) + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, expected, t) +} + +func TestDecoderConvertStringByteSlice(t *testing.T) { + expectedBytes := []byte{5, 6, 7, 8, 9} + input := struct { + A string + }{ + A: base64.StdEncoding.EncodeToString(expectedBytes), + } + bytes, err := velocypack.Marshal(input) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + var expected, v struct { + A []byte + } + expected.A = expectedBytes + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, expected, t) +} + +func TestDecoderConvertNil(t *testing.T) { + input := struct { + A *Struct1 + }{ + A: nil, + } + bytes, err := velocypack.Marshal(input) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + var expected, v struct { + A interface{} + } + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, expected, t) +} diff --git a/deps/github.com/arangodb/go-velocypack/test/decoder_custom_test.go b/deps/github.com/arangodb/go-velocypack/test/decoder_custom_test.go new file mode 100644 index 000000000..19a5be3c1 --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/test/decoder_custom_test.go @@ -0,0 +1,356 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "encoding/json" + "fmt" + "strconv" + "strings" + "testing" + + velocypack "github.com/arangodb/go-velocypack" +) + +/* +type CustomStruct1 struct { + Field1 int +} +*/ + +func (cs *CustomStruct1) UnmarshalVPack(slice velocypack.Slice) error { + s, err := slice.GetString() + if err != nil { + return err + } + if s != "Hello world" { + return fmt.Errorf("Expected 'Hello world' got '%s'", s) + } + cs.Field1 = 42 + return nil +} + +func TestDecoderCustomStruct1(t *testing.T) { + input := &CustomStruct1{ + Field1: 999, + } + bytes, err := velocypack.Marshal(input) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + expected := CustomStruct1{ + Field1: 42, + } + + var v CustomStruct1 + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, expected, t) +} + +/* +type CustomStruct1Object struct { + Field1 int +} +*/ + +func (cs *CustomStruct1Object) UnmarshalVPack(slice velocypack.Slice) error { + ss, err := slice.Get("foo") + if err != nil { + return err + } + s, err := ss.GetString() + if err != nil { + return err + } + if s != "Hello world" { + return fmt.Errorf("Expected 'Hello world' got '%s'", s) + } + cs.Field1 = 42 + return nil +} + +func TestDecoderCustomCustomStruct1Object(t *testing.T) { + input := &CustomStruct1Object{ + Field1: 999, + } + bytes, err := velocypack.Marshal(input) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + expected := CustomStruct1Object{ + Field1: 42, + } + + var v 
CustomStruct1Object + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, expected, t) +} + +/* +type CustomStruct1Array struct { + Field1 int +} +*/ + +func (cs *CustomStruct1Array) UnmarshalVPack(slice velocypack.Slice) error { + ss, err := slice.At(0) + if err != nil { + return err + } + s, err := ss.GetString() + if err != nil { + return err + } + if s != "Hello world Array" { + return fmt.Errorf("Expected 'Hello world Array' got '%s'", s) + } + cs.Field1 = 987 + return nil +} + +func TestDecoderCustomCustomStruct1Array(t *testing.T) { + input := &CustomStruct1Array{ + Field1: 999, + } + bytes, err := velocypack.Marshal(input) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + expected := CustomStruct1Array{ + Field1: 987, + } + + var v CustomStruct1Array + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, expected, t) +} + +/* +type CustomStruct2 struct { + Field CustomStruct1 +} +// CustomStruct2.Field is not using a custom unmarshaler since only *CustomStruct1 implements the Unmarshal interface. 
+*/ + +func TestDecoderCustomStruct2(t *testing.T) { + input := CustomStruct2{ + Field: CustomStruct1{ + Field1: 999222, + }, + } + bytes, err := velocypack.Marshal(input) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + expected := input + expected.Field.Field1 = 42 + + var v CustomStruct2 + ASSERT_VELOCYPACK_EXCEPTION(func(error) bool { return true }, t)(velocypack.Unmarshal(s, &v)) +} + +/* +type CustomStruct3 struct { + Field *CustomStruct1 +} +*/ + +func TestDecoderCustomStruct3(t *testing.T) { + input := CustomStruct3{ + Field: &CustomStruct1{ + Field1: 999222, + }, + } + bytes, err := velocypack.Marshal(input) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + expected := input + expected.Field.Field1 = 42 + + var v CustomStruct3 + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, expected, t) +} + +/* +type CustomText1 struct { + I int +} +*/ +func (ct *CustomText1) UnmarshalText(text []byte) error { + if !strings.HasPrefix(string(text), "key") { + return fmt.Errorf("Expected 'key' prefix, got '%s'", string(text)) + } + i, err := strconv.Atoi(strings.TrimPrefix(string(text), "key")) + if err != nil { + return fmt.Errorf("Expected integer after 'key' prefix, got '%s' (err: %v)", strings.TrimPrefix(string(text), "key"), err) + } + ct.I = i + return nil +} + +func TestDecoderCustomText1(t *testing.T) { + expected := map[CustomText1]bool{ + CustomText1{7}: true, + CustomText1{2}: false, + } + bytes, err := velocypack.Marshal(expected) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + var v map[CustomText1]bool + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, expected, t) +} + +func (cs *CustomJSONStruct1) UnmarshalJSON(data []byte) error { + var s string + if err := json.Unmarshal(data, &s); err != nil { + return err + } + if s != "Hello JSON" { + return fmt.Errorf("Expected 'Hello JSON' got '%s'", s) + } + cs.Field1 = 88 + return nil +} + +func TestDecoderCustomJSONStruct1(t *testing.T) { + input := 
&CustomJSONStruct1{ + Field1: 999, + } + bytes, err := velocypack.Marshal(input) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + expected := CustomJSONStruct1{ + Field1: 88, + } + + var v CustomJSONStruct1 + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, expected, t) +} + +func (cs *CustomJSONStruct1Object) UnmarshalJSON(data []byte) error { + var s struct { + Foo string `json:"foo"` + } + if err := json.Unmarshal(data, &s); err != nil { + return err + } + if s.Foo != "Hello JSON Object" { + return fmt.Errorf("Expected 'Hello JSON Object' got '%s'", s.Foo) + } + cs.Field1 = 222 + return nil +} + +func TestDecoderCustomJSONStruct1Object(t *testing.T) { + input := &CustomJSONStruct1Object{ + Field1: 999, + } + bytes, err := velocypack.Marshal(input) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + expected := CustomJSONStruct1Object{ + Field1: 222, + } + + var v CustomJSONStruct1Object + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, expected, t) +} + +func (cs *CustomJSONStruct1Array) UnmarshalJSON(data []byte) error { + var s []string + if err := json.Unmarshal(data, &s); err != nil { + return err + } + if s[0] != "Hello JSON Array" { + return fmt.Errorf("Expected 'Hello JSON Array' got '%s'", s[0]) + } + cs.Field1 = 456 + return nil +} + +func TestDecoderCustomJSONStruct1Array(t *testing.T) { + input := &CustomJSONStruct1Array{ + Field1: 999, + } + bytes, err := velocypack.Marshal(input) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + expected := CustomJSONStruct1Array{ + Field1: 456, + } + + var v CustomJSONStruct1Array + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, expected, t) +} + +func (cs *CustomJSONVPACKStruct1) UnmarshalVPack(slice velocypack.Slice) error { + s, err := slice.GetString() + if err != nil { + return err + } + if s != "Hello VPACK, goodbye JSON" { + return fmt.Errorf("Expected 'Hello VPACK, goodbye JSON' got '%s'", s) + } + cs.Field1 = 99 + return nil +} + 
+func (cs *CustomJSONVPACKStruct1) UnmarshalJSON(data []byte) error { + var s string + if err := json.Unmarshal(data, &s); err != nil { + return err + } + if s != "Hello JSON, goodbye VPACK" { + return fmt.Errorf("Expected 'Hello JSON, goodbye VPACK' got '%s'", s) + } + cs.Field1 = 88 + return nil +} + +func TestDecoderCustomJSONVPACKStruct1(t *testing.T) { + // UnmarshalVPack is preferred over UnmarshalJSON + input := &CustomJSONVPACKStruct1{ + Field1: 999, + } + bytes, err := velocypack.Marshal(input) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + expected := CustomJSONVPACKStruct1{ + Field1: 99, + } + + var v CustomJSONVPACKStruct1 + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, expected, t) +} diff --git a/deps/github.com/arangodb/go-velocypack/test/decoder_map_test.go b/deps/github.com/arangodb/go-velocypack/test/decoder_map_test.go new file mode 100644 index 000000000..13066ad3f --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/test/decoder_map_test.go @@ -0,0 +1,287 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "math" + "reflect" + "testing" + "unsafe" + + velocypack "github.com/arangodb/go-velocypack" +) + +func TestDecoderMapEmpty(t *testing.T) { + expected := map[string]interface{}{} + bytes, err := velocypack.Marshal(expected) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + var v map[string]interface{} + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, expected, t) +} + +func TestDecoderMapOneField(t *testing.T) { + expected := map[string]string{ + "Name": "Max", + } + bytes, err := velocypack.Marshal(expected) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + var v map[string]string + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, expected, t) +} + +func TestDecoderMapMultipleFields(t *testing.T) { + expected := map[string]interface{}{ + "Name": "Max", + "A": true, + "D": 123.456, + "I": 789, // Will be of type int + } + bytes, err := velocypack.Marshal(expected) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + var v map[string]interface{} + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, expected, t) +} + +func TestDecoderMapMultipleFieldsInt64(t *testing.T) { + maxInt32P1 := int64(math.MaxInt32) + 1 + var i interface{} + if unsafe.Sizeof(int(0)) == 4 { + i = maxInt32P1 + } else { + i = int(maxInt32P1) + } + expected := map[string]interface{}{ + "Name": "Max", + "A": true, + "D": 123.456, + "I": i, // Will be of type int or int64 depending on GOARCH + } + bytes, err := velocypack.Marshal(expected) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + var v map[string]interface{} + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(reflect.ValueOf(v["I"]).Type(), reflect.ValueOf(expected["I"]).Type(), t) + ASSERT_EQ(v, expected, t) +} + +func TestDecoderMapMultipleFieldsEmpty(t *testing.T) { + expected := map[string]interface{}{ + "Name": "", 
+ "A": false, + "D": 0.0, + "I": 0, + } + bytes, err := velocypack.Marshal(expected) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + var v map[string]interface{} + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, expected, t) +} + +func TestDecoderMapNestedStruct(t *testing.T) { + expected := map[string]interface{}{ + "Name": "Jan", + "Nested": map[string]interface{}{ + "Foo": 999, + }, + "A": true, + "D": 123.456, + "I": 789, + } + bytes, err := velocypack.Marshal(expected) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + var v map[string]interface{} + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, expected, t) +} + +func TestDecoderMapNestedStructs(t *testing.T) { + expected := map[string]interface{}{ + "Name": "Jan", + "Nested": map[string]interface{}{ + "Foo": 999, + "Nested": map[string]interface{}{ + "Foo": true, + }, + }, + "A": true, + "D": 123.456, + "I": 789, + } + bytes, err := velocypack.Marshal(expected) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + var v map[string]interface{} + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, expected, t) +} + +func TestDecoderMapNestedStructPtrNil(t *testing.T) { + expected := map[string]interface{}{ + "Name": "Jan", + "Nested": nil, + "A": true, + "D": 123.456, + "I": 789, + } + bytes, err := velocypack.Marshal(expected) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + var v map[string]interface{} + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, expected, t) +} + +func TestDecoderMapNestedByteSlice(t *testing.T) { + expected := map[string]interface{}{ + "Name": "Jan", + "Nested": []byte{1, 2, 3, 4, 5, 6}, + "A": true, + "D": 123.456, + "I": 789, + } + bytes, err := velocypack.Marshal(expected) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + var v map[string]interface{} + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, expected, t) +} + +func 
TestDecoderMapNestedIntSlice(t *testing.T) { + expected := map[string]interface{}{ + "Name": "Jan", + "Nested": []interface{}{1, 2, 3, 4, 5}, + "A": true, + "D": 123.456, + "I": 789, + } + bytes, err := velocypack.Marshal(expected) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + var v map[string]interface{} + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, expected, t) +} + +func TestDecoderMapNestedStringSlice(t *testing.T) { + expected := map[string]interface{}{ + "Name": "Jan", + "Nested": []interface{}{"Aap", "Noot"}, + "A": true, + "D": 123.456, + "I": 789, + } + bytes, err := velocypack.Marshal(expected) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + var v map[string]interface{} + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, expected, t) +} + +func TestDecoderMapNestedStringSliceEmpty(t *testing.T) { + expected := map[string]interface{}{ + "Name": "Jan", + "Nested": []interface{}{}, + "A": true, + "D": 123.456, + "I": 789, + } + bytes, err := velocypack.Marshal(expected) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + var v map[string]interface{} + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, expected, t) +} + +func TestDecoderMapInt8Keys(t *testing.T) { + expected := map[int8]interface{}{ + 0: "Jan", + 1: []interface{}{"foo", "monkey"}, + 7: true, + 11: 123.456, + 23: 789, + -45: false, + } + bytes, err := velocypack.Marshal(expected) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + var v map[int8]interface{} + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, expected, t) +} + +func TestDecoderMapUInt16Keys(t *testing.T) { + expected := map[uint16]interface{}{ + 0: "Jan", + 1: []interface{}{"foo", "monkey"}, + 7: true, + 11: 123.456, + 23: 789, + } + bytes, err := velocypack.Marshal(expected) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + var v map[uint16]interface{} + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + 
ASSERT_EQ(v, expected, t) +} diff --git a/deps/github.com/arangodb/go-velocypack/test/decoder_object_test.go b/deps/github.com/arangodb/go-velocypack/test/decoder_object_test.go new file mode 100644 index 000000000..a2f2bb4da --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/test/decoder_object_test.go @@ -0,0 +1,742 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "testing" + + velocypack "github.com/arangodb/go-velocypack" +) + +func TestDecoderObjectEmpty(t *testing.T) { + expected := struct{}{} + bytes, err := velocypack.Marshal(expected) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + var v struct{} + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, expected, t) +} + +func TestDecoderObjectEmptyInvalidDestination(t *testing.T) { + b := velocypack.Builder{} + must(b.OpenObject()) + must(b.Close()) + s := mustSlice(b.Slice()) + + var v int64 + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsUnmarshalType, t)(velocypack.Unmarshal(s, &v)) +} + +func TestDecoderObjectOneField(t *testing.T) { + expected := struct { + Name string + }{ + Name: "Max", + } + bytes, err := velocypack.Marshal(expected) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + var v struct { + Name string + } + err = velocypack.Unmarshal(s, &v) + 
ASSERT_NIL(err, t) + ASSERT_EQ(v, expected, t) +} + +func TestDecoderObjectMultipleFields(t *testing.T) { + expected := struct { + Name string + A bool + D float64 + I int + }{ + Name: "Max", + A: true, + D: 123.456, + I: 789, + } + bytes, err := velocypack.Marshal(expected) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + var v struct { + Name string + A bool + D float64 + I int + } + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, expected, t) +} + +func TestDecoderObjectTagRename(t *testing.T) { + expected := struct { + Name string `json:"name"` + A bool `json:"field9"` + D float64 `json:"field7"` + I int + }{ + Name: "Max", + A: true, + D: 123.456, + I: 789, + } + bytes, err := velocypack.Marshal(expected) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + var v struct { + Name string `json:"name"` + A bool `json:"field9"` + D float64 `json:"field7"` + I int + } + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, expected, t) +} + +func TestDecoderObjectTagOmitEmptyFull(t *testing.T) { + expected := struct { + Name string `json:"name,omitempty"` + A bool `json:"field9,omitempty"` + D float64 `json:"field7,omitempty"` + I int `json:"field8,omitempty"` + }{ + Name: "Jan", + A: true, + D: 123.456, + I: 789, + } + bytes, err := velocypack.Marshal(expected) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + var v struct { + Name string `json:"name,omitempty"` + A bool `json:"field9,omitempty"` + D float64 `json:"field7,omitempty"` + I int `json:"field8,omitempty"` + } + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, expected, t) +} + +func TestDecoderObjectTagOmitEmptyEmpty(t *testing.T) { + expected := struct { + Name string `json:"name,omitempty"` + A bool `json:"field9,omitempty"` + D float64 `json:"field7,omitempty"` + I int `json:"field8,omitempty"` + }{ + Name: "", + A: false, + D: 0.0, + I: 0, + } + bytes, err := velocypack.Marshal(expected) + ASSERT_NIL(err, t) + s := 
velocypack.Slice(bytes) + + var v struct { + Name string `json:"name,omitempty"` + A bool `json:"field9,omitempty"` + D float64 `json:"field7,omitempty"` + I int `json:"field8,omitempty"` + } + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, expected, t) +} + +func TestDecoderObjectTagOmitFields(t *testing.T) { + input := struct { + Name string `json:"name,omitempty"` + A bool `json:"field9,omitempty"` + D float64 `json:"-"` + I int `json:"-,"` + }{ + Name: "Jan", + A: true, + D: 123.456, + I: 789, + } + bytes, err := velocypack.Marshal(input) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + expected := input + expected.D = 0.0 + + var v struct { + Name string `json:"name,omitempty"` + A bool `json:"field9,omitempty"` + D float64 `json:"-"` + I int `json:"-,"` + } + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, expected, t) +} + +func TestDecoderObjectNestedStruct(t *testing.T) { + expected := struct { + Name string + Nested struct { + Foo int + } + A bool + D float64 + I int + }{ + Name: "Jan", + Nested: struct{ Foo int }{999}, + A: true, + D: 123.456, + I: 789, + } + bytes, err := velocypack.Marshal(expected) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + var v struct { + Name string + Nested struct { + Foo int + } + A bool + D float64 + I int + } + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, expected, t) +} + +func TestDecoderObjectNestedStructs(t *testing.T) { + expected := struct { + Name string + Nested struct { + Foo int + Nested struct { + Foo bool + } + } + A bool + D float64 + I int + }{ + Name: "Jan", + Nested: struct { + Foo int + Nested struct{ Foo bool } + }{999, struct{ Foo bool }{true}}, + A: true, + D: 123.456, + I: 789, + } + bytes, err := velocypack.Marshal(expected) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + var v struct { + Name string + Nested struct { + Foo int + Nested struct { + Foo bool + } + } + A bool + D float64 + I int + } + err = 
velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, expected, t) +} + +func TestDecoderObjectNestedStructPtr(t *testing.T) { + expected := struct { + Name string + Nested *struct { + Foo int + } + A bool + D float64 + I int + }{ + Name: "Jan", + Nested: &struct{ Foo int }{999}, + A: true, + D: 123.456, + I: 789, + } + bytes, err := velocypack.Marshal(expected) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + var v struct { + Name string + Nested *struct { + Foo int + } + A bool + D float64 + I int + } + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, expected, t) +} + +func TestDecoderObjectNestedStructPtrNil(t *testing.T) { + expected := struct { + Name string + Nested *struct { + Foo int + } + A bool + D float64 + I int + }{ + Name: "Jan", + Nested: nil, + A: true, + D: 123.456, + I: 789, + } + bytes, err := velocypack.Marshal(expected) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + var v struct { + Name string + Nested *struct { + Foo int + } + A bool + D float64 + I int + } + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, expected, t) +} + +func TestDecoderObjectNestedStructPtrNilOmitEmpty(t *testing.T) { + expected := struct { + Name string + Nested *struct { + Foo int + } `json:",omitempty"` + A bool + D float64 + I int + }{ + Name: "Jan", + Nested: nil, + A: true, + D: 123.456, + I: 789, + } + bytes, err := velocypack.Marshal(expected) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + var v struct { + Name string + Nested *struct { + Foo int + } `json:",omitempty"` + A bool + D float64 + I int + } + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, expected, t) +} + +func TestDecoderObjectNestedByteSlice(t *testing.T) { + expected := struct { + Name string + Nested []byte + A bool + D float64 + I int + }{ + Name: "Jan", + Nested: []byte{1, 2, 3, 4, 5}, + A: true, + D: 123.456, + I: 789, + } + bytes, err := velocypack.Marshal(expected) + ASSERT_NIL(err, t) + s 
:= velocypack.Slice(bytes) + + var v struct { + Name string + Nested []byte + A bool + D float64 + I int + } + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, expected, t) +} + +func TestDecoderObjectNestedIntSlice(t *testing.T) { + expected := struct { + Name string + Nested []int + A bool + D float64 + I int + }{ + Name: "Jan", + Nested: []int{1, 2, 3, 4, 5}, + A: true, + D: 123.456, + I: 789, + } + bytes, err := velocypack.Marshal(expected) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + var v struct { + Name string + Nested []int + A bool + D float64 + I int + } + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, expected, t) +} + +func TestDecoderObjectNestedStringSlice(t *testing.T) { + expected := struct { + Name string + Nested []string + A bool + D float64 + I int + }{ + Name: "Jan", + Nested: []string{"Aap", "Noot"}, + A: true, + D: 123.456, + I: 789, + } + bytes, err := velocypack.Marshal(expected) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + var v struct { + Name string + Nested []string + A bool + D float64 + I int + } + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, expected, t) +} + +func TestDecoderObjectNestedStringSliceEmpty(t *testing.T) { + expected := struct { + Name string + Nested []string + A bool + D float64 + I int + }{ + Name: "Jan", + Nested: []string{}, + A: true, + D: 123.456, + I: 789, + } + bytes, err := velocypack.Marshal(expected) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + var v struct { + Name string + Nested []string + A bool + D float64 + I int + } + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, expected, t) +} + +func TestDecoderObjectNestedStringSliceNil(t *testing.T) { + expected := struct { + Name string + Nested []string + A bool + D float64 + I int + }{ + Name: "Jan", + Nested: nil, + A: true, + D: 123.456, + I: 789, + } + bytes, err := velocypack.Marshal(expected) + ASSERT_NIL(err, t) + s := 
velocypack.Slice(bytes) + + var v struct { + Name string + Nested []string + A bool + D float64 + I int + } + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, expected, t) +} + +/* +type Struct1 struct { + Field1 int + field2 int // Not exposed, must not be exported +} +*/ + +func TestDecoderObjectStruct1(t *testing.T) { + input := Struct1{ + Field1: 1, + field2: 2, + } + bytes, err := velocypack.Marshal(input) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + expected := input + expected.field2 = 0 + + var v Struct1 + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, expected, t) +} + +/* +type Struct2 struct { + Field1 bool + Struct1 // Anonymous struct +} +*/ +func TestDecoderObjectStruct2(t *testing.T) { + input := Struct2{ + Field1: true, + Struct1: Struct1{ + Field1: 101, + field2: 102, + }, + } + bytes, err := velocypack.Marshal(input) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + expected := input + expected.Struct1.Field1 = 0 + expected.Struct1.field2 = 0 + + var v Struct2 + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, expected, t) +} + +/* +type Struct3 struct { + Struct1 // Anonymous struct + Field1 bool +} +*/ +func TestDecoderObjectStruct3(t *testing.T) { + input := Struct3{ + Struct1: Struct1{ + Field1: 101, + field2: 102, + }, + Field1: true, + } + bytes, err := velocypack.Marshal(input) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + expected := input + expected.Struct1.Field1 = 0 + expected.Struct1.field2 = 0 + + var v Struct3 + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, expected, t) +} + +/* +type Struct4 struct { + Field4 bool `json:"a"` + Struct5 +} + +type Struct5 struct { + Field5 int `json:"a"` +} +*/ +func TestDecoderObjectStruct4(t *testing.T) { + input := Struct4{ + Field4: true, + Struct5: Struct5{ + Field5: 5, + }, + } + bytes, err := velocypack.Marshal(input) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + expected := 
input + expected.Struct5.Field5 = 0 + + var v Struct4 + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, expected, t) +} + +/* +type Struct6 struct { + Field4 bool `json:"a6"` + Struct5 +} +*/ +func TestDecoderObjectStruct6(t *testing.T) { + input := Struct6{ + Field4: true, + Struct5: Struct5{ + Field5: 5, + }, + } + bytes, err := velocypack.Marshal(input) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + expected := input + + var v Struct6 + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, expected, t) +} + +func TestDecoderObjectStructPtr6(t *testing.T) { + input := &Struct6{ + Field4: true, + Struct5: Struct5{ + Field5: 5, + }, + } + bytes, err := velocypack.Marshal(input) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + expected := *input + + var v Struct6 + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, expected, t) +} + +/* +type Struct7 struct { + B bool `json:"b,string"` + I int `json:"i,string"` + U uint `json:"u,string"` + F float64 `json:"f,string"` + S string `json:"s,string"` +} +*/ + +func TestDecoderObjectStruct7(t *testing.T) { + input := Struct7{ + B: true, + I: -77, + U: 211, + F: 3.2, + S: "Hello world", + } + bytes, err := velocypack.Marshal(input) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + expected := input + + var v Struct7 + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, expected, t) +} + +/* +type Struct8 struct { + B bool `json:",string"` + I int `json:",string"` + U uint `json:",string"` + F float64 `json:",string"` + S string `json:",string"` +} +*/ + +func TestDecoderObjectStruct8(t *testing.T) { + input := Struct8{ + B: true, + I: -77, + U: 211, + F: 3.2, + S: "Hello world", + } + bytes, err := velocypack.Marshal(input) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + expected := input + + var v Struct8 + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, expected, t) +} diff --git 
a/deps/github.com/arangodb/go-velocypack/test/decoder_primitive_test.go b/deps/github.com/arangodb/go-velocypack/test/decoder_primitive_test.go new file mode 100644 index 000000000..b7e19cb15 --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/test/decoder_primitive_test.go @@ -0,0 +1,273 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "math" + "testing" + + velocypack "github.com/arangodb/go-velocypack" +) + +func TestDecoderPrimitiveAddNull(t *testing.T) { + var v interface{} + err := velocypack.Unmarshal(velocypack.NullSlice(), &v) + ASSERT_NIL(err, t) + ASSERT_NIL(v, t) +} + +func TestDecoderPrimitiveAddBool(t *testing.T) { + var v bool + err := velocypack.Unmarshal(velocypack.TrueSlice(), &v) + ASSERT_NIL(err, t) + ASSERT_TRUE(v, t) + + err = velocypack.Unmarshal(velocypack.FalseSlice(), &v) + ASSERT_NIL(err, t) + ASSERT_FALSE(v, t) +} + +func TestDecoderPrimitiveAddDoubleFloat32(t *testing.T) { + tests := []float32{10.4, -6, 0.0, -999999999, 24643783456252.4545345, math.MaxFloat32, -math.MaxFloat32} + for _, test := range tests { + b := velocypack.Builder{} + b.AddValue(velocypack.NewDoubleValue(float64(test))) + s, err := b.Slice() + ASSERT_NIL(err, t) + + var v float32 + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + 
ASSERT_DOUBLE_EQ(float64(v), float64(test), t) + } +} + +func TestDecoderPrimitiveAddDoubleFloat64(t *testing.T) { + tests := []float64{10.4, -6, 0.0, -999999999, 24643783456252.4545345, math.MaxFloat64, -math.MaxFloat64} + for _, test := range tests { + b := velocypack.Builder{} + b.AddValue(velocypack.NewDoubleValue(test)) + s, err := b.Slice() + ASSERT_NIL(err, t) + + var v float64 + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_DOUBLE_EQ(v, test, t) + } +} + +func TestDecoderPrimitiveAddInt(t *testing.T) { + tests := []int{10, -7, -34, 344366, math.MaxInt32, 233224, math.MinInt32} + for _, test := range tests { + b := velocypack.Builder{} + b.AddValue(velocypack.NewIntValue(int64(test))) + s, err := b.Slice() + ASSERT_NIL(err, t) + + var v int + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, test, t) + } +} + +func TestDecoderPrimitiveAddInt8(t *testing.T) { + tests := []int8{10, -7, -34, math.MinInt8, math.MaxInt8} + for _, test := range tests { + b := velocypack.Builder{} + b.AddValue(velocypack.NewIntValue(int64(test))) + s, err := b.Slice() + ASSERT_NIL(err, t) + + var v int8 + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, test, t) + } +} + +func TestDecoderPrimitiveAddInt16(t *testing.T) { + tests := []int16{10, -7, -34, math.MinInt16, math.MaxInt16} + for _, test := range tests { + b := velocypack.Builder{} + b.AddValue(velocypack.NewIntValue(int64(test))) + s, err := b.Slice() + ASSERT_NIL(err, t) + + var v int16 + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, test, t) + } +} + +func TestDecoderPrimitiveAddInt32(t *testing.T) { + tests := []int32{10, -7, -34, math.MinInt32, math.MaxInt32} + for _, test := range tests { + b := velocypack.Builder{} + b.AddValue(velocypack.NewIntValue(int64(test))) + s, err := b.Slice() + ASSERT_NIL(err, t) + + var v int32 + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, test, t) + } +} + +func 
TestDecoderPrimitiveAddInt64(t *testing.T) { + tests := []int64{10, -7, -34, math.MinInt64, math.MaxInt64} + for _, test := range tests { + b := velocypack.Builder{} + b.AddValue(velocypack.NewIntValue(test)) + s, err := b.Slice() + ASSERT_NIL(err, t) + + var v int64 + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, test, t) + } +} + +func TestDecoderPrimitiveAddUInt(t *testing.T) { + tests := []uint{10, 34, math.MaxUint32} + for _, test := range tests { + b := velocypack.Builder{} + b.AddValue(velocypack.NewUIntValue(uint64(test))) + s, err := b.Slice() + ASSERT_NIL(err, t) + + var v uint + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, test, t) + } +} + +func TestDecoderPrimitiveAddUInt8(t *testing.T) { + tests := []uint8{10, 34, math.MaxUint8} + for _, test := range tests { + b := velocypack.Builder{} + b.AddValue(velocypack.NewUIntValue(uint64(test))) + s, err := b.Slice() + ASSERT_NIL(err, t) + + var v uint8 + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, test, t) + } +} + +func TestDecoderPrimitiveAddUInt16(t *testing.T) { + tests := []uint16{10, 34, math.MaxUint16} + for _, test := range tests { + b := velocypack.Builder{} + b.AddValue(velocypack.NewUIntValue(uint64(test))) + s, err := b.Slice() + ASSERT_NIL(err, t) + + var v uint16 + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, test, t) + } +} + +func TestDecoderPrimitiveAddUInt32(t *testing.T) { + tests := []uint32{10, 34, 56345344, math.MaxUint32} + for _, test := range tests { + b := velocypack.Builder{} + b.AddValue(velocypack.NewUIntValue(uint64(test))) + s, err := b.Slice() + ASSERT_NIL(err, t) + + var v uint32 + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, test, t) + } +} + +func TestDecoderPrimitiveAddUInt64(t *testing.T) { + tests := []uint64{10, 34, 636346346345342355, 0, math.MaxUint64} + for _, test := range tests { + b := velocypack.Builder{} + 
b.AddValue(velocypack.NewUIntValue(test)) + s, err := b.Slice() + ASSERT_NIL(err, t) + + var v uint64 + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, test, t) + } +} + +func TestDecoderPrimitiveAddSmallInt(t *testing.T) { + tests := []int{-6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 9} + for _, test := range tests { + b := velocypack.Builder{} + b.AddValue(velocypack.NewIntValue(int64(test))) + s, err := b.Slice() + ASSERT_NIL(err, t) + + var v int + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, test, t) + } +} + +func TestDecoderPrimitiveAddString(t *testing.T) { + tests := []string{"", "foo", "你好,世界", "\t\n\x00", "Some space and stuff"} + for _, test := range tests { + b := velocypack.Builder{} + b.AddValue(velocypack.NewStringValue(test)) + s, err := b.Slice() + ASSERT_NIL(err, t) + + var v string + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, test, t) + } +} + +func TestDecoderPrimitiveAddBinary(t *testing.T) { + tests := [][]byte{[]byte{1, 2, 3}, []byte{}, []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 12, 13, 14, 15, 16, 17, 18, 19, 20}} + for _, test := range tests { + b := velocypack.Builder{} + b.AddValue(velocypack.NewBinaryValue(test)) + s, err := b.Slice() + ASSERT_NIL(err, t) + + var v []byte + err = velocypack.Unmarshal(s, &v) + ASSERT_NIL(err, t) + ASSERT_EQ(v, test, t) + } +} diff --git a/deps/github.com/arangodb/go-velocypack/test/decoder_reader_test.go b/deps/github.com/arangodb/go-velocypack/test/decoder_reader_test.go new file mode 100644 index 000000000..27b9f16d0 --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/test/decoder_reader_test.go @@ -0,0 +1,89 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "bytes" + "testing" + + velocypack "github.com/arangodb/go-velocypack" +) + +func TestDecoderReaderSmall(t *testing.T) { + s := append(velocypack.NullSlice(), velocypack.TrueSlice()...) + r := bytes.NewReader(s) + d := velocypack.NewDecoder(r) + + var v1 interface{} + must(d.Decode(&v1)) + + var v2 bool + must(d.Decode(&v2)) + + ASSERT_EQ(v1, nil, t) + ASSERT_EQ(v2, true, t) +} + +func TestDecoderReaderLarge(t *testing.T) { + testX := func(x int) string { + result := "" + for i := 0; i < x; i++ { + result = result + "-foo-" + } + return result + } + b := velocypack.Builder{} + for i := 0; i < 1000; i++ { + must(b.AddValue(velocypack.NewStringValue(testX(i)))) + } + s := mustSlice(b.Slice()) + r := bytes.NewReader(s) + d := velocypack.NewDecoder(r) + + for i := 0; i < 1000; i++ { + var v string + must(d.Decode(&v)) + ASSERT_EQ(v, testX(i), t) + } +} + +func TestDecoderReaderStruct1(t *testing.T) { + var s velocypack.Slice + for i := 0; i < 1000; i++ { + input := Struct1{ + Field1: i, + } + s = append(s, mustSlice(velocypack.Marshal(input))...) 
+ } + r := bytes.NewReader(s) + d := velocypack.NewDecoder(r) + + for i := 0; i < 1000; i++ { + var v Struct1 + must(d.Decode(&v)) + expected := Struct1{ + Field1: i, + } + ASSERT_EQ(v, expected, t) + } +} diff --git a/deps/github.com/arangodb/go-velocypack/test/decoder_test.go b/deps/github.com/arangodb/go-velocypack/test/decoder_test.go new file mode 100644 index 000000000..6205e2553 --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/test/decoder_test.go @@ -0,0 +1,38 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "testing" + + velocypack "github.com/arangodb/go-velocypack" +) + +func TestDecoderUnmarshalNil(t *testing.T) { + b := velocypack.Builder{} + must(b.OpenArray()) + must(b.Close()) + s := mustSlice(b.Slice()) + + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsInvalidUnmarshal, t)(velocypack.Unmarshal(s, nil)) +} diff --git a/deps/github.com/arangodb/go-velocypack/test/dumper_test.go b/deps/github.com/arangodb/go-velocypack/test/dumper_test.go new file mode 100644 index 000000000..edbd0ff97 --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/test/dumper_test.go @@ -0,0 +1,308 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "bytes" + "testing" + + velocypack "github.com/arangodb/go-velocypack" +) + +func TestDumperNull(t *testing.T) { + s := velocypack.Slice([]byte{0x18}) + buf := &bytes.Buffer{} + d := velocypack.NewDumper(buf, nil) + d.Append(s) + ASSERT_EQ("null", buf.String(), t) +} + +func TestDumperFalse(t *testing.T) { + s := velocypack.Slice([]byte{0x19}) + buf := &bytes.Buffer{} + d := velocypack.NewDumper(buf, nil) + d.Append(s) + ASSERT_EQ("false", buf.String(), t) +} + +func TestDumperTrue(t *testing.T) { + s := velocypack.Slice([]byte{0x1a}) + buf := &bytes.Buffer{} + d := velocypack.NewDumper(buf, nil) + d.Append(s) + ASSERT_EQ("true", buf.String(), t) +} + +func TestDumperStringSimple(t *testing.T) { + b := velocypack.Builder{} + must(b.AddValue(velocypack.NewStringValue("foobar"))) + + s := mustSlice(b.Slice()) + buf := &bytes.Buffer{} + d := velocypack.NewDumper(buf, nil) + d.Append(s) + ASSERT_EQ(`"foobar"`, buf.String(), t) +} + +func TestDumperStringSpecialChars(t *testing.T) { + b := velocypack.Builder{} + must(b.AddValue(velocypack.NewStringValue("\"fo\r \n \\to''\\ \\bar\""))) + + s := mustSlice(b.Slice()) + buf := &bytes.Buffer{} + d := velocypack.NewDumper(buf, nil) + d.Append(s) + ASSERT_EQ("\"\\\"fo\\r \\n \\\\to''\\\\ \\\\bar\\\"\"", buf.String(), t) +} + +func TestDumperStringControlChars(t *testing.T) { + b := velocypack.Builder{} + must(b.AddValue(velocypack.NewStringValue("\x00\x01\x02 baz \x03"))) + + s := mustSlice(b.Slice()) + buf := &bytes.Buffer{} + d := velocypack.NewDumper(buf, nil) + d.Append(s) + ASSERT_EQ("\"\\u0000\\u0001\\u0002 baz \\u0003\"", buf.String(), t) +} + +func TestDumperStringUTF8(t *testing.T) { + b := velocypack.Builder{} + must(b.AddValue(velocypack.NewStringValue("mötör"))) + + s := mustSlice(b.Slice()) + buf := &bytes.Buffer{} + d := velocypack.NewDumper(buf, nil) + d.Append(s) + ASSERT_EQ("\"mötör\"", 
buf.String(), t) +} + +func TestDumperStringUTF8Escaped(t *testing.T) { + b := velocypack.Builder{} + must(b.AddValue(velocypack.NewStringValue("mötör"))) + + s := mustSlice(b.Slice()) + buf := &bytes.Buffer{} + d := velocypack.NewDumper(buf, &velocypack.DumperOptions{EscapeUnicode: true}) + d.Append(s) + ASSERT_EQ("\"m\\u00F6t\\u00F6r\"", buf.String(), t) +} + +func TestDumperStringTwoByteUTF8(t *testing.T) { + b := velocypack.Builder{} + must(b.AddValue(velocypack.NewStringValue("\xc2\xa2"))) + + s := mustSlice(b.Slice()) + buf := &bytes.Buffer{} + d := velocypack.NewDumper(buf, nil) + d.Append(s) + ASSERT_EQ("\"\xc2\xa2\"", buf.String(), t) +} + +func TestDumperStringTwoByteUTF8Escaped(t *testing.T) { + b := velocypack.Builder{} + must(b.AddValue(velocypack.NewStringValue("\xc2\xa2"))) + + s := mustSlice(b.Slice()) + buf := &bytes.Buffer{} + d := velocypack.NewDumper(buf, &velocypack.DumperOptions{EscapeUnicode: true}) + d.Append(s) + ASSERT_EQ("\"\\u00A2\"", buf.String(), t) +} + +func TestDumperStringThreeByteUTF8(t *testing.T) { + b := velocypack.Builder{} + must(b.AddValue(velocypack.NewStringValue("\xe2\x82\xac"))) + + s := mustSlice(b.Slice()) + buf := &bytes.Buffer{} + d := velocypack.NewDumper(buf, nil) + d.Append(s) + ASSERT_EQ("\"\xe2\x82\xac\"", buf.String(), t) +} + +func TestDumperStringThreeByteUTF8Escaped(t *testing.T) { + b := velocypack.Builder{} + must(b.AddValue(velocypack.NewStringValue("\xe2\x82\xac"))) + + s := mustSlice(b.Slice()) + buf := &bytes.Buffer{} + d := velocypack.NewDumper(buf, &velocypack.DumperOptions{EscapeUnicode: true}) + d.Append(s) + ASSERT_EQ("\"\\u20AC\"", buf.String(), t) +} + +func TestDumperStringFourByteUTF8(t *testing.T) { + b := velocypack.Builder{} + must(b.AddValue(velocypack.NewStringValue("\xf0\xa4\xad\xa2"))) + + s := mustSlice(b.Slice()) + buf := &bytes.Buffer{} + d := velocypack.NewDumper(buf, nil) + d.Append(s) + ASSERT_EQ("\"\xf0\xa4\xad\xa2\"", buf.String(), t) +} + +func 
TestDumperStringFourByteUTF8Escaped(t *testing.T) { + b := velocypack.Builder{} + must(b.AddValue(velocypack.NewStringValue("\xf0\xa4\xad\xa2"))) + + s := mustSlice(b.Slice()) + buf := &bytes.Buffer{} + d := velocypack.NewDumper(buf, &velocypack.DumperOptions{EscapeUnicode: true}) + d.Append(s) + ASSERT_EQ("\"\\uD852\\uDF62\"", buf.String(), t) +} + +func TestDumperStringMultiBytes(t *testing.T) { + tests := []string{ + "Lorem ipsum dolor sit amet, te enim mandamus consequat ius, cu eos timeam bonorum, in nec eruditi tibique. At nec malorum saperet vivendo. Qui delectus moderatius in. Vivendo expetendis ullamcorper ut mei.", + "Мёнём пауло пытынтёюм ад ыам. Но эрож рыпудяары вим, пожтэа эюрйпйдяч ентырпрытаряш ад хёз. Мыа дектаж дёжкэрэ котёдиэквюэ ан. Ведят брутэ мэдиокретатым йн прё", + "Μει ει παρτεμ μολλις δελισατα, σιφιβυς σονσυλατυ ραθιονιβυς συ φις, φερι μυνερε μεα ετ. Ειρμωδ απεριρι δισενθιετ εα υσυ, κυο θωτα φευγαιθ δισενθιετ νο", + "供覧必責同界要努新少時購止上際英連動信。同売宗島載団報音改浅研壊趣全。並嗅整日放横旅関書文転方。天名他賞川日拠隊散境行尚島自模交最葉駒到", + "舞ばい支小ぜ館応ヌエマ得6備ルあ煮社義ゃフおづ報載通ナチセ東帯あスフず案務革た証急をのだ毎点十はぞじド。1芸キテ成新53験モワサセ断団ニカ働給相づらべさ境著ラさ映権護ミオヲ但半モ橋同タ価法ナカネ仙説時オコワ気社オ", + "أي جنوب بداية السبب بلا. تمهيد التكاليف العمليات إذ دول, عن كلّ أراضي اعتداء, بال الأوروبي الإقتصادية و. دخول تحرّكت بـ حين. أي شاسعة لليابان استطاعوا مكن. الأخذ الصينية والنرويج هو أخذ.", + "זכר דפים בדפים מה, צילום מדינות היא או, ארץ צרפתית העברית אירועים ב. שונה קולנוע מתן אם, את אחד הארץ ציור וכמקובל. ויש העיר שימושי מדויקים בה, היא ויקי ברוכים תאולוגיה או. את זכר קהילה חבריכם ליצירתה, ערכים התפתחות חפש גם.", + } + for _, test := range tests { + b := velocypack.Builder{} + must(b.AddValue(velocypack.NewStringValue(test))) + + s := mustSlice(b.Slice()) + buf := &bytes.Buffer{} + d := velocypack.NewDumper(buf, nil) + d.Append(s) + expected := "\"" + test + "\"" + ASSERT_EQ(expected, buf.String(), t) + } +} + +func TestDumperStringMultiBytesEscaped(t *testing.T) { + tests := map[string]string{ + "Мёнём пауло пытынтёюм ад ыам. 
Но эрож рыпудяары вим, пожтэа эюрйпйдяч ентырпрытаряш ад хёз. Мыа дектаж дёжкэрэ котёдиэквюэ ан. Ведят брутэ мэдиокретатым йн прё": "\\u041C\\u0451\\u043D\\u0451\\u043C \\u043F\\u0430\\u0443\\u043B\\u043E \\u043F\\u044B\\u0442\\u044B\\u043D\\u0442\\u0451\\u044E\\u043C \\u0430\\u0434 \\u044B\\u0430\\u043C. \\u041D\\u043E \\u044D\\u0440\\u043E\\u0436 \\u0440\\u044B\\u043F\\u0443\\u0434\\u044F\\u0430\\u0440\\u044B \\u0432\\u0438\\u043C, \\u043F\\u043E\\u0436\\u0442\\u044D\\u0430 \\u044D\\u044E\\u0440\\u0439\\u043F\\u0439\\u0434\\u044F\\u0447 \\u0435\\u043D\\u0442\\u044B\\u0440\\u043F\\u0440\\u044B\\u0442\\u0430\\u0440\\u044F\\u0448 \\u0430\\u0434 \\u0445\\u0451\\u0437. \\u041C\\u044B\\u0430 \\u0434\\u0435\\u043A\\u0442\\u0430\\u0436 \\u0434\\u0451\\u0436\\u043A\\u044D\\u0440\\u044D \\u043A\\u043E\\u0442\\u0451\\u0434\\u0438\\u044D\\u043A\\u0432\\u044E\\u044D \\u0430\\u043D. \\u0412\\u0435\\u0434\\u044F\\u0442 \\u0431\\u0440\\u0443\\u0442\\u044D \\u043C\\u044D\\u0434\\u0438\\u043E\\u043A\\u0440\\u0435\\u0442\\u0430\\u0442\\u044B\\u043C \\u0439\\u043D \\u043F\\u0440\\u0451", + "Μει ει παρτεμ μολλις δελισατα, σιφιβυς σονσυλατυ ραθιονιβυς συ φις, φερι μυνερε μεα ετ. Ειρμωδ απεριρι δισενθιετ εα υσυ, κυο θωτα φευγαιθ δισενθιετ νο": "\\u039C\\u03B5\\u03B9 \\u03B5\\u03B9 \\u03C0\\u03B1\\u03C1\\u03C4\\u03B5\\u03BC \\u03BC\\u03BF\\u03BB\\u03BB\\u03B9\\u03C2 \\u03B4\\u03B5\\u03BB\\u03B9\\u03C3\\u03B1\\u03C4\\u03B1, \\u03C3\\u03B9\\u03C6\\u03B9\\u03B2\\u03C5\\u03C2 \\u03C3\\u03BF\\u03BD\\u03C3\\u03C5\\u03BB\\u03B1\\u03C4\\u03C5 \\u03C1\\u03B1\\u03B8\\u03B9\\u03BF\\u03BD\\u03B9\\u03B2\\u03C5\\u03C2 \\u03C3\\u03C5 \\u03C6\\u03B9\\u03C2, \\u03C6\\u03B5\\u03C1\\u03B9 \\u03BC\\u03C5\\u03BD\\u03B5\\u03C1\\u03B5 \\u03BC\\u03B5\\u03B1 \\u03B5\\u03C4. 
\\u0395\\u03B9\\u03C1\\u03BC\\u03C9\\u03B4 \\u03B1\\u03C0\\u03B5\\u03C1\\u03B9\\u03C1\\u03B9 \\u03B4\\u03B9\\u03C3\\u03B5\\u03BD\\u03B8\\u03B9\\u03B5\\u03C4 \\u03B5\\u03B1 \\u03C5\\u03C3\\u03C5, \\u03BA\\u03C5\\u03BF \\u03B8\\u03C9\\u03C4\\u03B1 \\u03C6\\u03B5\\u03C5\\u03B3\\u03B1\\u03B9\\u03B8 \\u03B4\\u03B9\\u03C3\\u03B5\\u03BD\\u03B8\\u03B9\\u03B5\\u03C4 \\u03BD\\u03BF", + "供覧必責同界要努新少時購止上際英連動信。同売宗島載団報音改浅研壊趣全。並嗅整日放横旅関書文転方。天名他賞川日拠隊散境行尚島自模交最葉駒到": "\\u4F9B\\u89A7\\u5FC5\\u8CAC\\u540C\\u754C\\u8981\\u52AA\\u65B0\\u5C11\\u6642\\u8CFC\\u6B62\\u4E0A\\u969B\\u82F1\\u9023\\u52D5\\u4FE1\\u3002\\u540C\\u58F2\\u5B97\\u5CF6\\u8F09\\u56E3\\u5831\\u97F3\\u6539\\u6D45\\u7814\\u58CA\\u8DA3\\u5168\\u3002\\u4E26\\u55C5\\u6574\\u65E5\\u653E\\u6A2A\\u65C5\\u95A2\\u66F8\\u6587\\u8EE2\\u65B9\\u3002\\u5929\\u540D\\u4ED6\\u8CDE\\u5DDD\\u65E5\\u62E0\\u968A\\u6563\\u5883\\u884C\\u5C1A\\u5CF6\\u81EA\\u6A21\\u4EA4\\u6700\\u8449\\u99D2\\u5230", + "舞ばい支小ぜ館応ヌエマ得6備ルあ煮社義ゃフおづ報載通ナチセ東帯あスフず案務革た証急をのだ毎点十はぞじド。1芸キテ成新53験モワサセ断団ニカ働給相づらべさ境著ラさ映権護ミオヲ但半モ橋同タ価法ナカネ仙説時オコワ気社オ": "\\u821E\\u3070\\u3044\\u652F\\u5C0F\\u305C\\u9928\\u5FDC\\u30CC\\u30A8\\u30DE\\u5F976\\u5099\\u30EB\\u3042\\u716E\\u793E\\u7FA9\\u3083\\u30D5\\u304A\\u3065\\u5831\\u8F09\\u901A\\u30CA\\u30C1\\u30BB\\u6771\\u5E2F\\u3042\\u30B9\\u30D5\\u305A\\u6848\\u52D9\\u9769\\u305F\\u8A3C\\u6025\\u3092\\u306E\\u3060\\u6BCE\\u70B9\\u5341\\u306F\\u305E\\u3058\\u30C9\\u30021\\u82B8\\u30AD\\u30C6\\u6210\\u65B053\\u9A13\\u30E2\\u30EF\\u30B5\\u30BB\\u65AD\\u56E3\\u30CB\\u30AB\\u50CD\\u7D66\\u76F8\\u3065\\u3089\\u3079\\u3055\\u5883\\u8457\\u30E9\\u3055\\u6620\\u6A29\\u8B77\\u30DF\\u30AA\\u30F2\\u4F46\\u534A\\u30E2\\u6A4B\\u540C\\u30BF\\u4FA1\\u6CD5\\u30CA\\u30AB\\u30CD\\u4ED9\\u8AAC\\u6642\\u30AA\\u30B3\\u30EF\\u6C17\\u793E\\u30AA", + "أي جنوب بداية السبب بلا. تمهيد التكاليف العمليات إذ دول, عن كلّ أراضي اعتداء, بال الأوروبي الإقتصادية و. دخول تحرّكت بـ حين. أي شاسعة لليابان استطاعوا مكن. 
الأخذ الصينية والنرويج هو أخذ.": "\\u0623\\u064A \\u062C\\u0646\\u0648\\u0628 \\u0628\\u062F\\u0627\\u064A\\u0629 \\u0627\\u0644\\u0633\\u0628\\u0628 \\u0628\\u0644\\u0627. \\u062A\\u0645\\u0647\\u064A\\u062F \\u0627\\u0644\\u062A\\u0643\\u0627\\u0644\\u064A\\u0641 \\u0627\\u0644\\u0639\\u0645\\u0644\\u064A\\u0627\\u062A \\u0625\\u0630 \\u062F\\u0648\\u0644, \\u0639\\u0646 \\u0643\\u0644\\u0651 \\u0623\\u0631\\u0627\\u0636\\u064A \\u0627\\u0639\\u062A\\u062F\\u0627\\u0621, \\u0628\\u0627\\u0644 \\u0627\\u0644\\u0623\\u0648\\u0631\\u0648\\u0628\\u064A \\u0627\\u0644\\u0625\\u0642\\u062A\\u0635\\u0627\\u062F\\u064A\\u0629 \\u0648. \\u062F\\u062E\\u0648\\u0644 \\u062A\\u062D\\u0631\\u0651\\u0643\\u062A \\u0628\\u0640 \\u062D\\u064A\\u0646. \\u0623\\u064A \\u0634\\u0627\\u0633\\u0639\\u0629 \\u0644\\u0644\\u064A\\u0627\\u0628\\u0627\\u0646 \\u0627\\u0633\\u062A\\u0637\\u0627\\u0639\\u0648\\u0627 \\u0645\\u0643\\u0646. \\u0627\\u0644\\u0623\\u062E\\u0630 \\u0627\\u0644\\u0635\\u064A\\u0646\\u064A\\u0629 \\u0648\\u0627\\u0644\\u0646\\u0631\\u0648\\u064A\\u062C \\u0647\\u0648 \\u0623\\u062E\\u0630.", + "זכר דפים בדפים מה, צילום מדינות היא או, ארץ צרפתית העברית אירועים ב. שונה קולנוע מתן אם, את אחד הארץ ציור וכמקובל. ויש העיר שימושי מדויקים בה, היא ויקי ברוכים תאולוגיה או. את זכר קהילה חבריכם ליצירתה, ערכים התפתחות חפש גם.": "\\u05D6\\u05DB\\u05E8 \\u05D3\\u05E4\\u05D9\\u05DD \\u05D1\\u05D3\\u05E4\\u05D9\\u05DD \\u05DE\\u05D4, \\u05E6\\u05D9\\u05DC\\u05D5\\u05DD \\u05DE\\u05D3\\u05D9\\u05E0\\u05D5\\u05EA \\u05D4\\u05D9\\u05D0 \\u05D0\\u05D5, \\u05D0\\u05E8\\u05E5 \\u05E6\\u05E8\\u05E4\\u05EA\\u05D9\\u05EA \\u05D4\\u05E2\\u05D1\\u05E8\\u05D9\\u05EA \\u05D0\\u05D9\\u05E8\\u05D5\\u05E2\\u05D9\\u05DD \\u05D1. \\u05E9\\u05D5\\u05E0\\u05D4 \\u05E7\\u05D5\\u05DC\\u05E0\\u05D5\\u05E2 \\u05DE\\u05EA\\u05DF \\u05D0\\u05DD, \\u05D0\\u05EA \\u05D0\\u05D7\\u05D3 \\u05D4\\u05D0\\u05E8\\u05E5 \\u05E6\\u05D9\\u05D5\\u05E8 \\u05D5\\u05DB\\u05DE\\u05E7\\u05D5\\u05D1\\u05DC. 
\\u05D5\\u05D9\\u05E9 \\u05D4\\u05E2\\u05D9\\u05E8 \\u05E9\\u05D9\\u05DE\\u05D5\\u05E9\\u05D9 \\u05DE\\u05D3\\u05D5\\u05D9\\u05E7\\u05D9\\u05DD \\u05D1\\u05D4, \\u05D4\\u05D9\\u05D0 \\u05D5\\u05D9\\u05E7\\u05D9 \\u05D1\\u05E8\\u05D5\\u05DB\\u05D9\\u05DD \\u05EA\\u05D0\\u05D5\\u05DC\\u05D5\\u05D2\\u05D9\\u05D4 \\u05D0\\u05D5. \\u05D0\\u05EA \\u05D6\\u05DB\\u05E8 \\u05E7\\u05D4\\u05D9\\u05DC\\u05D4 \\u05D7\\u05D1\\u05E8\\u05D9\\u05DB\\u05DD \\u05DC\\u05D9\\u05E6\\u05D9\\u05E8\\u05EA\\u05D4, \\u05E2\\u05E8\\u05DB\\u05D9\\u05DD \\u05D4\\u05EA\\u05E4\\u05EA\\u05D7\\u05D5\\u05EA \\u05D7\\u05E4\\u05E9 \\u05D2\\u05DD.", + } + for test, expected := range tests { + b := velocypack.Builder{} + must(b.AddValue(velocypack.NewStringValue(test))) + + s := mustSlice(b.Slice()) + buf := &bytes.Buffer{} + d := velocypack.NewDumper(buf, &velocypack.DumperOptions{EscapeUnicode: true}) + d.Append(s) + expected = "\"" + expected + "\"" + ASSERT_EQ(expected, buf.String(), t) + } +} + +func TestDumperDouble(t *testing.T) { + tests := []struct { + Value float64 + Expected string + }{ + {0.0, "0"}, + {123456.67, "123456.67"}, + {-123456.67, "-123456.67"}, + {-0.000442, "-0.000442"}, + {0.1, "0.1"}, + {2.41e-109, "2.41e-109"}, + {-3.423e+78, "-3.423e+78"}, + {3.423e+123, "3.423e+123"}, + {3.4239493e+104, "3.4239493e+104"}, + } + for _, test := range tests { + b := velocypack.Builder{} + must(b.AddValue(velocypack.NewDoubleValue(test.Value))) + + s := mustSlice(b.Slice()) + buf := &bytes.Buffer{} + d := velocypack.NewDumper(buf, nil) + d.Append(s) + ASSERT_EQ(test.Expected, buf.String(), t) + } +} + +func TestDumperInt(t *testing.T) { + tests := []struct { + Value int64 + Expected string + }{ + {0, "0"}, + {123456789, "123456789"}, + {-123456789, "-123456789"}, + } + for _, test := range tests { + b := velocypack.Builder{} + must(b.AddValue(velocypack.NewIntValue(test.Value))) + + s := mustSlice(b.Slice()) + buf := &bytes.Buffer{} + d := velocypack.NewDumper(buf, nil) + d.Append(s) + 
ASSERT_EQ(test.Expected, buf.String(), t) + } +} + +func TestDumperUInt(t *testing.T) { + tests := []struct { + Value uint64 + Expected string + }{ + {0, "0"}, + {5, "5"}, + {123456789, "123456789"}, + } + for _, test := range tests { + b := velocypack.Builder{} + must(b.AddValue(velocypack.NewUIntValue(test.Value))) + + s := mustSlice(b.Slice()) + buf := &bytes.Buffer{} + d := velocypack.NewDumper(buf, nil) + d.Append(s) + ASSERT_EQ(test.Expected, buf.String(), t) + } +} + +func TestDumperBinary(t *testing.T) { + tests := []struct { + Value []byte + Expected string + }{ + {[]byte{1, 2, 3, 4}, "null"}, // Binary data is not supported by the Dumper + } + for _, test := range tests { + b := velocypack.Builder{} + must(b.AddValue(velocypack.NewBinaryValue(test.Value))) + + s := mustSlice(b.Slice()) + buf := &bytes.Buffer{} + d := velocypack.NewDumper(buf, nil) + d.Append(s) + ASSERT_EQ(test.Expected, buf.String(), t) + } +} diff --git a/deps/github.com/arangodb/go-velocypack/test/encoder_array_test.go b/deps/github.com/arangodb/go-velocypack/test/encoder_array_test.go new file mode 100644 index 000000000..258e7bd2b --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/test/encoder_array_test.go @@ -0,0 +1,147 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "testing" + + velocypack "github.com/arangodb/go-velocypack" +) + +func TestEncoderArrayEmptyArray(t *testing.T) { + bytes, err := velocypack.Marshal([0]struct{}{}) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + ASSERT_EQ(s.Type(), velocypack.Array, t) + ASSERT_TRUE(s.IsEmptyArray(), t) + ASSERT_EQ(`[]`, mustString(s.JSONString()), t) +} + +func TestEncoderArrayByteArray(t *testing.T) { + bytes, err := velocypack.Marshal([5]byte{1, 2, 3, 4, 5}) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + ASSERT_EQ(s.Type(), velocypack.Array, t) // Byte slices are converted to Binary, byte arrays not. + ASSERT_TRUE(s.IsArray(), t) + ASSERT_EQ(`[1,2,3,4,5]`, mustString(s.JSONString()), t) + ASSERT_EQ(velocypack.ValueLength(5), mustLength(s.Length()), t) +} + +func TestEncoderArrayBoolArray(t *testing.T) { + bytes, err := velocypack.Marshal([4]bool{true, false, false, true}) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + ASSERT_EQ(s.Type(), velocypack.Array, t) + ASSERT_TRUE(s.IsArray(), t) + ASSERT_EQ(`[true,false,false,true]`, mustString(s.JSONString()), t) +} + +func TestEncoderArrayIntArray(t *testing.T) { + bytes, err := velocypack.Marshal([7]int{1, 2, 3, -4, 5, 6, 100000}) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + ASSERT_EQ(s.Type(), velocypack.Array, t) + ASSERT_TRUE(s.IsArray(), t) + ASSERT_EQ(`[1,2,3,-4,5,6,100000]`, mustString(s.JSONString()), t) +} + +func TestEncoderArrayUIntArray(t *testing.T) { + bytes, err := velocypack.Marshal([7]uint{1, 2, 3, 4, 5, 6, 100000}) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + ASSERT_EQ(s.Type(), velocypack.Array, t) + ASSERT_TRUE(s.IsArray(), t) + ASSERT_EQ(`[1,2,3,4,5,6,100000]`, mustString(s.JSONString()), t) +} + +func TestEncoderArrayFloat32Array(t *testing.T) { + bytes, err := velocypack.Marshal([4]float32{0.0, -1.5, 66, 45}) + ASSERT_NIL(err, t) + s := 
velocypack.Slice(bytes) + + ASSERT_EQ(s.Type(), velocypack.Array, t) + ASSERT_TRUE(s.IsArray(), t) + ASSERT_EQ(`[0,-1.5,66,45]`, mustString(s.JSONString()), t) +} + +func TestEncoderArrayFloat64Array(t *testing.T) { + bytes, err := velocypack.Marshal([4]float64{0.0, -1.5, 6.23, 45e+10}) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + ASSERT_EQ(s.Type(), velocypack.Array, t) + ASSERT_TRUE(s.IsArray(), t) + ASSERT_EQ(`[0,-1.5,6.23,4.5e+11]`, mustString(s.JSONString()), t) +} + +func TestEncoderArrayStructArray(t *testing.T) { + bytes, err := velocypack.Marshal([3]Struct1{ + Struct1{Field1: 1, field2: 2}, + Struct1{Field1: 10, field2: 200}, + Struct1{Field1: 100, field2: 200}, + }) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + t.Log(s.String()) + ASSERT_EQ(s.Type(), velocypack.Array, t) + ASSERT_TRUE(s.IsArray(), t) + ASSERT_EQ(`[{"Field1":1},{"Field1":10},{"Field1":100}]`, mustString(s.JSONString()), t) +} + +func TestEncoderArrayStructPtrArray(t *testing.T) { + bytes, err := velocypack.Marshal([5]*Struct1{ + &Struct1{Field1: 1, field2: 2}, + nil, + &Struct1{Field1: 10, field2: 200}, + &Struct1{Field1: 100, field2: 200}, + nil, + }) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + t.Log(s.String()) + ASSERT_EQ(s.Type(), velocypack.Array, t) + ASSERT_TRUE(s.IsArray(), t) + ASSERT_EQ(`[{"Field1":1},null,{"Field1":10},{"Field1":100},null]`, mustString(s.JSONString()), t) +} + +func TestEncoderArrayNestedArray(t *testing.T) { + bytes, err := velocypack.Marshal([3][]Struct1{ + []Struct1{Struct1{Field1: 1, field2: 2}, Struct1{Field1: 3, field2: 4}}, + []Struct1{Struct1{Field1: 10, field2: 200}}, + []Struct1{Struct1{Field1: 100, field2: 200}}, + }) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + t.Log(s.String()) + ASSERT_EQ(s.Type(), velocypack.Array, t) + ASSERT_TRUE(s.IsArray(), t) + ASSERT_EQ(`[[{"Field1":1},{"Field1":3}],[{"Field1":10}],[{"Field1":100}]]`, mustString(s.JSONString()), t) +} diff --git 
a/deps/github.com/arangodb/go-velocypack/test/encoder_custom_test.go b/deps/github.com/arangodb/go-velocypack/test/encoder_custom_test.go new file mode 100644 index 000000000..135142b6b --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/test/encoder_custom_test.go @@ -0,0 +1,268 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "encoding/json" + "fmt" + "testing" + + velocypack "github.com/arangodb/go-velocypack" +) + +type CustomStruct1 struct { + Field1 int +} + +func (cs *CustomStruct1) MarshalVPack() (velocypack.Slice, error) { + var b velocypack.Builder + if err := b.AddValue(velocypack.NewStringValue("Hello world")); err != nil { + return nil, err + } + return b.Slice() +} + +func TestEncoderCustomStruct1(t *testing.T) { + bytes, err := velocypack.Marshal(&CustomStruct1{ + Field1: 999, + }) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + ASSERT_EQ(s.Type(), velocypack.String, t) + ASSERT_EQ(`"Hello world"`, mustString(s.JSONString()), t) +} + +type CustomStruct1Object struct { + Field1 int +} + +func (cs *CustomStruct1Object) MarshalVPack() (velocypack.Slice, error) { + var b velocypack.Builder + if err := b.OpenObject(); err != nil { + return nil, err + } + if err := b.AddKeyValue("foo", velocypack.NewStringValue("Hello world")); err != 
nil { + return nil, err + } + if err := b.Close(); err != nil { + return nil, err + } + return b.Slice() +} + +func TestEncoderCustomCustomStruct1Object(t *testing.T) { + bytes, err := velocypack.Marshal(&CustomStruct1Object{ + Field1: 999, + }) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + ASSERT_EQ(s.Type(), velocypack.Object, t) + ASSERT_EQ(velocypack.ValueLength(1), mustLength(s.Length()), t) + + ss := mustSlice(s.Get("foo")) + ASSERT_EQ(ss.Type(), velocypack.String, t) + ASSERT_EQ(`"Hello world"`, mustString(ss.JSONString()), t) +} + +type CustomStruct1Array struct { + Field1 int +} + +func (cs *CustomStruct1Array) MarshalVPack() (velocypack.Slice, error) { + var b velocypack.Builder + if err := b.OpenArray(); err != nil { + return nil, err + } + if err := b.AddValue(velocypack.NewStringValue("Hello world Array")); err != nil { + return nil, err + } + if err := b.Close(); err != nil { + return nil, err + } + return b.Slice() +} + +func TestEncoderCustomCustomStruct1Array(t *testing.T) { + bytes, err := velocypack.Marshal(&CustomStruct1Array{ + Field1: 999, + }) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + ASSERT_EQ(s.Type(), velocypack.Array, t) + ASSERT_EQ(velocypack.ValueLength(1), mustLength(s.Length()), t) + + ss := mustSlice(s.At(0)) + ASSERT_EQ(ss.Type(), velocypack.String, t) + ASSERT_EQ(`"Hello world Array"`, mustString(ss.JSONString()), t) +} + +type CustomStruct2 struct { + Field CustomStruct1 +} + +func TestEncoderCustomStruct2(t *testing.T) { + bytes, err := velocypack.Marshal(CustomStruct2{ + Field: CustomStruct1{ + Field1: 999222, + }, + }) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + ASSERT_EQ(s.Type(), velocypack.Object, t) + ASSERT_EQ(`{"Field":{"Field1":999222}}`, mustString(s.JSONString()), t) +} + +type CustomStruct3 struct { + Field *CustomStruct1 +} + +func TestEncoderCustomStruct3(t *testing.T) { + bytes, err := velocypack.Marshal(CustomStruct3{ + Field: &CustomStruct1{ + Field1: 999222, + }, + }) + 
ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + ASSERT_EQ(s.Type(), velocypack.Object, t) + ASSERT_EQ(`{"Field":"Hello world"}`, mustString(s.JSONString()), t) +} + +type CustomText1 struct { + I int +} + +func (ct CustomText1) MarshalText() ([]byte, error) { + key := fmt.Sprintf("key%d", ct.I) + return []byte(key), nil +} + +func TestEncoderCustomText1(t *testing.T) { + bytes, err := velocypack.Marshal(map[CustomText1]bool{ + CustomText1{7}: true, + CustomText1{2}: false, + }) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + ASSERT_EQ(s.Type(), velocypack.Object, t) + ASSERT_EQ(`{"key2":false,"key7":true}`, mustString(s.JSONString()), t) +} + +type CustomJSONStruct1 struct { + Field1 int +} + +func (cs *CustomJSONStruct1) MarshalJSON() ([]byte, error) { + return json.Marshal("Hello JSON") +} + +func TestEncoderCustomJSONStruct1(t *testing.T) { + bytes, err := velocypack.Marshal(&CustomJSONStruct1{ + Field1: 999, + }) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + ASSERT_EQ(s.Type(), velocypack.String, t) + ASSERT_EQ(`"Hello JSON"`, mustString(s.JSONString()), t) +} + +type CustomJSONStruct1Object struct { + Field1 int +} + +func (cs *CustomJSONStruct1Object) MarshalJSON() ([]byte, error) { + return []byte(`{"foo":"Hello JSON Object"}`), nil +} + +func TestEncoderCustomJSONStruct1Object(t *testing.T) { + bytes, err := velocypack.Marshal(&CustomJSONStruct1Object{ + Field1: 999, + }) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + ASSERT_EQ(s.Type(), velocypack.Object, t) + ASSERT_EQ(velocypack.ValueLength(1), mustLength(s.Length()), t) + + ss := mustSlice(s.Get("foo")) + ASSERT_EQ(ss.Type(), velocypack.String, t) + ASSERT_EQ(`"Hello JSON Object"`, mustString(ss.JSONString()), t) +} + +type CustomJSONStruct1Array struct { + Field1 int +} + +func (cs *CustomJSONStruct1Array) MarshalJSON() ([]byte, error) { + return []byte(`["Hello JSON Array"]`), nil +} + +func TestEncoderCustomJSONStruct1Array(t *testing.T) { + bytes, err := 
velocypack.Marshal(&CustomJSONStruct1Array{ + Field1: 999, + }) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + ASSERT_EQ(s.Type(), velocypack.Array, t) + ASSERT_EQ(velocypack.ValueLength(1), mustLength(s.Length()), t) + + ss := mustSlice(s.At(0)) + ASSERT_EQ(ss.Type(), velocypack.String, t) + ASSERT_EQ(`"Hello JSON Array"`, mustString(ss.JSONString()), t) +} + +type CustomJSONVPACKStruct1 struct { + Field1 int +} + +func (cs *CustomJSONVPACKStruct1) MarshalVPack() (velocypack.Slice, error) { + var b velocypack.Builder + if err := b.AddValue(velocypack.NewStringValue("Hello VPACK, goodbye JSON")); err != nil { + return nil, err + } + return b.Slice() +} + +func (cs *CustomJSONVPACKStruct1) MarshalJSON() ([]byte, error) { + return json.Marshal("Hello JSON, goodbye VPACK") +} + +func TestEncoderCustomJSONVPACKStruct1(t *testing.T) { + // MarshalVPack is preferred over MarshalJSON + bytes, err := velocypack.Marshal(&CustomJSONVPACKStruct1{ + Field1: 999, + }) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + ASSERT_EQ(s.Type(), velocypack.String, t) + ASSERT_EQ(`"Hello VPACK, goodbye JSON"`, mustString(s.JSONString()), t) +} diff --git a/deps/github.com/arangodb/go-velocypack/test/encoder_map_test.go b/deps/github.com/arangodb/go-velocypack/test/encoder_map_test.go new file mode 100644 index 000000000..d56173ff0 --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/test/encoder_map_test.go @@ -0,0 +1,264 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "testing" + + velocypack "github.com/arangodb/go-velocypack" +) + +func TestEncoderMapEmpty(t *testing.T) { + bytes, err := velocypack.Marshal(map[string]interface{}{}) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + ASSERT_EQ(s.Type(), velocypack.Object, t) + ASSERT_TRUE(s.IsEmptyObject(), t) +} + +func TestEncoderMapOneField(t *testing.T) { + bytes, err := velocypack.Marshal(map[string]string{ + "Name": "Max", + }) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + ASSERT_EQ(s.Type(), velocypack.Object, t) + ASSERT_FALSE(s.IsEmptyObject(), t) + ASSERT_EQ(`{"Name":"Max"}`, mustString(s.JSONString()), t) +} + +func TestEncoderMapMultipleFields(t *testing.T) { + bytes, err := velocypack.Marshal(map[string]interface{}{ + "Name": "Max", + "A": true, + "D": 123.456, + "I": 789, + }) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + ASSERT_EQ(s.Type(), velocypack.Object, t) + ASSERT_FALSE(s.IsEmptyObject(), t) + ASSERT_EQ(`{"A":true,"D":123.456,"I":789,"Name":"Max"}`, mustString(s.JSONString()), t) +} + +func TestEncoderMapMultipleFieldsEmpty(t *testing.T) { + bytes, err := velocypack.Marshal(map[string]interface{}{ + "Name": "", + "A": false, + "D": 0.0, + "I": 0, + }) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + ASSERT_EQ(s.Type(), velocypack.Object, t) + ASSERT_FALSE(s.IsEmptyObject(), t) + ASSERT_EQ(`{"A":false,"D":0,"I":0,"Name":""}`, mustString(s.JSONString()), t) +} + +func TestEncoderMapNestedStruct(t *testing.T) { + bytes, err := velocypack.Marshal(map[string]interface{}{ + "Name": "Jan", + "Nested": map[string]interface{}{ + "Foo": 999, + }, + "A": true, + "D": 123.456, + "I": 789, + }) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + ASSERT_EQ(s.Type(), velocypack.Object, t) + 
ASSERT_FALSE(s.IsEmptyObject(), t) + ASSERT_EQ(`{"A":true,"D":123.456,"I":789,"Name":"Jan","Nested":{"Foo":999}}`, mustString(s.JSONString()), t) +} + +func TestEncoderMapNestedStructs(t *testing.T) { + bytes, err := velocypack.Marshal(map[string]interface{}{ + "Name": "Jan", + "Nested": map[string]interface{}{ + "Foo": 999, + "Nested": map[string]bool{ + "Foo": true, + }, + }, + "A": true, + "D": 123.456, + "I": 789, + }) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + ASSERT_EQ(s.Type(), velocypack.Object, t) + ASSERT_FALSE(s.IsEmptyObject(), t) + ASSERT_EQ(`{"A":true,"D":123.456,"I":789,"Name":"Jan","Nested":{"Foo":999,"Nested":{"Foo":true}}}`, mustString(s.JSONString()), t) +} + +func TestEncoderMapNestedStructPtr(t *testing.T) { + bytes, err := velocypack.Marshal(map[string]interface{}{ + "Name": "Jan", + "Nested": &struct { + Foo int + }{ + Foo: 999, + }, + "A": true, + "D": 123.456, + "I": 789, + }) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + ASSERT_EQ(s.Type(), velocypack.Object, t) + ASSERT_FALSE(s.IsEmptyObject(), t) + ASSERT_EQ(`{"A":true,"D":123.456,"I":789,"Name":"Jan","Nested":{"Foo":999}}`, mustString(s.JSONString()), t) +} + +func TestEncoderMapNestedStructPtrNil(t *testing.T) { + bytes, err := velocypack.Marshal(map[string]interface{}{ + "Name": "Jan", + "Nested": nil, + "A": true, + "D": 123.456, + "I": 789, + }) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + ASSERT_EQ(s.Type(), velocypack.Object, t) + ASSERT_FALSE(s.IsEmptyObject(), t) + ASSERT_EQ(`{"A":true,"D":123.456,"I":789,"Name":"Jan","Nested":null}`, mustString(s.JSONString()), t) +} + +func TestEncoderMapNestedByteSlice(t *testing.T) { + bytes, err := velocypack.Marshal(map[string]interface{}{ + "Name": "Jan", + "Nested": []byte{1, 2, 3, 4, 5, 6}, + "A": true, + "D": 123.456, + "I": 789, + }) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + ASSERT_EQ(s.Type(), velocypack.Object, t) + ASSERT_FALSE(s.IsEmptyObject(), t) + 
ASSERT_EQ(`{"A":true,"D":123.456,"I":789,"Name":"Jan","Nested":"(non-representable type Binary)"}`, mustString(s.JSONString(velocypack.DumperOptions{UnsupportedTypeBehavior: velocypack.ConvertUnsupportedType})), t) +} + +func TestEncoderMapNestedIntSlice(t *testing.T) { + bytes, err := velocypack.Marshal(map[string]interface{}{ + "Name": "Jan", + "Nested": []int{1, 2, 3, 4, 5}, + "A": true, + "D": 123.456, + "I": 789, + }) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + ASSERT_EQ(s.Type(), velocypack.Object, t) + ASSERT_FALSE(s.IsEmptyObject(), t) + ASSERT_EQ(`{"A":true,"D":123.456,"I":789,"Name":"Jan","Nested":[1,2,3,4,5]}`, mustString(s.JSONString()), t) +} + +func TestEncoderMapNestedStringSlice(t *testing.T) { + bytes, err := velocypack.Marshal(map[string]interface{}{ + "Name": "Jan", + "Nested": []string{"Aap", "Noot"}, + "A": true, + "D": 123.456, + "I": 789, + }) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + ASSERT_EQ(s.Type(), velocypack.Object, t) + ASSERT_FALSE(s.IsEmptyObject(), t) + ASSERT_EQ(`{"A":true,"D":123.456,"I":789,"Name":"Jan","Nested":["Aap","Noot"]}`, mustString(s.JSONString()), t) +} + +func TestEncoderMapNestedStringSliceEmpty(t *testing.T) { + bytes, err := velocypack.Marshal(map[string]interface{}{ + "Name": "Jan", + "Nested": []string{}, + "A": true, + "D": 123.456, + "I": 789, + }) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + ASSERT_EQ(s.Type(), velocypack.Object, t) + ASSERT_FALSE(s.IsEmptyObject(), t) + ASSERT_EQ(`{"A":true,"D":123.456,"I":789,"Name":"Jan","Nested":[]}`, mustString(s.JSONString()), t) +} + +func TestEncoderMapRandomIterator(t *testing.T) { + bytesA, err := velocypack.Marshal(map[string]interface{}{ + "_key": "1246", + "_rev": "_U4_BZxm---", + }) + ASSERT_NIL(err, t) + bytesB, err := velocypack.Marshal(map[string]interface{}{ + "age": 34, + }) + ASSERT_NIL(err, t) + s := mustSlice(velocypack.Merge(bytesA, bytesB)) + + ASSERT_EQ(s.Type(), velocypack.Object, t) + 
ASSERT_FALSE(s.IsEmptyObject(), t) + + genOutput := func(s velocypack.Slice) string { + output := "{" + it, err := velocypack.NewObjectIterator(s, true) // Allow random iteration + ASSERT_NIL(err, t) + for it.IsValid() { + if !it.IsFirst() { + output = output + "," + } + k, err := it.Key(true) + ASSERT_NIL(err, t) + output += mustString(k.JSONString()) + output += ":" + v, err := it.Value() + ASSERT_NIL(err, t) + output += mustString(v.JSONString()) + err = it.Next() + ASSERT_NIL(err, t) + } + return output + "}" + } + + outputA := genOutput(bytesA) + outputB := genOutput(bytesB) + output := genOutput(s) + + ASSERT_EQ(`{"_key":"1246","_rev":"_U4_BZxm---"}`, outputA, t) + ASSERT_EQ(`{"age":34}`, outputB, t) + ASSERT_EQ(`{"_key":"1246","_rev":"_U4_BZxm---","age":34}`, output, t) +} diff --git a/deps/github.com/arangodb/go-velocypack/test/encoder_object_test.go b/deps/github.com/arangodb/go-velocypack/test/encoder_object_test.go new file mode 100644 index 000000000..5909d4fbd --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/test/encoder_object_test.go @@ -0,0 +1,566 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "encoding/json" + "testing" + + velocypack "github.com/arangodb/go-velocypack" +) + +func TestEncoderObjectEmpty(t *testing.T) { + bytes, err := velocypack.Marshal(struct{}{}) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + ASSERT_EQ(s.Type(), velocypack.Object, t) + ASSERT_TRUE(s.IsEmptyObject(), t) +} + +func TestEncoderObjectOneField(t *testing.T) { + bytes, err := velocypack.Marshal(struct { + Name string + }{ + Name: "Max", + }) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + ASSERT_EQ(s.Type(), velocypack.Object, t) + ASSERT_FALSE(s.IsEmptyObject(), t) + ASSERT_EQ(`{"Name":"Max"}`, mustString(s.JSONString()), t) +} + +func TestEncoderObjectMultipleFields(t *testing.T) { + bytes, err := velocypack.Marshal(struct { + Name string + A bool + D float64 + I int + }{ + Name: "Max", + A: true, + D: 123.456, + I: 789, + }) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + ASSERT_EQ(s.Type(), velocypack.Object, t) + ASSERT_FALSE(s.IsEmptyObject(), t) + ASSERT_EQ(`{"A":true,"D":123.456,"I":789,"Name":"Max"}`, mustString(s.JSONString()), t) +} + +func TestEncoderObjectTagRename(t *testing.T) { + bytes, err := velocypack.Marshal(struct { + Name string `json:"name"` + A bool `json:"field9"` + D float64 `json:"field7"` + I int + }{ + Name: "Max", + A: true, + D: 123.456, + I: 789, + }) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + ASSERT_EQ(s.Type(), velocypack.Object, t) + ASSERT_FALSE(s.IsEmptyObject(), t) + ASSERT_EQ(`{"I":789,"field7":123.456,"field9":true,"name":"Max"}`, mustString(s.JSONString()), t) +} + +func TestEncoderObjectTagOmitEmptyFull(t *testing.T) { + bytes, err := velocypack.Marshal(struct { + Name string `json:"name,omitempty"` + A bool `json:"field9,omitempty"` + D float64 `json:"field7,omitempty"` + I int `json:"field8,omitempty"` + }{ + Name: "Jan", + A: true, + D: 123.456, + I: 789, + }) + 
ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + ASSERT_EQ(s.Type(), velocypack.Object, t) + ASSERT_FALSE(s.IsEmptyObject(), t) + ASSERT_EQ(`{"field7":123.456,"field8":789,"field9":true,"name":"Jan"}`, mustString(s.JSONString()), t) +} + +func TestEncoderObjectTagOmitEmptyEmpty(t *testing.T) { + bytes, err := velocypack.Marshal(struct { + Name string `json:"name,omitempty"` + A bool `json:"field9,omitempty"` + D float64 `json:"field7,omitempty"` + I int `json:"field8,omitempty"` + }{ + Name: "", + A: false, + D: 0.0, + I: 0, + }) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + ASSERT_EQ(s.Type(), velocypack.Object, t) + ASSERT_TRUE(s.IsEmptyObject(), t) + ASSERT_EQ(`{}`, mustString(s.JSONString()), t) +} + +func TestEncoderObjectTagOmitFields(t *testing.T) { + bytes, err := velocypack.Marshal(struct { + Name string `json:"name,omitempty"` + A bool `json:"field9,omitempty"` + D float64 `json:"-"` + I int `json:"-,"` + }{ + Name: "Jan", + A: true, + D: 123.456, + I: 789, + }) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + ASSERT_EQ(s.Type(), velocypack.Object, t) + ASSERT_FALSE(s.IsEmptyObject(), t) + ASSERT_EQ(`{"-":789,"field9":true,"name":"Jan"}`, mustString(s.JSONString()), t) +} + +func TestEncoderObjectNestedStruct(t *testing.T) { + bytes, err := velocypack.Marshal(struct { + Name string + Nested struct { + Foo int + } + A bool + D float64 + I int + }{ + Name: "Jan", + Nested: struct{ Foo int }{999}, + A: true, + D: 123.456, + I: 789, + }) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + ASSERT_EQ(s.Type(), velocypack.Object, t) + ASSERT_FALSE(s.IsEmptyObject(), t) + ASSERT_EQ(`{"A":true,"D":123.456,"I":789,"Name":"Jan","Nested":{"Foo":999}}`, mustString(s.JSONString()), t) +} + +func TestEncoderObjectNestedStructs(t *testing.T) { + bytes, err := velocypack.Marshal(struct { + Name string + Nested struct { + Foo int + Nested struct { + Foo bool + } + } + A bool + D float64 + I int + }{ + Name: "Jan", + Nested: struct { + Foo int + 
Nested struct{ Foo bool } + }{999, struct{ Foo bool }{true}}, + A: true, + D: 123.456, + I: 789, + }) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + ASSERT_EQ(s.Type(), velocypack.Object, t) + ASSERT_FALSE(s.IsEmptyObject(), t) + ASSERT_EQ(`{"A":true,"D":123.456,"I":789,"Name":"Jan","Nested":{"Foo":999,"Nested":{"Foo":true}}}`, mustString(s.JSONString()), t) +} + +func TestEncoderObjectNestedStructPtr(t *testing.T) { + bytes, err := velocypack.Marshal(struct { + Name string + Nested *struct { + Foo int + } + A bool + D float64 + I int + }{ + Name: "Jan", + Nested: &struct{ Foo int }{999}, + A: true, + D: 123.456, + I: 789, + }) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + ASSERT_EQ(s.Type(), velocypack.Object, t) + ASSERT_FALSE(s.IsEmptyObject(), t) + ASSERT_EQ(`{"A":true,"D":123.456,"I":789,"Name":"Jan","Nested":{"Foo":999}}`, mustString(s.JSONString()), t) +} + +func TestEncoderObjectNestedStructPtrNil(t *testing.T) { + bytes, err := velocypack.Marshal(struct { + Name string + Nested *struct { + Foo int + } + A bool + D float64 + I int + }{ + Name: "Jan", + Nested: nil, + A: true, + D: 123.456, + I: 789, + }) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + ASSERT_EQ(s.Type(), velocypack.Object, t) + ASSERT_FALSE(s.IsEmptyObject(), t) + ASSERT_EQ(`{"A":true,"D":123.456,"I":789,"Name":"Jan","Nested":null}`, mustString(s.JSONString()), t) +} + +func TestEncoderObjectNestedStructPtrNilOmitEmpty(t *testing.T) { + bytes, err := velocypack.Marshal(struct { + Name string + Nested *struct { + Foo int + } `json:",omitempty"` + A bool + D float64 + I int + }{ + Name: "Jan", + Nested: nil, + A: true, + D: 123.456, + I: 789, + }) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + ASSERT_EQ(s.Type(), velocypack.Object, t) + ASSERT_FALSE(s.IsEmptyObject(), t) + ASSERT_EQ(`{"A":true,"D":123.456,"I":789,"Name":"Jan"}`, mustString(s.JSONString()), t) +} + +func TestEncoderObjectNestedByteSlice(t *testing.T) { + bytes, err := 
velocypack.Marshal(struct { + Name string + Nested []byte + A bool + D float64 + I int + }{ + Name: "Jan", + Nested: []byte{1, 2, 3, 4, 5}, + A: true, + D: 123.456, + I: 789, + }) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + ASSERT_EQ(s.Type(), velocypack.Object, t) + ASSERT_FALSE(s.IsEmptyObject(), t) + ASSERT_EQ(`{"A":true,"D":123.456,"I":789,"Name":"Jan","Nested":"(non-representable type Binary)"}`, mustString(s.JSONString(velocypack.DumperOptions{UnsupportedTypeBehavior: velocypack.ConvertUnsupportedType})), t) +} + +func TestEncoderObjectNestedIntSlice(t *testing.T) { + bytes, err := velocypack.Marshal(struct { + Name string + Nested []int + A bool + D float64 + I int + }{ + Name: "Jan", + Nested: []int{1, 2, 3, 4, 5}, + A: true, + D: 123.456, + I: 789, + }) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + ASSERT_EQ(s.Type(), velocypack.Object, t) + ASSERT_FALSE(s.IsEmptyObject(), t) + ASSERT_EQ(`{"A":true,"D":123.456,"I":789,"Name":"Jan","Nested":[1,2,3,4,5]}`, mustString(s.JSONString()), t) +} + +func TestEncoderObjectNestedStringSlice(t *testing.T) { + bytes, err := velocypack.Marshal(struct { + Name string + Nested []string + A bool + D float64 + I int + }{ + Name: "Jan", + Nested: []string{"Aap", "Noot"}, + A: true, + D: 123.456, + I: 789, + }) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + ASSERT_EQ(s.Type(), velocypack.Object, t) + ASSERT_FALSE(s.IsEmptyObject(), t) + ASSERT_EQ(`{"A":true,"D":123.456,"I":789,"Name":"Jan","Nested":["Aap","Noot"]}`, mustString(s.JSONString()), t) +} + +func TestEncoderObjectNestedStringSliceEmpty(t *testing.T) { + bytes, err := velocypack.Marshal(struct { + Name string + Nested []string + A bool + D float64 + I int + }{ + Name: "Jan", + Nested: []string{}, + A: true, + D: 123.456, + I: 789, + }) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + ASSERT_EQ(s.Type(), velocypack.Object, t) + ASSERT_FALSE(s.IsEmptyObject(), t) + 
ASSERT_EQ(`{"A":true,"D":123.456,"I":789,"Name":"Jan","Nested":[]}`, mustString(s.JSONString()), t) +} + +func TestEncoderObjectNestedStringSliceNil(t *testing.T) { + bytes, err := velocypack.Marshal(struct { + Name string + Nested []string + A bool + D float64 + I int + }{ + Name: "Jan", + Nested: nil, + A: true, + D: 123.456, + I: 789, + }) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + ASSERT_EQ(s.Type(), velocypack.Object, t) + ASSERT_FALSE(s.IsEmptyObject(), t) + ASSERT_EQ(`{"A":true,"D":123.456,"I":789,"Name":"Jan","Nested":null}`, mustString(s.JSONString()), t) +} + +type Struct1 struct { + Field1 int + field2 int // Not exposed, must not be exported +} + +func TestEncoderObjectStruct1(t *testing.T) { + bytes, err := velocypack.Marshal(Struct1{ + Field1: 1, + field2: 2, + }) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + ASSERT_EQ(s.Type(), velocypack.Object, t) + ASSERT_FALSE(s.IsEmptyObject(), t) + ASSERT_EQ(`{"Field1":1}`, mustString(s.JSONString()), t) +} + +type Struct2 struct { + Field1 bool + Struct1 // Anonymous struct +} + +func TestEncoderObjectStruct2(t *testing.T) { + bytes, err := velocypack.Marshal(Struct2{ + Field1: true, + Struct1: Struct1{ + Field1: 101, + field2: 102, + }, + }) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + ASSERT_EQ(s.Type(), velocypack.Object, t) + ASSERT_FALSE(s.IsEmptyObject(), t) + ASSERT_EQ(`{"Field1":true}`, mustString(s.JSONString()), t) +} + +type Struct3 struct { + Struct1 // Anonymous struct + Field1 bool +} + +func TestEncoderObjectStruct3(t *testing.T) { + bytes, err := velocypack.Marshal(Struct3{ + Struct1: Struct1{ + Field1: 101, + field2: 102, + }, + Field1: true, + }) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + ASSERT_EQ(s.Type(), velocypack.Object, t) + ASSERT_FALSE(s.IsEmptyObject(), t) + ASSERT_EQ(`{"Field1":true}`, mustString(s.JSONString()), t) +} + +type Struct4 struct { + Field4 bool `json:"a"` + Struct5 +} + +type Struct5 struct { + Field5 int `json:"a"` +} + 
+func TestEncoderObjectStruct4(t *testing.T) { + bytes, err := velocypack.Marshal(Struct4{ + Field4: true, + Struct5: Struct5{ + Field5: 5, + }, + }) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + ASSERT_EQ(s.Type(), velocypack.Object, t) + ASSERT_FALSE(s.IsEmptyObject(), t) + ASSERT_EQ(`{"a":true}`, mustString(s.JSONString()), t) +} + +type Struct6 struct { + Field4 bool `json:"a6"` + Struct5 +} + +func TestEncoderObjectStruct6(t *testing.T) { + bytes, err := velocypack.Marshal(Struct6{ + Field4: true, + Struct5: Struct5{ + Field5: 5, + }, + }) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + ASSERT_EQ(s.Type(), velocypack.Object, t) + ASSERT_FALSE(s.IsEmptyObject(), t) + ASSERT_EQ(`{"a":5,"a6":true}`, mustString(s.JSONString()), t) +} + +func TestEncoderObjectStructPtr6(t *testing.T) { + bytes, err := velocypack.Marshal(&Struct6{ + Field4: true, + Struct5: Struct5{ + Field5: 5, + }, + }) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + ASSERT_EQ(s.Type(), velocypack.Object, t) + ASSERT_FALSE(s.IsEmptyObject(), t) + ASSERT_EQ(`{"a":5,"a6":true}`, mustString(s.JSONString()), t) +} + +type Struct7 struct { + B bool `json:"b,string"` + I int `json:"i,string"` + U uint `json:"u,string"` + F float64 `json:"f,string"` + S string `json:"s,string"` +} + +func TestEncoderObjectStruct7(t *testing.T) { + input := Struct7{ + B: true, + I: -77, + U: 211, + F: 3.2, + S: "Hello world", + } + bytes, err := velocypack.Marshal(input) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + ASSERT_EQ(s.Type(), velocypack.Object, t) + ASSERT_FALSE(s.IsEmptyObject(), t) + ASSERT_EQ(`{"b":"true","f":"3.2","i":"-77","s":"\"Hello world\"","u":"211"}`, mustString(s.JSONString()), t) + + goJSON, err := json.Marshal(input) + ASSERT_NIL(err, t) + ASSERT_EQ(`{"b":"true","i":"-77","u":"211","f":"3.2","s":"\"Hello world\""}`, string(goJSON), t) +} + +type Struct8 struct { + B bool `json:",string"` + I int `json:",string"` + U uint `json:",string"` + F float64 
`json:",string"` + S string `json:",string"` +} + +func TestEncoderObjectStruct8(t *testing.T) { + input := Struct8{ + B: true, + I: -77, + U: 211, + F: 3.2, + S: "Hello world", + } + bytes, err := velocypack.Marshal(input) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + ASSERT_EQ(s.Type(), velocypack.Object, t) + ASSERT_FALSE(s.IsEmptyObject(), t) + ASSERT_EQ(`{"B":"true","F":"3.2","I":"-77","S":"\"Hello world\"","U":"211"}`, mustString(s.JSONString()), t) + + goJSON, err := json.Marshal(input) + ASSERT_NIL(err, t) + ASSERT_EQ(`{"B":"true","I":"-77","U":"211","F":"3.2","S":"\"Hello world\""}`, string(goJSON), t) +} diff --git a/deps/github.com/arangodb/go-velocypack/test/encoder_primitive_test.go b/deps/github.com/arangodb/go-velocypack/test/encoder_primitive_test.go new file mode 100644 index 000000000..949efe29d --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/test/encoder_primitive_test.go @@ -0,0 +1,241 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "math" + "testing" + + velocypack "github.com/arangodb/go-velocypack" +) + +func TestEncoderPrimitiveAddNull(t *testing.T) { + bytes, err := velocypack.Marshal(nil) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + ASSERT_EQ(s.Type(), velocypack.Null, t) + ASSERT_TRUE(s.IsNull(), t) +} + +func TestEncoderPrimitiveAddBool(t *testing.T) { + tests := []bool{true, false} + for _, test := range tests { + bytes, err := velocypack.Marshal(test) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + ASSERT_TRUE(s.IsBool(), t) + if test { + ASSERT_TRUE(s.IsTrue(), t) + ASSERT_FALSE(s.IsFalse(), t) + } else { + ASSERT_FALSE(s.IsTrue(), t) + ASSERT_TRUE(s.IsFalse(), t) + } + } +} + +func TestEncoderPrimitiveAddDoubleFloat32(t *testing.T) { + tests := []float32{10.4, -6, 0.0, -999999999, 24643783456252.4545345, math.MaxFloat32, -math.MaxFloat32} + for _, test := range tests { + bytes, err := velocypack.Marshal(test) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + ASSERT_TRUE(s.IsDouble(), t) + ASSERT_DOUBLE_EQ(float64(test), mustDouble(s.GetDouble()), t) + } +} + +func TestEncoderPrimitiveAddDoubleFloat64(t *testing.T) { + tests := []float64{10.4, -6, 0.0, -999999999, 24643783456252.4545345, math.MaxFloat64, -math.MaxFloat64} + for _, test := range tests { + bytes, err := velocypack.Marshal(test) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + ASSERT_TRUE(s.IsDouble(), t) + ASSERT_DOUBLE_EQ(test, mustDouble(s.GetDouble()), t) + } +} + +func TestEncoderPrimitiveAddInt(t *testing.T) { + tests := []int{10, -7, -34, 344366, math.MaxInt32, 233224, math.MinInt32} + for _, test := range tests { + bytes, err := velocypack.Marshal(test) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + ASSERT_TRUE(s.IsInt(), t) + ASSERT_EQ(int64(test), mustInt(s.GetInt()), t) + } +} + +func TestEncoderPrimitiveAddInt8(t *testing.T) { + tests := 
[]int8{10, -7, -34, math.MinInt8, math.MaxInt8} + for _, test := range tests { + bytes, err := velocypack.Marshal(test) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + ASSERT_TRUE(s.IsInt(), t) + ASSERT_EQ(int64(test), mustInt(s.GetInt()), t) + } +} + +func TestEncoderPrimitiveAddInt16(t *testing.T) { + tests := []int16{10, -7, -34, math.MinInt16, math.MaxInt16} + for _, test := range tests { + bytes, err := velocypack.Marshal(test) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + ASSERT_TRUE(s.IsInt(), t) + ASSERT_EQ(int64(test), mustInt(s.GetInt()), t) + } +} + +func TestEncoderPrimitiveAddInt32(t *testing.T) { + tests := []int32{10, -7, -34, math.MinInt32, math.MaxInt32} + for _, test := range tests { + bytes, err := velocypack.Marshal(test) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + ASSERT_TRUE(s.IsInt(), t) + ASSERT_EQ(int64(test), mustInt(s.GetInt()), t) + } +} + +func TestEncoderPrimitiveAddInt64(t *testing.T) { + tests := []int64{10, -7, -34, math.MinInt64, math.MaxInt64} + for _, test := range tests { + bytes, err := velocypack.Marshal(test) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + ASSERT_TRUE(s.IsInt(), t) + ASSERT_EQ(int64(test), mustInt(s.GetInt()), t) + } +} + +func TestEncoderPrimitiveAddUInt(t *testing.T) { + tests := []uint{10, 34, math.MaxUint32} + for _, test := range tests { + bytes, err := velocypack.Marshal(test) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + ASSERT_TRUE(s.IsUInt(), t) + ASSERT_EQ(uint64(test), mustUInt(s.GetUInt()), t) + } +} + +func TestEncoderPrimitiveAddUInt8(t *testing.T) { + tests := []uint8{10, 34, math.MaxUint8} + for _, test := range tests { + bytes, err := velocypack.Marshal(test) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + ASSERT_TRUE(s.IsUInt(), t) + ASSERT_EQ(uint64(test), mustUInt(s.GetUInt()), t) + } +} + +func TestEncoderPrimitiveAddUInt16(t *testing.T) { + tests := []uint16{10, 34, math.MaxUint16} + for _, test := range tests { + bytes, err := 
velocypack.Marshal(test) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + ASSERT_TRUE(s.IsUInt(), t) + ASSERT_EQ(uint64(test), mustUInt(s.GetUInt()), t) + } +} + +func TestEncoderPrimitiveAddUInt32(t *testing.T) { + tests := []uint32{10, 34, 56345344, math.MaxUint32} + for _, test := range tests { + bytes, err := velocypack.Marshal(test) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + ASSERT_TRUE(s.IsUInt(), t) + ASSERT_EQ(uint64(test), mustUInt(s.GetUInt()), t) + } +} + +func TestEncoderPrimitiveAddUInt64(t *testing.T) { + tests := []uint64{10, 34, 636346346345342355, 0, math.MaxUint64} + for _, test := range tests { + bytes, err := velocypack.Marshal(test) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + if test == 0 { + ASSERT_TRUE(s.IsSmallInt(), t) + } else { + ASSERT_TRUE(s.IsUInt(), t) + } + ASSERT_EQ(uint64(test), mustUInt(s.GetUInt()), t) + } +} + +func TestEncoderPrimitiveAddSmallInt(t *testing.T) { + tests := []int{-6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 9} + for _, test := range tests { + bytes, err := velocypack.Marshal(test) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + ASSERT_TRUE(s.IsSmallInt(), t) + ASSERT_EQ(int64(test), mustInt(s.GetInt()), t) + } +} + +func TestEncoderPrimitiveAddString(t *testing.T) { + tests := []string{"", "foo", "你好,世界", "\t\n\x00", "Some space and stuff"} + for _, test := range tests { + bytes, err := velocypack.Marshal(test) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + ASSERT_TRUE(s.IsString(), t) + ASSERT_EQ(test, mustString(s.GetString()), t) + } +} + +func TestEncoderPrimitiveAddBinary(t *testing.T) { + tests := [][]byte{[]byte{1, 2, 3}, []byte{}, []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 12, 13, 14, 15, 16, 17, 18, 19, 20}} + for _, test := range tests { + bytes, err := velocypack.Marshal(test) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + ASSERT_EQ(s.Type(), velocypack.Binary, t) + ASSERT_TRUE(s.IsBinary(), t) + ASSERT_EQ(test, mustBytes(s.GetBinary()), 
t) + } +} diff --git a/deps/github.com/arangodb/go-velocypack/test/encoder_slice_test.go b/deps/github.com/arangodb/go-velocypack/test/encoder_slice_test.go new file mode 100644 index 000000000..dfcd2397b --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/test/encoder_slice_test.go @@ -0,0 +1,147 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "testing" + + velocypack "github.com/arangodb/go-velocypack" +) + +func TestEncoderArrayEmptySlice(t *testing.T) { + bytes, err := velocypack.Marshal([]struct{}{}) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + ASSERT_EQ(s.Type(), velocypack.Array, t) + ASSERT_TRUE(s.IsEmptyArray(), t) + ASSERT_EQ(`[]`, mustString(s.JSONString()), t) +} + +func TestEncoderArrayByteSlice(t *testing.T) { + bytes, err := velocypack.Marshal([]byte{1, 2, 3, 4, 5}) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + ASSERT_EQ(s.Type(), velocypack.Binary, t) + ASSERT_TRUE(s.IsBinary(), t) + ASSERT_EQ(`null`, mustString(s.JSONString()), t) // Dumper does not support Binary data + ASSERT_EQ(velocypack.ValueLength(5), mustLength(s.GetBinaryLength()), t) +} + +func TestEncoderArrayBoolSlice(t *testing.T) { + bytes, err := velocypack.Marshal([]bool{true, false, false, true}) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + 
+ ASSERT_EQ(s.Type(), velocypack.Array, t) + ASSERT_TRUE(s.IsArray(), t) + ASSERT_EQ(`[true,false,false,true]`, mustString(s.JSONString()), t) +} + +func TestEncoderArrayIntSlice(t *testing.T) { + bytes, err := velocypack.Marshal([]int{1, 2, 3, -4, 5, 6, 100000}) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + ASSERT_EQ(s.Type(), velocypack.Array, t) + ASSERT_TRUE(s.IsArray(), t) + ASSERT_EQ(`[1,2,3,-4,5,6,100000]`, mustString(s.JSONString()), t) +} + +func TestEncoderArrayUIntSlice(t *testing.T) { + bytes, err := velocypack.Marshal([]uint{1, 2, 3, 4, 5, 6, 100000}) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + ASSERT_EQ(s.Type(), velocypack.Array, t) + ASSERT_TRUE(s.IsArray(), t) + ASSERT_EQ(`[1,2,3,4,5,6,100000]`, mustString(s.JSONString()), t) +} + +func TestEncoderArrayFloat32Slice(t *testing.T) { + bytes, err := velocypack.Marshal([]float32{0.0, -1.5, 66, 45}) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + ASSERT_EQ(s.Type(), velocypack.Array, t) + ASSERT_TRUE(s.IsArray(), t) + ASSERT_EQ(`[0,-1.5,66,45]`, mustString(s.JSONString()), t) +} + +func TestEncoderArrayFloat64Slice(t *testing.T) { + bytes, err := velocypack.Marshal([]float64{0.0, -1.5, 6.23, 45e+10}) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + ASSERT_EQ(s.Type(), velocypack.Array, t) + ASSERT_TRUE(s.IsArray(), t) + ASSERT_EQ(`[0,-1.5,6.23,4.5e+11]`, mustString(s.JSONString()), t) +} + +func TestEncoderArrayStructSlice(t *testing.T) { + bytes, err := velocypack.Marshal([]Struct1{ + Struct1{Field1: 1, field2: 2}, + Struct1{Field1: 10, field2: 200}, + Struct1{Field1: 100, field2: 200}, + }) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + t.Log(s.String()) + ASSERT_EQ(s.Type(), velocypack.Array, t) + ASSERT_TRUE(s.IsArray(), t) + ASSERT_EQ(`[{"Field1":1},{"Field1":10},{"Field1":100}]`, mustString(s.JSONString()), t) +} + +func TestEncoderArrayStructPtrSlice(t *testing.T) { + bytes, err := velocypack.Marshal([]*Struct1{ + &Struct1{Field1: 1, field2: 2}, + 
nil, + &Struct1{Field1: 10, field2: 200}, + &Struct1{Field1: 100, field2: 200}, + nil, + }) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + t.Log(s.String()) + ASSERT_EQ(s.Type(), velocypack.Array, t) + ASSERT_TRUE(s.IsArray(), t) + ASSERT_EQ(`[{"Field1":1},null,{"Field1":10},{"Field1":100},null]`, mustString(s.JSONString()), t) +} + +func TestEncoderArrayNestedSlice(t *testing.T) { + bytes, err := velocypack.Marshal([][]Struct1{ + []Struct1{Struct1{Field1: 1, field2: 2}, Struct1{Field1: 3, field2: 4}}, + []Struct1{Struct1{Field1: 10, field2: 200}}, + []Struct1{Struct1{Field1: 100, field2: 200}}, + }) + ASSERT_NIL(err, t) + s := velocypack.Slice(bytes) + + t.Log(s.String()) + ASSERT_EQ(s.Type(), velocypack.Array, t) + ASSERT_TRUE(s.IsArray(), t) + ASSERT_EQ(`[[{"Field1":1},{"Field1":3}],[{"Field1":10}],[{"Field1":100}]]`, mustString(s.JSONString()), t) +} diff --git a/deps/github.com/arangodb/go-velocypack/test/encoder_writer_test.go b/deps/github.com/arangodb/go-velocypack/test/encoder_writer_test.go new file mode 100644 index 000000000..7970f5689 --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/test/encoder_writer_test.go @@ -0,0 +1,95 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "bytes" + "testing" + + velocypack "github.com/arangodb/go-velocypack" +) + +func TestEncoderWriterSmall(t *testing.T) { + var buf bytes.Buffer + e := velocypack.NewEncoder(&buf) + + must(e.Encode(nil)) + must(e.Encode(true)) + + r := bytes.NewReader(buf.Bytes()) + d := velocypack.NewDecoder(r) + + var v1 interface{} + must(d.Decode(&v1)) + + var v2 bool + must(d.Decode(&v2)) + + ASSERT_EQ(v1, nil, t) + ASSERT_EQ(v2, true, t) +} + +func TestEncoderWriterLarge(t *testing.T) { + testX := func(x int) string { + result := "" + for i := 0; i < x; i++ { + result = result + "-foo-" + } + return result + } + var buf bytes.Buffer + e := velocypack.NewEncoder(&buf) + for i := 0; i < 1000; i++ { + must(e.Encode(testX(i))) + } + r := bytes.NewReader(buf.Bytes()) + d := velocypack.NewDecoder(r) + + for i := 0; i < 1000; i++ { + var v string + must(d.Decode(&v)) + ASSERT_EQ(v, testX(i), t) + } +} + +func TestEncoderWriterStruct1(t *testing.T) { + var buf bytes.Buffer + e := velocypack.NewEncoder(&buf) + for i := 0; i < 1000; i++ { + input := Struct1{ + Field1: i, + } + must(e.Encode(input)) + } + r := bytes.NewReader(buf.Bytes()) + d := velocypack.NewDecoder(r) + + for i := 0; i < 1000; i++ { + var v Struct1 + must(d.Decode(&v)) + expected := Struct1{ + Field1: i, + } + ASSERT_EQ(v, expected, t) + } +} diff --git a/deps/github.com/arangodb/go-velocypack/test/must.go b/deps/github.com/arangodb/go-velocypack/test/must.go new file mode 100644 index 000000000..e66b429fb --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/test/must.go @@ -0,0 +1,119 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "time" + + velocypack "github.com/arangodb/go-velocypack" +) + +func must(err error) { + if err != nil { + panic(err) + } +} + +func mustArrayIterator(v *velocypack.ArrayIterator, err error) *velocypack.ArrayIterator { + if err != nil { + panic(err) + } + return v +} + +func mustBool(v bool, err error) bool { + if err != nil { + panic(err) + } + return v +} + +func mustBytes(v []byte, err error) []byte { + if err != nil { + panic(err) + } + return v +} + +func mustDouble(v float64, err error) float64 { + if err != nil { + panic(err) + } + return v +} + +func mustInt(v int64, err error) int64 { + if err != nil { + panic(err) + } + return v +} + +func mustGoInt(v int, err error) int { + if err != nil { + panic(err) + } + return v +} + +func mustLength(v velocypack.ValueLength, err error) velocypack.ValueLength { + if err != nil { + panic(err) + } + return v +} + +func mustObjectIterator(v *velocypack.ObjectIterator, err error) *velocypack.ObjectIterator { + if err != nil { + panic(err) + } + return v +} + +func mustSlice(v velocypack.Slice, err error) velocypack.Slice { + if err != nil { + panic(err) + } + return v +} + +func mustString(v string, err error) string { + if err != nil { + panic(err) + } + return v +} + +func mustTime(v time.Time, err error) time.Time { + if err != nil { + panic(err) + } + return v +} + +func mustUInt(v uint64, err error) uint64 { + if err != nil { + panic(err) + } + return v +} diff --git 
a/deps/github.com/arangodb/go-velocypack/test/object_iterator_test.go b/deps/github.com/arangodb/go-velocypack/test/object_iterator_test.go new file mode 100644 index 000000000..a799f8087 --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/test/object_iterator_test.go @@ -0,0 +1,82 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "fmt" + "sort" + "strings" + "testing" + + velocypack "github.com/arangodb/go-velocypack" +) + +func TestObjectIteratorInvalidSlice(t *testing.T) { + tests := []velocypack.Slice{ + velocypack.NullSlice(), + velocypack.TrueSlice(), + velocypack.FalseSlice(), + mustSlice(velocypack.ParseJSONFromString("1")), + mustSlice(velocypack.ParseJSONFromString("7.7")), + mustSlice(velocypack.ParseJSONFromString("\"foo\"")), + mustSlice(velocypack.ParseJSONFromString("[]")), + mustSlice(velocypack.ParseJSONFromString("[]", velocypack.ParserOptions{BuildUnindexedArrays: true})), + } + for _, test := range tests { + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsInvalidType, t)(velocypack.NewObjectIterator(test)) + } +} + +func TestObjectIteratorValues(t *testing.T) { + tests := []map[string]string{ + map[string]string{}, + map[string]string{"foo": "1"}, + } + for _, unindexed := range []bool{true, false} { + for _, test := range tests { + var 
keyValuePairs []string + for k, v := range test { + keyValuePairs = append(keyValuePairs, fmt.Sprintf(`"%s":%s`, k, v)) + } + json := "{" + strings.Join(keyValuePairs, ",") + "}" + sort.Strings(keyValuePairs) + s := mustSlice(velocypack.ParseJSONFromString(json, velocypack.ParserOptions{BuildUnindexedObjects: unindexed})) + it, err := velocypack.NewObjectIterator(s) + if err != nil { + t.Errorf("Failed to create ObjectIterator for '%s': %v", json, err) + } else { + i := 0 + for it.IsValid() { + k := mustSlice(it.Key(true)) + v := mustSlice(it.Value()) + kv := fmt.Sprintf(`"%s":%s`, mustString(k.GetString()), mustString(v.JSONString())) + if kv != keyValuePairs[i] { + t.Errorf("Element %d is invalid; got '%s', expected '%s'", i, kv, keyValuePairs[i]) + } + must(it.Next()) + i++ + } + } + } + } +} diff --git a/deps/github.com/arangodb/go-velocypack/test/parser_array_test.go b/deps/github.com/arangodb/go-velocypack/test/parser_array_test.go new file mode 100644 index 000000000..19a5573bb --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/test/parser_array_test.go @@ -0,0 +1,45 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "testing" + + velocypack "github.com/arangodb/go-velocypack" +) + +func TestParserArray(t *testing.T) { + tests := map[string][]interface{}{ + `[]`: []interface{}{}, + `[1,2,3]`: []interface{}{1, 2, 3}, + `[1,[2,"foo"],3]`: []interface{}{1, []interface{}{2, "foo"}, 3}, + `[1,[[],[]],[[3]]]`: []interface{}{1, []interface{}{[]interface{}{}, []interface{}{}}, []interface{}{[]interface{}{3}}}, + } + for test, expected := range tests { + slice := mustSlice(velocypack.ParseJSONFromString(test)) + + var v interface{} + must(velocypack.Unmarshal(slice, &v)) + ASSERT_EQ(v, expected, t) + } +} diff --git a/deps/github.com/arangodb/go-velocypack/test/parser_invalid_test.go b/deps/github.com/arangodb/go-velocypack/test/parser_invalid_test.go new file mode 100644 index 000000000..80561a862 --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/test/parser_invalid_test.go @@ -0,0 +1,48 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "testing" + + velocypack "github.com/arangodb/go-velocypack" +) + +func TestParserGarbage(t *testing.T) { + tests := map[string]func(error) bool{ + `foo`: velocypack.IsParse, + `'quoted "foo"'`: velocypack.IsParse, + `x`: velocypack.IsParse, + `!`: velocypack.IsParse, + `/`: velocypack.IsParse, + `-`: velocypack.IsParse, + `--11`: velocypack.IsParse, + `[[}`: velocypack.IsParse, + `5.6.7`: velocypack.IsParse, + `[`: velocypack.IsBuilderNotClosed, + `{`: velocypack.IsBuilderNotClosed, + } + for test, errFunc := range tests { + ASSERT_VELOCYPACK_EXCEPTION(errFunc, t)(velocypack.ParseJSONFromString(test)) + } +} diff --git a/deps/github.com/arangodb/go-velocypack/test/parser_object_test.go b/deps/github.com/arangodb/go-velocypack/test/parser_object_test.go new file mode 100644 index 000000000..3884b3b95 --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/test/parser_object_test.go @@ -0,0 +1,50 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "testing" + + velocypack "github.com/arangodb/go-velocypack" +) + +func TestParserObject(t *testing.T) { + tests := map[string]map[string]interface{}{ + `{}`: map[string]interface{}{}, + `{"a":5}`: map[string]interface{}{"a": 5}, + `{"b":true}`: map[string]interface{}{"b": true}, + `{"c":false}`: map[string]interface{}{"c": false}, + `{"d":null}`: map[string]interface{}{"d": nil}, + `{"e":"foo"}`: map[string]interface{}{"e": "foo"}, + `{"f":"foo","g":123}`: map[string]interface{}{"f": "foo", "g": uint64(123)}, + `{"h":"foo","i":{}}`: map[string]interface{}{"h": "foo", "i": map[string]interface{}{}}, + `{"j":{"k":false, "l":[]}}`: map[string]interface{}{"j": map[string]interface{}{"k": false, "l": []interface{}{}}}, + } + for test, expected := range tests { + slice := mustSlice(velocypack.ParseJSONFromString(test)) + + var v interface{} + must(velocypack.Unmarshal(slice, &v)) + ASSERT_EQ(v, expected, t) + } +} diff --git a/deps/github.com/arangodb/go-velocypack/test/parser_primitive_test.go b/deps/github.com/arangodb/go-velocypack/test/parser_primitive_test.go new file mode 100644 index 000000000..d6ab2fb08 --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/test/parser_primitive_test.go @@ -0,0 +1,115 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "encoding/json" + "math" + "strconv" + "testing" + + velocypack "github.com/arangodb/go-velocypack" +) + +func TestParserNull(t *testing.T) { + s := mustSlice(velocypack.ParseJSONFromString("null")) + + ASSERT_TRUE(s.IsNull(), t) +} + +func TestParserWhitespace(t *testing.T) { + s := mustSlice(velocypack.ParseJSONFromString(" ")) + + ASSERT_TRUE(s.IsNone(), t) +} + +func TestParserFalse(t *testing.T) { + s := mustSlice(velocypack.ParseJSONFromString("false")) + + ASSERT_TRUE(s.IsBool(), t) + ASSERT_EQ(false, mustBool(s.GetBool()), t) +} + +func TestParserTrue(t *testing.T) { + s := mustSlice(velocypack.ParseJSONFromString("true")) + + ASSERT_TRUE(s.IsBool(), t) + ASSERT_EQ(true, mustBool(s.GetBool()), t) +} + +func TestParserSmallInt(t *testing.T) { + tests := []int{-6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9} + for _, test := range tests { + s := mustSlice(velocypack.ParseJSONFromString(strconv.Itoa(test))) + + ASSERT_EQ(velocypack.SmallInt, s.Type(), t) + ASSERT_EQ(int64(test), mustInt(s.GetInt()), t) + } +} + +func TestParserInt(t *testing.T) { + tests := []int{-7, -10, -23, -456, math.MinInt32} + for _, test := range tests { + s := mustSlice(velocypack.ParseJSONFromString(strconv.Itoa(test))) + + ASSERT_EQ(velocypack.Int, s.Type(), t) + ASSERT_EQ(int64(test), mustInt(s.GetInt()), t) + } +} + +func TestParserUInt(t *testing.T) { + tests := []int{10, 23, 456, math.MaxInt32} + for _, test := range tests { + s := mustSlice(velocypack.ParseJSONFromString(strconv.Itoa(test))) + + ASSERT_EQ(velocypack.UInt, s.Type(), t) + ASSERT_EQ(uint64(test), mustUInt(s.GetUInt()), t) + } +} + +func TestParserDouble(t *testing.T) { + tests := []float64{10.77, 23.88, 456.01, 10e45, -9223372036854775809 /*MinInt64-1*/, 18446744073709551616 /*MaxUint64+1*/} + jsons := []string{"10.77", "23.88", "456.01", "10e45", "-9223372036854775809", 
"18446744073709551616"} + for i, test := range tests { + s := mustSlice(velocypack.ParseJSONFromString(jsons[i])) + + ASSERT_EQ(velocypack.Double, s.Type(), t) + ASSERT_DOUBLE_EQ(test, mustDouble(s.GetDouble()), t) + } +} + +func TestParserString(t *testing.T) { + tests := []string{ + `foo`, + `'quoted "foo"'`, + ``, + } + for _, test := range tests { + j, err := json.Marshal(test) + ASSERT_NIL(err, t) + s := mustSlice(velocypack.ParseJSONFromString(string(j))) + + ASSERT_EQ(velocypack.String, s.Type(), t) + ASSERT_EQ(test, mustString(s.GetString()), t) + } +} diff --git a/deps/github.com/arangodb/go-velocypack/test/runtime/runtime_test.go b/deps/github.com/arangodb/go-velocypack/test/runtime/runtime_test.go new file mode 100644 index 000000000..d92da30a1 --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/test/runtime/runtime_test.go @@ -0,0 +1,45 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package runtime + +import ( + "encoding/binary" + "testing" + "unsafe" +) + +const intSize int = int(unsafe.Sizeof(0)) + +func detectByteOrder() binary.ByteOrder { + i := int(0x1) + bs := (*[intSize]byte)(unsafe.Pointer(&i)) + if bs[0] == 0 { + return binary.BigEndian + } + return binary.LittleEndian +} + +func TestShowRuntime(t *testing.T) { + t.Logf("Sizeof(int): %d", unsafe.Sizeof(int(0))) + t.Logf("Byte order: %s", detectByteOrder()) +} diff --git a/deps/github.com/arangodb/go-velocypack/test/slice_array_test.go b/deps/github.com/arangodb/go-velocypack/test/slice_array_test.go new file mode 100644 index 000000000..8236a4b27 --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/test/slice_array_test.go @@ -0,0 +1,323 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "math" + "testing" + + velocypack "github.com/arangodb/go-velocypack" +) + +func TestSliceArrayEmpty(t *testing.T) { + slice := velocypack.Slice{0x01} + assertEqualFromReader(t, slice) + + ASSERT_EQ(velocypack.Array, slice.Type(), t) + ASSERT_TRUE(slice.IsArray(), t) + ASSERT_TRUE(slice.IsEmptyArray(), t) + ASSERT_EQ(velocypack.ValueLength(len(slice)), mustLength(slice.ByteSize()), t) + ASSERT_EQ(velocypack.ValueLength(0), mustLength(slice.Length()), t) + + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsIndexOutOfBounds, t)(slice.At(0)) +} + +func TestSliceArrayCases1(t *testing.T) { + slice := velocypack.Slice{0x02, 0x05, 0x31, 0x32, 0x33} + assertEqualFromReader(t, slice) + + ASSERT_EQ(velocypack.Array, slice.Type(), t) + ASSERT_TRUE(slice.IsArray(), t) + ASSERT_FALSE(slice.IsEmptyArray(), t) + ASSERT_EQ(velocypack.ValueLength(len(slice)), mustLength(slice.ByteSize()), t) + ASSERT_EQ(velocypack.ValueLength(3), mustLength(slice.Length()), t) + ss := mustSlice(slice.At(0)) + ASSERT_TRUE(ss.IsSmallInt(), t) + ASSERT_EQ(int64(1), mustInt(ss.GetInt()), t) + + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsIndexOutOfBounds, t)(slice.At(4)) +} + +func TestSliceArrayCases2(t *testing.T) { + slice := velocypack.Slice{0x02, 0x06, 0x00, 0x31, 0x32, 0x33} + assertEqualFromReader(t, slice) + + ASSERT_EQ(velocypack.Array, slice.Type(), t) + ASSERT_TRUE(slice.IsArray(), t) + ASSERT_FALSE(slice.IsEmptyArray(), t) + ASSERT_EQ(velocypack.ValueLength(len(slice)), mustLength(slice.ByteSize()), t) + ASSERT_EQ(velocypack.ValueLength(3), mustLength(slice.Length()), t) + ss := mustSlice(slice.At(0)) + ASSERT_TRUE(ss.IsSmallInt(), t) + ASSERT_EQ(int64(1), mustInt(ss.GetInt()), t) +} + +func TestSliceArrayCases3(t *testing.T) { + slice := velocypack.Slice{0x02, 0x08, 0x00, 0x00, 0x00, 0x31, 0x32, 0x33} + assertEqualFromReader(t, slice) + + ASSERT_EQ(velocypack.Array, 
slice.Type(), t) + ASSERT_TRUE(slice.IsArray(), t) + ASSERT_FALSE(slice.IsEmptyArray(), t) + ASSERT_EQ(velocypack.ValueLength(len(slice)), mustLength(slice.ByteSize()), t) + ASSERT_EQ(velocypack.ValueLength(3), mustLength(slice.Length()), t) + ss := mustSlice(slice.At(0)) + ASSERT_TRUE(ss.IsSmallInt(), t) + ASSERT_EQ(int64(1), mustInt(ss.GetInt()), t) +} + +func TestSliceArrayCases4(t *testing.T) { + slice := velocypack.Slice{0x02, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x31, 0x32, 0x33} + assertEqualFromReader(t, slice) + + ASSERT_EQ(velocypack.Array, slice.Type(), t) + ASSERT_TRUE(slice.IsArray(), t) + ASSERT_FALSE(slice.IsEmptyArray(), t) + ASSERT_EQ(velocypack.ValueLength(len(slice)), mustLength(slice.ByteSize()), t) + ASSERT_EQ(velocypack.ValueLength(3), mustLength(slice.Length()), t) + ss := mustSlice(slice.At(0)) + ASSERT_TRUE(ss.IsSmallInt(), t) + ASSERT_EQ(int64(1), mustInt(ss.GetInt()), t) +} + +func TestSliceArrayCases5(t *testing.T) { + slice := velocypack.Slice{0x03, 0x06, 0x00, 0x31, 0x32, 0x33} + assertEqualFromReader(t, slice) + + ASSERT_EQ(velocypack.Array, slice.Type(), t) + ASSERT_TRUE(slice.IsArray(), t) + ASSERT_FALSE(slice.IsEmptyArray(), t) + ASSERT_EQ(velocypack.ValueLength(len(slice)), mustLength(slice.ByteSize()), t) + ASSERT_EQ(velocypack.ValueLength(3), mustLength(slice.Length()), t) + ss := mustSlice(slice.At(0)) + ASSERT_TRUE(ss.IsSmallInt(), t) + ASSERT_EQ(int64(1), mustInt(ss.GetInt()), t) +} + +func TestSliceArrayCases6(t *testing.T) { + slice := velocypack.Slice{0x03, 0x08, 0x00, 0x00, 0x00, 0x31, 0x32, 0x33} + assertEqualFromReader(t, slice) + + ASSERT_EQ(velocypack.Array, slice.Type(), t) + ASSERT_TRUE(slice.IsArray(), t) + ASSERT_FALSE(slice.IsEmptyArray(), t) + ASSERT_EQ(velocypack.ValueLength(len(slice)), mustLength(slice.ByteSize()), t) + ASSERT_EQ(velocypack.ValueLength(3), mustLength(slice.Length()), t) + ss := mustSlice(slice.At(0)) + ASSERT_TRUE(ss.IsSmallInt(), t) + ASSERT_EQ(int64(1), mustInt(ss.GetInt()), t) 
+} + +func TestSliceArrayCases7(t *testing.T) { + slice := velocypack.Slice{0x03, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x31, 0x32, 0x33} + assertEqualFromReader(t, slice) + + ASSERT_EQ(velocypack.Array, slice.Type(), t) + ASSERT_TRUE(slice.IsArray(), t) + ASSERT_FALSE(slice.IsEmptyArray(), t) + ASSERT_EQ(velocypack.ValueLength(len(slice)), mustLength(slice.ByteSize()), t) + ASSERT_EQ(velocypack.ValueLength(3), mustLength(slice.Length()), t) + ss := mustSlice(slice.At(0)) + ASSERT_TRUE(ss.IsSmallInt(), t) + ASSERT_EQ(int64(1), mustInt(ss.GetInt()), t) +} + +func TestSliceArrayCases8(t *testing.T) { + slice := velocypack.Slice{0x04, 0x08, 0x00, 0x00, 0x00, 0x31, 0x32, 0x33} + assertEqualFromReader(t, slice) + + ASSERT_EQ(velocypack.Array, slice.Type(), t) + ASSERT_TRUE(slice.IsArray(), t) + ASSERT_FALSE(slice.IsEmptyArray(), t) + ASSERT_EQ(velocypack.ValueLength(len(slice)), mustLength(slice.ByteSize()), t) + ASSERT_EQ(velocypack.ValueLength(3), mustLength(slice.Length()), t) + ss := mustSlice(slice.At(0)) + ASSERT_TRUE(ss.IsSmallInt(), t) + ASSERT_EQ(int64(1), mustInt(ss.GetInt()), t) +} + +func TestSliceArrayCases9(t *testing.T) { + slice := velocypack.Slice{0x04, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x31, 0x32, 0x33} + assertEqualFromReader(t, slice) + + ASSERT_EQ(velocypack.Array, slice.Type(), t) + ASSERT_TRUE(slice.IsArray(), t) + ASSERT_FALSE(slice.IsEmptyArray(), t) + ASSERT_EQ(velocypack.ValueLength(len(slice)), mustLength(slice.ByteSize()), t) + ASSERT_EQ(velocypack.ValueLength(3), mustLength(slice.Length()), t) + ss := mustSlice(slice.At(0)) + ASSERT_TRUE(ss.IsSmallInt(), t) + ASSERT_EQ(int64(1), mustInt(ss.GetInt()), t) +} + +func TestSliceArrayCases10(t *testing.T) { + slice := velocypack.Slice{0x05, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x31, 0x32, 0x33} + assertEqualFromReader(t, slice) + + ASSERT_EQ(velocypack.Array, slice.Type(), t) + ASSERT_TRUE(slice.IsArray(), t) + ASSERT_FALSE(slice.IsEmptyArray(), t) + 
ASSERT_EQ(velocypack.ValueLength(len(slice)), mustLength(slice.ByteSize()), t) + ASSERT_EQ(velocypack.ValueLength(3), mustLength(slice.Length()), t) + ss := mustSlice(slice.At(0)) + ASSERT_TRUE(ss.IsSmallInt(), t) + ASSERT_EQ(int64(1), mustInt(ss.GetInt()), t) +} + +func TestSliceArrayCases11(t *testing.T) { + slice := velocypack.Slice{0x06, 0x09, 0x03, 0x31, 0x32, 0x33, 0x03, 0x04, 0x05} + assertEqualFromReader(t, slice) + + ASSERT_EQ(velocypack.Array, slice.Type(), t) + ASSERT_TRUE(slice.IsArray(), t) + ASSERT_FALSE(slice.IsEmptyArray(), t) + ASSERT_EQ(velocypack.ValueLength(len(slice)), mustLength(slice.ByteSize()), t) + ASSERT_EQ(velocypack.ValueLength(3), mustLength(slice.Length()), t) + ss := mustSlice(slice.At(0)) + ASSERT_TRUE(ss.IsSmallInt(), t) + ASSERT_EQ(int64(1), mustInt(ss.GetInt()), t) +} + +func TestSliceArrayCases12(t *testing.T) { + slice := velocypack.Slice{0x06, 0x0b, 0x03, 0x00, 0x00, 0x31, 0x32, 0x33, 0x05, 0x06, 0x07} + assertEqualFromReader(t, slice) + + ASSERT_EQ(velocypack.Array, slice.Type(), t) + ASSERT_TRUE(slice.IsArray(), t) + ASSERT_FALSE(slice.IsEmptyArray(), t) + ASSERT_EQ(velocypack.ValueLength(len(slice)), mustLength(slice.ByteSize()), t) + ASSERT_EQ(velocypack.ValueLength(3), mustLength(slice.Length()), t) + ss := mustSlice(slice.At(0)) + ASSERT_TRUE(ss.IsSmallInt(), t) + ASSERT_EQ(int64(1), mustInt(ss.GetInt()), t) +} + +func TestSliceArrayCases13(t *testing.T) { + slice := velocypack.Slice{0x06, 0x0f, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x31, 0x32, 0x33, 0x09, 0x0a, 0x0b} + assertEqualFromReader(t, slice) + + ASSERT_EQ(velocypack.Array, slice.Type(), t) + ASSERT_TRUE(slice.IsArray(), t) + ASSERT_FALSE(slice.IsEmptyArray(), t) + ASSERT_EQ(velocypack.ValueLength(len(slice)), mustLength(slice.ByteSize()), t) + ASSERT_EQ(velocypack.ValueLength(3), mustLength(slice.Length()), t) + ss := mustSlice(slice.At(0)) + ASSERT_TRUE(ss.IsSmallInt(), t) + ASSERT_EQ(int64(1), mustInt(ss.GetInt()), t) +} + +func TestSliceArrayCases14(t 
*testing.T) { + slice := velocypack.Slice{0x07, 0x0e, 0x00, 0x03, 0x00, 0x31, 0x32, 0x33, 0x05, 0x00, 0x06, 0x00, 0x07, 0x00} + assertEqualFromReader(t, slice) + + ASSERT_EQ(velocypack.Array, slice.Type(), t) + ASSERT_TRUE(slice.IsArray(), t) + ASSERT_FALSE(slice.IsEmptyArray(), t) + ASSERT_EQ(velocypack.ValueLength(len(slice)), mustLength(slice.ByteSize()), t) + ASSERT_EQ(velocypack.ValueLength(3), mustLength(slice.Length()), t) + ss := mustSlice(slice.At(0)) + ASSERT_TRUE(ss.IsSmallInt(), t) + ASSERT_EQ(int64(1), mustInt(ss.GetInt()), t) +} + +func TestSliceArrayCases15(t *testing.T) { + slice := velocypack.Slice{0x07, 0x12, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x31, 0x32, 0x33, 0x09, 0x00, 0x0a, 0x00, 0x0b, 0x00} + assertEqualFromReader(t, slice) + + ASSERT_EQ(velocypack.Array, slice.Type(), t) + ASSERT_TRUE(slice.IsArray(), t) + ASSERT_FALSE(slice.IsEmptyArray(), t) + ASSERT_EQ(velocypack.ValueLength(len(slice)), mustLength(slice.ByteSize()), t) + ASSERT_EQ(velocypack.ValueLength(3), mustLength(slice.Length()), t) + ss := mustSlice(slice.At(0)) + ASSERT_TRUE(ss.IsSmallInt(), t) + ASSERT_EQ(int64(1), mustInt(ss.GetInt()), t) +} + +func TestSliceArrayCases16(t *testing.T) { + slice := velocypack.Slice{0x08, 0x18, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, + 0x00, 0x31, 0x32, 0x33, 0x09, 0x00, 0x00, 0x00, + 0x0a, 0x00, 0x00, 0x00, 0x0b, 0x00, 0x00, 0x00} + assertEqualFromReader(t, slice) + + ASSERT_EQ(velocypack.Array, slice.Type(), t) + ASSERT_TRUE(slice.IsArray(), t) + ASSERT_FALSE(slice.IsEmptyArray(), t) + ASSERT_EQ(velocypack.ValueLength(len(slice)), mustLength(slice.ByteSize()), t) + ASSERT_EQ(velocypack.ValueLength(3), mustLength(slice.Length()), t) + ss := mustSlice(slice.At(0)) + ASSERT_TRUE(ss.IsSmallInt(), t) + ASSERT_EQ(int64(1), mustInt(ss.GetInt()), t) +} + +func TestSliceArrayCases17(t *testing.T) { + slice := velocypack.Slice{0x09, 0x2c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x31, 0x32, 0x33, 0x09, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 
0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x0b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + assertEqualFromReader(t, slice) + + ASSERT_EQ(velocypack.Array, slice.Type(), t) + ASSERT_TRUE(slice.IsArray(), t) + ASSERT_FALSE(slice.IsEmptyArray(), t) + ASSERT_EQ(velocypack.ValueLength(len(slice)), mustLength(slice.ByteSize()), t) + ASSERT_EQ(velocypack.ValueLength(3), mustLength(slice.Length()), t) + ss := mustSlice(slice.At(0)) + ASSERT_TRUE(ss.IsSmallInt(), t) + ASSERT_EQ(int64(1), mustInt(ss.GetInt()), t) +} + +func TestSliceArrayCasesCompact(t *testing.T) { + slice := velocypack.Slice{0x13, 0x08, 0x30, 0x31, 0x32, 0x33, 0x34, 0x05} + assertEqualFromReader(t, slice) + + ASSERT_EQ(velocypack.Array, slice.Type(), t) + ASSERT_TRUE(slice.IsArray(), t) + ASSERT_FALSE(slice.IsEmptyArray(), t) + ASSERT_EQ(velocypack.ValueLength(len(slice)), mustLength(slice.ByteSize()), t) + ASSERT_EQ(velocypack.ValueLength(5), mustLength(slice.Length()), t) + ss := mustSlice(slice.At(0)) + ASSERT_TRUE(ss.IsSmallInt(), t) + ASSERT_EQ(int64(0), mustInt(ss.GetInt()), t) + + ss = mustSlice(slice.At(1)) + ASSERT_TRUE(ss.IsSmallInt(), t) + ASSERT_EQ(int64(1), mustInt(ss.GetInt()), t) + + ss = mustSlice(slice.At(4)) + ASSERT_TRUE(ss.IsSmallInt(), t) + ASSERT_EQ(int64(4), mustInt(ss.GetInt()), t) + + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsIndexOutOfBounds, t)(slice.At(5)) +} + +func TestSliceArrayAtInvalidType(t *testing.T) { + b := velocypack.Builder{} + must(b.AddValue(velocypack.NewUIntValue(math.MaxUint64))) + slice := mustSlice(b.Slice()) + + ASSERT_EQ(velocypack.UInt, slice.Type(), t) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsInvalidType, t)(slice.At(0)) +} diff --git a/deps/github.com/arangodb/go-velocypack/test/slice_binary_test.go b/deps/github.com/arangodb/go-velocypack/test/slice_binary_test.go new file mode 100644 index 000000000..cbcb8d91e --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/test/slice_binary_test.go 
@@ -0,0 +1,67 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "testing" + + velocypack "github.com/arangodb/go-velocypack" +) + +func TestSliceBinaryEmpty(t *testing.T) { + slice := velocypack.Slice{0xc0, 0x00} + assertEqualFromReader(t, slice) + + ASSERT_TRUE(slice.IsBinary(), t) + ASSERT_EQ([]byte{}, mustBytes(slice.GetBinary()), t) + ASSERT_EQ(velocypack.ValueLength(0), mustLength(slice.GetBinaryLength()), t) + ASSERT_EQ(velocypack.ValueLength(len(slice)), mustLength(slice.ByteSize()), t) +} + +func TestSliceBinarySomeValue(t *testing.T) { + slice := velocypack.Slice{0xc0, 0x05, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa} + assertEqualFromReader(t, slice) + + ASSERT_TRUE(slice.IsBinary(), t) + ASSERT_EQ([]byte{0xfe, 0xfd, 0xfc, 0xfb, 0xfa}, mustBytes(slice.GetBinary()), t) + ASSERT_EQ(velocypack.ValueLength(5), mustLength(slice.GetBinaryLength()), t) + ASSERT_EQ(velocypack.ValueLength(len(slice)), mustLength(slice.ByteSize()), t) +} + +func TestSliceBinaryWithNullBytes(t *testing.T) { + slice := velocypack.Slice{0xc0, 0x05, 0x01, 0x02, 0x00, 0x03, 0x00} + assertEqualFromReader(t, slice) + + ASSERT_TRUE(slice.IsBinary(), t) + ASSERT_EQ([]byte{0x01, 0x02, 0x00, 0x03, 0x00}, mustBytes(slice.GetBinary()), t) + ASSERT_EQ(velocypack.ValueLength(5), 
mustLength(slice.GetBinaryLength()), t) + ASSERT_EQ(velocypack.ValueLength(len(slice)), mustLength(slice.ByteSize()), t) +} + +func TestSliceBinaryNonBinary(t *testing.T) { + var slice velocypack.Slice + assertEqualFromReader(t, slice) + + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsInvalidType, t)(slice.GetBinary()) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsInvalidType, t)(slice.GetBinaryLength()) +} diff --git a/deps/github.com/arangodb/go-velocypack/test/slice_bool_test.go b/deps/github.com/arangodb/go-velocypack/test/slice_bool_test.go new file mode 100644 index 000000000..dd315edb2 --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/test/slice_bool_test.go @@ -0,0 +1,53 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "testing" + + velocypack "github.com/arangodb/go-velocypack" +) + +func TestSliceFalse(t *testing.T) { + slice := velocypack.Slice{0x19} + assertEqualFromReader(t, slice) + + ASSERT_EQ(velocypack.Bool, slice.Type(), t) + ASSERT_TRUE(slice.IsBool(), t) + ASSERT_TRUE(slice.IsFalse(), t) + ASSERT_FALSE(slice.IsTrue(), t) + ASSERT_EQ(velocypack.ValueLength(1), mustLength(slice.ByteSize()), t) + ASSERT_FALSE(mustBool(slice.GetBool()), t) +} + +func TestSliceTrue(t *testing.T) { + slice := velocypack.Slice{0x1a} + assertEqualFromReader(t, slice) + + ASSERT_EQ(velocypack.Bool, slice.Type(), t) + ASSERT_TRUE(slice.IsBool(), t) + ASSERT_FALSE(slice.IsFalse(), t) + ASSERT_TRUE(slice.IsTrue(), t) + ASSERT_EQ(velocypack.ValueLength(1), mustLength(slice.ByteSize()), t) + ASSERT_TRUE(mustBool(slice.GetBool()), t) +} diff --git a/deps/github.com/arangodb/go-velocypack/test/slice_custom_test.go b/deps/github.com/arangodb/go-velocypack/test/slice_custom_test.go new file mode 100644 index 000000000..a6f3c9d6c --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/test/slice_custom_test.go @@ -0,0 +1,58 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "testing" + + velocypack "github.com/arangodb/go-velocypack" +) + +func TestSliceCustomTypeByteSize(t *testing.T) { + tests := []velocypack.Slice{ + velocypack.Slice([]byte{0xf0, 0x00}), + velocypack.Slice([]byte{0xf1, 0x00, 0x00}), + velocypack.Slice([]byte{0xf2, 0x00, 0x00, 0x00, 0x00}), + velocypack.Slice([]byte{0xf3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}), + velocypack.Slice([]byte{0xf4, 0x03, 0x00, 0x00, 0x00}), + velocypack.Slice([]byte{0xf5, 0x02, 0x00, 0x00}), + velocypack.Slice([]byte{0xf6, 0x01, 0x00}), + velocypack.Slice([]byte{0xf7, 0x01, 0x00, 0x00}), + velocypack.Slice([]byte{0xf8, 0x02, 0x00, 0x00, 0x00}), + velocypack.Slice([]byte{0xf9, 0x03, 0x00, 0x00, 0x00, 0x00}), + velocypack.Slice([]byte{0xfa, 0x01, 0x00, 0x00, 0x00, 0x00}), + velocypack.Slice([]byte{0xfb, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00}), + velocypack.Slice([]byte{0xfc, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}), + velocypack.Slice([]byte{0xfd, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}), + velocypack.Slice([]byte{0xfe, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}), + velocypack.Slice([]byte{0xff, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}), + } + + for _, test := range tests { + assertEqualFromReader(t, test) + sz := mustLength(test.ByteSize()) + if sz != velocypack.ValueLength(len(test)) { + t.Errorf("Invalid ByteSize in '%s', expected %d, got %d", test, len(test), sz) + } + } +} diff --git a/deps/github.com/arangodb/go-velocypack/test/slice_double_test.go b/deps/github.com/arangodb/go-velocypack/test/slice_double_test.go new file mode 100644 index 000000000..d135558a5 --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/test/slice_double_test.go @@ -0,0 +1,55 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the 
"License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "encoding/binary" + "math" + "testing" + + velocypack "github.com/arangodb/go-velocypack" +) + +func TestSliceDouble(t *testing.T) { + slice := velocypack.Slice{0x1b, 1, 2, 3, 4, 5, 6, 7, 8} + value := 23.5 + binary.LittleEndian.PutUint64(slice[1:], math.Float64bits(value)) + assertEqualFromReader(t, slice) + + ASSERT_EQ(velocypack.Double, slice.Type(), t) + ASSERT_TRUE(slice.IsDouble(), t) + ASSERT_EQ(velocypack.ValueLength(9), mustLength(slice.ByteSize()), t) + ASSERT_DOUBLE_EQ(value, mustDouble(slice.GetDouble()), t) +} + +func TestSliceDoubleNegative(t *testing.T) { + slice := velocypack.Slice{0x1b, 1, 2, 3, 4, 5, 6, 7, 8} + value := -999.91355 + binary.LittleEndian.PutUint64(slice[1:], math.Float64bits(value)) + assertEqualFromReader(t, slice) + + ASSERT_EQ(velocypack.Double, slice.Type(), t) + ASSERT_TRUE(slice.IsDouble(), t) + ASSERT_EQ(velocypack.ValueLength(9), mustLength(slice.ByteSize()), t) + ASSERT_DOUBLE_EQ(value, mustDouble(slice.GetDouble()), t) +} diff --git a/deps/github.com/arangodb/go-velocypack/test/slice_factory_test.go b/deps/github.com/arangodb/go-velocypack/test/slice_factory_test.go new file mode 100644 index 000000000..4587ebdac --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/test/slice_factory_test.go @@ -0,0 +1,105 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed 
under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "testing" + + velocypack "github.com/arangodb/go-velocypack" +) + +func TestSliceNoneFactory(t *testing.T) { + slice := velocypack.NoneSlice() + assertEqualFromReader(t, slice) + ASSERT_TRUE(slice.IsNone(), t) +} + +func TestSliceNullFactory(t *testing.T) { + slice := velocypack.NullSlice() + assertEqualFromReader(t, slice) + ASSERT_TRUE(slice.IsNull(), t) +} + +func TestSliceZeroFactory(t *testing.T) { + slice := velocypack.ZeroSlice() + assertEqualFromReader(t, slice) + ASSERT_TRUE(slice.IsSmallInt(), t) + ASSERT_EQ(int64(0), mustInt(slice.GetSmallInt()), t) +} + +func TestSliceIllegalFactory(t *testing.T) { + slice := velocypack.IllegalSlice() + assertEqualFromReader(t, slice) + ASSERT_TRUE(slice.IsIllegal(), t) +} + +func TestSliceFalseFactory(t *testing.T) { + slice := velocypack.FalseSlice() + assertEqualFromReader(t, slice) + ASSERT_TRUE(slice.IsBool() && !mustBool(slice.GetBool()), t) +} + +func TestSliceTrueFactory(t *testing.T) { + slice := velocypack.TrueSlice() + assertEqualFromReader(t, slice) + ASSERT_TRUE(slice.IsBool() && mustBool(slice.GetBool()), t) +} + +func TestSliceEmptyArrayFactory(t *testing.T) { + slice := velocypack.EmptyArraySlice() + assertEqualFromReader(t, slice) + ASSERT_TRUE(slice.IsArray() && mustLength(slice.Length()) == 0, t) +} + +func TestSliceEmptyObjectFactory(t 
*testing.T) { + slice := velocypack.EmptyObjectSlice() + assertEqualFromReader(t, slice) + ASSERT_TRUE(slice.IsObject() && mustLength(slice.Length()) == 0, t) +} + +func TestSliceMinKeyFactory(t *testing.T) { + slice := velocypack.MinKeySlice() + assertEqualFromReader(t, slice) + ASSERT_TRUE(slice.IsMinKey(), t) +} + +func TestSliceMaxKeyFactory(t *testing.T) { + slice := velocypack.MaxKeySlice() + assertEqualFromReader(t, slice) + ASSERT_TRUE(slice.IsMaxKey(), t) +} + +func TestSliceStringFactory(t *testing.T) { + slice := velocypack.StringSlice("short") + assertEqualFromReader(t, slice) + ASSERT_TRUE(slice.IsString(), t) + + slice = velocypack.StringSlice(`long long long long long long long long long long long long long long long long long long + long long long long long long long long long long long long long long long long long long long long long long long long + long long long long long long long long long long long long long long long long long long long long long long long long + long long long long long long long long long long long long long long long long long long long long long long long long + long long long long long long long long long long long long long long long long long long long long long long long long + long long long long long long long long long long long long long long long long long long long long long long long long `) + assertEqualFromReader(t, slice) + ASSERT_TRUE(slice.IsString(), t) +} diff --git a/deps/github.com/arangodb/go-velocypack/test/slice_from_reader_test.go b/deps/github.com/arangodb/go-velocypack/test/slice_from_reader_test.go new file mode 100644 index 000000000..196bfb492 --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/test/slice_from_reader_test.go @@ -0,0 +1,67 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "bufio" + "bytes" + "fmt" + "testing" + + velocypack "github.com/arangodb/go-velocypack" +) + +// assertEqualFromReader wraps the given slice in a byte Buffer (the io.Reader) and +// calls SliceFromReader on that. +// It then compares the 2 slices. +func assertEqualFromReader(t *testing.T, s velocypack.Slice, args ...interface{}) { + // Normal reader + { + buf := bytes.NewBuffer(s) + s2, err := velocypack.SliceFromReader(buf) + var msg string + if len(args) > 0 { + msg = " (" + fmt.Sprintf(args[0].(string), args[1:]...) + ")" + } + if err != nil { + t.Errorf("SliceFromReader failed at %s: %v%s", callerInfo(2), err, msg) + } else if s.String() != s2.String() { + t.Errorf("SliceFromReader return different slice at %s. Got:\n\t'%s', expected:\n\t'%s'%s", callerInfo(2), s2.String(), s.String(), msg) + } + } + + // Buffered reader + { + brd := bufio.NewReader(bytes.NewBuffer(s)) + s2, err := velocypack.SliceFromReader(brd) + var msg string + if len(args) > 0 { + msg = " (" + fmt.Sprintf(args[0].(string), args[1:]...) + ")" + } + if err != nil { + t.Errorf("SliceFromReader failed at %s: %v%s", callerInfo(2), err, msg) + } else if s.String() != s2.String() { + t.Errorf("SliceFromReader return different slice at %s. 
Got:\n\t'%s', expected:\n\t'%s'%s", callerInfo(2), s2.String(), s.String(), msg) + } + } +} diff --git a/deps/github.com/arangodb/go-velocypack/test/slice_int_test.go b/deps/github.com/arangodb/go-velocypack/test/slice_int_test.go new file mode 100644 index 000000000..aee4651c3 --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/test/slice_int_test.go @@ -0,0 +1,237 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "math" + "testing" + + velocypack "github.com/arangodb/go-velocypack" +) + +func TestSliceInt1(t *testing.T) { + slice := velocypack.Slice{0x20, 0x33} + assertEqualFromReader(t, slice) + value := int64(0x33) + + ASSERT_EQ(velocypack.Int, slice.Type(), t) + ASSERT_TRUE(slice.IsInt(), t) + ASSERT_EQ(velocypack.ValueLength(2), mustLength(slice.ByteSize()), t) + + ASSERT_EQ(value, mustInt(slice.GetInt()), t) + ASSERT_EQ(value, mustInt(slice.GetSmallInt()), t) + ASSERT_EQ(uint64(value), mustUInt(slice.GetUInt()), t) +} + +func TestSliceInt2(t *testing.T) { + slice := velocypack.Slice{0x21, 0x23, 0x42} + assertEqualFromReader(t, slice) + + ASSERT_EQ(velocypack.Int, slice.Type(), t) + ASSERT_TRUE(slice.IsInt(), t) + ASSERT_EQ(velocypack.ValueLength(3), mustLength(slice.ByteSize()), t) + + ASSERT_EQ(int64(0x4223), mustInt(slice.GetInt()), t) + ASSERT_EQ(int64(0x4223), mustInt(slice.GetSmallInt()), t) +} + +func TestSliceInt3(t *testing.T) { + slice := velocypack.Slice{0x22, 0x23, 0x42, 0x66} + assertEqualFromReader(t, slice) + + ASSERT_EQ(velocypack.Int, slice.Type(), t) + ASSERT_TRUE(slice.IsInt(), t) + ASSERT_EQ(velocypack.ValueLength(4), mustLength(slice.ByteSize()), t) + + ASSERT_EQ(int64(0x664223), mustInt(slice.GetInt()), t) + ASSERT_EQ(int64(0x664223), mustInt(slice.GetSmallInt()), t) +} + +func TestSliceInt4(t *testing.T) { + slice := velocypack.Slice{0x23, 0x23, 0x42, 0x66, 0x7c} + assertEqualFromReader(t, slice) + + ASSERT_EQ(velocypack.Int, slice.Type(), t) + ASSERT_TRUE(slice.IsInt(), t) + ASSERT_EQ(velocypack.ValueLength(5), mustLength(slice.ByteSize()), t) + + ASSERT_EQ(int64(0x7c664223), mustInt(slice.GetInt()), t) + ASSERT_EQ(int64(0x7c664223), mustInt(slice.GetSmallInt()), t) +} + +func TestSliceInt5(t *testing.T) { + slice := velocypack.Slice{0x24, 0x23, 0x42, 0x66, 0xac, 0x6f} + assertEqualFromReader(t, slice) + + 
ASSERT_EQ(velocypack.Int, slice.Type(), t) + ASSERT_TRUE(slice.IsInt(), t) + ASSERT_EQ(velocypack.ValueLength(6), mustLength(slice.ByteSize()), t) + + ASSERT_EQ(int64(0x6fac664223), mustInt(slice.GetInt()), t) + ASSERT_EQ(int64(0x6fac664223), mustInt(slice.GetSmallInt()), t) +} + +func TestSliceInt6(t *testing.T) { + slice := velocypack.Slice{0x25, 0x23, 0x42, 0x66, 0xac, 0xff, 0x3f} + assertEqualFromReader(t, slice) + + ASSERT_EQ(velocypack.Int, slice.Type(), t) + ASSERT_TRUE(slice.IsInt(), t) + ASSERT_EQ(velocypack.ValueLength(7), mustLength(slice.ByteSize()), t) + + ASSERT_EQ(int64(0x3fffac664223), mustInt(slice.GetInt()), t) + ASSERT_EQ(int64(0x3fffac664223), mustInt(slice.GetSmallInt()), t) +} + +func TestSliceInt7(t *testing.T) { + slice := velocypack.Slice{0x26, 0x23, 0x42, 0x66, 0xac, 0xff, 0x3f, 0x5a} + assertEqualFromReader(t, slice) + + ASSERT_EQ(velocypack.Int, slice.Type(), t) + ASSERT_TRUE(slice.IsInt(), t) + ASSERT_EQ(velocypack.ValueLength(8), mustLength(slice.ByteSize()), t) + + ASSERT_EQ(int64(0x5a3fffac664223), mustInt(slice.GetInt()), t) + ASSERT_EQ(int64(0x5a3fffac664223), mustInt(slice.GetSmallInt()), t) +} + +func TestSliceInt8(t *testing.T) { + slice := velocypack.Slice{0x27, 0x23, 0x42, 0x66, 0xac, 0xff, 0x3f, 0xfa, 0x6f} + assertEqualFromReader(t, slice) + + ASSERT_EQ(velocypack.Int, slice.Type(), t) + ASSERT_TRUE(slice.IsInt(), t) + ASSERT_EQ(velocypack.ValueLength(9), mustLength(slice.ByteSize()), t) + + ASSERT_EQ(int64(0x6ffa3fffac664223), mustInt(slice.GetInt()), t) + ASSERT_EQ(int64(0x6ffa3fffac664223), mustInt(slice.GetSmallInt()), t) +} + +func TestSliceIntMax(t *testing.T) { + b := velocypack.Builder{} + must(b.AddValue(velocypack.NewIntValue(math.MaxInt64))) + slice := mustSlice(b.Slice()) + + ASSERT_EQ(velocypack.Int, slice.Type(), t) + ASSERT_TRUE(slice.IsInt(), t) + ASSERT_EQ(velocypack.ValueLength(9), mustLength(slice.ByteSize()), t) + + ASSERT_EQ(int64(math.MaxInt64), mustInt(slice.GetInt()), t) +} + +func TestSliceNegInt1(t 
*testing.T) { + slice := velocypack.Slice{0x20, 0xa3} + assertEqualFromReader(t, slice) + + ASSERT_EQ(velocypack.Int, slice.Type(), t) + ASSERT_TRUE(slice.IsInt(), t) + ASSERT_EQ(velocypack.ValueLength(2), mustLength(slice.ByteSize()), t) + + ASSERT_EQ(staticCastInt64(0xffffffffffffffa3), mustInt(slice.GetInt()), t) +} + +func TestSliceNegInt2(t *testing.T) { + slice := velocypack.Slice{0x21, 0x23, 0xe2} + assertEqualFromReader(t, slice) + + ASSERT_EQ(velocypack.Int, slice.Type(), t) + ASSERT_TRUE(slice.IsInt(), t) + ASSERT_EQ(velocypack.ValueLength(3), mustLength(slice.ByteSize()), t) + + ASSERT_EQ(staticCastInt64(0xffffffffffffe223), mustInt(slice.GetInt()), t) +} + +func TestSliceNegInt3(t *testing.T) { + slice := velocypack.Slice{0x22, 0x23, 0x42, 0xd6} + assertEqualFromReader(t, slice) + + ASSERT_EQ(velocypack.Int, slice.Type(), t) + ASSERT_TRUE(slice.IsInt(), t) + ASSERT_EQ(velocypack.ValueLength(4), mustLength(slice.ByteSize()), t) + + ASSERT_EQ(staticCastInt64(0xffffffffffd64223), mustInt(slice.GetInt()), t) +} + +func TestSliceNegInt4(t *testing.T) { + slice := velocypack.Slice{0x23, 0x23, 0x42, 0x66, 0xac} + assertEqualFromReader(t, slice) + + ASSERT_EQ(velocypack.Int, slice.Type(), t) + ASSERT_TRUE(slice.IsInt(), t) + ASSERT_EQ(velocypack.ValueLength(5), mustLength(slice.ByteSize()), t) + + ASSERT_EQ(staticCastInt64(0xffffffffac664223), mustInt(slice.GetInt()), t) +} + +func TestSliceNegInt5(t *testing.T) { + slice := velocypack.Slice{0x24, 0x23, 0x42, 0x66, 0xac, 0xff} + assertEqualFromReader(t, slice) + + ASSERT_EQ(velocypack.Int, slice.Type(), t) + ASSERT_TRUE(slice.IsInt(), t) + ASSERT_EQ(velocypack.ValueLength(6), mustLength(slice.ByteSize()), t) + + ASSERT_EQ(staticCastInt64(0xffffffffac664223), mustInt(slice.GetInt()), t) +} + +func TestSliceNegInt6(t *testing.T) { + slice := velocypack.Slice{0x25, 0x23, 0x42, 0x66, 0xac, 0xff, 0xef} + assertEqualFromReader(t, slice) + + ASSERT_EQ(velocypack.Int, slice.Type(), t) + ASSERT_TRUE(slice.IsInt(), t) + 
ASSERT_EQ(velocypack.ValueLength(7), mustLength(slice.ByteSize()), t) + + ASSERT_EQ(staticCastInt64(0xffffefffac664223), mustInt(slice.GetInt()), t) +} + +func TestSliceNegInt7(t *testing.T) { + slice := velocypack.Slice{0x26, 0x23, 0x42, 0x66, 0xac, 0xff, 0xef, 0xfa} + assertEqualFromReader(t, slice) + + ASSERT_EQ(velocypack.Int, slice.Type(), t) + ASSERT_TRUE(slice.IsInt(), t) + ASSERT_EQ(velocypack.ValueLength(8), mustLength(slice.ByteSize()), t) + + ASSERT_EQ(staticCastInt64(0xfffaefffac664223), mustInt(slice.GetInt()), t) +} + +func TestSliceNegInt8(t *testing.T) { + slice := velocypack.Slice{0x27, 0x23, 0x42, 0x66, 0xac, 0xff, 0xef, 0xfa, 0x8e} + assertEqualFromReader(t, slice) + + ASSERT_EQ(velocypack.Int, slice.Type(), t) + ASSERT_TRUE(slice.IsInt(), t) + ASSERT_EQ(velocypack.ValueLength(9), mustLength(slice.ByteSize()), t) + + ASSERT_EQ(staticCastInt64(0x8efaefffac664223), mustInt(slice.GetInt()), t) +} + +func TestSliceIntOverflow(t *testing.T) { + b := velocypack.Builder{} + must(b.AddValue(velocypack.NewUIntValue(math.MaxUint64))) + slice := mustSlice(b.Slice()) + + ASSERT_EQ(velocypack.UInt, slice.Type(), t) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsNumberOutOfRange, t)(slice.GetInt()) +} diff --git a/deps/github.com/arangodb/go-velocypack/test/slice_key_test.go b/deps/github.com/arangodb/go-velocypack/test/slice_key_test.go new file mode 100644 index 000000000..54e146075 --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/test/slice_key_test.go @@ -0,0 +1,47 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "testing" + + velocypack "github.com/arangodb/go-velocypack" +) + +func TestSliceMinKey(t *testing.T) { + slice := velocypack.Slice{0x1e} + assertEqualFromReader(t, slice) + + ASSERT_EQ(velocypack.MinKey, slice.Type(), t) + ASSERT_TRUE(slice.IsMinKey(), t) + ASSERT_EQ(velocypack.ValueLength(1), mustLength(slice.ByteSize()), t) +} + +func TestSliceMaxKey(t *testing.T) { + slice := velocypack.Slice{0x1f} + assertEqualFromReader(t, slice) + + ASSERT_EQ(velocypack.MaxKey, slice.Type(), t) + ASSERT_TRUE(slice.IsMaxKey(), t) + ASSERT_EQ(velocypack.ValueLength(1), mustLength(slice.ByteSize()), t) +} diff --git a/deps/github.com/arangodb/go-velocypack/test/slice_length_test.go b/deps/github.com/arangodb/go-velocypack/test/slice_length_test.go new file mode 100644 index 000000000..5323f0f6e --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/test/slice_length_test.go @@ -0,0 +1,85 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "testing" + + velocypack "github.com/arangodb/go-velocypack" +) + +// TestSliceLength checks the Length function of a slice. +func TestSliceLength(t *testing.T) { + tests := []struct { + JSON string + Length velocypack.ValueLength + ErrorType func(error) bool + Unindexed bool + Head byte + }{ + {"null", velocypack.ValueLength(0), velocypack.IsInvalidType, false, 0x18}, + {"true", velocypack.ValueLength(0), velocypack.IsInvalidType, false, 0x1a}, + {"false", velocypack.ValueLength(0), velocypack.IsInvalidType, false, 0x19}, + {"[]", velocypack.ValueLength(0), nil, false, 0x01}, + {"[1]", velocypack.ValueLength(1), nil, false, 0}, + {"[2,[]]", velocypack.ValueLength(2), nil, false, 0}, + {"[2,{},3]", velocypack.ValueLength(3), nil, false, 0}, + {"[1,2,3,4,5,6,7,8,9,\"ten\"]", velocypack.ValueLength(10), nil, false, 0}, + {"{}", velocypack.ValueLength(0), nil, false, 0x0a}, + {"{\"foo\":1}", velocypack.ValueLength(1), nil, false, 0}, + {"{\"foo\":1,\"bar\":{}}", velocypack.ValueLength(2), nil, false, 0}, + {"{\"a\":1,\"b\":2,\"c\":3,\"d\":4,\"e\":5,\"f\":6,\"g\":7,\"h\":8,\"i\":9,\"j\":10,\"k\":11,\"l\":12}", velocypack.ValueLength(12), nil, false, 0}, + // Unindexed + {"[]", velocypack.ValueLength(0), nil, true, 0x01}, + {"[1]", velocypack.ValueLength(1), nil, true, 0x13}, + {"[2,[]]", velocypack.ValueLength(2), nil, true, 0x13}, + {"[2,{},3]", velocypack.ValueLength(3), nil, true, 0x13}, + {"[1,2,3,4,5,6,7,8,9,\"ten\"]", velocypack.ValueLength(10), nil, true, 0x13}, + {"{}", velocypack.ValueLength(0), nil, true, 0x0a}, + {"{\"foo\":1}", velocypack.ValueLength(1), nil, true, 0x14}, + {"{\"foo\":1,\"bar\":{}}", velocypack.ValueLength(2), nil, true, 0x14}, + 
{"{\"a\":1,\"b\":2,\"c\":3,\"d\":4,\"e\":5,\"f\":6,\"g\":7,\"h\":8,\"i\":9,\"j\":10,\"k\":11,\"l\":12}", velocypack.ValueLength(12), nil, true, 0x14}, + } + + for _, test := range tests { + slice := mustSlice(velocypack.ParseJSONFromString(test.JSON, velocypack.ParserOptions{ + BuildUnindexedArrays: test.Unindexed, + BuildUnindexedObjects: test.Unindexed, + })) + if test.Head != 0 && slice[0] != test.Head { + t.Errorf("Invalid Head for '%s': got %02x, expected %02x", test.JSON, slice[0], test.Head) + } + l, err := slice.Length() + if test.ErrorType != nil { + if !test.ErrorType(err) { + t.Errorf("Length: invalid error for '%s': got %v", test.JSON, err) + } + } else if err != nil { + t.Errorf("Length failed for '%s': got %v", test.JSON, err) + } else { + if l != test.Length { + t.Errorf("Length returned invalid value for '%s': got %d, expected %d", test.JSON, l, test.Length) + } + } + } +} diff --git a/deps/github.com/arangodb/go-velocypack/test/slice_merge_test.go b/deps/github.com/arangodb/go-velocypack/test/slice_merge_test.go new file mode 100644 index 000000000..b17e4a645 --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/test/slice_merge_test.go @@ -0,0 +1,95 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "testing" + + velocypack "github.com/arangodb/go-velocypack" +) + +// TestSliceMerge checks the Merge. +func TestSliceMerge(t *testing.T) { + tests := []struct { + InputJSONs []string + OutputJSON string + }{ + { + InputJSONs: []string{ + `{"a":1,"b":2}`, + `{"a":7}`, + }, + OutputJSON: `{"a":1,"b":2}`, + }, + { + InputJSONs: []string{ + `{"a":1,"b":2}`, + `{"a":7,"d":true}`, + }, + OutputJSON: `{"a":1,"b":2,"d":true}`, + }, + { + InputJSONs: []string{ + `{"a":1,"b":{"c":"foo"},"d":[5,6,7]}`, + `{"a":7,"b":[1,2,3,4]}`, + }, + OutputJSON: `{"a":1,"b":{"c":"foo"},"d":[5,6,7]}`, + }, + { + InputJSONs: []string{ + `{"a":1,"b":{"c":"foo"},"d":[5,6,7]}`, + `{"A":7,"B":[1,2,3,4]}`, + }, + OutputJSON: `{"A":7,"B":[1,2,3,4],"a":1,"b":{"c":"foo"},"d":[5,6,7]}`, + }, + } + + for testIndex, test := range tests { + slices := make([]velocypack.Slice, len(test.InputJSONs)) + for i, inp := range test.InputJSONs { + var err error + slices[i], err = velocypack.ParseJSONFromString(inp) + if err != nil { + t.Fatalf("Failed to parse '%s': %#v", inp, err) + } + } + result, err := velocypack.Merge(slices...) + if err != nil { + t.Fatalf("Failed to Merge test %d: %#v", testIndex, err) + } + output, err := result.JSONString() + if err != nil { + t.Fatalf("Failed to Dump result of test %d: %#v", testIndex, err) + } + if output != test.OutputJSON { + t.Errorf("Unexpected result in test %d\nExpected: %s\nGot: %s", testIndex, test.OutputJSON, output) + } + } +} + +// TestSliceMergeNonObject checks the Merge with invalid input. 
+func TestSliceMergeNonObject(t *testing.T) { + if _, err := velocypack.Merge(velocypack.NullSlice()); !velocypack.IsInvalidType(err) { + t.Errorf("Expected InvalidTypeError, got %#v", err) + } +} diff --git a/deps/github.com/arangodb/go-velocypack/test/slice_object_large_test.go b/deps/github.com/arangodb/go-velocypack/test/slice_object_large_test.go new file mode 100644 index 000000000..07b28b2c7 --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/test/slice_object_large_test.go @@ -0,0 +1,51 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +// +build !nolarge + +package test + +import ( + "fmt" + "math" + "testing" + + velocypack "github.com/arangodb/go-velocypack" +) + +func TestSliceObjectGetLengthMany2(t *testing.T) { + max := math.MaxUint16 + var builder velocypack.Builder + must(builder.OpenObject()) + for i := 1; i <= max; i++ { + key := fmt.Sprintf("f%d", i) + must(builder.AddKeyValue(key, velocypack.NewUIntValue(uint64(i)+10))) + } + must(builder.Close()) + slice := mustSlice(builder.Slice()) + + for i := max; i >= 1; i-- { + value := mustSlice(slice.Get(fmt.Sprintf("f%d", i))) + ASSERT_EQ(velocypack.UInt, value.Type(), t) + ASSERT_EQ(uint64(i)+10, mustUInt(value.GetUInt()), t) + } +} diff --git a/deps/github.com/arangodb/go-velocypack/test/slice_object_test.go b/deps/github.com/arangodb/go-velocypack/test/slice_object_test.go new file mode 100644 index 000000000..18a5b8498 --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/test/slice_object_test.go @@ -0,0 +1,310 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "fmt" + "strings" + "testing" + + velocypack "github.com/arangodb/go-velocypack" +) + +func TestSliceObjectEmpty(t *testing.T) { + slice := velocypack.Slice{0x0a} + assertEqualFromReader(t, slice) + + ASSERT_EQ(velocypack.Object, slice.Type(), t) + ASSERT_TRUE(slice.IsObject(), t) + ASSERT_TRUE(slice.IsEmptyObject(), t) + ASSERT_EQ(velocypack.ValueLength(1), mustLength(slice.ByteSize()), t) + ASSERT_EQ(velocypack.ValueLength(0), mustLength(slice.Length()), t) + + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsIndexOutOfBounds, t)(slice.KeyAt(0)) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsIndexOutOfBounds, t)(slice.ValueAt(0)) +} + +func TestSliceObjectCases1(t *testing.T) { + slice := velocypack.Slice{0x0b, 0x00, 0x03, 0x41, 0x61, 0x31, 0x41, 0x62, + 0x32, 0x41, 0x63, 0x33, 0x03, 0x06, 0x09} + slice[1] = byte(len(slice)) // Set byte length + assertEqualFromReader(t, slice) + + ASSERT_EQ(velocypack.Object, slice.Type(), t) + ASSERT_TRUE(slice.IsObject(), t) + ASSERT_FALSE(slice.IsEmptyObject(), t) + ASSERT_EQ(velocypack.ValueLength(len(slice)), mustLength(slice.ByteSize()), t) + ASSERT_EQ(velocypack.ValueLength(3), mustLength(slice.Length()), t) + ss := mustSlice(slice.Get("a")) + ASSERT_TRUE(ss.IsSmallInt(), t) + ASSERT_EQ(int64(1), mustInt(ss.GetInt()), t) + ASSERT_EQ(int64(1), mustInt(mustSlice(slice.ValueAt(0)).GetInt()), t) + + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsIndexOutOfBounds, t)(slice.KeyAt(4)) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsIndexOutOfBounds, t)(slice.ValueAt(4)) +} + +func TestSliceObjectCases2(t *testing.T) { + slice := velocypack.Slice{0x0b, 0x00, 0x03, 0x00, 0x00, 0x41, 0x61, 0x31, 0x41, + 0x62, 0x32, 0x41, 0x63, 0x33, 0x05, 0x08, 0x0b} + slice[1] = byte(len(slice)) // Set byte length + assertEqualFromReader(t, slice) + + ASSERT_EQ(velocypack.Object, slice.Type(), t) + ASSERT_TRUE(slice.IsObject(), t) + 
ASSERT_FALSE(slice.IsEmptyObject(), t) + ASSERT_EQ(velocypack.ValueLength(len(slice)), mustLength(slice.ByteSize()), t) + ASSERT_EQ(velocypack.ValueLength(3), mustLength(slice.Length()), t) + ss := mustSlice(slice.Get("a")) + ASSERT_TRUE(ss.IsSmallInt(), t) + ASSERT_EQ(int64(1), mustInt(ss.GetInt()), t) +} + +func TestSliceObjectCases3(t *testing.T) { + slice := velocypack.Slice{0x0b, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x41, 0x61, 0x31, 0x41, 0x62, + 0x32, 0x41, 0x63, 0x33, 0x09, 0x0c, 0x0f} + slice[1] = byte(len(slice)) // Set byte length + assertEqualFromReader(t, slice) + + ASSERT_EQ(velocypack.Object, slice.Type(), t) + ASSERT_TRUE(slice.IsObject(), t) + ASSERT_FALSE(slice.IsEmptyObject(), t) + ASSERT_EQ(velocypack.ValueLength(len(slice)), mustLength(slice.ByteSize()), t) + ASSERT_EQ(velocypack.ValueLength(3), mustLength(slice.Length()), t) + ss := mustSlice(slice.Get("a")) + ASSERT_TRUE(ss.IsSmallInt(), t) + ASSERT_EQ(int64(1), mustInt(ss.GetInt()), t) +} + +func TestSliceObjectCases7(t *testing.T) { + slice := velocypack.Slice{0x0c, 0x00, 0x00, 0x03, 0x00, 0x41, 0x61, 0x31, 0x41, 0x62, + 0x32, 0x41, 0x63, 0x33, 0x05, 0x00, 0x08, 0x00, 0x0b, 0x00} + slice[1] = byte(len(slice)) // Set byte length + assertEqualFromReader(t, slice) + + ASSERT_EQ(velocypack.Object, slice.Type(), t) + ASSERT_TRUE(slice.IsObject(), t) + ASSERT_FALSE(slice.IsEmptyObject(), t) + ASSERT_EQ(velocypack.ValueLength(len(slice)), mustLength(slice.ByteSize()), t) + ASSERT_EQ(velocypack.ValueLength(3), mustLength(slice.Length()), t) + ss := mustSlice(slice.Get("a")) + ASSERT_TRUE(ss.IsSmallInt(), t) + ASSERT_EQ(int64(1), mustInt(ss.GetInt()), t) +} + +func TestSliceObjectCases8(t *testing.T) { + slice := velocypack.Slice{0x0c, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x41, 0x61, 0x31, 0x41, 0x62, 0x32, 0x41, + 0x63, 0x33, 0x09, 0x00, 0x0c, 0x00, 0x0f, 0x00} + slice[1] = byte(len(slice)) // Set byte length + assertEqualFromReader(t, slice) + + 
ASSERT_EQ(velocypack.Object, slice.Type(), t) + ASSERT_TRUE(slice.IsObject(), t) + ASSERT_FALSE(slice.IsEmptyObject(), t) + ASSERT_EQ(velocypack.ValueLength(len(slice)), mustLength(slice.ByteSize()), t) + ASSERT_EQ(velocypack.ValueLength(3), mustLength(slice.Length()), t) + ss := mustSlice(slice.Get("a")) + ASSERT_TRUE(ss.IsSmallInt(), t) + ASSERT_EQ(int64(1), mustInt(ss.GetInt()), t) +} + +func TestSliceObjectCases11(t *testing.T) { + slice := velocypack.Slice{0x0d, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x41, + 0x61, 0x31, 0x41, 0x62, 0x32, 0x41, 0x63, 0x33, 0x09, 0x00, + 0x00, 0x00, 0x0c, 0x00, 0x00, 0x00, 0x0f, 0x00, 0x00, 0x00} + slice[1] = byte(len(slice)) // Set byte length + assertEqualFromReader(t, slice) + + ASSERT_EQ(velocypack.Object, slice.Type(), t) + ASSERT_TRUE(slice.IsObject(), t) + ASSERT_FALSE(slice.IsEmptyObject(), t) + ASSERT_EQ(velocypack.ValueLength(len(slice)), mustLength(slice.ByteSize()), t) + ASSERT_EQ(velocypack.ValueLength(3), mustLength(slice.Length()), t) + ss := mustSlice(slice.Get("a")) + ASSERT_TRUE(ss.IsSmallInt(), t) + ASSERT_EQ(int64(1), mustInt(ss.GetInt()), t) +} + +func TestSliceObjectCases13(t *testing.T) { + slice := velocypack.Slice{0x0e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x41, + 0x61, 0x31, 0x41, 0x62, 0x32, 0x41, 0x63, 0x33, 0x09, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + slice[1] = byte(len(slice)) // Set byte length + assertEqualFromReader(t, slice) + + ASSERT_EQ(velocypack.Object, slice.Type(), t) + ASSERT_TRUE(slice.IsObject(), t) + ASSERT_FALSE(slice.IsEmptyObject(), t) + ASSERT_EQ(velocypack.ValueLength(len(slice)), mustLength(slice.ByteSize()), t) + ASSERT_EQ(velocypack.ValueLength(3), mustLength(slice.Length()), t) + ss := mustSlice(slice.Get("a")) + ASSERT_TRUE(ss.IsSmallInt(), t) + ASSERT_EQ(int64(1), mustInt(ss.GetInt()), t) +} + +func 
TestSliceObjectCompact(t *testing.T) { + slice := velocypack.Slice{0x14, 0x0f, 0x41, 0x61, 0x30, 0x41, 0x62, 0x31, + 0x41, 0x63, 0x32, 0x41, 0x64, 0x33, 0x04} + slice[1] = byte(len(slice)) // Set byte length + assertEqualFromReader(t, slice) + + ASSERT_EQ(velocypack.Object, slice.Type(), t) + ASSERT_TRUE(slice.IsObject(), t) + ASSERT_FALSE(slice.IsEmptyObject(), t) + ASSERT_EQ(velocypack.ValueLength(len(slice)), mustLength(slice.ByteSize()), t) + ASSERT_EQ(velocypack.ValueLength(4), mustLength(slice.Length()), t) + ss := mustSlice(slice.Get("a")) + ASSERT_TRUE(ss.IsSmallInt(), t) + ASSERT_EQ(int64(0), mustInt(ss.GetInt()), t) + + ss = mustSlice(slice.Get("b")) + ASSERT_TRUE(ss.IsSmallInt(), t) + ASSERT_EQ(int64(1), mustInt(ss.GetInt()), t) + + ss = mustSlice(slice.Get("d")) + ASSERT_TRUE(ss.IsSmallInt(), t) + ASSERT_EQ(int64(3), mustInt(ss.GetInt()), t) + + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsIndexOutOfBounds, t)(slice.KeyAt(4)) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsIndexOutOfBounds, t)(slice.ValueAt(4)) +} + +func TestSliceObjectNestedGet1(t *testing.T) { + slice := mustSlice(velocypack.ParseJSONFromString(`{"a":{"b":{"c":55},"d":true}}`)) + + a := mustSlice(slice.Get("a")) + ASSERT_EQ(velocypack.Object, a.Type(), t) + ASSERT_EQ(velocypack.ValueLength(2), mustLength(a.Length()), t) + + b := mustSlice(slice.Get("a", "b")) + ASSERT_EQ(velocypack.Object, a.Type(), t) + ASSERT_EQ(velocypack.ValueLength(1), mustLength(b.Length()), t) + + c := mustSlice(slice.Get("a", "b", "c")) + ASSERT_EQ(velocypack.UInt, c.Type(), t) + ASSERT_EQ(int64(55), mustInt(c.GetInt()), t) + + d := mustSlice(slice.Get("a", "d")) + ASSERT_EQ(velocypack.Bool, d.Type(), t) + ASSERT_TRUE(mustBool(d.GetBool()), t) + + // Not found + ASSERT_EQ(velocypack.None, mustSlice(slice.Get("a", "e")).Type(), t) + ASSERT_EQ(velocypack.None, mustSlice(slice.Get("a", "b", "f")).Type(), t) + ASSERT_EQ(velocypack.None, mustSlice(slice.Get("g")).Type(), t) + + // Special: no path + ASSERT_EQ(slice, 
mustSlice(slice.Get()), t) +} + +func TestSliceObjectGetLength1(t *testing.T) { + // Test fast path with single object field + slice := velocypack.Slice{0x0b, + 0x07, // Bytesize + 0x01, // NoItems + 0x41, 0x61, 0x1a, // "a": true + 0x03, // Index of "a" + } + + a := mustSlice(slice.Get("a")) + ASSERT_EQ(velocypack.Bool, a.Type(), t) + ASSERT_TRUE(mustBool(a.GetBool()), t) +} + +func TestSliceObjectGetLength0(t *testing.T) { + slice := velocypack.Slice{0x0a} // empty object + + a := mustSlice(slice.Get("a")) + ASSERT_EQ(velocypack.None, a.Type(), t) +} + +func TestSliceObjectGetLength2(t *testing.T) { + // Test fast path with two fields (linear search of fields kicks in from 2..3 fields) + slice := velocypack.Slice{0x0b, + 0, // Bytesize + 0x02, // NoItems + 0x41, 0x61, 0x1a, // "a": true + 0x32, 0x19, // "_rev": false + 0x03, 0x06, // Index of "a", "_rev" + } + slice[1] = byte(len(slice)) + + a := mustSlice(slice.Get("a")) + ASSERT_EQ(velocypack.Bool, a.Type(), t) + ASSERT_TRUE(mustBool(a.GetBool()), t) + + b := mustSlice(slice.Get("_rev")) + ASSERT_EQ(velocypack.Bool, b.Type(), t) + ASSERT_FALSE(mustBool(b.GetBool()), t) +} + +func TestSliceObjectGetLengthMany(t *testing.T) { + fields := []string{} + for i := 0; i <= 255; i++ { + fields = append(fields, fmt.Sprintf(`"f%d":%d`, i, i+10)) + } + json := fmt.Sprintf(`{%s}`, strings.Join(fields, ",")) + slice := mustSlice(velocypack.ParseJSONFromString(json)) + + for i := 255; i >= 0; i-- { + value := mustSlice(slice.Get(fmt.Sprintf("f%d", i))) + ASSERT_EQ(velocypack.UInt, value.Type(), t) + ASSERT_EQ(uint64(i)+10, mustUInt(value.GetUInt()), t) + } +} + +func TestSliceObjectNestedHasKey(t *testing.T) { + slice := mustSlice(velocypack.ParseJSONFromString(`{"a":{"b":{"c":55},"d":true}}`)) + + ASSERT_TRUE(mustBool(slice.HasKey("a")), t) + ASSERT_TRUE(mustBool(slice.HasKey("a", "b")), t) + ASSERT_TRUE(mustBool(slice.HasKey("a", "b", "c")), t) + ASSERT_TRUE(mustBool(slice.HasKey("a", "d")), t) + + // Not found + 
ASSERT_FALSE(mustBool(slice.HasKey("a", "e")), t) + ASSERT_FALSE(mustBool(slice.HasKey("a", "b", "f")), t) + ASSERT_FALSE(mustBool(slice.HasKey("g")), t) + + // Special: no path + ASSERT_TRUE(mustBool(slice.HasKey()), t) +} + +func TestSliceObjectKeyValueAtInvalidType(t *testing.T) { + slice := mustSlice(velocypack.ParseJSONFromString(`77`)) + + ASSERT_EQ(velocypack.UInt, slice.Type(), t) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsInvalidType, t)(slice.KeyAt(0)) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsInvalidType, t)(slice.ValueAt(0)) +} + +func TestSliceObjectGetInvalidType(t *testing.T) { + slice := mustSlice(velocypack.ParseJSONFromString(`77`)) + + ASSERT_EQ(velocypack.UInt, slice.Type(), t) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsInvalidType, t)(slice.Get("foo")) +} diff --git a/deps/github.com/arangodb/go-velocypack/test/slice_smallint_test.go b/deps/github.com/arangodb/go-velocypack/test/slice_smallint_test.go new file mode 100644 index 000000000..4893631cf --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/test/slice_smallint_test.go @@ -0,0 +1,45 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "testing" + + velocypack "github.com/arangodb/go-velocypack" +) + +func TestSliceSmallInt(t *testing.T) { + expected := []int64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -6, -5, -4, -3, -2, -1} + + for i := 0; i < 16; i++ { + slice := velocypack.Slice{byte(0x30 + i)} + assertEqualFromReader(t, slice) + + ASSERT_EQ(velocypack.SmallInt, slice.Type(), t) + ASSERT_TRUE(slice.IsSmallInt(), t) + ASSERT_EQ(velocypack.ValueLength(1), mustLength(slice.ByteSize()), t) + + ASSERT_EQ(expected[i], mustInt(slice.GetSmallInt()), t) + ASSERT_EQ(expected[i], mustInt(slice.GetInt()), t) + } +} diff --git a/deps/github.com/arangodb/go-velocypack/test/slice_string_test.go b/deps/github.com/arangodb/go-velocypack/test/slice_string_test.go new file mode 100644 index 000000000..b12ccea83 --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/test/slice_string_test.go @@ -0,0 +1,127 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "testing" + + velocypack "github.com/arangodb/go-velocypack" +) + +func TestSliceStringNoString(t *testing.T) { + slice := velocypack.Slice{} + assertEqualFromReader(t, slice) + + ASSERT_FALSE(slice.IsString(), t) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsInvalidType, t)(slice.GetString()) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsInvalidType, t)(slice.GetStringLength()) +} + +func TestSliceStringEmpty(t *testing.T) { + slice := velocypack.Slice{0x40} + assertEqualFromReader(t, slice) + + ASSERT_EQ(velocypack.String, slice.Type(), t) + ASSERT_TRUE(slice.IsString(), t) + ASSERT_EQ(velocypack.ValueLength(1), mustLength(slice.ByteSize()), t) + ASSERT_EQ("", mustString(slice.GetString()), t) + ASSERT_EQ(velocypack.ValueLength(0), mustLength(slice.GetStringLength()), t) + ASSERT_EQ(0, mustGoInt(slice.CompareString("")), t) +} + +func TestSliceStringLengths(t *testing.T) { + for i := 0; i < 255; i++ { + builder := velocypack.Builder{} + temp := "" + for j := 0; j < i; j++ { + temp = temp + "x" + } + must(builder.AddValue(velocypack.NewStringValue(temp))) + slice := mustSlice(builder.Slice()) + + ASSERT_TRUE(slice.IsString(), t) + ASSERT_EQ(velocypack.String, slice.Type(), t) + ASSERT_EQ(0, mustGoInt(slice.CompareString(temp)), t) + ASSERT_EQ(temp, mustString(slice.GetString()), t) + + ASSERT_EQ(velocypack.ValueLength(i), mustLength(slice.GetStringLength()), t) + + if i <= 126 { + ASSERT_EQ(velocypack.ValueLength(i+1), mustLength(slice.ByteSize()), t) + } else { + ASSERT_EQ(velocypack.ValueLength(i+9), mustLength(slice.ByteSize()), t) + } + } +} + +func TestSliceString1(t *testing.T) { + value := "foobar" + slice := velocypack.Slice(append([]byte{byte(0x40 + len(value))}, value...)) + assertEqualFromReader(t, slice) + + ASSERT_EQ(velocypack.String, slice.Type(), t) + ASSERT_TRUE(slice.IsString(), t) + ASSERT_EQ(velocypack.ValueLength(7), 
mustLength(slice.ByteSize()), t) + ASSERT_EQ(value, mustString(slice.GetString()), t) + ASSERT_EQ(velocypack.ValueLength(len(value)), mustLength(slice.GetStringLength()), t) +} + +func TestSliceString2(t *testing.T) { + slice := velocypack.Slice{0x48, '1', '2', '3', 'f', '\r', '\t', '\n', 'x'} + assertEqualFromReader(t, slice) + + ASSERT_EQ(velocypack.String, slice.Type(), t) + ASSERT_TRUE(slice.IsString(), t) + ASSERT_EQ(velocypack.ValueLength(9), mustLength(slice.ByteSize()), t) + ASSERT_EQ("123f\r\t\nx", mustString(slice.GetString()), t) + ASSERT_EQ(velocypack.ValueLength(8), mustLength(slice.GetStringLength()), t) +} + +func TestSliceStringNullBytes(t *testing.T) { + slice := velocypack.Slice{0x48, 0, '1', '2', 0, '3', '4', 0, 'x'} + assertEqualFromReader(t, slice) + + ASSERT_EQ(velocypack.String, slice.Type(), t) + ASSERT_TRUE(slice.IsString(), t) + ASSERT_EQ(velocypack.ValueLength(9), mustLength(slice.ByteSize()), t) + ASSERT_EQ("\x0012\x0034\x00x", mustString(slice.GetString()), t) + ASSERT_EQ(velocypack.ValueLength(8), mustLength(slice.GetStringLength()), t) +} + +func TestSliceStringLong(t *testing.T) { + slice := velocypack.Slice{0xbf, 6, 0, 0, 0, 0, 0, 0, 0, 'f', 'o', 'o', 'b', 'a', 'r'} + assertEqualFromReader(t, slice) + + ASSERT_EQ(velocypack.String, slice.Type(), t) + ASSERT_TRUE(slice.IsString(), t) + ASSERT_EQ(velocypack.ValueLength(15), mustLength(slice.ByteSize()), t) + ASSERT_EQ("foobar", mustString(slice.GetString()), t) + ASSERT_EQ(velocypack.ValueLength(6), mustLength(slice.GetStringLength()), t) +} + +func TestSliceStringToStringNull(t *testing.T) { + slice := velocypack.NullSlice() + assertEqualFromReader(t, slice) + + ASSERT_EQ("null", mustString(slice.JSONString()), t) +} diff --git a/deps/github.com/arangodb/go-velocypack/test/slice_type_test.go b/deps/github.com/arangodb/go-velocypack/test/slice_type_test.go new file mode 100644 index 000000000..243487f52 --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/test/slice_type_test.go 
@@ -0,0 +1,145 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "testing" + + velocypack "github.com/arangodb/go-velocypack" +) + +// TestSliceTypes checks the Type function of a slice. +func TestSliceTypes(t *testing.T) { + s := velocypack.SliceFromHex + tests := []struct { + Slice velocypack.Slice + Type velocypack.ValueType + }{ + {s("00"), velocypack.None}, + {s("01"), velocypack.Array}, + {s("0a"), velocypack.Object}, + {s("18"), velocypack.Null}, + {s("19"), velocypack.Bool}, + {s("1a"), velocypack.Bool}, + {s("1b"), velocypack.Double}, + {s("1c"), velocypack.UTCDate}, + {s("1e"), velocypack.MinKey}, + {s("1f"), velocypack.MaxKey}, + } + + var allTypes []velocypack.ValueType + for _, test := range tests { + vt := test.Slice.Type() + allTypes = append(allTypes, test.Type) + if vt != test.Type { + t.Errorf("Invalid type for '%s', expected '%s', got '%s'", test.Slice, test.Type, vt) + } + if err := test.Slice.AssertType(test.Type); err != nil { + t.Errorf("AssertType returns unexpected error: %v", err) + } + if err := test.Slice.AssertTypeAny(allTypes...); err != nil { + t.Errorf("AssertTypeAny returns unexpected error: %v", err) + } + } +} + +// TestSliceInvalidGetters checks that getters return an error when passed an invalid type. 
+func TestSliceInvalidGetters(t *testing.T) { + slice := mustSlice(velocypack.ParseJSONFromString("[null,true,1,\"foo\",[],{},7.1]")) + + ss := mustSlice(slice.At(0)) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsInvalidType, t)(ss.GetBool()) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsInvalidType, t)(ss.GetInt()) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsInvalidType, t)(ss.GetUInt()) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsInvalidType, t)(ss.GetSmallInt()) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsInvalidType, t)(ss.GetDouble()) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsInvalidType, t)(ss.GetString()) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsInvalidType, t)(ss.GetBinary()) + //ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsInvalidType, t)(ss.GetExternal()) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsInvalidType, t)(ss.GetUTCDate()) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsInvalidType, t)(ss.Length()) + + ss = mustSlice(slice.At(1)) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsInvalidType, t)(ss.GetInt()) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsInvalidType, t)(ss.GetUInt()) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsInvalidType, t)(ss.GetSmallInt()) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsInvalidType, t)(ss.GetDouble()) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsInvalidType, t)(ss.GetString()) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsInvalidType, t)(ss.GetBinary()) + //ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsInvalidType, t)(ss.GetExternal()) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsInvalidType, t)(ss.GetUTCDate()) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsInvalidType, t)(ss.Length()) + + ss = mustSlice(slice.At(2)) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsInvalidType, t)(ss.GetBool()) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsInvalidType, t)(ss.GetDouble()) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsInvalidType, t)(ss.GetString()) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsInvalidType, t)(ss.GetBinary()) + 
//ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsInvalidType, t)(ss.GetExternal()) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsInvalidType, t)(ss.GetUTCDate()) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsInvalidType, t)(ss.Length()) + + ss = mustSlice(slice.At(3)) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsInvalidType, t)(ss.GetBool()) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsInvalidType, t)(ss.GetInt()) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsInvalidType, t)(ss.GetUInt()) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsInvalidType, t)(ss.GetSmallInt()) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsInvalidType, t)(ss.GetDouble()) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsInvalidType, t)(ss.GetBinary()) + //ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsInvalidType, t)(ss.GetExternal()) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsInvalidType, t)(ss.GetUTCDate()) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsInvalidType, t)(ss.Length()) + + ss = mustSlice(slice.At(4)) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsInvalidType, t)(ss.GetBool()) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsInvalidType, t)(ss.GetInt()) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsInvalidType, t)(ss.GetUInt()) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsInvalidType, t)(ss.GetSmallInt()) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsInvalidType, t)(ss.GetDouble()) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsInvalidType, t)(ss.GetString()) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsInvalidType, t)(ss.GetBinary()) + //ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsInvalidType, t)(ss.GetExternal()) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsInvalidType, t)(ss.GetUTCDate()) + + ss = mustSlice(slice.At(5)) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsInvalidType, t)(ss.GetBool()) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsInvalidType, t)(ss.GetInt()) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsInvalidType, t)(ss.GetUInt()) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsInvalidType, t)(ss.GetSmallInt()) + 
ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsInvalidType, t)(ss.GetDouble()) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsInvalidType, t)(ss.GetString()) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsInvalidType, t)(ss.GetBinary()) + //ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsInvalidType, t)(ss.GetExternal()) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsInvalidType, t)(ss.GetUTCDate()) + + ss = mustSlice(slice.At(6)) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsInvalidType, t)(ss.GetBool()) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsInvalidType, t)(ss.GetInt()) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsInvalidType, t)(ss.GetUInt()) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsInvalidType, t)(ss.GetSmallInt()) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsInvalidType, t)(ss.GetString()) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsInvalidType, t)(ss.GetBinary()) + //ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsInvalidType, t)(ss.GetExternal()) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsInvalidType, t)(ss.GetUTCDate()) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsInvalidType, t)(ss.Length()) +} diff --git a/deps/github.com/arangodb/go-velocypack/test/slice_uint_test.go b/deps/github.com/arangodb/go-velocypack/test/slice_uint_test.go new file mode 100644 index 000000000..c97e4f98e --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/test/slice_uint_test.go @@ -0,0 +1,150 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "math" + "testing" + + velocypack "github.com/arangodb/go-velocypack" +) + +func TestSliceUInt1(t *testing.T) { + slice := velocypack.Slice{0x28, 0x33} + value := uint64(0x33) + assertEqualFromReader(t, slice) + + ASSERT_EQ(velocypack.UInt, slice.Type(), t) + ASSERT_TRUE(slice.IsUInt(), t) + ASSERT_EQ(velocypack.ValueLength(2), mustLength(slice.ByteSize()), t) + + ASSERT_EQ(value, mustUInt(slice.GetUInt()), t) + ASSERT_EQ(int64(value), mustInt(slice.GetInt()), t) +} + +func TestSliceUInt2(t *testing.T) { + slice := velocypack.Slice{0x29, 0x23, 0x42} + assertEqualFromReader(t, slice) + + ASSERT_EQ(velocypack.UInt, slice.Type(), t) + ASSERT_TRUE(slice.IsUInt(), t) + ASSERT_EQ(velocypack.ValueLength(3), mustLength(slice.ByteSize()), t) + + ASSERT_EQ(uint64(0x4223), mustUInt(slice.GetUInt()), t) +} + +func TestSliceUInt3(t *testing.T) { + slice := velocypack.Slice{0x2a, 0x23, 0x42, 0x66} + assertEqualFromReader(t, slice) + + ASSERT_EQ(velocypack.UInt, slice.Type(), t) + ASSERT_TRUE(slice.IsUInt(), t) + ASSERT_EQ(velocypack.ValueLength(4), mustLength(slice.ByteSize()), t) + + ASSERT_EQ(uint64(0x664223), mustUInt(slice.GetUInt()), t) +} + +func TestSliceUInt4(t *testing.T) { + slice := velocypack.Slice{0x2b, 0x23, 0x42, 0x66, 0x7c} + assertEqualFromReader(t, slice) + + ASSERT_EQ(velocypack.UInt, slice.Type(), t) + ASSERT_TRUE(slice.IsUInt(), t) + ASSERT_EQ(velocypack.ValueLength(5), mustLength(slice.ByteSize()), t) + + ASSERT_EQ(uint64(0x7c664223), mustUInt(slice.GetUInt()), t) +} + +func TestSliceUInt5(t *testing.T) { + slice := velocypack.Slice{0x2c, 0x23, 0x42, 0x66, 0xac, 0x6f} + assertEqualFromReader(t, slice) + + ASSERT_EQ(velocypack.UInt, slice.Type(), t) + ASSERT_TRUE(slice.IsUInt(), t) + ASSERT_EQ(velocypack.ValueLength(6), 
mustLength(slice.ByteSize()), t) + + ASSERT_EQ(uint64(0x6fac664223), mustUInt(slice.GetUInt()), t) +} + +func TestSliceUInt6(t *testing.T) { + slice := velocypack.Slice{0x2d, 0x23, 0x42, 0x66, 0xac, 0xff, 0x3f} + assertEqualFromReader(t, slice) + + ASSERT_EQ(velocypack.UInt, slice.Type(), t) + ASSERT_TRUE(slice.IsUInt(), t) + ASSERT_EQ(velocypack.ValueLength(7), mustLength(slice.ByteSize()), t) + + ASSERT_EQ(uint64(0x3fffac664223), mustUInt(slice.GetUInt()), t) +} + +func TestSliceUInt7(t *testing.T) { + slice := velocypack.Slice{0x2e, 0x23, 0x42, 0x66, 0xac, 0xff, 0x3f, 0x5a} + assertEqualFromReader(t, slice) + + ASSERT_EQ(velocypack.UInt, slice.Type(), t) + ASSERT_TRUE(slice.IsUInt(), t) + ASSERT_EQ(velocypack.ValueLength(8), mustLength(slice.ByteSize()), t) + + ASSERT_EQ(uint64(0x5a3fffac664223), mustUInt(slice.GetUInt()), t) +} + +func TestSliceUInt8(t *testing.T) { + slice := velocypack.Slice{0x2f, 0x23, 0x42, 0x66, 0xac, 0xff, 0x3f, 0xfa, 0x6f} + assertEqualFromReader(t, slice) + + ASSERT_EQ(velocypack.UInt, slice.Type(), t) + ASSERT_TRUE(slice.IsUInt(), t) + ASSERT_EQ(velocypack.ValueLength(9), mustLength(slice.ByteSize()), t) + + ASSERT_EQ(uint64(0x6ffa3fffac664223), mustUInt(slice.GetUInt()), t) +} + +func TestSliceUIntMax(t *testing.T) { + b := velocypack.Builder{} + must(b.AddValue(velocypack.NewUIntValue(math.MaxUint64))) + slice := mustSlice(b.Slice()) + + ASSERT_EQ(velocypack.UInt, slice.Type(), t) + ASSERT_TRUE(slice.IsUInt(), t) + ASSERT_EQ(velocypack.ValueLength(9), mustLength(slice.ByteSize()), t) + + ASSERT_EQ(uint64(math.MaxUint64), mustUInt(slice.GetUInt()), t) +} + +func TestSliceUIntNegative1(t *testing.T) { + b := velocypack.Builder{} + must(b.AddValue(velocypack.NewIntValue(-3))) // SmallInt + slice := mustSlice(b.Slice()) + + ASSERT_EQ(velocypack.SmallInt, slice.Type(), t) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsNumberOutOfRange, t)(slice.GetUInt()) +} + +func TestSliceUIntNegative2(t *testing.T) { + b := velocypack.Builder{} + 
must(b.AddValue(velocypack.NewIntValue(-300))) // Int + slice := mustSlice(b.Slice()) + + ASSERT_EQ(velocypack.Int, slice.Type(), t) + ASSERT_VELOCYPACK_EXCEPTION(velocypack.IsNumberOutOfRange, t)(slice.GetUInt()) +} diff --git a/deps/github.com/arangodb/go-velocypack/test/slice_utcdate_test.go b/deps/github.com/arangodb/go-velocypack/test/slice_utcdate_test.go new file mode 100644 index 000000000..ae7d45f06 --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/test/slice_utcdate_test.go @@ -0,0 +1,53 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import ( + "encoding/binary" + "testing" + "time" + + velocypack "github.com/arangodb/go-velocypack" +) + +func TestSliceUTCDate1(t *testing.T) { + slice := velocypack.Slice{0x1c, 0, 0, 0, 0, 0, 0, 0, 0} + assertEqualFromReader(t, slice) + + ASSERT_EQ(velocypack.UTCDate, slice.Type(), t) + ASSERT_TRUE(slice.IsUTCDate(), t) + ASSERT_EQ(velocypack.ValueLength(9), mustLength(slice.ByteSize()), t) + ASSERT_EQ(time.Unix(0, 0).UTC(), mustTime(slice.GetUTCDate()), t) +} + +func TestSliceUTCDate2(t *testing.T) { + msec := 1234567 + slice := velocypack.Slice{0x1c, 0, 0, 0, 0, 0, 0, 0, 0} + binary.LittleEndian.PutUint64(slice[1:], uint64(msec)) + assertEqualFromReader(t, slice) + + ASSERT_EQ(velocypack.UTCDate, slice.Type(), t) + ASSERT_TRUE(slice.IsUTCDate(), t) + ASSERT_EQ(velocypack.ValueLength(9), mustLength(slice.ByteSize()), t) + ASSERT_EQ(time.Unix(0, 0).UTC().Add(time.Millisecond*time.Duration(msec)), mustTime(slice.GetUTCDate()), t) +} diff --git a/deps/github.com/arangodb/go-velocypack/test/util.go b/deps/github.com/arangodb/go-velocypack/test/util.go new file mode 100644 index 000000000..54ef5a378 --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/test/util.go @@ -0,0 +1,29 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package test + +import "unsafe" + +func staticCastInt64(ux uint64) int64 { + return *(*int64)(unsafe.Pointer(&ux)) +} diff --git a/deps/github.com/arangodb/go-velocypack/tools/dump.go b/deps/github.com/arangodb/go-velocypack/tools/dump.go new file mode 100644 index 000000000..a45212c33 --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/tools/dump.go @@ -0,0 +1,28 @@ +package main + +import ( + "encoding/hex" + "flag" + "fmt" + "log" + "strings" + + velocypack "github.com/arangodb/go-velocypack" +) + +func main() { + flag.Parse() + args := flag.Args() + if len(args) == 0 { + log.Fatalln("Usage: dump ") + } + slice, err := hex.DecodeString(strings.TrimSpace(args[0])) + if err != nil { + log.Fatalf("Failed to decode hex slice: %#v\n", err) + } + json, err := velocypack.Slice(slice).JSONString() + if err != nil { + log.Fatalf("Failed to convert slice: %#v\n", err) + } + fmt.Println(json) +} diff --git a/deps/github.com/arangodb/go-velocypack/util.go b/deps/github.com/arangodb/go-velocypack/util.go new file mode 100644 index 000000000..b3dcbc1fa --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/util.go @@ -0,0 +1,202 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package velocypack + +import ( + "fmt" + "io" +) + +// vpackAssert panics if v is false. +func vpackAssert(v bool) { + if !v { + panic("VELOCYPACK_ASSERT failed") + } +} + +// readBytes reads bytes from the given reader until the given slice is full. +func readBytes(dst []byte, r io.Reader) error { + offset := 0 + l := len(dst) + for { + n, err := r.Read(dst[offset:]) + offset += n + l -= n + if l == 0 { + // We're done + return nil + } + if err != nil { + return WithStack(err) + } + } +} + +// read an unsigned little endian integer value of the +// specified length, starting at the specified byte offset +func readIntegerFixed(start []byte, length uint) uint64 { + return readIntegerNonEmpty(start, length) +} + +// read an unsigned little endian integer value of the +// specified length, starting at the specified byte offset +func readIntegerFixedFromReader(r io.Reader, length uint) (uint64, []byte, error) { + buf := make([]byte, length) + if err := readBytes(buf, r); err != nil { + return 0, nil, WithStack(err) + } + return readIntegerFixed(buf, length), buf, nil +} + +// read an unsigned little endian integer value of the +// specified length, starting at the specified byte offset +func readIntegerNonEmpty(s []byte, length uint) uint64 { + x := uint(0) + v := uint64(0) + for i := uint(0); i < length; i++ { + v += uint64(s[i]) << x + x += 8 + } + return v +} + +// read an unsigned little endian integer value of the +// specified length, starting at the specified byte offset +func readIntegerNonEmptyFromReader(r io.Reader, length uint) (uint64, []byte, error) { + buf := make([]byte, length) + if err := readBytes(buf, r); err != nil { + return 0, nil, WithStack(err) + } + return readIntegerNonEmpty(buf, length), buf, nil +} + +func toInt64(v uint64) int64 { + shift2 := uint64(1) << 63 + shift := int64(shift2 - 1) + if v >= shift2 { + return (int64(v-shift2) - shift) - 1 + 
} else { + return int64(v) + } +} + +func toUInt64(v int64) uint64 { + // If v is negative, we need to add 2^63 to make it positive, + // before we can cast it to an uint64_t: + if v >= 0 { + return uint64(v) + } + shift2 := uint64(1) << 63 + shift := int64(shift2 - 1) + return uint64((v+shift)+1) + shift2 + // return v >= 0 ? static_cast(v) + // : static_cast((v + shift) + 1) + shift2; + // Note that g++ and clang++ with -O3 compile this away to + // nothing. Further note that a plain cast from int64_t to + // uint64_t is not guaranteed to work for negative values! +} + +// read a variable length integer in unsigned LEB128 format +func readVariableValueLength(source []byte, offset ValueLength, reverse bool) ValueLength { + length := ValueLength(0) + p := uint(0) + for { + v := ValueLength(source[offset]) + length += (v & 0x7f) << p + p += 7 + if reverse { + offset-- + } else { + offset++ + } + if v&0x80 == 0 { + break + } + } + return length +} + +// read a variable length integer in unsigned LEB128 format +func readVariableValueLengthFromReader(r io.Reader, reverse bool) (ValueLength, []byte, error) { + if reverse { + return 0, nil, WithStack(fmt.Errorf("reverse is not supported")) + } + length := ValueLength(0) + p := uint(0) + buf := make([]byte, 1) + bytes := make([]byte, 0, 8) + for { + if n, err := r.Read(buf); n != 1 { + if err != nil { + return 0, nil, WithStack(err) + } else { + return 0, nil, WithStack(fmt.Errorf("failed to read 1 byte")) + } + } + bytes = append(bytes, buf[0]) + v := ValueLength(buf[0]) + length += (v & 0x7f) << p + p += 7 + if v&0x80 == 0 { + break + } + } + return length, bytes, nil +} + +// store a variable length integer in unsigned LEB128 format +func storeVariableValueLength(dst []byte, offset, value ValueLength, reverse bool) { + vpackAssert(value > 0) + + idx := offset + if reverse { + for value >= 0x80 { + dst[idx] = byte(value | 0x80) + idx-- + value >>= 7 + } + dst[idx] = byte(value & 0x7f) + } else { + for value >= 0x80 { + 
dst[idx] = byte(value | 0x80) + idx++ + value >>= 7 + } + dst[idx] = byte(value & 0x7f) + } +} + +// optionalBool returns the first arg element if available, otherwise returns defaultValue. +func optionalBool(arg []bool, defaultValue bool) bool { + if len(arg) == 0 { + return defaultValue + } + return arg[0] +} + +// alignAt returns the first number >= value that is aligned at the given alignment. +// alignment must be a power of 2. +func alignAt(value, alignment uint) uint { + mask := ^(alignment - 1) + return (value + alignment - 1) & mask +} diff --git a/deps/github.com/arangodb/go-velocypack/util_test.go b/deps/github.com/arangodb/go-velocypack/util_test.go new file mode 100644 index 000000000..e4266b891 --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/util_test.go @@ -0,0 +1,44 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package velocypack + +import "testing" + +func TestAlignAt(t *testing.T) { + tests := []struct { + Value uint + Alignment uint + Expected uint + }{ + {10, 16, 16}, + {16, 16, 16}, + {2345, 4096, 4096}, + {7000, 4096, 4096 * 2}, + } + for _, test := range tests { + result := alignAt(test.Value, test.Alignment) + if result != test.Expected { + t.Errorf("alignAt(%d, %d) failed. 
Expected %d, got %d", test.Value, test.Alignment, test.Expected, result) + } + } +} diff --git a/deps/github.com/arangodb/go-velocypack/value.go b/deps/github.com/arangodb/go-velocypack/value.go new file mode 100644 index 000000000..6cb748a0c --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/value.go @@ -0,0 +1,199 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package velocypack + +import ( + "reflect" + "time" +) + +// Value is a helper structure used to build VPack structures. +// It holds a single data value with a specific type. +type Value struct { + vt ValueType + data interface{} + unindexed bool +} + +// NewValue creates a new Value with type derived from Go type of given value. +// If the given value is not a supported type, a Value of type Illegal is returned. +func NewValue(value interface{}) Value { + v := reflect.ValueOf(value) + return NewReflectValue(v) +} + +// NewReflectValue creates a new Value with type derived from Go type of given reflect value. +// If the given value is not a supported type, a Value of type Illegal is returned. 
+func NewReflectValue(v reflect.Value) Value { + vt := v.Type() + switch vt.Kind() { + case reflect.Bool: + return NewBoolValue(v.Bool()) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return NewIntValue(v.Int()) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return NewUIntValue(v.Uint()) + case reflect.Float32, reflect.Float64: + return NewDoubleValue(v.Float()) + case reflect.String: + return NewStringValue(v.String()) + case reflect.Slice: + if vt.Elem().Kind() == reflect.Uint8 { + } + } + if v.CanInterface() { + raw := v.Interface() + if v, ok := raw.([]byte); ok { + return NewBinaryValue(v) + } + if v, ok := raw.(Slice); ok { + return NewSliceValue(v) + } + if v, ok := raw.(time.Time); ok { + return NewUTCDateValue(v) + } + if v, ok := raw.(Value); ok { + return v + } + } + return Value{Illegal, nil, false} +} + +// NewBoolValue creates a new Value of type Bool with given value. +func NewBoolValue(value bool) Value { + return Value{Bool, value, false} +} + +// NewIntValue creates a new Value of type Int with given value. +func NewIntValue(value int64) Value { + if value >= -6 && value <= 9 { + return Value{SmallInt, value, false} + } + return Value{Int, value, false} +} + +// NewUIntValue creates a new Value of type UInt with given value. +func NewUIntValue(value uint64) Value { + return Value{UInt, value, false} +} + +// NewDoubleValue creates a new Value of type Double with given value. +func NewDoubleValue(value float64) Value { + return Value{Double, value, false} +} + +// NewStringValue creates a new Value of type String with given value. +func NewStringValue(value string) Value { + return Value{String, value, false} +} + +// NewBinaryValue creates a new Value of type Binary with given value. +func NewBinaryValue(value []byte) Value { + return Value{Binary, value, false} +} + +// NewUTCDateValue creates a new Value of type UTCDate with given value. 
+func NewUTCDateValue(value time.Time) Value { + return Value{UTCDate, value, false} +} + +// NewSliceValue creates a new Value of from the given slice. +func NewSliceValue(value Slice) Value { + return Value{value.Type(), value, false} +} + +// NewObjectValue creates a new Value that opens a new object. +func NewObjectValue(unindexed ...bool) Value { + return Value{Object, nil, optionalBool(unindexed, false)} +} + +// NewArrayValue creates a new Value that opens a new array. +func NewArrayValue(unindexed ...bool) Value { + return Value{Array, nil, optionalBool(unindexed, false)} +} + +// NewNullValue creates a new Value of type Null. +func NewNullValue() Value { + return Value{Null, nil, false} +} + +// NewMinKeyValue creates a new Value of type MinKey. +func NewMinKeyValue() Value { + return Value{MinKey, nil, false} +} + +// NewMaxKeyValue creates a new Value of type MaxKey. +func NewMaxKeyValue() Value { + return Value{MaxKey, nil, false} +} + +// Type returns the ValueType of this value. +func (v Value) Type() ValueType { + return v.vt +} + +// IsSlice returns true when the value already contains a slice. +func (v Value) IsSlice() bool { + _, ok := v.data.(Slice) + return ok +} + +// IsIllegal returns true if the type of value is Illegal. 
+func (v Value) IsIllegal() bool { + return v.vt == Illegal +} + +func (v Value) boolValue() bool { + return v.data.(bool) +} + +func (v Value) intValue() int64 { + return v.data.(int64) +} + +func (v Value) uintValue() uint64 { + return v.data.(uint64) +} + +func (v Value) doubleValue() float64 { + return v.data.(float64) +} + +func (v Value) stringValue() string { + return v.data.(string) +} + +func (v Value) binaryValue() []byte { + return v.data.([]byte) +} + +func (v Value) utcDateValue() int64 { + time := v.data.(time.Time) + sec := time.Unix() + nsec := int64(time.Nanosecond()) + return sec*1000 + nsec/1000000 +} + +func (v Value) sliceValue() Slice { + return v.data.(Slice) +} diff --git a/deps/github.com/arangodb/go-velocypack/value_length.go b/deps/github.com/arangodb/go-velocypack/value_length.go new file mode 100644 index 000000000..dd17edd61 --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/value_length.go @@ -0,0 +1,53 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package velocypack + +import ( + "fmt" + "strconv" +) + +type ValueLength uint64 + +func (s ValueLength) String() string { + return strconv.FormatInt(int64(s), 10) +} + +// getVariableValueLength calculates the length of a variable length integer in unsigned LEB128 format +func getVariableValueLength(value ValueLength) ValueLength { + l := ValueLength(1) + for value >= 0x80 { + value >>= 7 + l++ + } + return l +} + +// check if the length is beyond the size of a SIZE_MAX on this platform +func checkOverflow(length ValueLength) error { + if length < 0 { + return fmt.Errorf("Negative length") + } + // TODO + return nil +} diff --git a/deps/github.com/arangodb/go-velocypack/value_type.go b/deps/github.com/arangodb/go-velocypack/value_type.go new file mode 100644 index 000000000..ac2b846c1 --- /dev/null +++ b/deps/github.com/arangodb/go-velocypack/value_type.go @@ -0,0 +1,382 @@ +// +// DISCLAIMER +// +// Copyright 2017 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package velocypack + +type ValueType int + +const ( + None ValueType = iota // not yet initialized + Illegal // illegal value + Null // JSON null + Bool + Array + Object + Double + UTCDate + External + MinKey + MaxKey + Int + UInt + SmallInt + String + Binary + BCD + Custom +) + +// String returns a string representation of the given type. +func (vt ValueType) String() string { + return typeNames[vt] +} + +var typeNames = [...]string{ + "None", + "Illegal", + "Null", + "Bool", + "Array", + "Object", + "Double", + "UTCDate", + "External", + "MinKey", + "MaxKey", + "Int", + "UInt", + "SmallInt", + "String", + "Binary", + "BCD", + "Custom", +} + +var typeMap = [256]ValueType{ + /* 0x00 */ None /* 0x01 */, Array, + /* 0x02 */ Array /* 0x03 */, Array, + /* 0x04 */ Array /* 0x05 */, Array, + /* 0x06 */ Array /* 0x07 */, Array, + /* 0x08 */ Array /* 0x09 */, Array, + /* 0x0a */ Object /* 0x0b */, Object, + /* 0x0c */ Object /* 0x0d */, Object, + /* 0x0e */ Object /* 0x0f */, Object, + /* 0x10 */ Object /* 0x11 */, Object, + /* 0x12 */ Object /* 0x13 */, Array, + /* 0x14 */ Object /* 0x15 */, None, + /* 0x16 */ None /* 0x17 */, Illegal, + /* 0x18 */ Null /* 0x19 */, Bool, + /* 0x1a */ Bool /* 0x1b */, Double, + /* 0x1c */ UTCDate /* 0x1d */, External, + /* 0x1e */ MinKey /* 0x1f */, MaxKey, + /* 0x20 */ Int /* 0x21 */, Int, + /* 0x22 */ Int /* 0x23 */, Int, + /* 0x24 */ Int /* 0x25 */, Int, + /* 0x26 */ Int /* 0x27 */, Int, + /* 0x28 */ UInt /* 0x29 */, UInt, + /* 0x2a */ UInt /* 0x2b */, UInt, + /* 0x2c */ UInt /* 0x2d */, UInt, + /* 0x2e */ UInt /* 0x2f */, UInt, + /* 0x30 */ SmallInt /* 0x31 */, SmallInt, + /* 0x32 */ SmallInt /* 0x33 */, SmallInt, + /* 0x34 */ SmallInt /* 0x35 */, SmallInt, + /* 0x36 */ SmallInt /* 0x37 */, SmallInt, + /* 0x38 */ SmallInt /* 0x39 */, SmallInt, + /* 0x3a */ SmallInt /* 0x3b */, SmallInt, + /* 0x3c */ SmallInt /* 0x3d */, SmallInt, + /* 0x3e 
*/ SmallInt /* 0x3f */, SmallInt, + /* 0x40 */ String /* 0x41 */, String, + /* 0x42 */ String /* 0x43 */, String, + /* 0x44 */ String /* 0x45 */, String, + /* 0x46 */ String /* 0x47 */, String, + /* 0x48 */ String /* 0x49 */, String, + /* 0x4a */ String /* 0x4b */, String, + /* 0x4c */ String /* 0x4d */, String, + /* 0x4e */ String /* 0x4f */, String, + /* 0x50 */ String /* 0x51 */, String, + /* 0x52 */ String /* 0x53 */, String, + /* 0x54 */ String /* 0x55 */, String, + /* 0x56 */ String /* 0x57 */, String, + /* 0x58 */ String /* 0x59 */, String, + /* 0x5a */ String /* 0x5b */, String, + /* 0x5c */ String /* 0x5d */, String, + /* 0x5e */ String /* 0x5f */, String, + /* 0x60 */ String /* 0x61 */, String, + /* 0x62 */ String /* 0x63 */, String, + /* 0x64 */ String /* 0x65 */, String, + /* 0x66 */ String /* 0x67 */, String, + /* 0x68 */ String /* 0x69 */, String, + /* 0x6a */ String /* 0x6b */, String, + /* 0x6c */ String /* 0x6d */, String, + /* 0x6e */ String /* 0x6f */, String, + /* 0x70 */ String /* 0x71 */, String, + /* 0x72 */ String /* 0x73 */, String, + /* 0x74 */ String /* 0x75 */, String, + /* 0x76 */ String /* 0x77 */, String, + /* 0x78 */ String /* 0x79 */, String, + /* 0x7a */ String /* 0x7b */, String, + /* 0x7c */ String /* 0x7d */, String, + /* 0x7e */ String /* 0x7f */, String, + /* 0x80 */ String /* 0x81 */, String, + /* 0x82 */ String /* 0x83 */, String, + /* 0x84 */ String /* 0x85 */, String, + /* 0x86 */ String /* 0x87 */, String, + /* 0x88 */ String /* 0x89 */, String, + /* 0x8a */ String /* 0x8b */, String, + /* 0x8c */ String /* 0x8d */, String, + /* 0x8e */ String /* 0x8f */, String, + /* 0x90 */ String /* 0x91 */, String, + /* 0x92 */ String /* 0x93 */, String, + /* 0x94 */ String /* 0x95 */, String, + /* 0x96 */ String /* 0x97 */, String, + /* 0x98 */ String /* 0x99 */, String, + /* 0x9a */ String /* 0x9b */, String, + /* 0x9c */ String /* 0x9d */, String, + /* 0x9e */ String /* 0x9f */, String, + /* 0xa0 */ String /* 0xa1 */, String, + /* 
0xa2 */ String /* 0xa3 */, String, + /* 0xa4 */ String /* 0xa5 */, String, + /* 0xa6 */ String /* 0xa7 */, String, + /* 0xa8 */ String /* 0xa9 */, String, + /* 0xaa */ String /* 0xab */, String, + /* 0xac */ String /* 0xad */, String, + /* 0xae */ String /* 0xaf */, String, + /* 0xb0 */ String /* 0xb1 */, String, + /* 0xb2 */ String /* 0xb3 */, String, + /* 0xb4 */ String /* 0xb5 */, String, + /* 0xb6 */ String /* 0xb7 */, String, + /* 0xb8 */ String /* 0xb9 */, String, + /* 0xba */ String /* 0xbb */, String, + /* 0xbc */ String /* 0xbd */, String, + /* 0xbe */ String /* 0xbf */, String, + /* 0xc0 */ Binary /* 0xc1 */, Binary, + /* 0xc2 */ Binary /* 0xc3 */, Binary, + /* 0xc4 */ Binary /* 0xc5 */, Binary, + /* 0xc6 */ Binary /* 0xc7 */, Binary, + /* 0xc8 */ BCD /* 0xc9 */, BCD, + /* 0xca */ BCD /* 0xcb */, BCD, + /* 0xcc */ BCD /* 0xcd */, BCD, + /* 0xce */ BCD /* 0xcf */, BCD, + /* 0xd0 */ BCD /* 0xd1 */, BCD, + /* 0xd2 */ BCD /* 0xd3 */, BCD, + /* 0xd4 */ BCD /* 0xd5 */, BCD, + /* 0xd6 */ BCD /* 0xd7 */, BCD, + /* 0xd8 */ None /* 0xd9 */, None, + /* 0xda */ None /* 0xdb */, None, + /* 0xdc */ None /* 0xdd */, None, + /* 0xde */ None /* 0xdf */, None, + /* 0xe0 */ None /* 0xe1 */, None, + /* 0xe2 */ None /* 0xe3 */, None, + /* 0xe4 */ None /* 0xe5 */, None, + /* 0xe6 */ None /* 0xe7 */, None, + /* 0xe8 */ None /* 0xe9 */, None, + /* 0xea */ None /* 0xeb */, None, + /* 0xec */ None /* 0xed */, None, + /* 0xee */ None /* 0xef */, None, + /* 0xf0 */ Custom /* 0xf1 */, Custom, + /* 0xf2 */ Custom /* 0xf3 */, Custom, + /* 0xf4 */ Custom /* 0xf5 */, Custom, + /* 0xf6 */ Custom /* 0xf7 */, Custom, + /* 0xf8 */ Custom /* 0xf9 */, Custom, + /* 0xfa */ Custom /* 0xfb */, Custom, + /* 0xfc */ Custom /* 0xfd */, Custom, + /* 0xfe */ Custom /* 0xff */, Custom} + +const ( + doubleLength = 8 + int64Length = 8 + charPtrLength = 8 +) + +var fixedTypeLengths = [256]int{ + /* 0x00 */ 1 /* 0x01 */, 1, + /* 0x02 */ 0 /* 0x03 */, 0, + /* 0x04 */ 0 /* 0x05 */, 0, + /* 0x06 */ 0 /* 0x07 
*/, 0, + /* 0x08 */ 0 /* 0x09 */, 0, + /* 0x0a */ 1 /* 0x0b */, 0, + /* 0x0c */ 0 /* 0x0d */, 0, + /* 0x0e */ 0 /* 0x0f */, 0, + /* 0x10 */ 0 /* 0x11 */, 0, + /* 0x12 */ 0 /* 0x13 */, 0, + /* 0x14 */ 0 /* 0x15 */, 0, + /* 0x16 */ 0 /* 0x17 */, 1, + /* 0x18 */ 1 /* 0x19 */, 1, + /* 0x1a */ 1 /* 0x1b */, 1 + doubleLength, /*sizeof(double)*/ + /* 0x1c */ 1 + int64Length /*sizeof(int64_t)*/ /* 0x1d */, 1 + charPtrLength, /* sizeof(char*)*/ + /* 0x1e */ 1 /* 0x1f */, 1, + /* 0x20 */ 2 /* 0x21 */, 3, + /* 0x22 */ 4 /* 0x23 */, 5, + /* 0x24 */ 6 /* 0x25 */, 7, + /* 0x26 */ 8 /* 0x27 */, 9, + /* 0x28 */ 2 /* 0x29 */, 3, + /* 0x2a */ 4 /* 0x2b */, 5, + /* 0x2c */ 6 /* 0x2d */, 7, + /* 0x2e */ 8 /* 0x2f */, 9, + /* 0x30 */ 1 /* 0x31 */, 1, + /* 0x32 */ 1 /* 0x33 */, 1, + /* 0x34 */ 1 /* 0x35 */, 1, + /* 0x36 */ 1 /* 0x37 */, 1, + /* 0x38 */ 1 /* 0x39 */, 1, + /* 0x3a */ 1 /* 0x3b */, 1, + /* 0x3c */ 1 /* 0x3d */, 1, + /* 0x3e */ 1 /* 0x3f */, 1, + /* 0x40 */ 1 /* 0x41 */, 2, + /* 0x42 */ 3 /* 0x43 */, 4, + /* 0x44 */ 5 /* 0x45 */, 6, + /* 0x46 */ 7 /* 0x47 */, 8, + /* 0x48 */ 9 /* 0x49 */, 10, + /* 0x4a */ 11 /* 0x4b */, 12, + /* 0x4c */ 13 /* 0x4d */, 14, + /* 0x4e */ 15 /* 0x4f */, 16, + /* 0x50 */ 17 /* 0x51 */, 18, + /* 0x52 */ 19 /* 0x53 */, 20, + /* 0x54 */ 21 /* 0x55 */, 22, + /* 0x56 */ 23 /* 0x57 */, 24, + /* 0x58 */ 25 /* 0x59 */, 26, + /* 0x5a */ 27 /* 0x5b */, 28, + /* 0x5c */ 29 /* 0x5d */, 30, + /* 0x5e */ 31 /* 0x5f */, 32, + /* 0x60 */ 33 /* 0x61 */, 34, + /* 0x62 */ 35 /* 0x63 */, 36, + /* 0x64 */ 37 /* 0x65 */, 38, + /* 0x66 */ 39 /* 0x67 */, 40, + /* 0x68 */ 41 /* 0x69 */, 42, + /* 0x6a */ 43 /* 0x6b */, 44, + /* 0x6c */ 45 /* 0x6d */, 46, + /* 0x6e */ 47 /* 0x6f */, 48, + /* 0x70 */ 49 /* 0x71 */, 50, + /* 0x72 */ 51 /* 0x73 */, 52, + /* 0x74 */ 53 /* 0x75 */, 54, + /* 0x76 */ 55 /* 0x77 */, 56, + /* 0x78 */ 57 /* 0x79 */, 58, + /* 0x7a */ 59 /* 0x7b */, 60, + /* 0x7c */ 61 /* 0x7d */, 62, + /* 0x7e */ 63 /* 0x7f */, 64, + /* 0x80 */ 65 /* 0x81 */, 66, + 
/* 0x82 */ 67 /* 0x83 */, 68, + /* 0x84 */ 69 /* 0x85 */, 70, + /* 0x86 */ 71 /* 0x87 */, 72, + /* 0x88 */ 73 /* 0x89 */, 74, + /* 0x8a */ 75 /* 0x8b */, 76, + /* 0x8c */ 77 /* 0x8d */, 78, + /* 0x8e */ 79 /* 0x8f */, 80, + /* 0x90 */ 81 /* 0x91 */, 82, + /* 0x92 */ 83 /* 0x93 */, 84, + /* 0x94 */ 85 /* 0x95 */, 86, + /* 0x96 */ 87 /* 0x97 */, 88, + /* 0x98 */ 89 /* 0x99 */, 90, + /* 0x9a */ 91 /* 0x9b */, 92, + /* 0x9c */ 93 /* 0x9d */, 94, + /* 0x9e */ 95 /* 0x9f */, 96, + /* 0xa0 */ 97 /* 0xa1 */, 98, + /* 0xa2 */ 99 /* 0xa3 */, 100, + /* 0xa4 */ 101 /* 0xa5 */, 102, + /* 0xa6 */ 103 /* 0xa7 */, 104, + /* 0xa8 */ 105 /* 0xa9 */, 106, + /* 0xaa */ 107 /* 0xab */, 108, + /* 0xac */ 109 /* 0xad */, 110, + /* 0xae */ 111 /* 0xaf */, 112, + /* 0xb0 */ 113 /* 0xb1 */, 114, + /* 0xb2 */ 115 /* 0xb3 */, 116, + /* 0xb4 */ 117 /* 0xb5 */, 118, + /* 0xb6 */ 119 /* 0xb7 */, 120, + /* 0xb8 */ 121 /* 0xb9 */, 122, + /* 0xba */ 123 /* 0xbb */, 124, + /* 0xbc */ 125 /* 0xbd */, 126, + /* 0xbe */ 127 /* 0xbf */, 0, + /* 0xc0 */ 0 /* 0xc1 */, 0, + /* 0xc2 */ 0 /* 0xc3 */, 0, + /* 0xc4 */ 0 /* 0xc5 */, 0, + /* 0xc6 */ 0 /* 0xc7 */, 0, + /* 0xc8 */ 0 /* 0xc9 */, 0, + /* 0xca */ 0 /* 0xcb */, 0, + /* 0xcc */ 0 /* 0xcd */, 0, + /* 0xce */ 0 /* 0xcf */, 0, + /* 0xd0 */ 0 /* 0xd1 */, 0, + /* 0xd2 */ 0 /* 0xd3 */, 0, + /* 0xd4 */ 0 /* 0xd5 */, 0, + /* 0xd6 */ 0 /* 0xd7 */, 0, + /* 0xd8 */ 0 /* 0xd9 */, 0, + /* 0xda */ 0 /* 0xdb */, 0, + /* 0xdc */ 0 /* 0xdd */, 0, + /* 0xde */ 0 /* 0xdf */, 0, + /* 0xe0 */ 0 /* 0xe1 */, 0, + /* 0xe2 */ 0 /* 0xe3 */, 0, + /* 0xe4 */ 0 /* 0xe5 */, 0, + /* 0xe6 */ 0 /* 0xe7 */, 0, + /* 0xe8 */ 0 /* 0xe9 */, 0, + /* 0xea */ 0 /* 0xeb */, 0, + /* 0xec */ 0 /* 0xed */, 0, + /* 0xee */ 0 /* 0xef */, 0, + /* 0xf0 */ 2 /* 0xf1 */, 3, + /* 0xf2 */ 5 /* 0xf3 */, 9, + /* 0xf4 */ 0 /* 0xf5 */, 0, + /* 0xf6 */ 0 /* 0xf7 */, 0, + /* 0xf8 */ 0 /* 0xf9 */, 0, + /* 0xfa */ 0 /* 0xfb */, 0, + /* 0xfc */ 0 /* 0xfd */, 0, + /* 0xfe */ 0 /* 0xff */, 0} + +var widthMap = 
[32]uint{ + 0, // 0x00, None + 1, // 0x01, empty array + 1, // 0x02, array without index table + 2, // 0x03, array without index table + 4, // 0x04, array without index table + 8, // 0x05, array without index table + 1, // 0x06, array with index table + 2, // 0x07, array with index table + 4, // 0x08, array with index table + 8, // 0x09, array with index table + 1, // 0x0a, empty object + 1, // 0x0b, object with sorted index table + 2, // 0x0c, object with sorted index table + 4, // 0x0d, object with sorted index table + 8, // 0x0e, object with sorted index table + 1, // 0x0f, object with unsorted index table + 2, // 0x10, object with unsorted index table + 4, // 0x11, object with unsorted index table + 8, // 0x12, object with unsorted index table + 0} + +var firstSubMap = [32]int{ + 0, // 0x00, None + 1, // 0x01, empty array + 2, // 0x02, array without index table + 3, // 0x03, array without index table + 5, // 0x04, array without index table + 9, // 0x05, array without index table + 3, // 0x06, array with index table + 5, // 0x07, array with index table + 9, // 0x08, array with index table + 9, // 0x09, array with index table + 1, // 0x0a, empty object + 3, // 0x0b, object with sorted index table + 5, // 0x0c, object with sorted index table + 9, // 0x0d, object with sorted index table + 9, // 0x0e, object with sorted index table + 3, // 0x0f, object with unsorted index table + 5, // 0x10, object with unsorted index table + 9, // 0x11, object with unsorted index table + 9, // 0x12, object with unsorted index table + 0} diff --git a/examples/setup-rbac.sh b/examples/setup-rbac.sh new file mode 100755 index 000000000..088796690 --- /dev/null +++ b/examples/setup-rbac.sh @@ -0,0 +1,104 @@ +#!/bin/bash + +function usage { + echo "$(basename "$0") - Create Kubernetes RBAC role and bindings for ArangoDB operator +Usage: $(basename "$0") [options...] 
+Options: + --role-name=STRING Name of ClusterRole to create + (default=\"arangodb-operator\", environment variable: ROLE_NAME) + --role-binding-name=STRING Name of ClusterRoleBinding to create + (default=\"arangodb-operator\", environment variable: ROLE_BINDING_NAME) + --namespace=STRING namespace to create role and role binding in. Must already exist. + (default=\"default\", environment variable: NAMESPACE) +" >&2 +} + +ROLE_NAME="${ROLE_NAME:-arangodb-operator}" +ROLE_BINDING_NAME="${ROLE_BINDING_NAME:-arangodb-operator}" +NAMESPACE="${NAMESPACE:-default}" + +function setupRole { + yaml=$(cat << EOYAML +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: ${ROLE_NAME} +rules: +- apiGroups: + - database.arangodb.com + resources: + - arangodeployments + verbs: + - "*" +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - "*" +- apiGroups: + - "" + resources: + - pods + - services + - endpoints + - persistentvolumeclaims + - events + - secrets + verbs: + - "*" +- apiGroups: + - apps + resources: + - deployments + verbs: + - "*" +EOYAML +) + echo "$yaml" | kubectl apply -f - +} + +function setupRoleBinding { + yaml=$(cat << EOYAML +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: ${ROLE_BINDING_NAME} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: ${ROLE_NAME} +subjects: +- kind: ServiceAccount + name: default + namespace: ${NAMESPACE} +EOYAML +) + echo "$yaml" | kubectl apply -f - +} + +for i in "$@" +do +case $i in + --role-name=*) + ROLE_NAME="${i#*=}" + ;; + --role-binding-name=*) + ROLE_BINDING_NAME="${i#*=}" + ;; + --namespace=*) + NAMESPACE="${i#*=}" + ;; + -h|--help) + usage + exit 0 + ;; + *) + usage + exit 1 + ;; +esac +done + +setupRole +setupRoleBinding diff --git a/pkg/deployment/secrets.go b/pkg/deployment/secrets.go index 49cb71323..47447a875 100644 --- a/pkg/deployment/secrets.go +++ b/pkg/deployment/secrets.go
@@ -25,8 +25,10 @@ package deployment import ( "fmt" - api "github.com/arangodb/k8s-operator/pkg/apis/arangodb/v1alpha" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + api "github.com/arangodb/k8s-operator/pkg/apis/arangodb/v1alpha" + "github.com/arangodb/k8s-operator/pkg/util/k8sutil" ) // getJWTSecret loads the JWT secret from a Secret configured in apiObject.Spec.Authentication.JWTSecretName. @@ -36,30 +38,24 @@ func (d *Deployment) getJWTSecret(apiObject *api.ArangoDeployment) (string, erro } kubecli := d.deps.KubeCli secretName := apiObject.Spec.Authentication.JWTSecretName - s, err := kubecli.CoreV1().Secrets(apiObject.GetNamespace()).Get(secretName, metav1.GetOptions{}) + s, err := k8sutil.GetJWTSecret(kubecli, secretName, apiObject.GetNamespace()) if err != nil { d.deps.Log.Debug().Err(err).Str("secret-name", secretName).Msg("Failed to get JWT secret") + return "", maskAny(err) } - // Take the first data - for _, v := range s.Data { - return string(v), nil - } - return "", maskAny(fmt.Errorf("No data found in secret '%s'", secretName)) + return s, nil } // getSyncJWTSecret loads the JWT secret used for syncmasters from a Secret configured in apiObject.Spec.Sync.Authentication.JWTSecretName. func (d *Deployment) getSyncJWTSecret(apiObject *api.ArangoDeployment) (string, error) { kubecli := d.deps.KubeCli secretName := apiObject.Spec.Sync.Authentication.JWTSecretName - s, err := kubecli.CoreV1().Secrets(apiObject.GetNamespace()).Get(secretName, metav1.GetOptions{}) + s, err := k8sutil.GetJWTSecret(kubecli, secretName, apiObject.GetNamespace()) if err != nil { d.deps.Log.Debug().Err(err).Str("secret-name", secretName).Msg("Failed to get sync JWT secret") + return "", maskAny(err) } - // Take the first data - for _, v := range s.Data { - return string(v), nil - } - return "", maskAny(fmt.Errorf("No data found in secret '%s'", secretName)) + return s, nil } // getSyncMonitoringToken loads the token secret used for monitoring sync masters & workers. 
diff --git a/pkg/util/arangod/client.go b/pkg/util/arangod/client.go new file mode 100644 index 000000000..c5b4cac19 --- /dev/null +++ b/pkg/util/arangod/client.go @@ -0,0 +1,89 @@ +// +// DISCLAIMER +// +// Copyright 2018 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package arangod + +import ( + "net" + nhttp "net/http" + "strconv" + "time" + + driver "github.com/arangodb/go-driver" + "github.com/arangodb/go-driver/http" + "k8s.io/client-go/kubernetes" + + api "github.com/arangodb/k8s-operator/pkg/apis/arangodb/v1alpha" + "github.com/arangodb/k8s-operator/pkg/util/k8sutil" +) + +var ( + sharedHTTPTransport = &nhttp.Transport{ + Proxy: nhttp.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).DialContext, + MaxIdleConns: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + } +) + +// CreateArangodClient creates a go-driver client for a specific member in the given group. 
+func CreateArangodClient(kubecli kubernetes.Interface, apiObject *api.ArangoDeployment, group api.ServerGroup, id string) (driver.Client, error) { + // Create connection + dnsName := k8sutil.CreatePodDNSName(apiObject, group.AsRole(), id) + scheme := "http" + connConfig := http.ConnectionConfig{ + Endpoints: []string{scheme + "://" + net.JoinHostPort(dnsName, strconv.Itoa(k8sutil.ArangoPort))}, + Transport: sharedHTTPTransport, + } + // TODO deal with TLS + conn, err := http.NewConnection(connConfig) + if err != nil { + return nil, maskAny(err) + } + + // Create client + config := driver.ClientConfig{ + Connection: conn, + } + if apiObject.Spec.IsAuthenticated() { + s, err := k8sutil.GetJWTSecret(kubecli, apiObject.Spec.Authentication.JWTSecretName, apiObject.GetNamespace()) + if err != nil { + return nil, maskAny(err) + } + jwt, err := CreateArangodJwtAuthorizationHeader(s) + if err != nil { + return nil, maskAny(err) + } + config.Authentication = driver.RawAuthentication(jwt) + } + c, err := driver.NewClient(config) + if err != nil { + return nil, maskAny(err) + } + return c, nil +} diff --git a/pkg/util/k8sutil/secrets.go b/pkg/util/k8sutil/secrets.go new file mode 100644 index 000000000..9642a0b91 --- /dev/null +++ b/pkg/util/k8sutil/secrets.go @@ -0,0 +1,43 @@ +// +// DISCLAIMER +// +// Copyright 2018 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package k8sutil + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" +) + +// GetJWTSecret loads the JWT secret from a Secret with given name. +func GetJWTSecret(kubecli kubernetes.Interface, secretName, namespace string) (string, error) { + s, err := kubecli.CoreV1().Secrets(namespace).Get(secretName, metav1.GetOptions{}) + if err != nil { + return "", maskAny(err) + } + // Take the first data + for _, v := range s.Data { + return string(v), nil + } + return "", maskAny(fmt.Errorf("No data found in secret '%s'", secretName)) +} diff --git a/scripts/kube_create_operator.sh b/scripts/kube_create_operator.sh new file mode 100755 index 000000000..ca5c54dcd --- /dev/null +++ b/scripts/kube_create_operator.sh @@ -0,0 +1,45 @@ +#!/bin/bash + +# Create the operator deployment with custom image option + +NS=$1 +IMAGE=$2 + +if [ -z $NS ]; then + echo "Specify a namespace argument" + exit 1 +fi +if [ -z $IMAGE ]; then + echo "Specify an image argument" + exit 1 +fi + +yaml=$(cat << EOYAML +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: arangodb-operator +spec: + replicas: 1 + template: + metadata: + labels: + name: arangodb-operator + spec: + containers: + - name: arangodb-operator + imagePullPolicy: IfNotPresent + image: ${IMAGE} + env: + - name: MY_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + +EOYAML +) +echo "$yaml" | kubectl --namespace=$NS create -f - \ No newline at end of file diff --git a/scripts/kube_delete_namespace.sh b/scripts/kube_delete_namespace.sh new file mode 100755 index 000000000..0836607ac --- /dev/null +++ b/scripts/kube_delete_namespace.sh @@ -0,0 +1,17 @@ +#!/bin/bash + +# Delete a namespace and wait until it is gone + +NS=$1 + +if [ -z $NS ]; then + echo "Specify a namespace argument" + 
exit 1 +fi + +kubectl delete namespace $NS --now --ignore-not-found +response=$(kubectl get namespace $NS --template="non-empty" --ignore-not-found) +while [ ! -z $response ]; do + sleep 1 + response=$(kubectl get namespace $NS --template="non-empty" --ignore-not-found) +done diff --git a/tests/predicates.go b/tests/predicates.go new file mode 100644 index 000000000..fc081a0be --- /dev/null +++ b/tests/predicates.go @@ -0,0 +1,34 @@ +// +// DISCLAIMER +// +// Copyright 2018 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package tests + +import ( + api "github.com/arangodb/k8s-operator/pkg/apis/arangodb/v1alpha" +) + +// deploymentHasState creates a predicate that returns true when the deployment has the given state. 
+func deploymentHasState(state api.DeploymentState) func(*api.ArangoDeployment) bool { + return func(obj *api.ArangoDeployment) bool { + return obj.Status.State == state + } +} diff --git a/tests/single_test.go b/tests/single_test.go new file mode 100644 index 000000000..93b84fe99 --- /dev/null +++ b/tests/single_test.go @@ -0,0 +1,31 @@ +package tests + +import ( + "testing" + + "github.com/dchest/uniuri" + + api "github.com/arangodb/k8s-operator/pkg/apis/arangodb/v1alpha" + "github.com/arangodb/k8s-operator/pkg/client" +) + +// TestSimpleSingle tests the creation of a single server deployment +// with default settings. +func TestSimpleSingle(t *testing.T) { + c := client.MustNewInCluster() + ns := getNamespace(t) + + // Prepare deployment config + depl := newDeployment("test-single-" + uniuri.NewLen(4)) + + // Create deployment + _, err := c.DatabaseV1alpha().ArangoDeployments(ns).Create(depl) + if err != nil { + t.Fatalf("Create deployment failed: %v", err) + } + + // Wait for deployment to be ready + if _, err := waitUntilDeployment(c, depl.GetName(), ns, deploymentHasState(api.DeploymentStateRunning)); err != nil { + t.Errorf("Deployment not running in time: %v", err) + } +} diff --git a/tests/test_util.go b/tests/test_util.go new file mode 100644 index 000000000..e42314f9e --- /dev/null +++ b/tests/test_util.go @@ -0,0 +1,91 @@ +// +// DISCLAIMER +// +// Copyright 2018 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// +// Author Ewout Prangsma +// + +package tests + +import ( + "fmt" + "os" + "strings" + "testing" + "time" + + "github.com/pkg/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + api "github.com/arangodb/k8s-operator/pkg/apis/arangodb/v1alpha" + "github.com/arangodb/k8s-operator/pkg/generated/clientset/versioned" + "github.com/arangodb/k8s-operator/pkg/util/retry" +) + +const ( + deploymentReadyTimeout = time.Minute * 2 +) + +var ( + maskAny = errors.WithStack +) + +// getNamespace returns the kubernetes namespace in which to run tests. +func getNamespace(t *testing.T) string { + ns := os.Getenv("TEST_NAMESPACE") + if ns == "" { + t.Fatal("Missing environment variable TEST_NAMESPACE") + } + return ns +} + +// newDeployment creates a basic ArangoDeployment with configured +// type & name. +func newDeployment(name string) *api.ArangoDeployment { + return &api.ArangoDeployment{ + TypeMeta: metav1.TypeMeta{ + APIVersion: api.SchemeGroupVersion.String(), + Kind: api.ArangoDeploymentResourceKind, + }, + ObjectMeta: metav1.ObjectMeta{ + Name: strings.ToLower(name), + }, + } +} + +// waitUntilDeployment waits until a deployment with given name in given namespace +// reached a state where the given predicate returns true. +func waitUntilDeployment(cli versioned.Interface, deploymentName, ns string, predicate func(*api.ArangoDeployment) bool) (*api.ArangoDeployment, error) { + var result *api.ArangoDeployment + op := func() error { + obj, err := cli.DatabaseV1alpha().ArangoDeployments(ns).Get(deploymentName, metav1.GetOptions{}) + if err != nil { + result = nil + return maskAny(err) + } + result = obj + if predicate(obj) { + return nil + } + return fmt.Errorf("Predicate returns false") + } + if err := retry.Retry(op, deploymentReadyTimeout); err != nil { + return nil, maskAny(err) + } + return result, nil +}