diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000..25cb946a --- /dev/null +++ b/.gitignore @@ -0,0 +1,6 @@ +bin/* +pkg/* +tags +coverage* +y.output +src/github.com/* diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 00000000..97e8cc66 --- /dev/null +++ b/.travis.yml @@ -0,0 +1,15 @@ +language: go +sudo: required +go: + - 1.8.x + +before_install: + +script: + - make build + - make test + - make coverage + +after_success: + # send coverage reports to Codecov + - bash <(curl -s https://codecov.io/bash) -f "!mock.go" diff --git a/README.md b/README.md index 96dad840..be30626e 100644 --- a/README.md +++ b/README.md @@ -1,3 +1,7 @@ +[![Build Status](https://travis-ci.org/radondb/radon.png)](https://travis-ci.org/radondb/radon) +[![Go Report Card](https://goreportcard.com/badge/github.com/radondb/radon)](https://goreportcard.com/report/github.com/radondb/radon) +[![codecov.io](https://codecov.io/gh/radondb/radon/graphs/badge.svg)](https://codecov.io/gh/radondb/radon/branch/master) + # OverView RadonDB is an open source, Cloud-native MySQL database for unlimited scalability and performance. @@ -23,7 +27,7 @@ large-capacity database、automatic plane split table、 scalable and strong con ## SQL Layer ### SQL surpported -On SQL syntax level, RadonDB Fully compatible with MySQL.You can view all of the SQL features RadonDB supports here [radon_sql_surported](radon_SQL_surpported.md) +On SQL syntax level, RadonDB Fully compatible with MySQL.You can view all of the SQL features RadonDB supports here [radon_sql_surported](docs/radon_sql_surpport.md) ### SQL parser, planner, excutor @@ -80,3 +84,12 @@ RadonDB achieves the level of SI (Snapshot Isolation) at the level of consistenc ``` Transaction with SQL Layer``` The SQL node is stateless, but in order to guarantee transaction `Snapshot Isolation`, it is currently a write-multiple-read mode. 
+ +## Issues + +The [integrated github issue tracker](https://github.com/radondb/radon/issues) +is used for this project. + +## License + +RadonDB is released under the GPLv3. See LICENSE diff --git a/conf/conf.json.sample b/conf/conf.json.sample new file mode 100644 index 00000000..a6a30bd8 --- /dev/null +++ b/conf/conf.json.sample @@ -0,0 +1,20 @@ +{ + "proxy": { + "endpoint": "{RADON-HOST}:{PORT}", + "meta-dir": "{RADON-META-DIR}", + "twopc-enable": false, + "peer-address": "{RADON-HOST:8080}" + }, + "binlog": { + "binlog-dir": "{RADON-BINLOG-DIR}", + "enable-binlog": true, + "enable-relay": true + }, + "audit": { + "mode": "N", + "audit-dir": "{AUDIT-LOG-DIR}" + }, + "log": { + "level": "INFO" + } +} diff --git a/conf/radon.default.json b/conf/radon.default.json new file mode 100644 index 00000000..ac10c66e --- /dev/null +++ b/conf/radon.default.json @@ -0,0 +1,15 @@ +{ + "proxy": { + "endpoint": ":3306", + "meta-dir": "bin/radon-meta" + }, + "binlog": { + "binlog-dir": "bin/radon-binlog" + }, + "audit": { + "audit-dir": "bin/radon-audit" + }, + "log": { + "level": "INFO" + } +} diff --git a/docs/api.md b/docs/api.md new file mode 100644 index 00000000..6a268a1a --- /dev/null +++ b/docs/api.md @@ -0,0 +1,871 @@ +[TOC] + +# API + +# Background + +This document describes the RadonDB REST API, which allows users to achieve most tasks on WebUI. 
+ +# radon + +## config + +``` +Path: /v1/radon/config +Method: PUT +Request: { + "max-connections": The maximum permitted number of simultaneous client connections, [required] + "max-result-size": The maximum result size(in bytes) of a query, [required] + "ddl-timeout": The execution timeout(in millisecond) for DDL statements, [required] + "query-timeout": The execution timeout(in millisecond) for DML statements, [required] + "twopc-enable": Enables(true or false) radon two phase commit, for distrubuted transaction, [required] + "allowip": ["allow-ip-1", "allow-ip-2"], [required] + "audit-mode": The audit log mode, "N": disabled, "R": read enabled, "W": write enabled, "A": read/write enabled, [required] + } + +``` +`Status:` +``` + 200: StatusOK + 405: StatusMethodNotAllowed + 500: StatusInternalServerError +``` +`Example: ` +``` +$ curl -i -H 'Content-Type: application/json' -X PUT -d '{"max-connections":1024, "max-result-size":1073741824, "ddl-timeout":3600, "query-timeout":600, "twopc-enable":true, "allowip": ["127.0.0.1", "127.0.0.2"]}' \ + http://127.0.0.1:8080/v1/radon/config + +---Response--- +HTTP/1.1 200 OK +Date: Mon, 09 Apr 2018 16:19:44 GMT +Content-Length: 0 +Content-Type: text/plain; charset=utf-8 +``` + +## readonly + +``` +Path: /v1/radon/readonly +Method: PUT +Request: { + "readonly": The value of the read-only(true) or not(false), [required] + } +``` +`Status:` +``` + 200: StatusOK + 405: StatusMethodNotAllowed + 500: StatusInternalServerError +``` +`Example: ` +``` +$ curl -i -H 'Content-Type: application/json' -X PUT -d '{"readonly":true}' \ + http://127.0.0.1:8080/v1/radon/readonly + +---Response--- +HTTP/1.1 200 OK +Date: Mon, 09 Apr 2018 16:28:40 GMT +Content-Length: 0 +Content-Type: text/plain; charset=utf-8 +``` + +## throttle + +``` +Path: /v1/radon/throttle +Method: PUT +Request: { + "limits": The max number of requests in a second, defaults 0, means no limits, [required] + } +``` +`Status:` +``` + 200: StatusOK + 405: 
StatusMethodNotAllowed + 500: StatusInternalServerError +``` +`Example:` +``` +$ curl -i -H 'Content-Type: application/json' -X PUT -d '{"limits":4800}' \ + http://127.0.0.1:8080/v1/radon/throttle + +---Response--- +HTTP/1.1 200 OK +Date: Mon, 09 Apr 2018 16:32:43 GMT +Content-Length: 0 +Content-Type: text/plain; charset=utf-8 +``` + +## status + +``` +Path: /v1/radon/status +Method: GET +Response:{ + readonly:true/false + } +``` +`Status:` +``` + 200: StatusOK + 405: StatusMethodNotAllowed +``` +`Example: ` +``` +$ curl http://127.0.0.1:8080/v1/radon/status + +---Response--- +{"readonly":true} +``` + +# shard + +## shardz + +This api used to get all shard tables from router. + +``` +Path: /v1/shard/shardz +Method: GET +``` +`Status:` +``` + 200: StatusOK + 405: StatusMethodNotAllowed + 503: StatusServiceUnavailable, backend(s) MySQL seems to be down. +``` +`Example: ` +``` +$ curl http://127.0.0.1:8080/v1/shard/shardz + +---Response--- +{"Schemas":[{"DB":"db_test1","Tables":[{"Name":"t2","ShardKey":"id","Partition":{"Segments":[{"Table":"t2_0000","Backend":"backend1","Range":{"Start":0,"End":128}},{"Table":"t2_0001","Backend":"backend1","Range":{"Start":128,"End":256}},{"Table":"t2_0002","Backend":"backend1","Range":{"Start":256,"End":384}}, + +...... + +{"Start":3584,"End":3712}},{"Table":"t1_0029","Backend":"backend1","Range":{"Start":3712,"End":3840}},{"Table":"t1_0030","Backend":"backend1","Range":{"Start":3840,"End":3968}},{"Table":"t1_0031","Backend":"backend1","Range":{"Start":3968,"End":4096}}]}}]}]} +``` + + +## balanceadvice + +This api used to get the best table(only one) which should be transferred from the max-backend to min-backend. + +``` +Path: /v1/shard/balanceadvice +Method: GET + +Response: [{ + "from-address": The from end address(host:port). + "from-datasize": The from end data size in MB. + "from-user": The from backend MySQL user. + "from-password": The from backend MySQL password. + "to-address": The to end address(host:port). 
+ "to-datasize": The to end data size in MB. + "from-user": The to backend MySQL user. + "from-password": The to backend MySQL password. + "database": The transfered table database. + "table": The transfered table name. + "tablesize": The transfered table size. + }] + +``` +`Status:` +``` + 200: StatusOK + 405: StatusMethodNotAllowed + 503: StatusServiceUnavailable, radon has no advice. + +Notes: +If response is NULL, means there is no advice. +``` +`Example:` +``` +$ curl http://127.0.0.1:8080/v1/shard/balanceadvice + +---Response--- +null +``` + + +## shift + +This api used to change the partition backend from one to another. + +``` +Path: /v1/shard/shift +Method: POST +Request: { + "database": "database name", [required] + "table": "table name", [required] + "from-address": "the from backend address(host:port)", [required] + "to-address": "the to backend address(host:port)", [required] + } +``` +`Status:1` +``` + 200: StatusOK + 405: StatusMethodNotAllowed + 503: StatusServiceUnavailable, radon has no advice. +``` +`Example: ` +``` +$ url -i -H 'Content-Type: application/json' -X POST -d '{"database": "db_test1", "table": "t1", "from-address": "127.0.0.1:3306", "to-address": "127.0.0.1:3306"} \ + http://127.0.0.1:8080/v1/shard/shift +``` + + +## reload + +This api used to re-load the router info from metadir. + +``` +Path: /v1/shard/reload +Method: POST +Request: NIL +``` +`Status:` +``` + 200: StatusOK + 405: StatusMethodNotAllowed + 503: StatusServiceUnavailable, radon has no advice. +``` +`Example: ` +``` +$ curl -i -H 'Content-Type: application/json' -X POST http://127.0.0.1:8080/v1/shard/reload + +---Response--- +HTTP/1.1 200 OK +Date: Tue, 10 Apr 2018 02:07:15 GMT +Content-Length: 0 +Content-Type: text/plain; charset=utf-8 +``` + +# backend + +## health + +This api can perform a backend health check by sending the PING(select 1) command to backends. 
+ +``` +Path: /v1/radon/ping +Method: GET +``` +`Status:` +``` + 200: StatusOK + 405: StatusMethodNotAllowed + 503: StatusServiceUnavailable, backend(s) MySQL seems to be down. +``` +`Example:` +``` +$ curl http://127.0.0.1:8080/v1/radon/ping +``` + +## backends + +This api used to add/delete a backend config. + +#### add + +``` +Path: /v1/radon/backend +Method: POST +Request: { + "name": "The unique name of this backend", [required] + "address": "The endpoint of this backend", [required] + "user": "The user(super) for radon to be able to connect to the backend MySQL server", [required] + "password": "The password of the user", [required] + "max-connections": The maximum permitted number of backend connection pool, [optional] + } +``` +`Status:` +``` + 200: StatusOK + 405: StatusMethodNotAllowed + 500: StatusInternalServerError +``` +`Example: ` +``` +$ curl -i -H 'Content-Type: application/json' -X POST -d '{"name": "backend1", "address": "127.0.0.1:3306", "user": "root", "password": "318831", "max-connections":1024}' \ + http://127.0.0.1:8080/v1/radon/backend + +---Response--- +HTTP/1.1 200 OK +Date: Tue, 10 Apr 2018 06:13:59 GMT +Content-Length: 0 +Content-Type: text/plain; charset=utf-8 +``` + +#### remove + +``` +Path: /v1/radon/backend/{backend-name} +Method: DELETE +``` +`Status:` +``` + 200: StatusOK + 405: StatusMethodNotAllowed + 500: StatusInternalServerError +``` +`Example: ` +``` +$ curl -X DELETE http://127.0.0.1:8080/v1/radon/backend/backend1 +``` + + +## backup + +This api used to add/delete a backup node config. 
+ +#### add + +``` +Path: /v1/radon/backup +Method: POST +Request: { + "name": "The unique name of this backup",[required] + "address": "The endpoint of this backup", [required] + "user": "The user(super) for radon to be able to connect to the backend MySQL server", [required] + "password": "The password of the user", [required] + "max-connections": The maximum permitted number of backend connection pool, [optional] + } +``` +`Example: ` +``` +$ curl -i -H 'Content-Type: application/json' -X POST -d '{"name": "backupnode", "address": "127.0.0.1:3306", "user": "root", "password": "318831", "max-connections":1024}' \ + http://127.0.0.1:8080/v1/radon/backup + +---Response--- +HTTP/1.1 200 OK +Date: Tue, 10 Apr 2018 06:05:22 GMT +Content-Length: 0 +Content-Type: text/plain; charset=utf-8 +``` +`Status:` +``` + 200: StatusOK + 405: StatusMethodNotAllowed + 500: StatusInternalServerError +``` + +#### remove + +``` +Path: /v1/radon/backup/{backup-name} +Method: DELETE +``` +`Status:` +``` + 200: StatusOK + 405: StatusMethodNotAllowed + 500: StatusInternalServerError +``` +`Example: ` +``` +$ curl -X DELETE http://127.0.0.1:8080/v1/radon/backup/backupnode +``` + +## meta + +The API used to do multi-proxy meta synchronization. 
+ +#### versions + +``` +Path: /v1/meta/versions +Method: GET +Response:{ + Ts int64 `json:"version"` + } +``` +`Status:` +``` + 200: StatusOK + 405: StatusMethodNotAllowed +``` +`Example: ` +``` +$ curl http://127.0.0.1:8080/v1/meta/versions + +---Response--- +{"version":1523328058632112022} +``` + + +#### versioncheck + +``` +Path: /v1/meta/versioncheck +Method: GET +Response:{ + "latest":true, + "peers":["127.0.0.1:8080"] + } +``` +`Status:` +``` + 200: StatusOK + 405: StatusMethodNotAllowed +``` +`Example:` +``` +$ curl http://127.0.0.1:8080/v1/meta/versioncheck + +---Response--- +{"latest":true,"peers":["127.0.0.1:8080"]} +``` + +#### metas + +``` +Path: /v1/meta/metas +Method: GET +Response:{ + Metas map[string]string `json:"metas"` + } + +``` +`Status:` +``` + 200: StatusOK + 405: StatusMethodNotAllowed + 500: StatusInternalServerError +``` +`Example: ` +``` +$ curl http://127.0.0.1:8080/v1/meta/metas + +---Response--- +{"metas":{"backend.json":"{\n\t\"backup\": null,\n\t\"backends\": null\n}","db_test1/t1.json":"{\n\t\"name\": \"t1\",\n\t\"shardtype\": \"HASH\",\n\t\"shardkey\": \"id\",\n\t\"partitions\": [\n\t\t{\n +..... +..... +t\t{\n\t\t\t\"table\": \"t2_0029\",\n\t\t\t\"segment\": \"3712-3840\",\n\t\t\t\"backend\": \"backend1\"\n\t\t},\n\t\t{\n\t\t\t\ +``` + +# debug + +## processlist +This api shows which threads are running. + +``` +Path: /v1/debug/processlist +Method: GET +Response: [{ + "id": The connection identifier. + "user": The radon user who issued the statement. + "host": The host name of the client issuing the statement. + "db": The default database. + "command": The type of command the thread is executing. + "time": The time in seconds that the thread has been in its current state. + "state": The state what the thread is doing. + "info": The statement the thread is executing. 
+ }] +``` +`Status:` +``` + 200: StatusOK + 405: StatusMethodNotAllowed +``` +`Example:` +``` +$ curl http://127.0.0.1:8080/v1/debug/processlist +---Response--- +[{"id":1,"user":"root","host":"127.0.0.1:40742","db":"","command":"Sleep","time":41263,"state":"","info":""}] +``` + +## txnz +This api shows which transactions are running. + +``` +Path: /v1/debug/txnz/:limit +Method: GET +Response: [{ + "txnid": The transaction identifier. + "start": The transaction start time. + "duration": The transatcion duration time. + "state": The statement the transaction is executing. + "xaid": The xa identifier if the twopc is enabled. + "sending": The backend numbers which the transaction fanout to. + }] +``` +`Example: ` +``` +$ curl http://127.0.0.1:8080/v1/debug/txnz/10 +---Response(now backend does nothing, return null)-- +null +``` +`Status:` +``` + 200: StatusOK + 405: StatusMethodNotAllowed +``` + +## queryz +This api shows which queries are running. + +``` +Path: /v1/debug/queryz/:limit +Method: GET +Response: [{ + "id": The connection ID. + "host": The backend address who is issuing this query. + "start": The query start time. + "duration": The query duration time. + "query": The query which is executing. + }] +``` +`Example:` +``` +$ curl http://127.0.0.1:8080/v1/debug/queryz/10 +---Response--- +null +``` +`Status:` +``` + 200: StatusOK + 405: StatusMethodNotAllowed +``` + +## configz +This api shows the config of RadonDB. 
+ +``` +Path: /v1/debug/configz +Method: GET +``` +`Status:` +``` + 200: StatusOK + 405: StatusMethodNotAllowed + 500: StatusInternalServerError +``` +`Example:` +``` +$ curl http://127.0.0.1:8080/v1/debug/configz + +---Response--- +{"proxy":{"allowip":["127.0.0.1","127.0.0.2"],"meta-dir":"bin/radon-meta","endpoint":":3306","twopc-enable":true,"max-connections":1024,"max-result-size":1073741824,"ddl-timeout":3600,"query-timeout":600,"peer-address":"127.0.0.1:8080","backup-default-engine":"TokuDB"},"audit":{"mode":"N","audit-dir":"bin/radon-audit","max-size":268435456,"expire-hours":1},"router":{"slots-readonly":4096,"blocks-readonly":128},"binlog":{"binlog-dir":"bin/radon-binlog","max-size":134217728,"relay-workers":32,"relay-wait-ms":5000,"enable-binlog":false,"enable-relay":false,"parallel-type":1},"log":{"level":"INFO"}} +``` + +## backendz +This api shows all the backends of RadonDB. + +``` +Path: /v1/debug/backendz +Method: GET +``` +`Status:` +``` + 200: StatusOK + 405: StatusMethodNotAllowed + 500: StatusInternalServerError +``` +`Example: ` +``` +$ curl http://127.0.0.1:8080/v1/debug/backendz + +---Response--- +[] +``` + + +## schemaz +This api shows all the schemas of RadonDB. + +``` +Path: /v1/debug/schemaz +Method: GET +``` +`Status:` +``` + 200: StatusOK + 405: StatusMethodNotAllowed + 500: StatusInternalServerError +``` +`Example: ` +``` +$ curl http://127.0.0.1:8080/v1/debug/schemaz + +---Response--- +{"db_test1":{"DB":"db_test1","Tables":{"t1":{"Name":"t1","ShardKey":"id","Partition":{"Segments":[{"Table":"t1_0000","Backend":"backend1","Range":{"Start":0,"End":128}},{"Table":"t1_0001","Backend":"backend1","Range":{"Start":128,"End":256}},{"Table":"t1_0002","Backend":"backend1","Range":{"Start":256,"End":384}}, +.... +.... 
+:"backend1","Range":{"Start":3712,"End":3840}},{"Table":"t2_0030","Backend":"backend1","Range":{"Start":3840,"End":3968}},{"Table":"t2_0031","Backend":"backend1","Range":{"Start":3968,"End":4096}}]}}}}} +``` + +# peers + +## add peer + +This api used to add a peer. + +``` +Path: /v1/peer/add +Method: POST +Request: { + "address": "The REST address of this peer", [required] + } +``` +`Status:` +``` + 200: StatusOK + 405: StatusMethodNotAllowed + 500: StatusInternalServerError +``` +`Example: ` +``` +$ curl -i -H 'Content-Type: application/json' -X POST -d '{"address": "127.0.0.1:8080"}' \ + http://127.0.0.1:8080/v1/peer/add + +---Response--- +HTTP/1.1 200 OK +Date: Tue, 10 Apr 2018 03:17:30 GMT +Content-Length: 0 +Content-Type: text/plain; charset=utf-8 +``` + + +## peerz + +This api used to show the peers of a group. + +``` +Path: /v1/peer/peerz +Method: GET +``` +`Status:` +``` + 200: StatusOK + 405: StatusMethodNotAllowed +``` +`Example:` +``` +$ curl http://127.0.0.1:8080/v1/peer/peerz + +---Response--- +["127.0.0.1:8080"] +``` + + +## remove peer + +This api used to removea peer. + +``` +Path: /v1/peer/remove +Method: POST +Request: { + "address": "The REST address of this peer", [required] + } +``` +`Status:` +``` + 200: StatusOK + 405: StatusMethodNotAllowed + 500: StatusInternalServerError +``` +`Example: ` +``` +$ curl -i -H 'Content-Type: application/json' -X POST -d '{"address": "127.0.0.1:8080"}' \ + http://127.0.0.1:8080/v1/peer/remove + +---Response--- +HTTP/1.1 200 OK +Date: Tue, 10 Apr 2018 03:21:09 GMT +Content-Length: 0 +Content-Type: text/plain; charset=utf-8 +``` + + + + +# users + +The normal users that can connect to radon with password. 
+ +## create user + +``` +Path: /v1/user/add +Method: POST +Request: { + "user": "user name", [required] + "password": "password", [required] + } +``` +`Status:` +``` + 200: StatusOK + 405: StatusMethodNotAllowed + 500: StatusInternalServerError +``` +`Example: ` +``` +---backend should not be null--- +$ curl -i -H 'Content-Type: application/json' -X POST -d '{"name": "backend1", "address": "127.0.0.1:3306", "user": "root", "password": "318831", "max-connections":1024}' \ + http://127.0.0.1:8080/v1/radon/backend + +HTTP/1.1 200 OK +Date: Tue, 10 Apr 2018 03:35:22 GMT +Content-Length: 0 +Content-Type: text/plain; charset=utf-8 + +$ curl -i -H 'Content-Type: application/json' -X POST -d '{"user": "test", "password": "test"}' \ + http://127.0.0.1:8080/v1/user/add + +HTTP/1.1 200 OK +Date: Tue, 10 Apr 2018 03:35:27 GMT +Content-Length: 0 +Content-Type: text/plain; charset=utf-8 +``` + + +## update user + +``` +Path: /v1/user/update +Method: POST +Request: { + "user": "user name", [required] + "password": "password", [required] + } +``` +`Status:` +``` + 200: StatusOK + 405: StatusMethodNotAllowed + 500: StatusInternalServerError +``` +`Example:` +``` +$ curl -i -H 'Content-Type: application/json' -X POST -d '{"user": "test", "password": "test"}' \ + http://127.0.0.1:8080/v1/user/update + +---Response--- +HTTP/1.1 200 OK +Date: Tue, 10 Apr 2018 03:39:31 GMT +Content-Length: 0 +Content-Type: text/plain; charset=utf-8 +``` + + +## drop user + +``` +Path: /v1/user/remove +Method: POST +Request: { + "user": "user name", [required] + } +``` +`Status:` +``` + 200: StatusOK + 405: StatusMethodNotAllowed + 500: StatusInternalServerError +``` +`Example:` +``` +$ curl -i -H 'Content-Type: application/json' -X POST -d '{"user": "test"}' \ + http://127.0.0.1:8080/v1/user/remove +---Response--- +HTTP/1.1 200 OK +Date: Tue, 10 Apr 2018 03:41:14 GMT +Content-Length: 0 +Content-Type: text/plain; charset=utf-8 +``` + + +# relay + +The relay to backup node. 
+ +## status + +``` +Path: /v1/relay/status +Method: GET +Response:{ + Status bool `json:"status"` + MaxWorkers int32 `json:"max-workers"` + ParallelWorkers int32 `json:"parallel-workers"` + SecondBehinds int64 `json:"second-behinds"` + OrderCommit bool `json:"order-commit"` + RelayBinlog string `json:"relay-binlog"` + RelayGTID int64 `json:"relay-gtid"` + Rates string `json:"rates"` +} +``` +`Status:` +``` + 200: StatusOK + 405: StatusMethodNotAllowed +``` + +`Example:` +``` +$ curl http://127.0.0.1:8080/v1/relay/status + +---Response--- +{"status":true,"max-workers":32,"parallel-workers":0,"second-behinds":0,"parallel-type":1,"relay-binlog":"","relay-gtid":0,"restart-gtid":0,"rates":"{\"All\":[0]}"} +``` + +## start + +start the relay worker. + +``` +Path: /v1/relay/start +Method: PUT +Request: nil +``` +`Status:` +``` + 200: StatusOK + 405: StatusMethodNotAllowed + 500: StatusInternalServerError +``` +`Example:` +``` +$ curl -i -H 'Content-Type: application/json' -X PUT http://127.0.0.1:8080/v1/relay/start + +---Response--- +HTTP/1.1 200 OK +Date: Tue, 10 Apr 2018 05:33:38 GMT +Content-Length: 0 +Content-Type: text/plain; charset=utf-8 +``` + + +## stop + +stop the relay worker. 
+ +``` +Path: /v1/relay/stop +Method: PUT +Request: nil +``` +`Status:` +``` + 200: StatusOK + 405: StatusMethodNotAllowed + 500: StatusInternalServerError +``` +`Example: ` +``` +$ curl -i -H 'Content-Type: application/json' -X PUT http://127.0.0.1:8080/v1/relay/stop + +HTTP/1.1 200 OK +Date: Tue, 10 Apr 2018 05:37:02 GMT +Content-Length: 0 +Content-Type: text/plain; charset=utf-8 + +$ curl http://127.0.0.1:8080/v1/relay/status + +{"status":false,"max-workers":32,"parallel-workers":0,"second-behinds":0,"parallel-type":1,"relay-binlog":"","relay-gtid":0,"restart-gtid":0,"rates":"{\"All\":[0]}"} + +---Now relay status is `false` +``` diff --git a/docs/radon_sql_surpport.md b/docs/radon_sql_surpport.md new file mode 100644 index 00000000..5c090028 --- /dev/null +++ b/docs/radon_sql_surpport.md @@ -0,0 +1,690 @@ + +[TOC] + +# Radon SQL support + +## Background + +On SQL syntax level, RadonDB is fully compatible with MySQL. + +In most scenarios, the SQL implementation of RadonDB is a subset of MySQL, for better use and standardization. + +## DDL + +### DATABASE + +Based on database, RadonDB now only supports `CREATE` and `DROP` operations. + +#### CREATE DATABASE + +`Syntax` +``` + CREATE DATABASE [IF NOT EXISTS] db_name +``` +`Instructions` + +* RadonDB sends this statement directly to all backends to execute and return results. +* *Cross-partition non-atomic operations* + +`Example:` +``` +mysql> CREATE DATABASE db_test1; +Query OK, 1 row affected (0.00 sec) +``` + +#### DROP DATABASE + +`Syntax` +``` + DROP DATABASE [IF EXISTS] db_name +``` + +`Instructions` + +* RadonDB sends this statement directly to all backends to execute and return results.
+* *Cross-partition non-atomic operations* + +`Example ` +``` +mysql> DROP DATABASE db_test1; +Query OK, 0 rows affected (0.01 sec) +``` +--------------------------------------------------------------------------------------------------- + +### TABLE + +#### CREATE TABLE + +`Syntax` +``` + CREATE TABLE [IF NOT EXISTS] table_name + (create_definition,...) + [ENGINE={InnoDB|TokuDB}] + [DEFAULT CHARSET=(charset)] + PARTITION BY HASH(shard-key) +``` + +`Instructions` +* Create partition information and generate partition tables on each partition +* Partition table syntax should include`PARTITION BY HASH(partition key)` +* The partitioning key only supports specifying one column, the data type of this column is not limited( + except for TYPE `BINARY/NULL`) +* The partition mode is HASH, which is evenly distributed across the partitions according to the partition key + `HASH value` +* table_options only surpport `ENGINE` and `CHARSET`,Others are automatically ignored +* The default engine for partition table is `InnoDB` +* The default character set for partition table `UTF-8` +* Does not support PRIMARY/UNIQUE constraints for non-partitioned keys, returning errors directly +* *Cross-partition non-atomic operations* + +`Example:` +``` +mysql> CREATE DATABASE db_test1; +Query OK, 1 row affected (0.00 sec) + +mysql> USE db_test1; + +Database changed +mysql> CREATE TABLE t1(id int, age int) PARTITION BY HASH(id); +Query OK, 0 rows affected (1.80 sec) +``` + +#### DROP TABLE + +`Syntax` +``` +DROP TABLE [IF EXISTS] table_name +``` + +`Instructions` + +* Delete partition information and backend`s partition table +* *Cross-partition non-atomic operations* + +`Example: ` +``` +mysql> DROP TABLE t1; +Query OK, 0 rows affected (0.05 sec) +``` + +#### Change Table Engine + +`Syntax` +``` +ALTER TABLE ... 
ENGINE={InnoDB|TokuDB...} +``` + +`Instructions` +* RadonDB sends the corresponding backend execution engine changes based on the routing information +* *Cross-partition non-atomic operations* + +`Example: ` + +``` +mysql> CREATE TABLE t1(id int, age int) PARTITION BY HASH(id); +Query OK, 0 rows affected (1.76 sec) + +mysql> SHOW CREATE TABLE t1\G; +*************************** 1. row *************************** + Table: t1 +Create Table: CREATE TABLE `t1` ( + `id` int(11) DEFAULT NULL, + `age` int(11) DEFAULT NULL +) ENGINE=InnoDB DEFAULT CHARSET=utf8 +1 row in set (0.00 sec) + +mysql> ALTER TABLE t1 ENGINE=TokuDB; +Query OK, 0 rows affected (0.15 sec) + +mysql> SHOW CREATE TABLE t1\G; +*************************** 1. row *************************** + Table: t1 +Create Table: CREATE TABLE `t1` ( + `id` int(11) DEFAULT NULL, + `age` int(11) DEFAULT NULL +) ENGINE=TokuDB DEFAULT CHARSET=utf8 +1 row in set (0.00 sec) +``` + +#### Change the table character set + +In RadonDB, the default character set is `UTF-8`. + +`Syntax` +``` +ALTER TABLE table_name CONVERT TO CHARACTER SET {charset} +``` + +`Instructions` +* RadonDB sends the corresponding backend execution engine changes based on the routing information +* *Cross-partition non-atomic operations* + +`Example: ` + +``` +mysql> create table t1(id int, b int) partition by hash(id); +Query OK, 0 rows affected (0.15 sec) + +mysql> show create table t1\G; +*************************** 1. row *************************** + Table: t1 +Create Table: CREATE TABLE `t1` ( + `id` int(11) DEFAULT NULL, + `b` int(11) DEFAULT NULL +) ENGINE=InnoDB DEFAULT CHARSET=utf8 +1 row in set (0.00 sec) + +mysql> alter table t1 convert to character set utf8mb4; +Query OK, 0 rows affected (0.07 sec) + +mysql> show create table t1\G; +*************************** 1. 
row *************************** + Table: t1 +Create Table: CREATE TABLE `t1` ( + `id` int(11) DEFAULT NULL, + `b` int(11) DEFAULT NULL +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 +1 row in set (0.00 sec) +``` + +#### TRUNCATE TABLE +`Syntax` +``` +TRUNCATE TABLE table_name +``` + +`Instructions` + +* *Cross-partition non-atomic operations* + +`Example: ` +``` +mysql> insert into t1(id) values(1); +Query OK, 1 row affected (0.01 sec) + +mysql> select * from t1; ++------+------+ +| id | age | ++------+------+ +| 1 | NULL | ++------+------+ +1 row in set (0.01 sec) + +mysql> truncate table t1; +Query OK, 0 rows affected (1.21 sec) + +mysql> select * from t1; +Empty set (0.01 sec) +``` +--------------------------------------------------------------------------------------------------- + +### COLUMN OPERATION + +#### Add Column +`Syntax` +``` +ALTER TABLE table_name ADD COLUMN (col_name column_definition,...) +``` + +`Instructions` +* Add new columns to the table +* *Cross-partition non-atomic operations* + +`Example: ` + +``` +mysql> ALTER TABLE t1 ADD COLUMN (b int, c varchar(100)); +Query OK, 0 rows affected (2.94 sec) + +mysql> SHOW CREATE TABLE t1\G; +*************************** 1. row *************************** + Table: t1 +Create Table: CREATE TABLE `t1` ( + `id` int(11) DEFAULT NULL, + `age` int(11) DEFAULT NULL, + `b` int(11) DEFAULT NULL, + `c` varchar(100) DEFAULT NULL +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 +1 row in set (0.01 sec) +``` + +#### Drop Column + +`Syntax` +``` +ALTER TABLE table_name DROP COLUMN col_name +``` + +`Instructions` +* drop column from table +* *Cannot delete the column where the partition key is located* +* *Cross-partition non-atomic operations* + +`Example: ` + +``` +mysql> ALTER TABLE t1 DROP COLUMN c; +Query OK, 0 rows affected (2.92 sec) + +mysql> SHOW CREATE TABLE t1\G; +*************************** 1. 
row *************************** + Table: t1 +Create Table: CREATE TABLE `t1` ( + `id` int(11) DEFAULT NULL, + `age` int(11) DEFAULT NULL, + `b` int(11) DEFAULT NULL +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 +1 row in set (0.00 sec) + +mysql> ALTER TABLE t1 DROP COLUMN id; +ERROR 1105 (HY000): unsupported: cannot.drop.the.column.on.shard.key +``` + +#### Modify Column + +`Syntax` +``` +ALTER TABLE table_name MODIFY COLUMN col_name column_definition +``` + +`Instructions` +* Modify the column definition from table +* *Cannot modify the column where the partition key is located* +* *Cross-partition non-atomic operations* + +`Example: ` + +``` +mysql> ALTER TABLE t1 MODIFY COLUMN b bigint; +Query OK, 0 rows affected (4.09 sec) + +mysql> SHOW CREATE TABLE t1\G; +*************************** 1. row *************************** + Table: t1 +Create Table: CREATE TABLE `t1` ( + `id` int(11) DEFAULT NULL, + `age` int(11) DEFAULT NULL, + `b` bigint(20) DEFAULT NULL +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 +1 row in set (0.00 sec) +mysql> ALTER TABLE t1 MODIFY COLUMN id bigint; +ERROR 1105 (HY000): unsupported: cannot.modify.the.column.on.shard.key + +``` +--------------------------------------------------------------------------------------------------- + +### INDEX + +RadonDB only supports the `CREATE/DROP INDEX` syntax in order to simplify the index operation. + +#### ADD INDEX + +`Syntax` +``` +CREATE INDEX index_name ON table_name (index_col_name,...) +``` + +`Instructions` +* RadonDB sends the index to the corresponding backend based on the routing information. 
+* *Cross-partition non-atomic operations* + +`Example: ` +``` +mysql> CREATE INDEX idx_id_age ON t1(id, age); +Query OK, 0 rows affected (0.17 sec) +``` + +#### DROP INDEX + +`Syntax` +``` + DROP INDEX index_name ON table_name +``` + +`Instructions` +* RadonDB sends an drop index operation to the appropriate backend based on routing information +* *Cross-partition non-atomic operations* + +`Example: ` +``` +mysql> DROP INDEX idx_id_age ON t1; +Query OK, 0 rows affected (0.09 sec) +``` + +## DML +### SELECT + +`Syntax` +``` +SELECT + select_expr [, select_expr ...] + [FROM table_references + [WHERE where_condition] + [GROUP BY {col_name} + [ORDER BY {col_name | expr | position} + [ASC | DESC], ...] + [LIMIT {[offset,] row_count | row_count OFFSET offset}]] +``` + +`Instructions` + + * Support cross-partition count, sum, avg, max, min and other aggregate functions, *avg field must be in select_expr*, Aggregate functions only surpport for numeric values + * Support cross-partition order by, group by, limit and other operations, *field must be in select_expr* + * Supports complex queries such as joins, automatic routing to AP-Nodes to execute and return + +`Example: ` +``` +mysql> CREATE TABLE t2(id int, age int) partition by HASH(id); +Query OK, 0 rows affected (1.78 sec) + +mysql> INSERT INTO t2(id, age) values(1, 25); +Query OK, 1 row affected (0.01 sec) + +mysql> INSERT INTO t2(id, age) values(3, 22); +Query OK, 1 row affected (0.01 sec) + +mysql> INSERT INTO t2(id, age) values(13, 22); +Query OK, 1 row affected (0.02 sec) + +mysql> INSERT INTO t2(id, age) values(23, 22); +Query OK, 1 row affected (0.00 sec) + +mysql> select id, sum(id) from t2 group by id order by id desc limit 10; ++------+---------+ +| id | sum(id) | ++------+---------+ +| 1 | 1 | +| 3 | 3 | +| 13 | 13 | +| 23 | 23 | ++------+---------+ +4 rows in set (0.01 sec) +``` + +### INSERT + +`Syntax` +``` +INSERT INTO tbl_name + (col_name,...) 
+	{VALUES | VALUE}
+```
+
+`Instructions`
+ * Support distributed transactions to ensure cross-partition write atomicity
+ * Support insert multiple values, these values can be in different partitions
+ * Must specify the write column
+ * *Does not support clauses*
+
+`Example: `
+```
+mysql> INSERT INTO t2(id, age) VALUES(1, 24), (2, 28), (3, 29);
+Query OK, 3 rows affected (0.01 sec)
+```
+
+### DELETE
+
+`Syntax`
+```
+DELETE FROM tbl_name
+    [WHERE where_condition]
+```
+
+`Instructions`
+ * Support distributed transactions to ensure cross-partition delete atomicity
+ * *Does not support delete without WHERE condition*
+ * *Does not support clauses*
+
+`Example: `
+```
+mysql> DELETE FROM t1 WHERE id=1;
+Query OK, 2 rows affected (0.01 sec)
+```
+
+### UPDATE
+
+`Syntax`
+```
+UPDATE table_reference
+    SET col_name1={expr1|DEFAULT} [, col_name2={expr2|DEFAULT}] ...
+    [WHERE where_condition]
+```
+
+`Instructions`
+ * Supports distributed transactions to ensure atomicity across partitions
+ * *Does not support WHERE-less condition updates*
+ * *Does not support updating partition key*
+ * *Does not support clauses*
+
+`Example: `
+```
+mysql> UPDATE t1 set age=age+1 WHERE id=1;
+Query OK, 1 row affected (0.00 sec)
+```
+### REPLACE
+
+`Syntax`
+```
+REPLACE INTO tbl_name
+    [(col_name,...)]
+    {VALUES | VALUE} ({expr | DEFAULT},...),(...),...
+``` + +`Instructions` + * Support distributed transactions to ensure cross-partition write atomicity + * Support replace multiple values, these values can be in different partitions + * Must specify write column + +`Example: ` +``` +mysql> REPLACE INTO t2 (id, age) VALUES(3,34),(5, 55); +Query OK, 2 rows affected (0.01 sec) +``` + +## SHOW + +### SHOW ENGINES + +`Syntax` +``` +SHOW ENGINES +``` + +`Instructions` +* Backend partitioned supported engine list by MySQL + +`Example: ` +``` + +mysql> SHOW ENGINES; ++--------------------+---------+----------------------------------------------------------------------------+--------------+------+------------+ +| Engine | Support | Comment | Transactions | XA | Savepoints | ++--------------------+---------+----------------------------------------------------------------------------+--------------+------+------------+ +| MyISAM | YES | MyISAM storage engine | NO | NO | NO | +| MRG_MYISAM | YES | Collection of identical MyISAM tables | NO | NO | NO | +| InnoDB | DEFAULT | Percona-XtraDB, Supports transactions, row-level locking, and foreign keys | YES | YES | YES | +| BLACKHOLE | YES | /dev/null storage engine (anything you write to it disappears) | NO | NO | NO | +| CSV | YES | CSV storage engine | NO | NO | NO | +| PERFORMANCE_SCHEMA | YES | Performance Schema | NO | NO | NO | +| ARCHIVE | YES | Archive storage engine | NO | NO | NO | +| TokuDB | YES | Percona TokuDB Storage Engine with Fractal Tree(tm) Technology | YES | YES | YES | +| FEDERATED | NO | Federated MySQL storage engine | NULL | NULL | NULL | +| MEMORY | YES | Hash based, stored in memory, useful for temporary tables | NO | NO | NO | ++--------------------+---------+----------------------------------------------------------------------------+--------------+------+------------+ +10 rows in set (0.00 sec) +``` + +### SHOW DATABASES + +`Syntax` +``` +SHOW DATABASES +``` + +`Instructions` +* Including system DB, such as mysql, information_schema + +`Example: ` 
+``` +mysql> SHOW DATABASES; ++--------------------+ +| Database | ++--------------------+ +| information_schema | +| db_gry_test | +| db_test1 | +| mysql | +| performance_schema | +| sys | ++--------------------+ +6 rows in set (0.01 sec) +``` + +### SHOW TABLES + +`Syntax` +``` +SHOW TABLES +[FROM db_name] +``` + +`Instructions` +* If db_name is not specified, the table under the current DB is returned + +`Example: ` +``` +mysql> SHOW TABLES; ++--------------------+ +| Tables_in_db_test1 | ++--------------------+ +| t1 | +| t2 | ++--------------------+ +2 rows in set (0.01 sec) +``` + +### SHOW CREATE TABLE + +`Syntax` +``` +SHOW CREATE TABLE table_name +``` + +`Instructions` +* N/A + +`Example: ` +``` +mysql> SHOW CREATE TABLE t1\G; +*************************** 1. row *************************** + Table: t1 +Create Table: CREATE TABLE `t1` ( + `id` int(11) DEFAULT NULL, + `age` int(11) DEFAULT NULL, + `b` bigint(20) DEFAULT NULL, + KEY `idx_id_age` (`id`,`age`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 +1 row in set (0.01 sec) +``` + +### SHOW PROCESSLIST + +`Syntax` +``` +SHOW PROCESSLIST +``` + +`Instructions` +* Shows the connection from client to RadonDB, not the backend partition MySQL + +`Example: ` +``` +mysql> SHOW PROCESSLIST; ++------+------+-----------------+----------+---------+------+-------+------+-----------+---------------+ +| Id | User | Host | db | Command | Time | State | Info | Rows_sent | Rows_examined | ++------+------+-----------------+----------+---------+------+-------+------+-----------+---------------+ +| 1 | root | 127.0.0.1:56984 | db_test1 | Sleep | 794 | | | 0 | 0 | ++------+------+-----------------+----------+---------+------+-------+------+-----------+---------------+ +1 row in set (0.00 sec) +``` + +### SHOW VARIABLES + +`Syntax` +``` +SHOW VARIABLES + [LIKE 'pattern' | WHERE expr] +``` + +`Instructions` +* For compatibility JDBC/mydumper +* The SHOW VARIABLES command is sent to the backend partition MySQL (random partition) to 
get and return + +## USE + +### USE DATABASE + +`Syntax` +``` +USE db_name +``` + +`Instructions` +* Switch the database of the current session + +`Example: ` +``` +mysql> use db_test1; +Reading table information for completion of table and column names +You can turn off this feature to get a quicker startup with -A + +Database changed +``` + +## KILL + +### KILL processlist_id + +`Syntax` +``` +KILL processlist_id +``` + +`Instructions` +* Kill a link (including terminating the executing statement) + +`Example: ` + +``` +mysql> show processlist; ++------+------+-----------------+----------+---------+------+-------+------+-----------+---------------+ +| Id | User | Host | db | Command | Time | State | Info | Rows_sent | Rows_examined | ++------+------+-----------------+----------+---------+------+-------+------+-----------+---------------+ +| 2 | root | 127.0.0.1:38382 | db_test1 | Sleep | 197 | | | 0 | 0 | ++------+------+-----------------+----------+---------+------+-------+------+-----------+---------------+ +1 row in set (0.00 sec) + +mysql> kill 2; +ERROR 2013 (HY000): Lost connection to MySQL server during query + +mysql> show processlist; +ERROR 2006 (HY000): MySQL server has gone away +No connection. Trying to reconnect... 
+Connection id: 3 +Current database: db_test1 + ++------+------+-----------------+----------+---------+------+-------+------+-----------+---------------+ +| Id | User | Host | db | Command | Time | State | Info | Rows_sent | Rows_examined | ++------+------+-----------------+----------+---------+------+-------+------+-----------+---------------+ +| 3 | root | 127.0.0.1:38516 | db_test1 | Sleep | 0 | | | 0 | 0 | ++------+------+-----------------+----------+---------+------+-------+------+-----------+---------------+ +1 row in set (0.00 sec) + +``` + +## SET + +`Instructions` +* For compatibility JDBC/mydumper +* SET is an empty operation, *all operations will not take effect*, do not use it directly。 + diff --git a/makefile b/makefile new file mode 100644 index 00000000..0262af3a --- /dev/null +++ b/makefile @@ -0,0 +1,91 @@ +PREFIX :=/usr/local +export GOPATH := $(shell pwd) +export PATH := $(GOPATH)/bin:$(PATH) + +build: LDFLAGS += $(shell GOPATH=${GOPATH} src/build/ldflags.sh) +build: + @echo "--> Building..." + @mkdir -p bin/ + go build -v -o bin/radon --ldflags '$(LDFLAGS)' src/radon/radon.go + go build -v -o bin/radoncli --ldflags '$(LDFLAGS)' src/cli/cli.go + @chmod 755 bin/* + +clean: + @echo "--> Cleaning..." + @go clean + @rm -f bin/* + +fmt: + go fmt ./... + go vet ./... + +test: + @echo "--> Testing..." 
+ @$(MAKE) testxbase + @$(MAKE) testxcontext + @$(MAKE) testconfig + @$(MAKE) testrouter + @$(MAKE) testoptimizer + @$(MAKE) testplanner + @$(MAKE) testexecutor + @$(MAKE) testbackend + @$(MAKE) testproxy + @$(MAKE) testaudit + @$(MAKE) testsyncer + @$(MAKE) testbinlog + @$(MAKE) testctl + +testxbase: + go test -v -race xbase + go test -v -race xbase/stats + go test -v -race xbase/sync2 +testxcontext: + go test -v xcontext +testconfig: + go test -v config +testrouter: + go test -v router +testoptimizer: + go test -v optimizer +testplanner: + go test -v planner +testexecutor: + go test -v executor +testbackend: + go test -v -race backend +testproxy: + go test -v -race proxy +testaudit: + go test -v -race audit +testsyncer: + go test -v -race syncer +testbinlog: + go test -v -race binlog +testctl: + go test -v -race ctl/v1 +testcli: + go test -v -race cli/cmd +testpoc: + go test -v poc + +# code coverage +allpkgs = xbase/...\ + ctl/v1/\ + xcontext\ + config\ + router\ + optimizer\ + planner\ + executor\ + backend\ + proxy\ + audit\ + syncer\ + binlog +coverage: + go build -v -o bin/gotestcover \ + src/vendor/github.com/pierrre/gotestcover/*.go; + gotestcover -coverprofile=coverage.out -v $(allpkgs) + go tool cover -html=coverage.out + +.PHONY: build clean install fmt test coverage check diff --git a/src/audit/audit.go b/src/audit/audit.go new file mode 100644 index 00000000..55377c20 --- /dev/null +++ b/src/audit/audit.go @@ -0,0 +1,201 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package audit + +import ( + "config" + "os" + "path/filepath" + "sync" + "time" + "xbase" + + "github.com/xelabs/go-mysqlstack/xlog" +) + +const ( + prefix = "audit-" + extension = ".log" +) + +const ( + // NULL enum. + NULL = "N" + + // READ enum. + READ = "R" + + // WRITE enum. + WRITE = "W" + + // ALL enum. 
+ ALL = "A" +) + +// easyjson:json +// NOTE: +// if the event changes, we must re-generate the audit_easyjson.go file by 'easyjson src/audit/audit.go' command. +type event struct { + Start time.Time `json:"start"` // Time the query was start. + End time.Time `json:"end"` // Time the query was end. + Cost time.Duration `json:"cost"` // Cost. + User string `json:"user"` // User. + UserHost string `json:"user_host"` // User and host combination. + ThreadID uint32 `json:"thread_id"` // Thread id. + CommandType string `json:"command_type"` // Type of command. + Argument string `json:"argument"` // Full query. + QueryRows uint64 `json:"query_rows"` // Query rows. +} + +// Audit tuple. +type Audit struct { + log *xlog.Log + conf *config.AuditConfig + ticker *time.Ticker + queue chan *event + done chan bool + rfile xbase.RotateFile + wg sync.WaitGroup +} + +// NewAudit creates the new audit. +func NewAudit(log *xlog.Log, conf *config.AuditConfig) *Audit { + return &Audit{ + log: log, + conf: conf, + done: make(chan bool), + queue: make(chan *event, 1024), + ticker: time.NewTicker(time.Duration(time.Second * 300)), // 5 minutes + rfile: xbase.NewRotateFile(conf.LogDir, prefix, extension, conf.MaxSize), + } +} + +// Init used to create the log dir, if EXISTS we do onthing. +func (a *Audit) Init() error { + log := a.log + + log.Info("audit.init.conf:%+v", a.conf) + if err := os.MkdirAll(a.conf.LogDir, 0744); err != nil { + return err + } + + a.wg.Add(1) + go func(audit *Audit) { + defer a.wg.Done() + a.eventConsumer() + }(a) + + a.wg.Add(1) + go func(audit *Audit) { + defer a.wg.Done() + a.purge() + }(a) + log.Info("audit.init.done") + return nil +} + +// LogReadEvent used to handle the read-only event. 
+func (a *Audit) LogReadEvent(t string, user string, host string, threadID uint32, query string, affected uint64, startTime time.Time) { + if a.conf.Mode == ALL || a.conf.Mode == READ { + e := &event{ + Start: startTime, + End: time.Now(), + Cost: time.Since(startTime), + User: user, + UserHost: host, + ThreadID: threadID, + CommandType: t, + Argument: query, + QueryRows: affected, + } + a.queue <- e + } +} + +// LogWriteEvent used to handle the write event. +func (a *Audit) LogWriteEvent(t string, user string, host string, threadID uint32, query string, affected uint64, startTime time.Time) { + if a.conf.Mode == ALL || a.conf.Mode == WRITE { + e := &event{ + Start: startTime, + End: time.Now(), + Cost: time.Since(startTime), + User: user, + UserHost: host, + ThreadID: threadID, + CommandType: t, + Argument: query, + QueryRows: affected, + } + a.queue <- e + } +} + +// Close used to close the audit log. +func (a *Audit) Close() { + // wait the queue event flush to file. + close(a.done) + close(a.queue) + a.wg.Wait() + a.rfile.Sync() + a.rfile.Close() + a.log.Info("audit.closed") +} + +func (a *Audit) eventConsumer() { + for e := range a.queue { + a.writeEvent(e) + } +} + +func (a *Audit) writeEvent(e *event) { + log := a.log + b, err := e.MarshalJSON() + if err != nil { + b = []byte(err.Error()) + } + b = append(b, '\n') + + // write + _, err = a.rfile.Write(b) + if err != nil { + log.Error("audit.write.file.error:%v", err) + } +} + +func (a *Audit) purge() { + defer a.ticker.Stop() + for { + select { + case <-a.ticker.C: + a.doPurge() + case <-a.done: + return + } + } +} + +func (a *Audit) doPurge() { + log := a.log + if a.conf.ExpireHours == 0 { + return + } + + oldLogs, err := a.rfile.GetOldLogInfos() + if err != nil { + log.Error("audit.get.old.loginfos.error:%v", err) + return + } + + for _, old := range oldLogs { + diff := time.Now().UTC().Sub(time.Unix(0, old.Ts)) + if int(diff.Hours()) > a.conf.ExpireHours { + os.Remove(filepath.Join(a.conf.LogDir, 
old.Name)) + } + } +} diff --git a/src/audit/audit_easyjson.go b/src/audit/audit_easyjson.go new file mode 100644 index 00000000..465df5f5 --- /dev/null +++ b/src/audit/audit_easyjson.go @@ -0,0 +1,86 @@ +// Code generated by easyjson for marshaling/unmarshaling. DO NOT EDIT. + +package audit + +import ( + json "encoding/json" + easyjson "github.com/mailru/easyjson" + jlexer "github.com/mailru/easyjson/jlexer" + jwriter "github.com/mailru/easyjson/jwriter" +) + +// suppress unused package warning +var ( + _ *json.RawMessage + _ *jlexer.Lexer + _ *jwriter.Writer + _ easyjson.Marshaler +) + +func easyjsonF2c44427EncodeAudit1(out *jwriter.Writer, in event) { + out.RawByte('{') + first := true + _ = first + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"start\":") + out.Raw((in.Start).MarshalJSON()) + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"end\":") + out.Raw((in.End).MarshalJSON()) + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"cost\":") + out.Int64(int64(in.Cost)) + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"user\":") + out.String(string(in.User)) + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"user_host\":") + out.String(string(in.UserHost)) + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"thread_id\":") + out.Uint32(uint32(in.ThreadID)) + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"command_type\":") + out.String(string(in.CommandType)) + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"argument\":") + out.String(string(in.Argument)) + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"query_rows\":") + out.Uint64(uint64(in.QueryRows)) + out.RawByte('}') +} + +// MarshalJSON supports json.Marshaler interface +func (v event) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + easyjsonF2c44427EncodeAudit1(&w, v) + return w.Buffer.BuildBytes(), 
w.Error +} diff --git a/src/audit/audit_test.go b/src/audit/audit_test.go new file mode 100644 index 00000000..9d47cadd --- /dev/null +++ b/src/audit/audit_test.go @@ -0,0 +1,171 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package audit + +import ( + "config" + "fmt" + "os" + "path/filepath" + "sync" + "testing" + "time" + + "github.com/fortytw2/leaktest" + "github.com/stretchr/testify/assert" + "github.com/xelabs/go-mysqlstack/xlog" +) + +func TestAudit(t *testing.T) { + defer leaktest.Check(t)() + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + conf := &config.AuditConfig{ + Mode: ALL, + MaxSize: 102400, + ExpireHours: 1, + LogDir: "/tmp/radon/test/audit", + } + + os.RemoveAll(conf.LogDir) + audit := NewAudit(log, conf) + err := audit.Init() + assert.Nil(t, err) + defer audit.Close() + + n := 10000 + for i := 0; i < n; i++ { + typ := "SELECT" + user := "BohuTANG>>>>" + host := "127.0.0.1:8899" + threadID := uint32(i) + query := "select a,b,cd from table1 where a=b and c=d and e=d group by id order\n by desc" + if i%2 == 0 { + audit.LogWriteEvent(typ, user, host, threadID, query, 0, time.Now()) + } else { + audit.LogReadEvent(typ, user, host, threadID, query, 0, time.Now()) + } + } +} + +func TestAuditMultiThread(t *testing.T) { + defer leaktest.Check(t)() + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + conf := &config.AuditConfig{ + Mode: ALL, + MaxSize: 1024 * 1024, + ExpireHours: 1, + LogDir: "/tmp/radon/test/audit", + } + + os.RemoveAll(conf.LogDir) + audit := NewAudit(log, conf) + err := audit.Init() + assert.Nil(t, err) + defer audit.Close() + + var wait sync.WaitGroup + for k := 0; k < 10; k++ { + wait.Add(1) + go func(a *Audit) { + n := 10000 + for i := 0; i < n; i++ { + typ := "SELECT" + user := "BohuTANG>>>>" + host := "127.0.0.1:8899" + threadID := uint32(i) + query := "select a,b,cd from table1 where a=b and c=d and e=d group by id order\n by desc" + if i%2 == 0 { + a.LogWriteEvent(typ, 
user, host, threadID, query, 0, time.Now()) + } else { + a.LogReadEvent(typ, user, host, threadID, query, 0, time.Now()) + } + } + wait.Done() + }(audit) + } + wait.Wait() +} + +func TestPurge(t *testing.T) { + fileFormat := "20060102150405.000" + defer leaktest.Check(t)() + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + conf := &config.AuditConfig{ + Mode: ALL, + MaxSize: 102400, + ExpireHours: 1, + LogDir: "/tmp/radon/test/audit", + } + + os.RemoveAll(conf.LogDir) + audit := NewAudit(log, conf) + err := audit.Init() + assert.Nil(t, err) + defer audit.Close() + + n := 10000 + for i := 0; i < n; i++ { + typ := "SELECT" + user := "BohuTANG>>>>" + host := "127.0.0.1:8899" + threadID := uint32(i) + query := "select a,b,cd from table1 where a=b and c=d and e=d group by id order\n by desc" + if i%2 == 0 { + audit.LogWriteEvent(typ, user, host, threadID, query, 0, time.Now()) + } else { + audit.LogReadEvent(typ, user, host, threadID, query, 0, time.Now()) + } + } + // first the close the audit to stop the event writing. + + logs, _ := audit.rfile.GetOldLogInfos() + // purge the old log. 
+ l0 := logs[0] + ts := time.Unix(0, l0.Ts).UTC().Add(time.Duration(time.Hour * time.Duration(-2))) + timestamp := ts.Format(fileFormat) + newName := filepath.Join(conf.LogDir, fmt.Sprintf("%s%s%s", prefix, timestamp, extension)) + os.Rename(filepath.Join(conf.LogDir, l0.Name), newName) + audit.doPurge() + + logs1, _ := audit.rfile.GetOldLogInfos() + want := len(logs) + got := len(logs1) + assert.Equal(t, want-1, got) +} + +func TestAuditBench(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.DEBUG)) + conf := &config.AuditConfig{ + Mode: ALL, + MaxSize: 1024 * 1024 * 100, + ExpireHours: 1, + LogDir: "/tmp/test/audit", + } + + os.RemoveAll(conf.LogDir) + audit := NewAudit(log, conf) + err := audit.Init() + assert.Nil(t, err) + defer audit.Close() + + { + N := 100000 + now := time.Now() + for i := 0; i < N; i++ { + typ := "SELECT" + user := "BohuTANG>>>>" + host := "127.0.0.1:8899" + threadID := uint32(i) + query := "select a,b,cd from table1 where a=b and c=d and e=d group by id order\n by desc" + audit.LogWriteEvent(typ, user, host, threadID, query, 0, time.Now()) + } + took := time.Since(now) + fmt.Printf(" LOOP\t%v COST %v, avg:%v/s\n", N, took, (int64(N)/(took.Nanoseconds()/1e6))*1000) + } +} diff --git a/src/backend/backuptxn.go b/src/backend/backuptxn.go new file mode 100644 index 00000000..7ae3ba03 --- /dev/null +++ b/src/backend/backuptxn.go @@ -0,0 +1,184 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package backend + +import ( + "sync" + "time" + + "github.com/pkg/errors" + "xbase/sync2" + + "github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes" + "github.com/xelabs/go-mysqlstack/xlog" +) + +var ( + backupTxnCounterCreate = "#backuptxn.create" + backupTxnCounterConnectionError = "#get.backup.connection.error" + backupTxnCounterTxnFinish = "#backuptxn.finish" + backupTxnCounterTxnAbort = "#backuptxn.abort" +) + +// BackupTxn tuple. 
+type BackupTxn struct { + log *xlog.Log + id uint64 + mu sync.Mutex + mgr *TxnManager + txnd *TxnDetail + start time.Time + state sync2.AtomicInt32 + backup *Pool + timeout int + maxResult int + errors int + connMu sync.RWMutex + connection Connection +} + +// NewBackupTxn creates the new BackupTxn. +func NewBackupTxn(log *xlog.Log, txid uint64, mgr *TxnManager, backup *Pool) (*BackupTxn, error) { + txn := &BackupTxn{ + log: log, + id: txid, + mgr: mgr, + backup: backup, + start: time.Now(), + } + txnd := NewTxnDetail(txn) + txn.txnd = txnd + tz.Add(txnd) + txnCounters.Add(backupTxnCounterCreate, 1) + return txn, nil +} + +// SetTimeout used to set the txn timeout. +func (txn *BackupTxn) SetTimeout(timeout int) { + txn.timeout = timeout +} + +// SetMaxResult used to set the txn max result. +func (txn *BackupTxn) SetMaxResult(max int) { + txn.maxResult = max +} + +// TxID returns txn id. +func (txn *BackupTxn) TxID() uint64 { + return txn.id +} + +// XID returns empty. +func (txn *BackupTxn) XID() string { + return "" +} + +// State returns txn.state. +func (txn *BackupTxn) State() int32 { + return txn.state.Get() +} + +// XaState returns txn xastate. +func (txn *BackupTxn) XaState() int32 { + return -1 +} + +func (txn *BackupTxn) incErrors() { + txn.errors++ +} + +func (txn *BackupTxn) fetchBackupConnection() (Connection, error) { + pool := txn.backup + if pool == nil { + return nil, errors.New("txn.backup.node.is.nil") + } + + conn, err := pool.Get() + if err != nil { + return nil, err + } + + txn.connMu.Lock() + txn.connection = conn + txn.connMu.Unlock() + return conn, nil +} + +// Execute used to execute the query to the backup node. +// If the backup node is not exists, fetchBackupConnection will return with an error. 
+func (txn *BackupTxn) Execute(database string, query string) (*sqltypes.Result, error) { + log := txn.log + conn, err := txn.fetchBackupConnection() + if err != nil { + log.Error("backtxn.execute.fetch.connection[db:%s, query:%s].error:%+v", database, query, err) + txnCounters.Add(backupTxnCounterConnectionError, 1) + txn.incErrors() + return nil, err + } + if err := conn.UseDB(database); err != nil { + log.Error("backuptxn.execute.usedb[db:%s, query:%s].on[%v].error:%+v", database, query, conn.Address(), err) + txn.incErrors() + return nil, err + } + + qr, err := conn.ExecuteWithLimits(query, txn.timeout, txn.maxResult) + if err != nil { + log.Error("backuptxn.execute.db:%s, query:%s].on[%v].error:%+v", database, query, conn.Address(), err) + txn.incErrors() + return nil, err + } + return qr, nil +} + +// Finish used to finish a transaction. +// If the lastErr is nil, we will recycle all the twopc connections to the pool for reuse, +// otherwise we wil close all of the them. +func (txn *BackupTxn) Finish() error { + txnCounters.Add(backupTxnCounterTxnFinish, 1) + + txn.mu.Lock() + defer txn.mu.Unlock() + + defer tz.Remove(txn.txnd) + + txn.state.Set(int32(txnStateFinshing)) + // backup node connection. + if txn.connection != nil { + if txn.errors > 0 { + txn.connection.Close() + } else { + txn.connection.Recycle() + } + } + txn.mgr.Remove() + return nil +} + +// Abort used to abort all txn connections. +func (txn *BackupTxn) Abort() error { + txnCounters.Add(backupTxnCounterTxnAbort, 1) + + txn.mu.Lock() + defer txn.mu.Unlock() + + // If the txn has finished, we won't do abort. + if txn.state.Get() == int32(txnStateFinshing) { + return nil + } + txn.state.Set(int32(txnStateAborting)) + + // backup node connection. 
+ txn.connMu.RLock() + if txn.connection != nil { + txn.connection.Kill("txn.abort") + } + txn.connMu.RUnlock() + txn.mgr.Remove() + return nil +} diff --git a/src/backend/backuptxn_test.go b/src/backend/backuptxn_test.go new file mode 100644 index 00000000..c9adc1f4 --- /dev/null +++ b/src/backend/backuptxn_test.go @@ -0,0 +1,125 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package backend + +import ( + "errors" + "sync" + "testing" + "time" + "xcontext" + + "github.com/fortytw2/leaktest" + "github.com/stretchr/testify/assert" + "github.com/xelabs/go-mysqlstack/xlog" +) + +func TestBackupTxnExecute(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedb, txnMgr, _, backup, addrs, cleanup := MockTxnMgr(log, 2) + defer cleanup() + + querys := []xcontext.QueryTuple{ + xcontext.QueryTuple{Query: "select * from node1", Backend: addrs[0]}, + xcontext.QueryTuple{Query: "select * from node2", Backend: addrs[1]}, + xcontext.QueryTuple{Query: "select * from node3", Backend: addrs[1]}, + } + + fakedb.AddQuery(querys[0].Query, result1) + fakedb.AddQueryDelay(querys[1].Query, result2, 100) + fakedb.AddQueryDelay(querys[2].Query, result2, 110) + fakedb.AddQuery("select * from backup", result1) + + // backup execute. + { + txn, err := txnMgr.CreateBackupTxn(backup) + assert.Nil(t, err) + + got, err := txn.Execute("", "select * from backup") + assert.Nil(t, err) + assert.Equal(t, result1, got) + txn.Finish() + } + + // backup execute error. 
+ { + fakedb.ResetAll() + fakedb.AddQueryError("select * from backup", errors.New("mock.backup.select.error")) + + txn, err := txnMgr.CreateBackupTxn(backup) + assert.Nil(t, err) + _, err = txn.Execute("", "select * from backup") + assert.NotNil(t, err) + txn.Finish() + } +} + +func TestBackupTxnSetting(t *testing.T) { + defer leaktest.Check(t)() + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedb, txnMgr, _, backup, _, cleanup := MockTxnMgr(log, 2) + defer cleanup() + + query := "select * from node1" + fakedb.AddQueryDelay(query, result1, 100) + + txn, err := txnMgr.CreateBackupTxn(backup) + assert.Nil(t, err) + defer txn.Finish() + + // timeout + { + txn.SetTimeout(10) + _, err := txn.Execute("", query) + assert.NotNil(t, err) + } + + // max result size. + { + txn.SetTimeout(0) + txn.SetMaxResult(10) + _, err := txn.Execute("", query) + got := err.Error() + want := "Query execution was interrupted, max memory usage[10 bytes] exceeded" + assert.Equal(t, want, got) + } +} + +func TestBackupTxnAbort(t *testing.T) { + defer leaktest.Check(t)() + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedb, txnMgr, _, backup, _, cleanup := MockTxnMgr(log, 3) + defer cleanup() + + fakedb.AddQueryDelay("update backup", result2, 2000) + + { + var wg sync.WaitGroup + txn, err := txnMgr.CreateBackupTxn(backup) + assert.Nil(t, err) + defer txn.Finish() + + // execute with long time. + { + wg.Add(1) + go func() { + defer wg.Done() + txn.Execute("", "update backup") + }() + } + + // abort + { + time.Sleep(time.Second) + err := txn.Abort() + assert.Nil(t, err) + } + wg.Wait() + } +} diff --git a/src/backend/connection.go b/src/backend/connection.go new file mode 100644 index 00000000..9ea1d781 --- /dev/null +++ b/src/backend/connection.go @@ -0,0 +1,268 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. 
+ * + */ + +package backend + +import ( + "context" + "errors" + "fmt" + "io" + "sync" + "time" + "xbase/stats" + + "github.com/xelabs/go-mysqlstack/driver" + "github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes" + "github.com/xelabs/go-mysqlstack/xlog" + "xbase/sync2" +) + +var _ Connection = &connection{} + +// Connection tuple. +type Connection interface { + ID() uint32 + Dial() error + Ping() error + Close() + Closed() bool + LastErr() error + UseDB(string) error + Kill(string) error + Recycle() + Address() string + SetTimestamp(int64) + Timestamp() int64 + Execute(string) (*sqltypes.Result, error) + ExecuteStreamFetch(string) (driver.Rows, error) + ExecuteWithLimits(query string, timeout int, maxmem int) (*sqltypes.Result, error) +} + +type connection struct { + log *xlog.Log + connectionID uint32 + user string + password string + address string + charset string + + pool *Pool + + // If lastErr is not nil, this connection should be closed. + lastErr error + + killed sync2.AtomicBool + driver driver.Conn + + // Recycle timestamp, in seconds. + timestamp int64 + + counters *stats.Counters +} + +// NewConnection creates a new connection. +func NewConnection(log *xlog.Log, pool *Pool) Connection { + conf := pool.conf + return &connection{ + log: log, + pool: pool, + user: conf.User, + password: conf.Password, + address: conf.Address, + charset: conf.Charset, + counters: pool.counters, + } +} + +// Dial used to create a new driver conn. +func (c *connection) Dial() error { + var err error + defer mysqlStats.Record("conn.dial", time.Now()) + + if c.driver, err = driver.NewConn(c.user, c.password, c.address, "", c.charset); err != nil { + c.log.Error("conn[%s].dial.error:%+v", c.address, err) + c.counters.Add(poolCounterBackendDialError, 1) + c.Close() + return errors.New("Server maybe lost, please try again") + } + c.connectionID = c.driver.ConnectionID() + return nil +} + +// Ping used to do ping. 
+func (c *connection) Ping() error { + return c.driver.Ping() +} + +// ID returns the connection ID. +func (c *connection) ID() uint32 { + return c.connectionID +} + +// UseDB used to send a 'use database' query to MySQL. +// This is SQLCOM_CHANGE_DB command not COM_INIT_DB. +func (c *connection) UseDB(db string) error { + if db != "" { + query := fmt.Sprintf("use %s", db) + if _, err := c.Execute(query); err != nil { + return err + } + } + return nil +} + +// SetTimestamp used to set the timestamp. +func (c *connection) SetTimestamp(ts int64) { + c.timestamp = ts +} + +// Timestamp returns Timestamp of connection. +func (c *connection) Timestamp() int64 { + return c.timestamp +} + +// setDeadline used to set deadline for a query. +func (c *connection) setDeadline(timeout int) (chan bool, *sync.WaitGroup) { + var wg sync.WaitGroup + done := make(chan bool, 1) + + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(timeout)*time.Millisecond) + wg.Add(1) + go func() { + defer func() { + wg.Done() + cancel() + }() + select { + case <-ctx.Done(): + c.counters.Add(poolCounterBackendExecuteTimeout, 1) + c.killed.Set(true) + reason := ctx.Err().Error() + c.Kill(reason) + case <-done: + return + } + }() + return done, &wg +} + +// Execute used to execute a query through this connection without limits. +func (c *connection) Execute(query string) (*sqltypes.Result, error) { + return c.ExecuteWithLimits(query, 0, 0) +} + +// Execute used to execute a query through this connection. +// if timeout or memlimits is 0, means there is not limits. +func (c *connection) ExecuteWithLimits(query string, timeout int, memlimits int) (*sqltypes.Result, error) { + var err error + var qr *sqltypes.Result + log := c.log + defer mysqlStats.Record("Connection.Execute", time.Now()) + + // Query details. + qd := NewQueryDetail(c, query) + qz.Add(qd) + defer qz.Remove(qd) + + // timeout. 
+ if timeout > 0 { + done, wg := c.setDeadline(timeout) + if done != nil { + defer func() { + close(done) + wg.Wait() + }() + } + } + + // memory limits. + checkFunc := func(rows driver.Rows) error { + if memlimits > 0 { + if rows.Bytes() > memlimits { + c.counters.Add(poolCounterBackendExecuteMaxresult, 1) + return fmt.Errorf("Query execution was interrupted, max memory usage[%d bytes] exceeded", memlimits) + } + } + return nil + } + + // execute. + if qr, err = c.driver.FetchAllWithFunc(query, -1, checkFunc); err != nil { + c.counters.Add(poolCounterBackendExecuteAllError, 1) + log.Error("conn[%s].execute[%s].error:%+v", c.address, query, err) + c.lastErr = err + + // Connection is killed. + if c.killed.Get() { + return nil, fmt.Errorf("Query execution was interrupted, timeout[%dms] exceeded", timeout) + } + + // Connection is broken(closed by server). + if err == io.EOF { + return nil, errors.New("Server maybe lost, please try again") + } + return nil, err + } + return qr, nil +} + +func (c *connection) ExecuteStreamFetch(query string) (driver.Rows, error) { + return c.driver.Query(query) +} + +// Kill used to kill current connection. +func (c *connection) Kill(reason string) error { + c.counters.Add(poolCounterBackendKilled, 1) + kill, err := c.pool.Get() + if err != nil { + return err + } + defer kill.Recycle() + + c.log.Warning("conn[%s, ID:%v].be.killed.by[%v].reason[%s]", c.address, c.ID(), kill.ID(), reason) + query := fmt.Sprintf("KILL %d", c.connectionID) + if _, err = kill.Execute(query); err != nil { + c.log.Warning("conn[%s, ID:%v].kill.error:%+v", c.address, c.ID(), err) + return err + } + return nil +} + +// Recycle used to put current to pool. +func (c *connection) Recycle() { + defer mysqlStats.Record("conn.recycle", time.Now()) + if !c.driver.Closed() { + c.pool.Put(c) + } +} + +// Address returns the backend address of the connection. +func (c *connection) Address() string { + return c.address +} + +// Close used to close connection. 
+func (c *connection) Close() { + defer mysqlStats.Record("conn.close", time.Now()) + c.lastErr = errors.New("I.am.closed") + if c.driver != nil { + c.driver.Close() + } +} + +func (c *connection) Closed() bool { + if c.driver != nil { + return c.driver.Closed() + } + return true +} + +func (c *connection) LastErr() error { + return c.lastErr +} diff --git a/src/backend/connection_test.go b/src/backend/connection_test.go new file mode 100644 index 00000000..dc2b2a52 --- /dev/null +++ b/src/backend/connection_test.go @@ -0,0 +1,315 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package backend + +import ( + "errors" + "fakedb" + "testing" + + "github.com/fortytw2/leaktest" + "github.com/stretchr/testify/assert" + + "github.com/xelabs/go-mysqlstack/sqldb" + "github.com/xelabs/go-mysqlstack/xlog" +) + +func TestConnection(t *testing.T) { + //defer leaktest.Check(t)() + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedb := fakedb.New(log, 1) + defer fakedb.Close() + addr := fakedb.Addrs()[0] + + // Connection + conn, cleanup := MockClient(log, addr) + defer cleanup() + + // connection ID + { + want := uint32(1) + got := conn.ID() + assert.Equal(t, want, got) + } + + // usedb + { + fakedb.AddQuery("USE MOCKDB", result2) + err := conn.UseDB("MOCKDB") + assert.Nil(t, err) + } + + sqlErr := sqldb.NewSQLError(sqldb.ER_UNKNOWN_ERROR, "query.error") + // usedb error + { + fakedb.AddQueryError("USE USEDBERROR", sqlErr) + err := conn.UseDB("USEDBERROR") + want := "query.error (errno 1105) (sqlstate HY000)" + got := err.Error() + assert.Equal(t, want, got) + } + // again + { + fakedb.AddQueryError("USE USEDBERROR", sqlErr) + err := conn.UseDB("USEDBERROR") + want := "query.error (errno 1105) (sqlstate HY000)" + got := err.Error() + assert.Equal(t, want, got) + } + + // execute + { + fakedb.AddQuery("SELECT1", result1) + r, err := conn.Execute("SELECT1") + assert.Nil(t, err) + assert.Equal(t, result1, r) + } +} + 
+func TestConnectionRecyle(t *testing.T) { + defer leaktest.Check(t)() + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + // MySQL Server starts... + fakedb := fakedb.New(log, 1) + defer fakedb.Close() + addr := fakedb.Addrs()[0] + + // Connection + conn, cleanup := MockClient(log, addr) + defer cleanup() + + // usedb + { + fakedb.AddQuery("USE MOCKDB", result2) + err := conn.UseDB("MOCKDB") + assert.Nil(t, err) + conn.Recycle() + } +} + +func TestConnectionKill(t *testing.T) { + defer leaktest.Check(t)() + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + // MySQL Server starts... + fakedb := fakedb.New(log, 1) + defer fakedb.Close() + addr := fakedb.Addrs()[0] + + // Connection + conn, cleanup := MockClient(log, addr) + defer cleanup() + + // kill + { + err := conn.Kill("kill.you") + assert.Nil(t, err) + } + + // check + { + fakedb.AddQuery("USE MOCKDB", result2) + err := conn.UseDB("MOCKDB") + assert.NotNil(t, err) + } +} + +func TestConnectionKillError(t *testing.T) { + defer leaktest.Check(t)() + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + // MySQL Server starts... + fakedb := fakedb.New(log, 1) + defer fakedb.Close() + addr := fakedb.Addrs()[0] + + // Connection + conn, cleanup := MockClient(log, addr) + defer cleanup() + + // kill + { + query := "kill 1" + fakedb.AddQueryError(query, errors.New("mock.kill.error")) + err := conn.Kill("kill.you") + want := "mock.kill.error (errno 1105) (sqlstate HY000)" + got := err.Error() + assert.Equal(t, want, got) + } +} + +func TestConnectionExecuteTimeout(t *testing.T) { + defer leaktest.Check(t)() + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + + // MySQL Server starts... 
+	fakedb := fakedb.New(log, 1)
+	defer fakedb.Close()
+
+	config := fakedb.BackendConfs()[0]
+	conn, cleanup := MockClientWithConfig(log, config)
+	defer cleanup()
+	// execute timeout
+	{
+		fakedb.AddQueryDelay("SELECT2", result2, 1000)
+		_, err := conn.ExecuteWithLimits("SELECT2", 100, 100)
+		assert.NotNil(t, err)
+	}
+}
+
+func TestConnectionMemoryCheck(t *testing.T) {
+	defer leaktest.Check(t)()
+	log := xlog.NewStdLog(xlog.Level(xlog.PANIC))
+
+	// MySQL Server starts...
+	fakedb := fakedb.New(log, 1)
+	defer fakedb.Close()
+
+	config := fakedb.BackendConfs()[0]
+	conn, cleanup := MockClientWithConfig(log, config)
+	defer cleanup()
+	{
+		fakedb.AddQuery("SELECT2", result2)
+		_, err := conn.ExecuteWithLimits("SELECT2", 0, 5)
+		want := "Query execution was interrupted, max memory usage[5 bytes] exceeded"
+		got := err.Error()
+		assert.Equal(t, want, got)
+	}
+}
+
+func TestConnectionClosed(t *testing.T) {
+	defer leaktest.Check(t)()
+	log := xlog.NewStdLog(xlog.Level(xlog.PANIC))
+
+	// MySQL Server starts...
+	fakedb := fakedb.New(log, 1)
+	defer fakedb.Close()
+	addr := fakedb.Addrs()[0]
+
+	// Connection
+	conn, cleanup := MockClient(log, addr)
+	defer cleanup()
+
+	// Close the connection.
+	{
+		conn.Close()
+		assert.True(t, conn.Closed())
+	}
+
+	// Execute queries on a closed connection.
+ { + _, err := conn.Execute("SELECT2") + // error: write tcp 127.0.0.1:33686->127.0.0.1:5917: use of closed network connection + assert.NotNil(t, err) + } +} + +/* +func TestConnectionRealServer(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + conf := &config.BackendConfig{ + Name: "node1", + Address: "127.0.0.1:3304", + User: "root", + Password: "", + DBName: "", + Charset: "utf8", + MaxConnections: 1024, + MaxMemoryUsage: 1024 * 1024 * 1024, + QueryTimeout: 20000, + } + + pool := NewPool(log, mysqlStats, conf) + conn := NewConnection(log, pool) + if err := conn.Dial(); err == nil { + defer conn.Close() + + // usedb + { + err := conn.UseDB("mysql") + assert.Nil(t, err) + } + + // create database + { + _, err := conn.Execute("create database if not exists test") + assert.Nil(t, err) + } + + // create table + { + _, err := conn.Execute("create table if not exists test.t1(a int)") + assert.Nil(t, err) + } + + // insert + { + r, err := conn.Execute("insert into test.t1 values(1),(2),(3)") + assert.Nil(t, err) + log.Debug("query:%+v", r) + } + + // selset + { + N := 10000 + now := time.Now() + for i := 0; i < N; i++ { + conn.Execute("select * from test.t1") + } + took := time.Since(now) + log.Debug(" LOOP\t%v COST %v, avg:%v/s", N, took, (int64(N)/(took.Nanoseconds()/1e6))*1000) + } + + // selset + { + N := 10000 + now := time.Now() + for i := 0; i < N; i++ { + conn.Execute("select * from test.t1") + } + took := time.Since(now) + log.Debug(" LOOP\t%v COST %v, avg:%v/s", N, took, (int64(N)/(took.Nanoseconds()/1e6))*1000) + } + + // usedb + { + err := conn.UseDB("test") + assert.Nil(t, err) + } + + // selset + { + N := 10000 + now := time.Now() + for i := 0; i < N; i++ { + conn.Execute("select * from t1") + } + took := time.Since(now) + log.Debug(" LOOP\t%v COST %v, avg:%v/s", N, took, (int64(N)/(took.Nanoseconds()/1e6))*1000) + } + log.Debug("--status:%s", mysqlStats.String()) + + // mysql.user + { + _, err := conn.Execute("select * from mysql.user") + 
assert.Nil(t, err) + } + + // drop database + { + _, err := conn.Execute("drop database test") + assert.Nil(t, err) + } + + // kill + { + err := conn.Kill("killme") + assert.Nil(t, err) + } + } +} +*/ diff --git a/src/backend/mock.go b/src/backend/mock.go new file mode 100644 index 00000000..04dc9282 --- /dev/null +++ b/src/backend/mock.go @@ -0,0 +1,145 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package backend + +import ( + "config" + "fakedb" + "fmt" + "time" + + querypb "github.com/xelabs/go-mysqlstack/sqlparser/depends/query" + "github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes" + "github.com/xelabs/go-mysqlstack/xlog" +) + +var ( + result1 = &sqltypes.Result{ + RowsAffected: 2, + Fields: []*querypb.Field{ + { + Name: "id", + Type: querypb.Type_INT32, + }, + { + Name: "name", + Type: querypb.Type_VARCHAR, + }, + }, + Rows: [][]sqltypes.Value{ + { + sqltypes.MakeTrusted(querypb.Type_INT32, []byte("11")), + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte("1nice name")), + }, + { + sqltypes.MakeTrusted(querypb.Type_INT32, []byte("12")), + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte("12nice name")), + }, + }, + } + result2 = &sqltypes.Result{ + RowsAffected: 2, + Fields: []*querypb.Field{ + { + Name: "id", + Type: querypb.Type_INT32, + }, + { + Name: "name", + Type: querypb.Type_VARCHAR, + }, + }, + Rows: [][]sqltypes.Value{ + { + sqltypes.MakeTrusted(querypb.Type_INT32, []byte("21")), + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte("2nice name")), + }, + { + sqltypes.MakeTrusted(querypb.Type_INT32, []byte("22")), + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte("22nice name")), + }, + }, + } +) + +// MockBackendConfigDefault mocks new backend config. 
+func MockBackendConfigDefault(name, addr string) *config.BackendConfig { + return &config.BackendConfig{ + Name: name, + Address: addr, + User: "mock", + Password: "pwd", + DBName: "sbtest", + Charset: "utf8", + MaxConnections: 1024, + } +} + +// MockScatter used to mock a scatter. +func MockScatter(log *xlog.Log, n int) (*Scatter, *fakedb.DB, func()) { + scatter := NewScatter(log, "") + fakedb := fakedb.New(log, n) + backends := make(map[string]*Pool) + addrs := fakedb.Addrs() + for i, addr := range addrs { + name := fmt.Sprintf("backend%d", i) + conf := MockBackendConfigDefault(name, addr) + pool := NewPool(log, conf) + backends[name] = pool + } + scatter.backends = backends + + return scatter, fakedb, func() { + fakedb.Close() + scatter.Close() + } +} + +// MockClient mocks a client connection. +func MockClient(log *xlog.Log, addr string) (Connection, func()) { + return MockClientWithConfig(log, MockBackendConfigDefault("", addr)) +} + +// MockClientWithConfig mocks a client with backendconfig. +func MockClientWithConfig(log *xlog.Log, conf *config.BackendConfig) (Connection, func()) { + pool := NewPool(log, conf) + conn := NewConnection(log, pool) + if err := conn.Dial(); err != nil { + log.Panic("mock.conn.with.config.error:%+v", err) + } + return conn, func() { + pool.Close() + } +} + +// MockTxnMgr mocks txn manager. 
+func MockTxnMgr(log *xlog.Log, n int) (*fakedb.DB, *TxnManager, map[string]*Pool, *Pool, []string, func()) { + fakedb := fakedb.New(log, n+1) + backends := make(map[string]*Pool) + addrs := fakedb.Addrs() + for i := 0; i < len(addrs)-1; i++ { + addr := addrs[i] + conf := MockBackendConfigDefault(addr, addr) + pool := NewPool(log, conf) + backends[addr] = pool + } + + addr := addrs[len(addrs)-1] + conf := MockBackendConfigDefault(addr, addr) + backup := NewPool(log, conf) + txnMgr := NewTxnManager(log) + return fakedb, txnMgr, backends, backup, addrs, func() { + time.Sleep(time.Millisecond * 10) + for _, v := range backends { + v.Close() + } + backup.Close() + fakedb.Close() + } +} diff --git a/src/backend/pool.go b/src/backend/pool.go new file mode 100644 index 00000000..0cbece35 --- /dev/null +++ b/src/backend/pool.go @@ -0,0 +1,170 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package backend + +import ( + "bytes" + "config" + "errors" + "fmt" + "sync" + "sync/atomic" + "time" + "xbase/stats" + + "github.com/xelabs/go-mysqlstack/xlog" +) + +var ( + poolCounterPing = "#pool.ping" + poolCounterPingBroken = "#pool.ping.broken" + poolCounterHit = "#pool.hit" + poolCounterMiss = "#pool.miss" + poolCounterGet = "#pool.get" + poolCounterPut = "#pool.put" + poolCounterClose = "#pool.close" + + poolCounterBackendDialError = "#backend.dial.error" + poolCounterBackendExecuteTimeout = "#backend.execute.timeout" + poolCounterBackendExecuteMaxresult = "#backend.execute.maxresult" + poolCounterBackendExecuteAllError = "#backend.execute.all.error" + poolCounterBackendKilled = "#backend.killed" +) + +var ( + maxIdleTime = 20 // 20s + errClosed = errors.New("can't get connection from the closed DB") +) + +// Pool tuple. 
+type Pool struct { + mu sync.RWMutex + log *xlog.Log + conf *config.BackendConfig + counters *stats.Counters + connections chan Connection + + // If maxIdleTime reached, the connection will be closed by get. + maxIdleTime int64 +} + +// NewPool creates the new Pool. +func NewPool(log *xlog.Log, conf *config.BackendConfig) *Pool { + p := &Pool{ + log: log, + conf: conf, + connections: make(chan Connection, conf.MaxConnections), + counters: stats.NewCounters(conf.Name + "@" + conf.Address), + maxIdleTime: int64(maxIdleTime), + } + return p +} + +func (p *Pool) reconnect() (Connection, error) { + log := p.log + c := NewConnection(log, p) + if err := c.Dial(); err != nil { + log.Error("pool.reconnect.dial.error:%+v", err) + return nil, err + } + c.SetTimestamp(time.Now().Unix()) + return c, nil +} + +// Get used to get a connection from the pool. +func (p *Pool) Get() (Connection, error) { + counters := p.counters + counters.Add(poolCounterGet, 1) + + conns := p.getConns() + if conns == nil { + return nil, errClosed + } + + select { + case conn, more := <-conns: + if !more { + return nil, errClosed + } + // If the idle time more than 1s, + // we will do a ping to check the connection is OK or NOT. + now := time.Now().Unix() + elapsed := (now - conn.Timestamp()) + if elapsed > 1 { + // If elasped time more than 20s, we create new one. + if elapsed > atomic.LoadInt64(&p.maxIdleTime) { + conn.Close() + return p.reconnect() + } + + if err := conn.Ping(); err != nil { + counters.Add(poolCounterPingBroken, 1) + return p.reconnect() + } + counters.Add(poolCounterPing, 1) + } + counters.Add(poolCounterHit, 1) + return conn, nil + default: + counters.Add(poolCounterMiss, 1) + return p.reconnect() + } +} + +// Put used to put a connection to pool. 
+func (p *Pool) Put(conn Connection) { + p.put(conn, true) +} + +func (p *Pool) put(conn Connection, updateTs bool) { + p.counters.Add(poolCounterPut, 1) + p.mu.Lock() + defer p.mu.Unlock() + if p.connections == nil { + return + } + + if updateTs { + conn.SetTimestamp(time.Now().Unix()) + } + select { + case p.connections <- conn: + default: + conn.Close() + } +} + +// Close used to close the pool. +func (p *Pool) Close() { + p.counters.Add(poolCounterClose, 1) + p.mu.Lock() + defer p.mu.Unlock() + if p.connections == nil { + return + } + close(p.connections) + for conn := range p.connections { + conn.Close() + } + p.connections = nil +} + +func (p *Pool) getConns() chan Connection { + p.mu.RLock() + defer p.mu.RUnlock() + return p.connections +} + +// JSON returns the available string. +// available is the number of currently unused connections. +func (p *Pool) JSON() string { + b := bytes.NewBuffer(make([]byte, 0, 256)) + fmt.Fprintf(b, `{"name": "%s","capacity": %d, "counters":"%s"}`, p.conf.Name, p.conf.MaxConnections, p.counters.String()) + return b.String() +} diff --git a/src/backend/pool_test.go b/src/backend/pool_test.go new file mode 100644 index 00000000..58a68bfd --- /dev/null +++ b/src/backend/pool_test.go @@ -0,0 +1,181 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package backend + +import ( + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/fortytw2/leaktest" + "github.com/stretchr/testify/assert" + + "github.com/xelabs/go-mysqlstack/driver" + "github.com/xelabs/go-mysqlstack/xlog" +) + +func TestPool(t *testing.T) { + defer leaktest.Check(t)() + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + + // MySQL Server starts... 
+ th := driver.NewTestHandler(log) + svr, err := driver.MockMysqlServer(log, th) + assert.Nil(t, err) + defer svr.Close() + addr := svr.Addr() + + // Connection + conf := MockBackendConfigDefault("node1", addr) + conf.MaxConnections = 64 + pool := NewPool(log, conf) + + // get + { + _, err := pool.Get() + assert.Nil(t, err) + } + + // put + { + for i := 0; i < conf.MaxConnections+100; i++ { + conn := NewConnection(log, pool) + err = conn.Dial() + assert.Nil(t, err) + pool.Put(conn) + } + want := "{\"name\": \"node1\",\"capacity\": 64, \"counters\":\"{\"#pool.get\": 1, \"#pool.miss\": 1, \"#pool.put\": 164}\"}" + got := pool.JSON() + assert.Equal(t, want, got) + } + + // clean + { + pool.Close() + _, err = pool.Get() + assert.NotNil(t, err) + } +} + +func TestPoolConcurrent(t *testing.T) { + var wg sync.WaitGroup + defer leaktest.Check(t)() + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + + // MySQL Server starts... + th := driver.NewTestHandler(log) + svr, err := driver.MockMysqlServer(log, th) + assert.Nil(t, err) + defer svr.Close() + addr := svr.Addr() + + // Connection + conf := MockBackendConfigDefault(addr, addr) + conf.MaxConnections = 64 + pool := NewPool(log, conf) + + ch1 := make(chan bool) + ch2 := make(chan bool) + // get + { + wg.Add(1) + go func() { + defer wg.Done() + for { + select { + case <-ch1: + return + default: + pool.Get() + } + } + }() + } + + // put + { + wg.Add(1) + go func() { + defer wg.Done() + for { + select { + case <-ch2: + return + default: + conn := NewConnection(log, pool) + conn.Dial() + pool.Put(conn) + } + } + }() + } + + time.Sleep(time.Second) + pool.Close() + + close(ch1) + close(ch2) + wg.Wait() +} + +func TestPoolTimeout(t *testing.T) { + var wg sync.WaitGroup + defer leaktest.Check(t)() + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + + // MySQL Server starts... 
+ th := driver.NewTestHandler(log) + svr, err := driver.MockMysqlServer(log, th) + assert.Nil(t, err) + defer svr.Close() + addr := svr.Addr() + + // Connection + conf := MockBackendConfigDefault(addr, addr) + conf.MaxConnections = 64 + pool := NewPool(log, conf) + + ch2 := make(chan bool) + + // put + { + wg.Add(1) + go func() { + defer wg.Done() + for { + select { + case <-ch2: + return + default: + conn := NewConnection(log, pool) + conn.Dial() + pool.Put(conn) + } + } + }() + } + + time.Sleep(time.Second * 2) + // Reset maxIdleTime + atomic.StoreInt64(&pool.maxIdleTime, 1) + for i := 0; i < 100; i++ { + pool.Get() + } + + // Reset maxIdleTime + atomic.StoreInt64(&pool.maxIdleTime, 10) + time.Sleep(time.Second * 2) + for i := 0; i < 100; i++ { + pool.Get() + } + pool.Close() + close(ch2) + wg.Wait() +} diff --git a/src/backend/queryz.go b/src/backend/queryz.go new file mode 100644 index 00000000..0981d29d --- /dev/null +++ b/src/backend/queryz.go @@ -0,0 +1,103 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + * This code was derived from https://github.com/youtube/vitess. 
+ */ + +package backend + +import ( + "sort" + "sync" + "time" + "xbase" +) + +// QueryDetail is a simple wrapper for Query +type QueryDetail struct { + ID uint64 + connID uint32 + query string + conn Connection + start time.Time +} + +// NewQueryDetail creates a new QueryDetail +func NewQueryDetail(conn Connection, query string) *QueryDetail { + q := xbase.TruncateQuery(query, 256) + return &QueryDetail{conn: conn, connID: conn.ID(), query: q, start: time.Now()} +} + +// Queryz holds a thread safe list of QueryDetails +type Queryz struct { + ID uint64 + mu sync.RWMutex + queryDetails map[uint64]*QueryDetail +} + +// NewQueryz creates a new Queryz +func NewQueryz() *Queryz { + return &Queryz{queryDetails: make(map[uint64]*QueryDetail)} +} + +// Add adds a QueryDetail to Queryz +func (qz *Queryz) Add(qd *QueryDetail) { + qz.mu.Lock() + defer qz.mu.Unlock() + qz.ID++ + qd.ID = qz.ID + qz.queryDetails[qd.ID] = qd +} + +// Remove removes a QueryDetail from Queryz +func (qz *Queryz) Remove(qd *QueryDetail) { + qz.mu.Lock() + defer qz.mu.Unlock() + delete(qz.queryDetails, qd.ID) +} + +// QueryDetailzRow is used for rendering QueryDetail in a template +type QueryDetailzRow struct { + Start time.Time + Duration time.Duration + ConnID uint32 + Query string + Address string + Color string +} + +type byStartTime []QueryDetailzRow + +func (a byStartTime) Len() int { return len(a) } +func (a byStartTime) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byStartTime) Less(i, j int) bool { return a[i].Start.Before(a[j].Start) } + +// GetQueryzRows returns a list of QueryDetailzRow sorted by start time +func (qz *Queryz) GetQueryzRows() []QueryDetailzRow { + qz.mu.RLock() + rows := []QueryDetailzRow{} + for _, qd := range qz.queryDetails { + row := QueryDetailzRow{ + Query: qd.query, + Address: qd.conn.Address(), + Start: qd.start, + Duration: time.Since(qd.start), + ConnID: qd.connID, + } + if row.Duration < 10*time.Millisecond { + row.Color = "low" + } else if row.Duration < 
100*time.Millisecond { + row.Color = "medium" + } else { + row.Color = "high" + } + + rows = append(rows, row) + } + qz.mu.RUnlock() + sort.Sort(byStartTime(rows)) + return rows +} diff --git a/src/backend/queryz_test.go b/src/backend/queryz_test.go new file mode 100644 index 00000000..6acb3a2a --- /dev/null +++ b/src/backend/queryz_test.go @@ -0,0 +1,67 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package backend + +import ( + "fakedb" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/xelabs/go-mysqlstack/xlog" +) + +func TestQueryz(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + // MySQL Server starts... + fakedb := fakedb.New(log, 1) + defer fakedb.Close() + addr := fakedb.Addrs()[0] + conf := MockBackendConfigDefault(addr, addr) + pool := NewPool(log, conf) + + querys := []string{ + "SELECT1", + "SELECT2", + } + + // conn1 + conn1 := NewConnection(log, pool) + err := conn1.Dial() + assert.Nil(t, err) + + // conn2 + conn2 := NewConnection(log, pool) + err = conn2.Dial() + assert.Nil(t, err) + + // set conds + fakedb.AddQueryDelay(querys[0], result1, 200) + fakedb.AddQueryDelay(querys[1], result1, 205) + + // QueryRows + { + e1 := func(q string) { + conn1.Execute(q) + } + + e2 := func(q string) { + conn2.Execute(q) + } + go e1(querys[0]) + time.Sleep(100 * time.Millisecond) + go e2(querys[1]) + + time.Sleep(50 * time.Millisecond) + rows := qz.GetQueryzRows() + assert.Equal(t, querys[0], rows[0].Query) + assert.Equal(t, querys[1], rows[1].Query) + } +} diff --git a/src/backend/scatter.go b/src/backend/scatter.go new file mode 100644 index 00000000..3a262e07 --- /dev/null +++ b/src/backend/scatter.go @@ -0,0 +1,302 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. 
+ * + */ + +package backend + +import ( + "config" + "io/ioutil" + "os" + "path" + "sort" + "sync" + + "github.com/pkg/errors" + + "github.com/xelabs/go-mysqlstack/xlog" +) + +const ( + backendjson = "backend.json" +) + +// Scatter tuple. +type Scatter struct { + log *xlog.Log + mu sync.RWMutex + txnMgr *TxnManager + metadir string + backends map[string]*Pool + backup *Pool +} + +// NewScatter creates a new scatter. +func NewScatter(log *xlog.Log, metadir string) *Scatter { + return &Scatter{ + log: log, + txnMgr: NewTxnManager(log), + metadir: metadir, + backends: make(map[string]*Pool), + } +} + +// Add backend node. +func (scatter *Scatter) add(config *config.BackendConfig) error { + log := scatter.log + log.Warning("scatter.add:%v", config) + + if _, ok := scatter.backends[config.Name]; ok { + return errors.Errorf("scatter.backend[%v].duplicate", config.Name) + } + pool := NewPool(scatter.log, config) + scatter.backends[config.Name] = pool + return nil +} + +// Add used to add a new backend to scatter. +func (scatter *Scatter) Add(config *config.BackendConfig) error { + scatter.mu.Lock() + defer scatter.mu.Unlock() + return scatter.add(config) +} + +func (scatter *Scatter) remove(config *config.BackendConfig) error { + log := scatter.log + log.Warning("scatter.remove:%v", config) + + pool, ok := scatter.backends[config.Name] + if !ok { + return errors.Errorf("scatter.backend[%v].can.not.be.found", config.Name) + } + delete(scatter.backends, config.Name) + pool.Close() + return nil +} + +// Remove used to remove a backend from the scatter. 
+func (scatter *Scatter) Remove(config *config.BackendConfig) error { + scatter.mu.Lock() + defer scatter.mu.Unlock() + return scatter.remove(config) +} + +func (scatter *Scatter) addBackup(config *config.BackendConfig) error { + log := scatter.log + log.Warning("scatter.add.backup:%v", config) + + if scatter.backup != nil { + return errors.Errorf("scatter.backup.node[%+v].duplicate", config.Name) + } + + pool := NewPool(scatter.log, config) + scatter.backup = pool + return nil +} + +// AddBackup used to add the backup node to scatter. +func (scatter *Scatter) AddBackup(config *config.BackendConfig) error { + scatter.mu.Lock() + defer scatter.mu.Unlock() + return scatter.addBackup(config) +} + +// Remove backup node. +func (scatter *Scatter) removeBackup(config *config.BackendConfig) error { + log := scatter.log + log.Warning("scatter.remove.backup:%v", config) + if scatter.backup != nil && scatter.backup.conf.Name == config.Name { + scatter.backup.Close() + scatter.backup = nil + } else { + return errors.Errorf("scatter.backup[%v].can.not.be.found", config.Name) + } + return nil +} + +// RemoveBackup used to remove the backup from the scatter. +func (scatter *Scatter) RemoveBackup(config *config.BackendConfig) error { + scatter.mu.Lock() + defer scatter.mu.Unlock() + return scatter.removeBackup(config) +} + +// HasBackup used to check the backup node whether nil. +func (scatter *Scatter) HasBackup() bool { + scatter.mu.RLock() + defer scatter.mu.RUnlock() + return scatter.backup != nil +} + +// Close used to clean the pools connections. 
+func (scatter *Scatter) Close() {
+	scatter.mu.Lock()
+	defer scatter.mu.Unlock()
+
+	log := scatter.log
+	log.Info("scatter.prepare.to.close....")
+	scatter.clear()
+	log.Info("scatter.close.done....")
+}
+
+func (scatter *Scatter) clear() {
+	for _, v := range scatter.backends {
+		v.Close()
+	}
+	scatter.backends = make(map[string]*Pool)
+
+	if scatter.backup != nil {
+		scatter.backup.Close()
+		scatter.backup = nil
+	}
+}
+
+// FlushConfig used to write the backends to file.
+func (scatter *Scatter) FlushConfig() error {
+	scatter.mu.Lock()
+	defer scatter.mu.Unlock()
+
+	log := scatter.log
+	file := path.Join(scatter.metadir, backendjson)
+
+	var backends config.BackendsConfig
+	for _, v := range scatter.backends {
+		backends.Backends = append(backends.Backends, v.conf)
+	}
+
+	// backup.
+	if scatter.backup != nil {
+		backends.Backup = scatter.backup.conf
+	}
+
+	log.Warning("scatter.flush.to.file[%v].backends.conf:%+v, backup.node:%+v", file, backends.Backends, backends.Backup)
+	if err := config.WriteConfig(file, backends); err != nil {
+		log.Panicf("scatter.flush.config.to.file[%v].error:%v", file, err)
+		return err
+	}
+	if err := config.UpdateVersion(scatter.metadir); err != nil {
+		log.Panicf("scatter.flush.config.update.version.error:%v", err)
+		return err
+	}
+	return nil
+}
+
+// LoadConfig used to load all backends from metadir/backend.json file.
+func (scatter *Scatter) LoadConfig() error {
+	scatter.mu.Lock()
+	defer scatter.mu.Unlock()
+
+	// Do the cleanup first.
+	scatter.clear()
+
+	log := scatter.log
+	metadir := scatter.metadir
+	file := path.Join(metadir, backendjson)
+
+	// Create it if the backends config does not exist.
+ if _, err := os.Stat(file); os.IsNotExist(err) { + backends := config.BackendsConfig{} + if err := config.WriteConfig(file, backends); err != nil { + log.Error("scatter.flush.backends.to.file[%v].error:%v", file, err) + return err + } + } + + data, err := ioutil.ReadFile(file) + if err != nil { + log.Error("scatter.load.from.file[%v].error:%v", file, err) + return err + } + conf, err := config.ReadBackendsConfig(string(data)) + if err != nil { + log.Error("scatter.parse.json.file[%v].error:%v", file, err) + return err + } + for _, backend := range conf.Backends { + if err := scatter.add(backend); err != nil { + log.Error("scatter.add.backend[%+v].error:%v", backend, err) + return err + } + log.Warning("scatter.load.backend:%+v", backend) + } + + // Add backup node. + if conf.Backup != nil { + if err := scatter.addBackup(conf.Backup); err != nil { + log.Error("scatter.add.backup[%+v].error:%v", conf.Backup, err) + return err + } + log.Warning("scatter.load.backup:%+v", conf.Backup) + } + return nil +} + +// Backends returns all backends. +func (scatter *Scatter) Backends() []string { + var backends []string + scatter.mu.RLock() + defer scatter.mu.RUnlock() + for k := range scatter.backends { + backends = append(backends, k) + } + sort.Strings(backends) + return backends +} + +// PoolClone used to copy backends to new map. +func (scatter *Scatter) PoolClone() map[string]*Pool { + poolMap := make(map[string]*Pool) + scatter.mu.RLock() + defer scatter.mu.RUnlock() + for k, v := range scatter.backends { + poolMap[k] = v + } + return poolMap +} + +// BackupPool returns the backup pool. +func (scatter *Scatter) BackupPool() *Pool { + scatter.mu.RLock() + defer scatter.mu.RUnlock() + return scatter.backup +} + +// BackupBackend returns the backup name. +func (scatter *Scatter) BackupBackend() string { + scatter.mu.RLock() + defer scatter.mu.RUnlock() + return scatter.backup.conf.Name +} + +// BackupConfig returns the config of backup. +// Used for backup rebuild. 
+func (scatter *Scatter) BackupConfig() *config.BackendConfig { + scatter.mu.RLock() + defer scatter.mu.RUnlock() + return scatter.backup.conf +} + +// BackendConfigsClone used to clone all the backend configs. +func (scatter *Scatter) BackendConfigsClone() []*config.BackendConfig { + scatter.mu.RLock() + defer scatter.mu.RUnlock() + beConfigs := make([]*config.BackendConfig, 0, 16) + for _, v := range scatter.backends { + beConfigs = append(beConfigs, v.conf) + } + return beConfigs +} + +// CreateTransaction used to create a transaction. +func (scatter *Scatter) CreateTransaction() (*Txn, error) { + return scatter.txnMgr.CreateTxn(scatter.PoolClone()) +} + +// CreateBackupTransaction used to create a backup transaction. +func (scatter *Scatter) CreateBackupTransaction() (*BackupTxn, error) { + return scatter.txnMgr.CreateBackupTxn(scatter.backup) +} diff --git a/src/backend/scatter_test.go b/src/backend/scatter_test.go new file mode 100644 index 00000000..ceccf3a9 --- /dev/null +++ b/src/backend/scatter_test.go @@ -0,0 +1,240 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. 
+ * + */ + +package backend + +import ( + "fakedb" + "os" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/xelabs/go-mysqlstack/xlog" +) + +func TestScatterAddRemove(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + scatter := NewScatter(log, "/tmp/") + fakedb := fakedb.New(log, 2) + defer fakedb.Close() + addrs := fakedb.Addrs() + config1 := MockBackendConfigDefault("node1", addrs[0]) + + // add + { + err := scatter.Add(config1) + assert.Nil(t, err) + } + + // duplicate + { + err := scatter.Add(config1) + assert.NotNil(t, err) + } + + // remove + { + err := scatter.Remove(config1) + assert.Nil(t, err) + } + + // remove again + { + err := scatter.Remove(config1) + assert.NotNil(t, err) + } + + // flush config + { + err := scatter.FlushConfig() + assert.Nil(t, err) + } + + // load config + { + err := scatter.LoadConfig() + assert.Nil(t, err) + } +} + +func TestScatterLoadConfig(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + scatter := NewScatter(log, "/tmp/") + fakedb := fakedb.New(log, 2) + defer fakedb.Close() + addrs := fakedb.Addrs() + config1 := MockBackendConfigDefault("node1", addrs[0]) + config2 := MockBackendConfigDefault("node2", addrs[1]) + + // add config1. + { + err := scatter.Add(config1) + assert.Nil(t, err) + } + + // add config2. + { + err := scatter.Add(config2) + assert.Nil(t, err) + } + + // flush config. + { + err := scatter.FlushConfig() + assert.Nil(t, err) + } + + // load config. + { + want := scatter.backends["node1"].conf + err := scatter.LoadConfig() + assert.Nil(t, err) + got := scatter.backends["node1"].conf + assert.Equal(t, want, got) + } + + // load config again. 
+ { + want := scatter.backends["node2"].conf + err := scatter.LoadConfig() + assert.Nil(t, err) + got := scatter.backends["node2"].conf + assert.Equal(t, want, got) + } +} + +func TestScatterAddRemoveBackup(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + scatter := NewScatter(log, "/tmp/") + fakedb := fakedb.New(log, 2) + defer fakedb.Close() + addrs := fakedb.Addrs() + + config1 := MockBackendConfigDefault("node1", addrs[0]) + backup1 := MockBackendConfigDefault("backup", addrs[1]) + + // add normal. + { + err := scatter.Add(config1) + assert.Nil(t, err) + assert.Equal(t, false, scatter.HasBackup()) + } + + // add backup. + { + err := scatter.AddBackup(backup1) + assert.Nil(t, err) + } + + // add backup again. + { + err := scatter.AddBackup(backup1) + assert.NotNil(t, err) + } + + // backup name. + { + got := scatter.BackupBackend() + want := "backup" + assert.Equal(t, want, got) + } + + // backup pool. + { + got := scatter.BackupPool() + want := scatter.backup + assert.Equal(t, want, got) + } + + // backup config. + { + got := scatter.BackupConfig() + want := scatter.backup.conf + assert.Equal(t, want, got) + } + + // flush config + { + err := scatter.FlushConfig() + assert.Nil(t, err) + } + + // remove backup. + { + err := scatter.RemoveBackup(backup1) + assert.Nil(t, err) + } + + // remove backup again. + { + err := scatter.RemoveBackup(backup1) + assert.NotNil(t, err) + } + + // remove config1. 
+ { + err := scatter.Remove(config1) + assert.Nil(t, err) + } + + // load config + { + err := scatter.LoadConfig() + assert.Nil(t, err) + assert.Equal(t, backup1, scatter.backup.conf) + assert.Equal(t, true, scatter.HasBackup()) + } +} + +func TestScatter(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + scatter := NewScatter(log, "/tmp/") + defer scatter.Close() + + fakedb := fakedb.New(log, 2) + defer fakedb.Close() + addrs := fakedb.Addrs() + + // add + { + config1 := MockBackendConfigDefault("node1", addrs[0]) + err := scatter.add(config1) + assert.Nil(t, err) + } + // backends + { + backends := scatter.Backends() + assert.Equal(t, "node1", backends[0]) + } + + // pool clone. + { + clone := scatter.PoolClone() + assert.Equal(t, clone["node1"], scatter.backends["node1"]) + } + + // backends config clone. + { + clone := scatter.BackendConfigsClone() + assert.Equal(t, clone[0], scatter.backends["node1"].conf) + } + + // create txn. + { + _, err := scatter.CreateTransaction() + assert.Nil(t, err) + } +} + +func TestScatterLoadNotExists(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + scatter := NewScatter(log, "/tmp/") + os.Remove("/tmp/backend.json") + err := scatter.LoadConfig() + assert.Nil(t, err) +} diff --git a/src/backend/stats.go b/src/backend/stats.go new file mode 100644 index 00000000..455bea07 --- /dev/null +++ b/src/backend/stats.go @@ -0,0 +1,61 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package backend + +import ( + "time" + "xbase/stats" +) + +var ( + // mysqlStats shows the time histogram for operations spent on mysql side. + mysqlStats = stats.NewTimings("MySQL") + + // queryStats shows the time histogram for each type of queries. + queryStats = stats.NewTimings("Query") + + // queryRates shows the qps of QueryStats. Sample every 5 seconds and keep samples for 1. 
+ queryRates = stats.NewRates("QPS", queryStats, 1, 5*time.Second) + + // for transactions. + txnCounters = stats.NewCounters("TxnCounters") + + tz = NewTxnz() + qz = NewQueryz() +) + +// Queryz returns the queryz. +func (scatter *Scatter) Queryz() *Queryz { + return qz +} + +// Txnz returns the txnz. +func (scatter *Scatter) Txnz() *Txnz { + return tz +} + +// MySQLStats returns the mysql stats. +func (scatter *Scatter) MySQLStats() *stats.Timings { + return mysqlStats +} + +// QueryStats returns the query stats. +func (scatter *Scatter) QueryStats() *stats.Timings { + return queryStats +} + +// QueryRates returns the query rates. +func (scatter *Scatter) QueryRates() *stats.Rates { + return queryRates +} + +// TxnCounters returns the txn counters. +func (scatter *Scatter) TxnCounters() *stats.Counters { + return txnCounters +} diff --git a/src/backend/stats_test.go b/src/backend/stats_test.go new file mode 100644 index 00000000..7be98547 --- /dev/null +++ b/src/backend/stats_test.go @@ -0,0 +1,38 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package backend + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/xelabs/go-mysqlstack/xlog" +) + +func TestStats(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + scatter := NewScatter(log, "") + // Others. 
+ { + assert.NotNil(t, scatter.Queryz()) + assert.NotNil(t, scatter.Txnz()) + + assert.NotNil(t, scatter.MySQLStats()) + log.Debug(scatter.MySQLStats().String()) + + assert.NotNil(t, scatter.QueryStats()) + log.Debug(scatter.QueryStats().String()) + + assert.NotNil(t, scatter.QueryRates()) + log.Debug(scatter.QueryRates().String()) + + assert.NotNil(t, scatter.TxnCounters()) + log.Debug(scatter.TxnCounters().String()) + } +} diff --git a/src/backend/txn.go b/src/backend/txn.go new file mode 100644 index 00000000..724d1549 --- /dev/null +++ b/src/backend/txn.go @@ -0,0 +1,809 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package backend + +import ( + "fmt" + "sync" + "time" + "xcontext" + + "github.com/pkg/errors" + "xbase/sync2" + + "github.com/xelabs/go-mysqlstack/driver" + "github.com/xelabs/go-mysqlstack/sqldb" + "github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes" + "github.com/xelabs/go-mysqlstack/xlog" +) + +var ( + txnCounterTxnCreate = "#txn.create" + txnCounterTwopcConnectionError = "#get.twopc.connection.error" + txnCounterNormalConnectionError = "#get.normal.connection.error" + txnCounterXaStart = "#xa.start" + txnCounterXaStartError = "#xa.start.error" + txnCounterXaEnd = "#xa.end" + txnCounterXaEndError = "#xa.end.error" + txnCounterXaPrepare = "#xa.prepare" + txnCounterXaPrepareError = "#xa.prepare.error" + txnCounterXaCommit = "#xa.commit" + txnCounterXaCommitError = "#xa.commit.error" + txnCounterXaRollback = "#xa.rollback" + txnCounterXaRollbackError = "#xa.rollback.error" + txnCounterTxnBegin = "#txn.begin" + txnCounterTxnFinish = "#txn.finish" + txnCounterTxnAbort = "#txn.abort" +) + +type txnState int32 + +const ( + txnStateLive txnState = iota + txnStateBeginning + txnStateExecutingTwoPC + txnStateExecutingNormal + txnStateRollbacking + txnStateCommitting + txnStateFinshing + txnStateAborting +) + +type txnXAState int32 + +const ( + txnXAStateNone txnXAState = iota + 
txnXAStateStart + txnXAStateStartFinished + txnXAStateEnd + txnXAStateEndFinished + txnXAStatePrepare + txnXAStatePrepareFinished + txnXAStateCommit + txnXAStateCommitFinished + txnXAStateRollback + txnXAStateRollbackFinished +) + +// Transaction interface. +type Transaction interface { + XID() string + TxID() uint64 + State() int32 + XaState() int32 + Abort() error +} + +// Txn tuple. +type Txn struct { + log *xlog.Log + id uint64 + xid string + mu sync.Mutex + mgr *TxnManager + req *xcontext.RequestContext + txnd *TxnDetail + twopc bool + start time.Time + state sync2.AtomicInt32 + xaState sync2.AtomicInt32 + backends map[string]*Pool + timeout int + maxResult int + errors int + twopcConnections map[string]Connection + normalConnections []Connection + twopcConnMu sync.RWMutex + normalConnMu sync.RWMutex +} + +// NewTxn creates the new Txn. +func NewTxn(log *xlog.Log, txid uint64, mgr *TxnManager, backends map[string]*Pool) (*Txn, error) { + txn := &Txn{ + log: log, + id: txid, + mgr: mgr, + backends: backends, + start: time.Now(), + twopcConnections: make(map[string]Connection), + normalConnections: make([]Connection, 0, 8), + state: sync2.NewAtomicInt32(int32(txnStateLive)), + } + txnd := NewTxnDetail(txn) + txn.txnd = txnd + tz.Add(txnd) + txnCounters.Add(txnCounterTxnCreate, 1) + return txn, nil +} + +// SetTimeout used to set the txn timeout. +func (txn *Txn) SetTimeout(timeout int) { + txn.timeout = timeout +} + +// SetMaxResult used to set the txn max result. +func (txn *Txn) SetMaxResult(max int) { + txn.maxResult = max +} + +// TxID returns txn id. +func (txn *Txn) TxID() uint64 { + return txn.id +} + +// XID returns txn xid. +func (txn *Txn) XID() string { + return txn.xid +} + +// State returns txn.state. +func (txn *Txn) State() int32 { + return txn.state.Get() +} + +// XaState returns txn xastate. 
+func (txn *Txn) XaState() int32 { + return txn.xaState.Get() +} + +func (txn *Txn) incErrors() { + txn.errors++ +} + +// twopcConnection used to get a connection via backend name from pool. +// The connection is stored in twopcConnections. +func (txn *Txn) twopcConnection(backend string) (Connection, error) { + var err error + + txn.twopcConnMu.RLock() + conn, ok := txn.twopcConnections[backend] + txn.twopcConnMu.RUnlock() + if !ok { + pool, ok := txn.backends[backend] + if !ok { + txnCounters.Add(txnCounterTwopcConnectionError, 1) + return nil, errors.Errorf("txn.can.not.get.twopc.connection.by.backend[%+v].from.pool", backend) + } + conn, err = pool.Get() + if err != nil { + return nil, err + } + txn.twopcConnMu.Lock() + txn.twopcConnections[backend] = conn + txn.twopcConnMu.Unlock() + } + return conn, nil +} + +func (txn *Txn) reFetchTwopcConnection(backend string) (Connection, error) { + txn.twopcConnMu.Lock() + conn, ok := txn.twopcConnections[backend] + if ok { + delete(txn.twopcConnections, backend) + conn.Close() + } + txn.twopcConnMu.Unlock() + return txn.twopcConnection(backend) +} + +// normalConnection used to get a connection via backend name from pool. +// The Connection is stored in normalConnections for recycling. 
+func (txn *Txn) normalConnection(backend string) (Connection, error) { + pool, ok := txn.backends[backend] + if !ok { + txnCounters.Add(txnCounterNormalConnectionError, 1) + return nil, errors.Errorf("txn.can.not.get.normal.connection.by.backend[%+v].from.pool", backend) + } + conn, err := pool.Get() + if err != nil { + return nil, err + } + txn.normalConnMu.Lock() + txn.normalConnections = append(txn.normalConnections, conn) + txn.normalConnMu.Unlock() + return conn, nil +} + +func (txn *Txn) fetchOneConnection(back string) (Connection, error) { + var err error + var conn Connection + if txn.twopc { + if conn, err = txn.twopcConnection(back); err != nil { + return nil, err + } + } else { + if conn, err = txn.normalConnection(back); err != nil { + return nil, err + } + } + return conn, nil +} + +func (txn *Txn) xaStart() error { + txnCounters.Add(txnCounterXaStart, 1) + txn.xaState.Set(int32(txnXAStateStart)) + defer func() { txn.xaState.Set(int32(txnXAStateStartFinished)) }() + + txn.xid = fmt.Sprintf("RXID-%v-%v", time.Now().Format("20060102150405"), txn.id) + start := fmt.Sprintf("XA START '%v'", txn.xid) + if err := txn.executeXACommand(start, txnXAStateStart); err != nil { + txnCounters.Add(txnCounterXaStartError, 1) + txn.incErrors() + return err + } + return nil +} + +func (txn *Txn) xaEnd() error { + txnCounters.Add(txnCounterXaEnd, 1) + txn.xaState.Set(int32(txnXAStateEnd)) + defer func() { txn.xaState.Set(int32(txnXAStateEndFinished)) }() + + end := fmt.Sprintf("XA END '%v'", txn.xid) + if err := txn.executeXACommand(end, txnXAStateEnd); err != nil { + txnCounters.Add(txnCounterXaEndError, 1) + txn.incErrors() + return err + } + return nil +} + +func (txn *Txn) xaPrepare() error { + txnCounters.Add(txnCounterXaPrepare, 1) + txn.xaState.Set(int32(txnXAStatePrepare)) + defer func() { txn.xaState.Set(int32(txnXAStatePrepareFinished)) }() + + prepare := fmt.Sprintf("XA PREPARE '%v'", txn.xid) + if err := txn.executeXACommand(prepare, txnXAStatePrepare); err 
!= nil { + txnCounters.Add(txnCounterXaPrepareError, 1) + txn.incErrors() + return err + } + return nil +} + +func (txn *Txn) xaCommit() { + txnCounters.Add(txnCounterXaCommit, 1) + txn.xaState.Set(int32(txnXAStateCommit)) + defer func() { txn.xaState.Set(int32(txnXAStateCommitFinished)) }() + + commit := fmt.Sprintf("XA COMMIT '%v'", txn.xid) + if err := txn.executeXACommand(commit, txnXAStateCommit); err != nil { + txn.incErrors() + txnCounters.Add(txnCounterXaCommitError, 1) + } +} + +func (txn *Txn) xaRollback() error { + txnCounters.Add(txnCounterXaRollback, 1) + txn.xaState.Set(int32(txnXAStateRollback)) + defer func() { txn.xaState.Set(int32(txnXAStateRollbackFinished)) }() + + rollback := fmt.Sprintf("XA ROLLBACK '%v'", txn.xid) + if err := txn.executeXACommand(rollback, txnXAStateRollback); err != nil { + txnCounters.Add(txnCounterXaRollbackError, 1) + txn.incErrors() + return err + } + return nil +} + +// Begin used to start a XA transaction. +// Begin only does: +// 1. set twopc to true +func (txn *Txn) Begin() error { + txnCounters.Add(txnCounterTxnBegin, 1) + txn.twopc = true + return nil +} + +// Commit does: +// 1. XA END +// 2. XA PREPARE +// 3. XA COMMIT +func (txn *Txn) Commit() error { + txn.state.Set(int32(txnStateCommitting)) + + // Here, we only handle the write-txn. + // Commit nothing for read-txn. + switch txn.req.TxnMode { + case xcontext.TxnWrite: + // 1. XA END. + if err := txn.xaEnd(); err != nil { + txn.Rollback() + return err + } + + // 2. XA PREPARE. + if err := txn.xaPrepare(); err != nil { + txn.Rollback() + return err + } + + // 3. XA COMMIT + txn.xaCommit() + } + return nil +} + +// Rollback used to rollback a XA transaction. +// 1. XA ROLLBACK +func (txn *Txn) Rollback() error { + log := txn.log + txn.state.Set(int32(txnStateRollbacking)) + + // Here, we only handle the write-txn. + // Rollback nothing for read-txn. 
+ switch txn.req.TxnMode { + case xcontext.TxnWrite: + log.Warning("txn.rollback.xid[%v]", txn.xid) + switch txnXAState(txn.xaState.Get()) { + // XA Prepare error, rollback prepare txn. + case txnXAStatePrepareFinished: + return txn.xaRollback() + } + } + return nil +} + +// Execute used to execute the query. +// If the txn is in twopc mode, we do the xaStart before the real query execute. +func (txn *Txn) Execute(req *xcontext.RequestContext) (*sqltypes.Result, error) { + if txn.twopc { + txn.req = req + switch req.TxnMode { + case xcontext.TxnRead: + // read-txn acquires the commit read-lock. + txn.mgr.CommitRLock() + defer txn.mgr.CommitRUnlock() + case xcontext.TxnWrite: + // write-txn xa starts. + if err := txn.xaStart(); err != nil { + return nil, err + } + } + } + qr, err := txn.execute(req) + if err != nil { + txn.incErrors() + return nil, err + } + return qr, err +} + +// Execute used to execute a query to backends. +func (txn *Txn) execute(req *xcontext.RequestContext) (*sqltypes.Result, error) { + var err error + var mu sync.Mutex + var wg sync.WaitGroup + + log := txn.log + qr := &sqltypes.Result{} + allErrors := make([]error, 0, 8) + + if txn.twopc { + defer queryStats.Record("txn.2pc.execute", time.Now()) + txn.state.Set(int32(txnStateExecutingTwoPC)) + } else { + defer queryStats.Record("txn.normal.execute", time.Now()) + txn.state.Set(int32(txnStateExecutingNormal)) + } + + // Execute backend-querys. + oneShard := func(back string, txn *Txn, querys []string) { + var x error + var c Connection + defer wg.Done() + + if c, x = txn.fetchOneConnection(back); x != nil { + log.Error("txn.fetch.connection.on[%s].querys[%v].error:%+v", back, querys, x) + } else { + for _, query := range querys { + var innerqr *sqltypes.Result + + // Execute to backends. 
+ if innerqr, x = c.ExecuteWithLimits(query, txn.timeout, txn.maxResult); x != nil { + log.Error("txn.execute.on[%v].query[%v].error:%+v", c.Address(), query, x) + break + } + mu.Lock() + qr.AppendResult(innerqr) + mu.Unlock() + } + } + + if x != nil { + mu.Lock() + allErrors = append(allErrors, x) + mu.Unlock() + } + } + + switch req.Mode { + // ReqSingle mode: execute on the first one shard of txn.backends. + case xcontext.ReqSingle: + qs := []string{req.RawQuery} + for back := range txn.backends { + wg.Add(1) + oneShard(back, txn, qs) + break + } + // ReqScatter mode: execute on the all shards of txn.backends. + case xcontext.ReqScatter: + qs := []string{req.RawQuery} + beLen := len(txn.backends) + for back := range txn.backends { + wg.Add(1) + if beLen > 1 { + go oneShard(back, txn, qs) + } else { + oneShard(back, txn, qs) + } + } + // ReqNormal mode: execute on the some shards of txn.backends. + case xcontext.ReqNormal: + queryMap := make(map[string][]string) + for _, query := range req.Querys { + v, ok := queryMap[query.Backend] + if !ok { + v = make([]string, 0, 4) + v = append(v, query.Query) + } else { + v = append(v, query.Query) + } + queryMap[query.Backend] = v + } + beLen := len(queryMap) + for back, qs := range queryMap { + wg.Add(1) + if beLen > 1 { + go oneShard(back, txn, qs) + } else { + oneShard(back, txn, qs) + } + } + } + + wg.Wait() + if len(allErrors) > 0 { + err = allErrors[0] + } + return qr, err +} + +// executeXACommand used to execute XA statements. +func (txn *Txn) executeXACommand(query string, state txnXAState) error { + rctx := &xcontext.RequestContext{ + RawQuery: query, + Mode: txn.req.Mode, + Querys: txn.req.Querys, + } + return txn.executeXA(rctx, state) +} + +// executeXA only used to execute the 'XA START','XA END', 'XA PREPARE', 'XA COMMIT'/'XA ROLLBACK' statements. 
func (txn *Txn) executeXA(req *xcontext.RequestContext, state txnXAState) error {
	var err error
	var mu sync.Mutex
	var wg sync.WaitGroup

	log := txn.log
	// allErrors collects per-shard failures; only the first is returned.
	allErrors := make([]error, 0, 8)

	txn.state.Set(int32(txnStateExecutingTwoPC))
	defer queryStats.Record("txn.2pc.execute", time.Now())
	// oneShard runs one XA statement on one backend.
	// mu guards allErrors; wg tracks the goroutine fan-out below.
	oneShard := func(state txnXAState, back string, txn *Txn, query string) {
		var x error
		var c Connection
		defer wg.Done()

		switch state {
		case txnXAStateStart, txnXAStateEnd, txnXAStatePrepare:
			// Pre-commit phases: a failure here is simply recorded; the
			// caller decides whether to roll back (see Txn.Commit).
			if c, x = txn.twopcConnection(back); x != nil {
				log.Error("txn.xa.fetch.connection.state[%v].on[%s].query[%v].error:%+v", state, back, query, x)
			} else {
				if _, x = c.Execute(query); x != nil {
					log.Error("txn.xa.execute[%v].on[%v].error:%+v", query, c.Address(), x)
				}
			}
		case txnXAStateCommit, txnXAStateRollback:
			// Commit/rollback must make best effort to complete: retry up
			// to maxRetry times with a linearly growing sleep (retry seconds).
			maxRetry := 20
			for retry := 0; retry < maxRetry; retry++ {
				if retry == 0 {
					// First attempt reuses the connection that ran the
					// earlier XA phases.
					if c, x = txn.twopcConnection(back); x != nil {
						log.Error("txn.xa.twopc.connection[maxretry:%v, retried:%v].state[%v].on[%s].query[%v].error:%+v", maxRetry, retry, state, back, query, x)
						continue
					}
				} else {
					// Retry the connection for commit/rollback: drop the old
					// (possibly broken) connection and fetch a fresh one.
					if c, x = txn.reFetchTwopcConnection(back); x != nil {
						log.Error("txn.xa.fetch.connection[maxretry:%v, retried:%v].state[%v].on[%s].query[%v].error:%+v", maxRetry, retry, state, back, query, x)
						time.Sleep(time.Second * time.Duration(retry))
						continue
					}
				}
				if _, x = c.Execute(query); x != nil {
					log.Error("txn.xa.execute[maxretry:%v, retried:%v].state[%v].on[%v].query[%v].error[%T]:%+v", maxRetry, retry, state, c.Address(), query, x, x)
					if sqlErr, ok := x.(*sqldb.SQLError); ok {
						// XAE04:
						// https://dev.mysql.com/doc/refman/5.5/en/error-messages-server.html#error_er_xaer_nota
						// Error: 1397 SQLSTATE: XAE04 (ER_XAER_NOTA)
						// Message: XAER_NOTA: Unknown XID
						// The XID no longer exists on this backend, so
						// retrying cannot help; stop retrying.
						if sqlErr.Num == 1397 {
							log.Warning("txn.xa.[%v].XAE04.error....", state)
							break
						}
					}
					time.Sleep(time.Second * time.Duration(retry))
					continue
				}
				break
			}
		}

		if x != nil {
			mu.Lock()
			allErrors = append(allErrors, x)
			mu.Unlock()
		}
	}

	switch req.Mode {
	case xcontext.ReqNormal:
		// Deduplicate the backends touched by this request.
		backends := make(map[string]bool)
		for _, query := range req.Querys {
			_, ok := backends[query.Backend]
			if !ok {
				backends[query.Backend] = true
			}
		}

		// Only do XA when backends numbers larger than one.
		beLen := len(backends)
		if beLen > 1 {
			switch state {
			case txnXAStateCommit, txnXAStateRollback:
				// Acquire the commit lock if the txn is write; held until
				// this function returns so commits are serialized against
				// readers holding the read-lock (see Txn.Execute).
				txn.mgr.CommitLock()
				defer txn.mgr.CommitUnlock()
			}

			for back := range backends {
				wg.Add(1)
				go oneShard(state, back, txn, req.RawQuery)
			}
		}
	}

	wg.Wait()
	// Only the first recorded error is surfaced to the caller.
	if len(allErrors) > 0 {
		err = allErrors[0]
	}
	return err
}

// ExecuteStreamFetch used to execute stream fetch query.
+func (txn *Txn) ExecuteStreamFetch(req *xcontext.RequestContext, callback func(*sqltypes.Result) error, streamBufferSize int) error { + var err error + var mu sync.Mutex + var wg sync.WaitGroup + + log := txn.log + cursors := make([]driver.Rows, 0, 8) + allErrors := make([]error, 0, 8) + + defer func() { + for _, cursor := range cursors { + cursor.Close() + } + }() + + oneShard := func(c Connection, query string) { + defer wg.Done() + cursor, x := c.ExecuteStreamFetch(query) + if x != nil { + mu.Lock() + allErrors = append(allErrors, x) + mu.Unlock() + return + } + mu.Lock() + cursors = append(cursors, cursor) + mu.Unlock() + } + + for _, qt := range req.Querys { + var conn Connection + if conn, err = txn.fetchOneConnection(qt.Backend); err != nil { + return err + } + wg.Add(1) + go oneShard(conn, qt.Query) + } + wg.Wait() + if len(allErrors) > 0 { + return allErrors[0] + } + + // Send Fields. + fields := cursors[0].Fields() + fieldsQr := &sqltypes.Result{Fields: fields, State: sqltypes.RStateFields} + if err := callback(fieldsQr); err != nil { + return err + } + + // Send rows. 
+ var allByteCount, allBatchCount, allRowCount uint64 + + byteCount := 0 + cursorFinished := 0 + bitmap := make([]bool, len(cursors)) + qr := &sqltypes.Result{Fields: fields, Rows: make([][]sqltypes.Value, 0, 256), State: sqltypes.RStateRows} + for cursorFinished < len(cursors) { + for i, cursor := range cursors { + fetchPerLoop := 64 + name := req.Querys[i].Backend + for fetchPerLoop > 0 { + if cursor.Next() { + allRowCount++ + row, err := cursor.RowValues() + if err != nil { + log.Error("txn.stream.cursor[%s].RowValues.error:%+v", name, err) + return err + } + rowLen := sqltypes.Values(row).Len() + byteCount += rowLen + allByteCount += uint64(rowLen) + qr.Rows = append(qr.Rows, row) + + } else { + if !bitmap[i] { + if x := cursor.LastError(); x != nil { + log.Error("txn.stream.cursor[%s].last.error:%+v", name, x) + return x + } + bitmap[i] = true + cursorFinished++ + } + } + fetchPerLoop-- + } + } + + if byteCount >= streamBufferSize { + if x := callback(qr); x != nil { + log.Error("txn.stream.cursor.send1.error:%+v", x) + return x + } + qr.Rows = qr.Rows[:0] + byteCount = 0 + allBatchCount++ + + log.Warning("txn.steam.send.[streamBufferSize:%v, hasSentRows:%v, hasSentBytes:%v, hasSentBatchs:%v, cursorFinished:%d/%d]", + streamBufferSize, allRowCount, allByteCount, allBatchCount, cursorFinished, len(cursors)) + } + } + if len(qr.Rows) > 0 { + if x := callback(qr); x != nil { + log.Error("txn.stream.cursor.send2.error:%+v", x) + return x + } + } + log.Warning("txn.stream.send.done[allRows:%v, allBytes:%v, allBatches:%v]", allRowCount, allByteCount, allBatchCount) + + // Send finished. + finishQr := &sqltypes.Result{Fields: fields, RowsAffected: allRowCount, State: sqltypes.RStateFinished} + return callback(finishQr) +} + +// ExecuteScatter used to execute query on all shards. 
+func (txn *Txn) ExecuteScatter(query string) (*sqltypes.Result, error) { + rctx := &xcontext.RequestContext{ + RawQuery: query, + Mode: xcontext.ReqScatter, + } + return txn.Execute(rctx) +} + +// ExecuteSingle used to execute query on one shard. +func (txn *Txn) ExecuteSingle(query string) (*sqltypes.Result, error) { + rctx := &xcontext.RequestContext{ + RawQuery: query, + Mode: xcontext.ReqSingle, + } + return txn.Execute(rctx) +} + +// ExecuteOnThisBackend used to send the query to this backend. +func (txn *Txn) ExecuteOnThisBackend(backend string, query string) (*sqltypes.Result, error) { + qt := xcontext.QueryTuple{ + Query: query, + Backend: backend, + } + rctx := &xcontext.RequestContext{ + Querys: []xcontext.QueryTuple{qt}, + } + return txn.Execute(rctx) +} + +// Finish used to finish a transaction. +// If the lastErr is nil, we will recycle all the twopc connections to the pool for reuse, +// otherwise we wil close all of the them. +func (txn *Txn) Finish() error { + txnCounters.Add(txnCounterTxnFinish, 1) + + txn.mu.Lock() + defer txn.mu.Unlock() + + defer tz.Remove(txn.txnd) + defer func() { txn.twopc = false }() + + // If the txn has aborted, we won't do finish. + if txn.state.Get() == int32(txnStateAborting) { + return nil + } + + txn.xaState.Set(int32(txnXAStateNone)) + txn.state.Set(int32(txnStateFinshing)) + + // 2pc connections. + for id, conn := range txn.twopcConnections { + if txn.errors > 0 { + conn.Close() + } else { + conn.Recycle() + } + delete(txn.twopcConnections, id) + } + + // normal connections. + for _, conn := range txn.normalConnections { + if txn.errors > 0 { + conn.Close() + } else { + conn.Recycle() + } + } + txn.mgr.Remove() + return nil +} + +// Abort used to abort all txn connections. +func (txn *Txn) Abort() error { + txnCounters.Add(txnCounterTxnAbort, 1) + + txn.mu.Lock() + defer txn.mu.Unlock() + + defer tz.Remove(txn.txnd) + defer func() { txn.twopc = false }() + + // If the txn has finished, we won't do abort. 
+ if txn.state.Get() == int32(txnStateFinshing) { + return nil + } + txn.state.Set(int32(txnStateAborting)) + + // 2pc connections. + for id, conn := range txn.twopcConnections { + conn.Kill("txn.abort") + txn.twopcConnMu.Lock() + delete(txn.twopcConnections, id) + txn.twopcConnMu.Unlock() + } + + // normal connections. + txn.normalConnMu.RLock() + for _, conn := range txn.normalConnections { + conn.Kill("txn.abort") + } + txn.normalConnMu.RUnlock() + txn.mgr.Remove() + return nil +} diff --git a/src/backend/txn_test.go b/src/backend/txn_test.go new file mode 100644 index 00000000..607d72ea --- /dev/null +++ b/src/backend/txn_test.go @@ -0,0 +1,824 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package backend + +import ( + "errors" + "fmt" + "sync" + "testing" + "time" + "xcontext" + + "github.com/fortytw2/leaktest" + "github.com/stretchr/testify/assert" + + "github.com/xelabs/go-mysqlstack/sqldb" + querypb "github.com/xelabs/go-mysqlstack/sqlparser/depends/query" + "github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes" + "github.com/xelabs/go-mysqlstack/xlog" +) + +func TestTxnNormalExecute(t *testing.T) { + defer leaktest.Check(t)() + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + + fakedb, txnMgr, backends, _, addrs, cleanup := MockTxnMgr(log, 2) + defer cleanup() + + querys := []xcontext.QueryTuple{ + xcontext.QueryTuple{Query: "select * from node1", Backend: addrs[0]}, + xcontext.QueryTuple{Query: "select * from node2", Backend: addrs[1]}, + xcontext.QueryTuple{Query: "select * from node3", Backend: addrs[1]}, + } + + fakedb.AddQuery(querys[0].Query, result1) + fakedb.AddQueryDelay(querys[1].Query, result2, 100) + fakedb.AddQueryDelay(querys[2].Query, result2, 110) + + // normal execute. 
+ { + rctx := &xcontext.RequestContext{ + Querys: querys, + } + + txn, err := txnMgr.CreateTxn(backends) + assert.Nil(t, err) + defer txn.Finish() + + got, err := txn.Execute(rctx) + assert.Nil(t, err) + + want := &sqltypes.Result{} + want.AppendResult(result1) + want.AppendResult(result2) + want.AppendResult(result2) + assert.Equal(t, want, got) + } + + // single execute. + { + rctx := &xcontext.RequestContext{ + Mode: xcontext.ReqSingle, + RawQuery: querys[0].Query, + } + + txn, err := txnMgr.CreateTxn(backends) + assert.Nil(t, err) + defer txn.Finish() + got, err := txn.Execute(rctx) + assert.Nil(t, err) + + assert.Equal(t, result1, got) + } + + // scatter execute. + { + rctx := &xcontext.RequestContext{ + Mode: xcontext.ReqScatter, + RawQuery: querys[0].Query, + } + + txn, err := txnMgr.CreateTxn(backends) + assert.Nil(t, err) + defer txn.Finish() + got, err := txn.Execute(rctx) + assert.Nil(t, err) + + want := &sqltypes.Result{} + want.AppendResult(result1) + want.AppendResult(result1) + assert.Equal(t, want, got) + } +} + +func TestTxnExecuteStreamFetch(t *testing.T) { + defer leaktest.Check(t)() + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedb, txnMgr, backends, _, addrs, cleanup := MockTxnMgr(log, 2) + defer cleanup() + + querys := []xcontext.QueryTuple{ + xcontext.QueryTuple{Query: "select * from node1", Backend: addrs[0]}, + xcontext.QueryTuple{Query: "select * from node2", Backend: addrs[1]}, + xcontext.QueryTuple{Query: "select * from node3", Backend: addrs[1]}, + } + + result11 := &sqltypes.Result{ + Fields: []*querypb.Field{ + { + Name: "id", + Type: querypb.Type_INT32, + }, + { + Name: "name", + Type: querypb.Type_VARCHAR, + }, + }, + Rows: make([][]sqltypes.Value, 0, 256)} + for i := 0; i < 201710; i++ { + row := []sqltypes.Value{ + sqltypes.MakeTrusted(querypb.Type_INT32, []byte("11")), + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte("1nice name")), + } + result11.Rows = append(result11.Rows, row) + } + + result12 := &sqltypes.Result{ + 
Fields: []*querypb.Field{ + { + Name: "id", + Type: querypb.Type_INT32, + }, + { + Name: "name", + Type: querypb.Type_VARCHAR, + }, + }, + Rows: make([][]sqltypes.Value, 0, 256)} + + for i := 0; i < 9; i++ { + row := []sqltypes.Value{ + sqltypes.MakeTrusted(querypb.Type_INT32, []byte("22")), + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte("2nice name")), + } + result12.Rows = append(result12.Rows, row) + } + + // normal execute. + { + fakedb.AddQueryStream(querys[0].Query, result11) + fakedb.AddQueryStream(querys[1].Query, result12) + fakedb.AddQueryStream(querys[2].Query, result12) + + txn, err := txnMgr.CreateTxn(backends) + assert.Nil(t, err) + defer txn.Finish() + + rctx := &xcontext.RequestContext{ + Querys: querys, + } + + callbackQr := &sqltypes.Result{} + err = txn.ExecuteStreamFetch(rctx, func(qr *sqltypes.Result) error { + callbackQr.AppendResult(qr) + return nil + }, 1024*1024) + assert.Nil(t, err) + + want := len(result11.Rows) + 2*len(result12.Rows) + got := len(callbackQr.Rows) + assert.Equal(t, want, got) + } + + // execute error. 
+ { + fakedb.AddQueryError(querys[0].Query, errors.New("mock.stream.query.error")) + fakedb.AddQueryStream(querys[1].Query, result12) + fakedb.AddQueryStream(querys[2].Query, result12) + + txn, err := txnMgr.CreateTxn(backends) + assert.Nil(t, err) + defer txn.Finish() + + rctx := &xcontext.RequestContext{ + Querys: querys, + } + + callbackQr := &sqltypes.Result{} + err = txn.ExecuteStreamFetch(rctx, func(qr *sqltypes.Result) error { + callbackQr.AppendResult(qr) + return nil + }, 1024*1024) + want := "mock.stream.query.error (errno 1105) (sqlstate HY000)" + got := err.Error() + assert.Equal(t, want, got) + } +} + +func TestTxnNormalError(t *testing.T) { + defer leaktest.Check(t)() + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedb, txnMgr, backends, _, addrs, cleanup := MockTxnMgr(log, 3) + defer cleanup() + + querys := []xcontext.QueryTuple{ + xcontext.QueryTuple{Query: "select * from node1", Backend: addrs[0]}, + xcontext.QueryTuple{Query: "select * from node2", Backend: addrs[1]}, + } + + // execute error. + { + fakedb.AddQueryError("select * from node1", errors.New("mock.execute.error")) + rctx := &xcontext.RequestContext{ + Mode: xcontext.ReqSingle, + RawQuery: querys[0].Query, + } + + txn, err := txnMgr.CreateTxn(backends) + assert.Nil(t, err) + defer txn.Finish() + _, err = txn.Execute(rctx) + assert.NotNil(t, err) + } +} + +func TestTxnErrorBackendNotExists(t *testing.T) { + defer leaktest.Check(t)() + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedb, txnMgr, backends, _, _, cleanup := MockTxnMgr(log, 3) + defer cleanup() + + fakedb.AddQuery("select * from node1", result1) + querys := []xcontext.QueryTuple{ + xcontext.QueryTuple{Query: "select * from node1", Backend: "xx"}, + xcontext.QueryTuple{Query: "select * from node2", Backend: "xx"}, + } + + // Normal connection error. 
+ { + rctx := &xcontext.RequestContext{ + Querys: querys, + } + + txn, err := txnMgr.CreateTxn(backends) + assert.Nil(t, err) + defer txn.Finish() + _, err = txn.Execute(rctx) + want := "txn.can.not.get.normal.connection.by.backend[xx].from.pool" + got := err.Error() + assert.Equal(t, want, got) + } +} + +func TestTxnExecuteSingle(t *testing.T) { + defer leaktest.Check(t)() + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedb, txnMgr, backends, _, addrs, cleanup := MockTxnMgr(log, 2) + defer cleanup() + + querys := []xcontext.QueryTuple{ + xcontext.QueryTuple{Query: "select * from node1", Backend: addrs[0]}, + } + fakedb.AddQuery(querys[0].Query, result1) + + txn, err := txnMgr.CreateTxn(backends) + assert.Nil(t, err) + defer txn.Finish() + + // single execute. + { + got, err := txn.ExecuteSingle(querys[0].Query) + assert.Nil(t, err) + assert.Equal(t, result1, got) + } +} + +func TestTxnExecuteScatter(t *testing.T) { + defer leaktest.Check(t)() + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedb, txnMgr, backends, _, addrs, cleanup := MockTxnMgr(log, 2) + defer cleanup() + + querys := []xcontext.QueryTuple{ + xcontext.QueryTuple{Query: "select * from node1", Backend: addrs[0]}, + } + fakedb.AddQuery(querys[0].Query, result1) + + txn, err := txnMgr.CreateTxn(backends) + assert.Nil(t, err) + defer txn.Finish() + + // scatter execute. 
+ { + qr, err := txn.ExecuteScatter(querys[0].Query) + assert.Nil(t, err) + got := fmt.Sprintf("%+v", qr.Rows) + want := "[[11 1nice name] [12 12nice name] [11 1nice name] [12 12nice name]]" + assert.Equal(t, want, got) + } +} + +func TestTxnExecuteOnThisBackend(t *testing.T) { + defer leaktest.Check(t)() + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedb, txnMgr, backends, _, addrs, cleanup := MockTxnMgr(log, 2) + defer cleanup() + query := "select from node2" + backend := addrs[1] + fakedb.AddQuery(query, result1) + + txn, err := txnMgr.CreateTxn(backends) + assert.Nil(t, err) + defer txn.Finish() + + { + got, err := txn.ExecuteOnThisBackend(backend, query) + assert.Nil(t, err) + assert.Equal(t, result1, got) + } +} + +func TestTxnSetting(t *testing.T) { + defer leaktest.Check(t)() + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedb, txnMgr, backends, _, _, cleanup := MockTxnMgr(log, 2) + defer cleanup() + + query := "select * from node1" + fakedb.AddQueryDelay(query, result1, 1000) + + txn, err := txnMgr.CreateTxn(backends) + assert.Nil(t, err) + defer txn.Finish() + + // timeout + { + txn.SetTimeout(50) + // scatter execute. + { + _, err := txn.ExecuteScatter(query) + assert.NotNil(t, err) + /* + got := err.Error() + want := "Query execution was interrupted, timeout[50ms] exceeded" + assert.Equal(t, want, got) + */ + } + } + + // max result size. + { + txn.SetTimeout(0) + txn.SetMaxResult(10) + // scatter execute. 
+ { + _, err := txn.ExecuteScatter(query) + got := err.Error() + want := "Query execution was interrupted, max memory usage[10 bytes] exceeded" + assert.Equal(t, want, got) + } + } +} + +/*****************************************************************/ +/************************XA TESTS START***************************/ +/*****************************************************************/ +func TestTxnAbort(t *testing.T) { + defer leaktest.Check(t)() + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedb, txnMgr, backends, _, addrs, cleanup := MockTxnMgr(log, 3) + defer cleanup() + + querys := []xcontext.QueryTuple{ + xcontext.QueryTuple{Query: "update node1", Backend: addrs[0]}, + xcontext.QueryTuple{Query: "update node2", Backend: addrs[1]}, + xcontext.QueryTuple{Query: "update node3", Backend: addrs[2]}, + } + + fakedb.AddQueryDelay(querys[0].Query, result2, 2000) + fakedb.AddQueryDelay(querys[1].Query, result2, 2000) + fakedb.AddQueryDelay(querys[2].Query, result2, 2000) + fakedb.AddQueryPattern("XA .*", result1) + + // Normal abort. + { + var wg sync.WaitGroup + txn, err := txnMgr.CreateTxn(backends) + assert.Nil(t, err) + defer txn.Finish() + // normal execute with long time. + { + wg.Add(1) + go func() { + defer wg.Done() + rctx := &xcontext.RequestContext{ + TxnMode: xcontext.TxnWrite, + Querys: querys, + } + txn.Execute(rctx) + }() + } + + // abort + { + time.Sleep(time.Second) + err := txn.Abort() + assert.Nil(t, err) + } + wg.Wait() + } + + // Twopc abort. + { + var wg sync.WaitGroup + txn, err := txnMgr.CreateTxn(backends) + assert.Nil(t, err) + defer txn.Finish() + + err = txn.Begin() + assert.Nil(t, err) + + // normal execute with long time. 
+ { + wg.Add(1) + go func() { + defer wg.Done() + rctx := &xcontext.RequestContext{ + TxnMode: xcontext.TxnWrite, + Querys: querys, + } + txn.Execute(rctx) + }() + } + + // abort + { + time.Sleep(time.Second) + err := txn.Abort() + assert.Nil(t, err) + } + wg.Wait() + } +} + +func TestTxnTwoPCExecute(t *testing.T) { + defer leaktest.Check(t)() + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedb, txnMgr, backends, _, addrs, cleanup := MockTxnMgr(log, 2) + defer cleanup() + + querys := []xcontext.QueryTuple{ + xcontext.QueryTuple{Query: "select * from node1", Backend: addrs[0]}, + xcontext.QueryTuple{Query: "select * from node2", Backend: addrs[1]}, + xcontext.QueryTuple{Query: "select * from node3", Backend: addrs[1]}, + } + + fakedb.AddQuery(querys[0].Query, result2) + fakedb.AddQueryDelay(querys[1].Query, result2, 100) + fakedb.AddQueryDelay(querys[2].Query, result2, 150) + + txn, err := txnMgr.CreateTxn(backends) + assert.Nil(t, err) + defer txn.Finish() + + // Set 2PC conds. + { + fakedb.AddQueryPattern("XA .*", result1) + } + + // Begin. + { + err := txn.Begin() + assert.Nil(t, err) + } + + // normal execute. + { + rctx := &xcontext.RequestContext{ + Mode: xcontext.ReqNormal, + TxnMode: xcontext.TxnRead, + Querys: querys, + } + got, err := txn.Execute(rctx) + assert.Nil(t, err) + + want := &sqltypes.Result{} + want.AppendResult(result2) + want.AppendResult(result2) + want.AppendResult(result2) + assert.Equal(t, want, got) + } + + // single execute. + { + rctx := &xcontext.RequestContext{ + Mode: xcontext.ReqSingle, + RawQuery: querys[0].Query, + } + got, err := txn.Execute(rctx) + assert.Nil(t, err) + + assert.Equal(t, result2, got) + } + + // scatter execute. 
+ { + rctx := &xcontext.RequestContext{ + Mode: xcontext.ReqScatter, + TxnMode: xcontext.TxnWrite, + RawQuery: querys[0].Query, + } + got, err := txn.Execute(rctx) + assert.Nil(t, err) + + want := &sqltypes.Result{} + want.AppendResult(result2) + want.AppendResult(result2) + assert.Equal(t, want, got) + } + + // 2PC Commit. + { + txn.Commit() + } +} + +func TestTxnTwoPCExecuteNormalOnOneBackend(t *testing.T) { + defer leaktest.Check(t)() + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedb, txnMgr, backends, _, addrs, cleanup := MockTxnMgr(log, 2) + defer cleanup() + + // All in one backends. + querys := []xcontext.QueryTuple{ + xcontext.QueryTuple{Query: "select * from node1", Backend: addrs[0]}, + xcontext.QueryTuple{Query: "select * from node2", Backend: addrs[0]}, + xcontext.QueryTuple{Query: "select * from node3", Backend: addrs[0]}, + } + + fakedb.AddQuery(querys[0].Query, result2) + fakedb.AddQueryDelay(querys[1].Query, result2, 100) + fakedb.AddQueryDelay(querys[2].Query, result2, 150) + + txn, err := txnMgr.CreateTxn(backends) + assert.Nil(t, err) + defer txn.Finish() + + // Begin. + { + err := txn.Begin() + assert.Nil(t, err) + } + + // normal execute. + { + rctx := &xcontext.RequestContext{ + Mode: xcontext.ReqNormal, + Querys: querys, + } + got, err := txn.Execute(rctx) + assert.Nil(t, err) + + want := &sqltypes.Result{} + want.AppendResult(result2) + want.AppendResult(result2) + want.AppendResult(result2) + assert.Equal(t, want, got) + } + + // 2PC Commit. 
+ { + txn.Commit() + } +} + +func TestTxnTwoPCExecuteWrite(t *testing.T) { + defer leaktest.Check(t)() + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedb, txnMgr, backends, _, addrs, cleanup := MockTxnMgr(log, 2) + defer cleanup() + + querys := []xcontext.QueryTuple{ + xcontext.QueryTuple{Query: "insert", Backend: addrs[0]}, + xcontext.QueryTuple{Query: "insert", Backend: addrs[1]}, + xcontext.QueryTuple{Query: "insert", Backend: addrs[1]}, + } + + fakedb.AddQuery(querys[0].Query, result2) + fakedb.AddQueryDelay(querys[1].Query, result2, 100) + fakedb.AddQueryDelay(querys[2].Query, result2, 150) + + txn, err := txnMgr.CreateTxn(backends) + assert.Nil(t, err) + defer txn.Finish() + + // Set 2PC conds. + { + fakedb.AddQueryPattern("XA .*", result1) + } + + // Begin. + { + err := txn.Begin() + assert.Nil(t, err) + } + + // normal execute. + { + rctx := &xcontext.RequestContext{ + TxnMode: xcontext.TxnWrite, + Querys: querys, + } + got, err := txn.Execute(rctx) + assert.Nil(t, err) + + want := &sqltypes.Result{} + want.AppendResult(result2) + want.AppendResult(result2) + want.AppendResult(result2) + assert.Equal(t, want, got) + } + + // 2PC Commit. + { + txn.Commit() + } +} + +func TestTxnTwoPCExecuteScatterOnOneBackend(t *testing.T) { + defer leaktest.Check(t)() + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedb, txnMgr, backends, _, addrs, cleanup := MockTxnMgr(log, 1) + defer cleanup() + + // All in one backends. + querys := []xcontext.QueryTuple{ + xcontext.QueryTuple{Query: "select * from node1", Backend: addrs[0]}, + } + fakedb.AddQuery(querys[0].Query, result2) + + txn, err := txnMgr.CreateTxn(backends) + assert.Nil(t, err) + defer txn.Finish() + + // Begin. + { + err := txn.Begin() + assert.Nil(t, err) + } + + // scatter execute. 
+ { + rctx := &xcontext.RequestContext{ + Mode: xcontext.ReqScatter, + RawQuery: querys[0].Query, + } + got, err := txn.Execute(rctx) + assert.Nil(t, err) + + want := result2 + assert.Equal(t, want, got) + } + + // 2PC Commit. + { + txn.Commit() + } +} + +func TestTxnTwoPCExecuteError(t *testing.T) { + defer leaktest.Check(t)() + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedb, txnMgr, backends, _, addrs, cleanup := MockTxnMgr(log, 2) + defer cleanup() + + querys := []xcontext.QueryTuple{ + xcontext.QueryTuple{Query: "update", Backend: addrs[0]}, + xcontext.QueryTuple{Query: "update", Backend: addrs[1]}, + } + fakedb.AddQuery(querys[0].Query, result1) + fakedb.AddQueryDelay(querys[1].Query, result2, 100) + + // Set 2PC conds. + resetFunc := func(txn *Txn) { + fakedb.ResetAll() + fakedb.AddQuery(querys[0].Query, result1) + fakedb.AddQueryDelay(querys[1].Query, result2, 100) + fakedb.AddQueryPattern("XA .*", result1) + } + + // Begin never failed. + { + txn, err := txnMgr.CreateTxn(backends) + assert.Nil(t, err) + defer txn.Finish() + resetFunc(txn) + + err = txn.Begin() + assert.Nil(t, err) + } + + // Execute, xa start error. + { + txn, err := txnMgr.CreateTxn(backends) + assert.Nil(t, err) + defer txn.Finish() + resetFunc(txn) + fakedb.AddQueryErrorPattern("XA START .*", errors.New("mock.xa.start.error")) + + err = txn.Begin() + assert.Nil(t, err) + + rctx := &xcontext.RequestContext{ + Mode: xcontext.ReqNormal, + TxnMode: xcontext.TxnWrite, + Querys: querys, + } + _, err = txn.Execute(rctx) + assert.NotNil(t, err) + txn.Rollback() + } + + // Commit error. + { + // XA END error. 
+ { + txn, err := txnMgr.CreateTxn(backends) + assert.Nil(t, err) + defer txn.Finish() + + resetFunc(txn) + fakedb.AddQueryErrorPattern("XA END .*", errors.New("mock.xa.end.error")) + + err = txn.Begin() + assert.Nil(t, err) + + rctx := &xcontext.RequestContext{ + Mode: xcontext.ReqNormal, + TxnMode: xcontext.TxnWrite, + Querys: querys, + } + _, err = txn.Execute(rctx) + assert.Nil(t, err) + txn.Commit() + } + + // XA PREPARE error. + { + txn, err := txnMgr.CreateTxn(backends) + assert.Nil(t, err) + defer txn.Finish() + + resetFunc(txn) + fakedb.AddQueryErrorPattern("XA PREPARE .*", errors.New("mock.xa.prepare.error")) + + err = txn.Begin() + assert.Nil(t, err) + + rctx := &xcontext.RequestContext{ + Mode: xcontext.ReqNormal, + TxnMode: xcontext.TxnWrite, + Querys: querys, + } + _, err = txn.Execute(rctx) + assert.Nil(t, err) + txn.Commit() + } + + // XA PREPARE and ROLLBACK error. + { + txn, err := txnMgr.CreateTxn(backends) + assert.Nil(t, err) + defer txn.Finish() + + resetFunc(txn) + fakedb.AddQueryErrorPattern("XA PREPARE .*", errors.New("mock.xa.prepare.error")) + fakedb.AddQueryErrorPattern("XA ROLLBACK .*", sqldb.NewSQLError1(1397, "XAE04", "XAER_NOTA: Unknown XID")) + + err = txn.Begin() + assert.Nil(t, err) + + rctx := &xcontext.RequestContext{ + Mode: xcontext.ReqNormal, + TxnMode: xcontext.TxnWrite, + Querys: querys, + } + _, err = txn.Execute(rctx) + assert.Nil(t, err) + txn.Commit() + } + + // XA COMMIT error. 
+ { + txn, err := txnMgr.CreateTxn(backends) + assert.Nil(t, err) + defer txn.Finish() + + resetFunc(txn) + fakedb.AddQueryErrorPattern("XA COMMIT .*", sqldb.NewSQLError1(1397, "XAE04", "XAER_NOTA: Unknown XID")) + + err = txn.Begin() + assert.Nil(t, err) + + rctx := &xcontext.RequestContext{ + Mode: xcontext.ReqNormal, + TxnMode: xcontext.TxnWrite, + Querys: querys, + } + _, err = txn.Execute(rctx) + assert.Nil(t, err) + txn.Commit() + } + } +} + +/*****************************************************************/ +/*************************XA TESTS END****************************/ +/*****************************************************************/ diff --git a/src/backend/txnmgr.go b/src/backend/txnmgr.go new file mode 100644 index 00000000..34d01db3 --- /dev/null +++ b/src/backend/txnmgr.go @@ -0,0 +1,99 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package backend + +import ( + "errors" + "sync" + "sync/atomic" + + "github.com/xelabs/go-mysqlstack/xlog" +) + +// TxnManager tuple. +type TxnManager struct { + log *xlog.Log + txnid uint64 + txnNums int64 + commitLock sync.RWMutex +} + +// NewTxnManager creates new TxnManager. +func NewTxnManager(log *xlog.Log) *TxnManager { + return &TxnManager{ + log: log, + txnid: 0, + } +} + +// GetID returns a new txnid. +func (mgr *TxnManager) GetID() uint64 { + return atomic.AddUint64(&mgr.txnid, 1) +} + +// Add used to add a txn to mgr. +func (mgr *TxnManager) Add() error { + atomic.AddInt64(&mgr.txnNums, 1) + return nil +} + +// Remove used to remove a txn from mgr. +func (mgr *TxnManager) Remove() error { + atomic.AddInt64(&mgr.txnNums, -1) + return nil +} + +// CreateTxn creates new txn. 
+func (mgr *TxnManager) CreateTxn(backends map[string]*Pool) (*Txn, error) {
+	if len(backends) == 0 {
+		return nil, errors.New("backends.is.NULL")
+	}
+
+	txid := mgr.GetID()
+	txn, err := NewTxn(mgr.log, txid, mgr, backends)
+	if err != nil {
+		return nil, err
+	}
+	// Register the new txn in the manager's live-txn counter.
+	mgr.Add()
+	return txn, nil
+}
+
+// CreateBackupTxn creates a new backup txn on the given backup pool.
+// It returns an error if backup is nil.
+func (mgr *TxnManager) CreateBackupTxn(backup *Pool) (*BackupTxn, error) {
+	if backup == nil {
+		return nil, errors.New("backup.is.NULL")
+	}
+	txid := mgr.GetID()
+	txn, err := NewBackupTxn(mgr.log, txid, mgr, backup)
+	if err != nil {
+		return nil, err
+	}
+	// Register the new txn in the manager's live-txn counter.
+	mgr.Add()
+	return txn, nil
+}
+
+// CommitLock acquires the exclusive (write) side of the commit lock.
+func (mgr *TxnManager) CommitLock() {
+	mgr.commitLock.Lock()
+}
+
+// CommitUnlock releases the exclusive (write) side of the commit lock.
+func (mgr *TxnManager) CommitUnlock() {
+	mgr.commitLock.Unlock()
+}
+
+// CommitRLock acquires the shared (read) side of the commit lock.
+func (mgr *TxnManager) CommitRLock() {
+	mgr.commitLock.RLock()
+}
+
+// CommitRUnlock releases the shared (read) side of the commit lock.
+func (mgr *TxnManager) CommitRUnlock() {
+	mgr.commitLock.RUnlock()
+}
diff --git a/src/backend/txnmgr_test.go b/src/backend/txnmgr_test.go
new file mode 100644
index 00000000..1b66f65e
--- /dev/null
+++ b/src/backend/txnmgr_test.go
@@ -0,0 +1,66 @@
+/*
+ * Radon
+ *
+ * Copyright 2018 The Radon Authors.
+ * Code is licensed under the GPLv3.
+ * + */ + +package backend + +import ( + "fakedb" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/xelabs/go-mysqlstack/xlog" +) + +func TestTxnManager(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedb := fakedb.New(log, 2) + defer fakedb.Close() + backends := make(map[string]*Pool) + addrs := fakedb.Addrs() + for _, addr := range addrs { + conf := MockBackendConfigDefault(addr, addr) + pool := NewPool(log, conf) + backends[addr] = pool + } + txnmgr := NewTxnManager(log) + + { + txn, err := txnmgr.CreateTxn(backends) + assert.Nil(t, err) + defer txn.Finish() + } + + { + backTxn, err := txnmgr.CreateBackupTxn(backends[addrs[0]]) + assert.Nil(t, err) + defer backTxn.Finish() + } +} + +func TestTxnManagerBackendsNull(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedb := fakedb.New(log, 2) + defer fakedb.Close() + backends := make(map[string]*Pool) + txnmgr := NewTxnManager(log) + + { + _, err := txnmgr.CreateTxn(backends) + want := "backends.is.NULL" + got := err.Error() + assert.Equal(t, want, got) + } + + { + _, err := txnmgr.CreateBackupTxn(nil) + want := "backup.is.NULL" + got := err.Error() + assert.Equal(t, want, got) + } +} diff --git a/src/backend/txnz.go b/src/backend/txnz.go new file mode 100644 index 00000000..000f663a --- /dev/null +++ b/src/backend/txnz.go @@ -0,0 +1,132 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + * This code was derived from https://github.com/youtube/vitess. 
+ */ + +package backend + +import ( + "sort" + "sync" + "time" +) + +// TxnDetail is a simple wrapper for Query +type TxnDetail struct { + txnID uint64 + txn Transaction + start time.Time +} + +// NewTxnDetail creates a new TxnDetail +func NewTxnDetail(txn Transaction) *TxnDetail { + return &TxnDetail{txnID: txn.TxID(), txn: txn, start: time.Now()} +} + +// Txnz holds a thread safe list of TxnDetails +type Txnz struct { + mu sync.RWMutex + txnDetails map[uint64]*TxnDetail +} + +// NewTxnz creates a new Txnz +func NewTxnz() *Txnz { + return &Txnz{txnDetails: make(map[uint64]*TxnDetail)} +} + +// Add adds a TxnDetail to Txnz +func (tz *Txnz) Add(td *TxnDetail) { + tz.mu.Lock() + defer tz.mu.Unlock() + tz.txnDetails[td.txnID] = td +} + +// Remove removes a TxnDetail from Txnz +func (tz *Txnz) Remove(td *TxnDetail) { + tz.mu.Lock() + defer tz.mu.Unlock() + delete(tz.txnDetails, td.txnID) +} + +// TxnDetailzRow is used for rendering TxnDetail in a template +type TxnDetailzRow struct { + Start time.Time + Duration time.Duration + TxnID uint64 + XAID string + Query string + State string + XaState string + Color string +} + +type byTxStartTime []TxnDetailzRow + +func (a byTxStartTime) Len() int { return len(a) } +func (a byTxStartTime) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byTxStartTime) Less(i, j int) bool { return a[i].Start.Before(a[j].Start) } + +var ( + txnStates = map[int32]string{ + int32(txnStateLive): "txnStateLive", + int32(txnStateBeginning): "txnStateBeginning", + int32(txnStateExecutingTwoPC): "txnStateExecutingTwoPC", + int32(txnStateExecutingNormal): "txnStateExecutingNormal", + int32(txnStateRollbacking): "txnStateRollbacking", + int32(txnStateCommitting): "txnStateCommitting", + int32(txnStateFinshing): "txnStateFinshing", + int32(txnStateAborting): "txnStateAborting", + } + + xaStates = map[int32]string{ + int32(txnXAStateNone): "txnXAStateNone", + int32(txnXAStateStart): "txnXAStateStart", + int32(txnXAStateStartFinished): 
"txnXAStateStartFinished", + int32(txnXAStateEnd): "txnXAStateEnd", + int32(txnXAStateEndFinished): "txnXAStateEndFinished", + int32(txnXAStatePrepare): "txnXAStatePrepare", + int32(txnXAStatePrepareFinished): "txnXAStatePrepareFinished", + int32(txnXAStateCommit): "txnXAStateCommit", + int32(txnXAStateRollback): "txnXAStateRollback", + } +) + +// GetTxnzRows returns a list of TxnDetailzRow sorted by start time +func (tz *Txnz) GetTxnzRows() []TxnDetailzRow { + tz.mu.RLock() + rows := []TxnDetailzRow{} + for _, td := range tz.txnDetails { + state := "UNKNOW" + if s, ok := txnStates[td.txn.State()]; ok { + state = s + } + xaState := "NONE" + if s, ok := xaStates[td.txn.XaState()]; ok { + xaState = s + } + + row := TxnDetailzRow{ + Start: td.start, + Duration: time.Since(td.start), + TxnID: td.txnID, + XAID: td.txn.XID(), + State: state, + XaState: xaState, + } + if row.Duration < 10*time.Millisecond { + row.Color = "low" + } else if row.Duration < 100*time.Millisecond { + row.Color = "medium" + } else { + row.Color = "high" + } + rows = append(rows, row) + } + tz.mu.RUnlock() + sort.Sort(byTxStartTime(rows)) + return rows +} diff --git a/src/backend/txnz_test.go b/src/backend/txnz_test.go new file mode 100644 index 00000000..be41ac85 --- /dev/null +++ b/src/backend/txnz_test.go @@ -0,0 +1,69 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. 
+ * + */ + +package backend + +import ( + "testing" + "time" + "xcontext" + + "github.com/fortytw2/leaktest" + "github.com/stretchr/testify/assert" + + "github.com/xelabs/go-mysqlstack/xlog" +) + +func TestTxnz(t *testing.T) { + defer leaktest.Check(t)() + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedb, txnMgr, backends, _, addrs, cleanup := MockTxnMgr(log, 2) + defer cleanup() + + querys := []*xcontext.QueryTuple{ + &xcontext.QueryTuple{Query: "select * from node1", Backend: addrs[0]}, + &xcontext.QueryTuple{Query: "select * from node2", Backend: addrs[1]}, + } + + fakedb.AddQueryDelay(querys[0].Query, result2, 10000) + fakedb.AddQueryDelay(querys[1].Query, result2, 10000) + + { + txn, err := txnMgr.CreateTxn(backends) + assert.Nil(t, err) + defer txn.Finish() + + qzRows := tz.GetTxnzRows() + assert.NotNil(t, qzRows) + + time.Sleep(30 * time.Millisecond) + qzRows = tz.GetTxnzRows() + assert.NotNil(t, qzRows) + + time.Sleep(100 * time.Millisecond) + qzRows = tz.GetTxnzRows() + assert.NotNil(t, qzRows) + } + + { + txn, err := txnMgr.CreateBackupTxn(backends[addrs[0]]) + assert.Nil(t, err) + defer txn.Finish() + + qzRows := tz.GetTxnzRows() + assert.NotNil(t, qzRows) + + time.Sleep(30 * time.Millisecond) + qzRows = tz.GetTxnzRows() + assert.NotNil(t, qzRows) + + time.Sleep(100 * time.Millisecond) + qzRows = tz.GetTxnzRows() + assert.NotNil(t, qzRows) + } +} diff --git a/src/binlog/binlog.go b/src/binlog/binlog.go new file mode 100644 index 00000000..bc0a3149 --- /dev/null +++ b/src/binlog/binlog.go @@ -0,0 +1,206 @@ +/* + * Radon + * + * Copyright (c) 2017 QingCloud.com. + * Code is licensed under the GPLv3. + * + */ + +package binlog + +import ( + "config" + "os" + "path" + "path/filepath" + "sync" + "time" + "xbase" + "xbase/sync2" + + "github.com/xelabs/go-mysqlstack/xlog" +) + +const ( + prefix = "radon-" + extension = ".binlog" +) + +// Binlog tuple. 
+type Binlog struct { + log *xlog.Log + mu sync.RWMutex + wg sync.WaitGroup + id sync2.AtomicInt64 + done chan bool + conf *config.BinlogConfig + rfile xbase.RotateFile + binDir string + ioworker *IOWorker + sqlworkers map[int64]*SQLWorker + purgeTicker *time.Ticker +} + +// NewBinlog creates the new binlog tuple. +func NewBinlog(log *xlog.Log, conf *config.BinlogConfig) *Binlog { + return &Binlog{ + log: log, + conf: conf, + done: make(chan bool), + binDir: conf.LogDir, + ioworker: NewIOWorker(log, conf), + sqlworkers: make(map[int64]*SQLWorker, 64), + purgeTicker: time.NewTicker(time.Duration(time.Second * 300)), // 5 minutes. + rfile: xbase.NewRotateFile(conf.LogDir, prefix, extension, conf.MaxSize), + } +} + +// Init used to init the ioworker. +func (bin *Binlog) Init() error { + log := bin.log + + log.Info("binlog.init.conf:%+v", bin.conf) + defer log.Info("binlog.init.done") + + // Purge worker. + bin.wg.Add(1) + go func(bin *Binlog) { + defer bin.wg.Done() + bin.purge() + }(bin) + + // IO Worker. + return bin.ioworker.Init() +} + +func (bin *Binlog) addSQLWork(sqlworker *SQLWorker) { + bin.mu.Lock() + defer bin.mu.Unlock() + bin.id.Add(1) + id := bin.id.Get() + sqlworker.setID(id) + bin.sqlworkers[id] = sqlworker +} + +func (bin *Binlog) removeSQLWork(sqlworker *SQLWorker) { + bin.mu.Lock() + defer bin.mu.Unlock() + delete(bin.sqlworkers, sqlworker.id) +} + +// NewSQLWorker creates the new sql worker. +func (bin *Binlog) NewSQLWorker(ts int64) (*SQLWorker, error) { + sqlworker := NewSQLWorker(bin.log, bin.conf, ts) + if err := sqlworker.Init(); err != nil { + return nil, err + } + bin.addSQLWork(sqlworker) + return sqlworker, nil +} + +// CloseSQLWorker used to close the sqlworker. +func (bin *Binlog) CloseSQLWorker(sqlworker *SQLWorker) { + bin.removeSQLWork(sqlworker) + sqlworker.close() +} + +// LogEvent used to write the event to the bin. 
+func (bin *Binlog) LogEvent(typ string, schema string, sql string) { + bin.ioworker.LogEvent(typ, schema, sql) +} + +// Close used to close the bin. +func (bin *Binlog) Close() { + close(bin.done) + bin.wg.Wait() + bin.ioworker.Close() +} + +func (bin *Binlog) purge() { + defer bin.purgeTicker.Stop() + for { + select { + case <-bin.purgeTicker.C: + bin.doPurge() + case <-bin.done: + return + } + } +} + +func (bin *Binlog) doPurge() { + minName := "" + bin.mu.RLock() + for _, sqlworker := range bin.sqlworkers { + if minName == "" { + minName = sqlworker.RelayName() + } else { + if sqlworker.RelayName() < minName { + minName = sqlworker.RelayName() + } + } + } + bin.mu.RUnlock() + + if minName != "" { + bin.purgebinTo(minName) + } +} + +func (bin *Binlog) purgebinTo(name string) { + log := bin.log + + // name is empty + name = path.Base(name) + if name == "." { + return + } + + oldLogs, err := bin.rfile.GetOldLogInfos() + if err != nil { + log.Error("bin.purge.bin.to[%s].get.old.loginfos.error:%v", name, err) + return + } + for _, old := range oldLogs { + if old.Name < name { + os.Remove(filepath.Join(bin.binDir, old.Name)) + } + } +} + +// LastGTID returns the last event GTID. +func (bin *Binlog) LastGTID() int64 { + return bin.ioworker.GTID() +} + +// RelayInfo represents the relay sqlworker status. +type RelayInfo struct { + ID int64 + StartGTID int64 + RelayGTID int64 + LastWriteGTID int64 + Relaybin string + RelayPosition int64 + SecondBehinds int64 +} + +// RelayInfos returns all the sqlworker status. 
+func (bin *Binlog) RelayInfos() []RelayInfo { + bin.mu.RLock() + defer bin.mu.RUnlock() + lastGTID := bin.ioworker.GTID() + relayInfos := make([]RelayInfo, 0, 8) + for id, sqlworker := range bin.sqlworkers { + relayInfo := RelayInfo{ + ID: id, + StartGTID: sqlworker.SeekGTID(), + RelayGTID: sqlworker.RelayGTID(), + LastWriteGTID: lastGTID, + Relaybin: sqlworker.RelayName(), + RelayPosition: sqlworker.RelayPosition(), + SecondBehinds: (lastGTID - sqlworker.RelayGTID()) / int64(time.Second), + } + relayInfos = append(relayInfos, relayInfo) + } + return relayInfos +} diff --git a/src/binlog/binlog_test.go b/src/binlog/binlog_test.go new file mode 100644 index 00000000..4747f813 --- /dev/null +++ b/src/binlog/binlog_test.go @@ -0,0 +1,109 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package binlog + +import ( + "config" + "os" + "testing" + "time" + + "github.com/fortytw2/leaktest" + "github.com/stretchr/testify/assert" + "github.com/xelabs/go-mysqlstack/xlog" +) + +func TestBinlog(t *testing.T) { + os.RemoveAll(mockDir) + defer leaktest.Check(t)() + + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + conf := &config.BinlogConfig{ + MaxSize: 102400, + LogDir: mockDir, + } + + binlog := NewBinlog(log, conf) + err := binlog.Init() + assert.Nil(t, err) + defer binlog.Close() + + ts := time.Now().UnixNano() + n := 10 + schema := "radon" + for i := 0; i < n; i++ { + query := "select a,b,cd from table1 where a=b and c=d and e=d group by id order\n by desc" + binlog.LogEvent("SELECT", schema, query) + } + time.Sleep(time.Second) + + sqlworker, err := binlog.NewSQLWorker(ts) + assert.Nil(t, err) + defer binlog.CloseSQLWorker(sqlworker) + + relayInfos := binlog.RelayInfos() + assert.True(t, len(relayInfos) > 0) +} + +func TestBinlogPurge(t *testing.T) { + os.RemoveAll(mockDir) + defer leaktest.Check(t)() + + log := xlog.NewStdLog(xlog.Level(xlog.ERROR)) + conf := &config.BinlogConfig{ + MaxSize: 102400, + LogDir: 
mockDir,
+	}
+
+	binlog := NewBinlog(log, conf)
+	err := binlog.Init()
+	assert.Nil(t, err)
+	defer binlog.Close()
+
+	n := 10000
+	schema := "radon"
+	for i := 0; i < n; i++ {
+		query := "select a,b,cd from table1 where a=b and c=d and e=d group by id order\n by desc"
+		binlog.LogEvent("SELECT", schema, query)
+	}
+	time.Sleep(time.Second)
+
+	// Pick three consecutive rotated binlog files around the middle.
+	logs1, _ := binlog.rfile.GetOldLogInfos()
+	file0 := logs1[len(logs1)/2-1]
+	file1 := logs1[len(logs1)/2]
+	file2 := logs1[len(logs1)/2+1]
+
+	// sqlworker1: seeks to file1's timestamp, so its relay file is file1.
+	{
+		sqlworker1, err := binlog.NewSQLWorker(file1.Ts)
+		assert.Nil(t, err)
+		defer binlog.CloseSQLWorker(sqlworker1)
+		assert.Equal(t, file1.Name, sqlworker1.RelayName())
+	}
+
+	// sqlworker2: seeks to file2's timestamp, so its relay file is file2.
+	{
+		sqlworker2, err := binlog.NewSQLWorker(file2.Ts)
+		assert.Nil(t, err)
+		defer binlog.CloseSQLWorker(sqlworker2)
+		assert.Equal(t, file2.Name, sqlworker2.RelayName())
+	}
+
+	{
+		// Purge.
+		binlog.doPurge()
+
+		// Check old binlogs: everything older than the slowest relay
+		// file (file1) must be gone, so file0 no longer appears.
+		logs1, _ = binlog.rfile.GetOldLogInfos()
+		for _, logInfo := range logs1 {
+			assert.True(t, logInfo.Name > file0.Name)
+		}
+		binlog.LastGTID()
+	}
+}
diff --git a/src/binlog/event.go b/src/binlog/event.go
new file mode 100644
index 00000000..5136a5c6
--- /dev/null
+++ b/src/binlog/event.go
@@ -0,0 +1,96 @@
+/*
+ * Radon
+ *
+ * Copyright (c) 2017 QingCloud.com.
+ * Code is licensed under the GPLv3.
+ *
+ */
+
+package binlog
+
+import (
+	"fmt"
+	"hash/crc32"
+
+	"github.com/xelabs/go-mysqlstack/common"
+)
+
+// Event binlog event.
+type Event struct {
+	// An identifier that describes the event type.
+	Type    string
+	Schema  string
+	Query   string
+	Version uint16
+	// The GTID of this event.
+	Timestamp uint64
+	// The name of the file that is being listed.
+	LogName string
+	// The position at which the event occurs.
+	Pos int64
+	// The position at which the next event begins, which is equal to Pos plus the size of the event.
+	EndLogPos int64
+}
+
+const (
+	// v1 is the binlog event binary-format version.
+	v1 = 1
+)
+
+// packEventv1 encodes e into the v1 binary layout:
+// version(U16) | timestamp(U64) | type | schema | query (len-encoded strings) | crc32(U32).
+// Note: the checksum covers only e.Query.
+func packEventv1(e *Event) []byte {
+	// NOTE(review): this local name shadows the hash/crc32 package for
+	// the rest of this function (harmless here, but easy to trip over).
+	crc32 := crc32.ChecksumIEEE(common.StringToBytes(e.Query))
+
+	buf := common.NewBuffer(256)
+	buf.WriteU16(v1)
+	buf.WriteU64(e.Timestamp)
+	buf.WriteLenEncodeString(e.Type)
+	buf.WriteLenEncodeString(e.Schema)
+	buf.WriteLenEncodeString(e.Query)
+	buf.WriteU32(crc32)
+	return buf.Datas()
+}
+
+// unpackEvent decodes datas (see packEventv1 for the v1 layout) and
+// verifies the query checksum; it fails on a truncated buffer, a
+// checksum mismatch, or an unknown format version.
+func unpackEvent(datas []byte) (*Event, error) {
+	var err error
+	e := &Event{}
+
+	buf := common.ReadBuffer(datas)
+	e.Version, err = buf.ReadU16()
+	if err != nil {
+		return nil, fmt.Errorf("event.read.version.error:%v", err)
+	}
+	switch e.Version {
+	case v1:
+		// GTID.
+		if e.Timestamp, err = buf.ReadU64(); err != nil {
+			return nil, err
+		}
+
+		// Type.
+		if e.Type, err = buf.ReadLenEncodeString(); err != nil {
+			return nil, err
+		}
+
+		// Schema.
+		if e.Schema, err = buf.ReadLenEncodeString(); err != nil {
+			return nil, err
+		}
+
+		// Query.
+		if e.Query, err = buf.ReadLenEncodeString(); err != nil {
+			return nil, err
+		}
+
+		// CRC32: recompute over Query and compare with the stored value.
+		var crc1, crc2 uint32
+		crc1 = crc32.ChecksumIEEE(common.StringToBytes(e.Query))
+		if crc2, err = buf.ReadU32(); err != nil {
+			return nil, err
+		}
+		if crc1 != crc2 {
+			return nil, fmt.Errorf("event.crc32.check[%v].read[%v].query[%v]", crc1, crc2, e.Query)
+		}
+		return e, nil
+	default:
+		return nil, fmt.Errorf("event.unknow.version[%v]", e.Version)
+	}
+}
diff --git a/src/binlog/event_test.go b/src/binlog/event_test.go
new file mode 100644
index 00000000..f8c3ef75
--- /dev/null
+++ b/src/binlog/event_test.go
@@ -0,0 +1,102 @@
+/*
+ * Radon
+ *
+ * Copyright 2018 The Radon Authors.
+ * Code is licensed under the GPLv3.
+ *
+ */
+
+package binlog
+
+import (
+	"log"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/xelabs/go-mysqlstack/common"
+)
+
+func TestEventv1(t *testing.T) {
+	e := &Event{
+		Type:  "INSERT",
+		Query: "insert into t1 values(1)",
+	}
+
+	// Round-trip: pack then unpack must preserve the query.
+	datas := packEventv1(e)
+	e1, err := unpackEvent(datas)
+	assert.Nil(t, err)
+	assert.Equal(t, e.Query, e1.Query)
+	log.Printf("...%+v", e1)
+}
+
+func TestEventv1Error(t *testing.T) {
+	// version error: only one byte where a U16 version is expected.
+	{
+		buf := common.NewBuffer(128)
+		buf.WriteU8(v1)
+		_, err := unpackEvent(buf.Datas())
+		assert.NotNil(t, err)
+	}
+
+	// timestamp error: a string where the U64 timestamp is expected.
+	{
+		buf := common.NewBuffer(128)
+		buf.WriteU16(v1)
+		buf.WriteLenEncodeString("xx")
+		_, err := unpackEvent(buf.Datas())
+		assert.NotNil(t, err)
+	}
+
+	// type error: a U64 where the len-encoded type string is expected.
+	{
+		buf := common.NewBuffer(128)
+		buf.WriteU16(v1)
+		buf.WriteU64(0)
+		buf.WriteU64(65535)
+		_, err := unpackEvent(buf.Datas())
+		assert.NotNil(t, err)
+	}
+
+	// query error: a U64 where the len-encoded query string is expected.
+	{
+		buf := common.NewBuffer(128)
+		buf.WriteU16(v1)
+		buf.WriteU64(0)
+		buf.WriteLenEncodeString("INSERT")
+		buf.WriteU64(65535)
+		_, err := unpackEvent(buf.Datas())
+		assert.NotNil(t, err)
+	}
+
+	// crc error: checksum truncated (U8 written where a U32 is read).
+	{
+		buf := common.NewBuffer(128)
+		buf.WriteU16(v1)
+		buf.WriteU64(0)
+		buf.WriteLenEncodeString("INSERT")
+		buf.WriteLenEncodeString("INSERT")
+		buf.WriteU8(124)
+		_, err := unpackEvent(buf.Datas())
+		assert.NotNil(t, err)
+	}
+
+	// crc check error: full-width checksum present but wrong value.
+	{
+		buf := common.NewBuffer(128)
+		buf.WriteU16(v1)
+		buf.WriteU64(0)
+		buf.WriteLenEncodeString("INSERT")
+		buf.WriteLenEncodeString("INSERT")
+		buf.WriteU32(124)
+		_, err := unpackEvent(buf.Datas())
+		assert.NotNil(t, err)
+	}
+
+	// unknown version.
+ { + buf := common.NewBuffer(128) + buf.WriteU16(125) + _, err := unpackEvent(buf.Datas()) + assert.NotNil(t, err) + } +} diff --git a/src/binlog/info.go b/src/binlog/info.go new file mode 100644 index 00000000..658abb4f --- /dev/null +++ b/src/binlog/info.go @@ -0,0 +1,93 @@ +/* + * Radon + * + * Copyright (c) 2017 QingCloud.com. + * Code is licensed under the GPLv3. + * + */ + +package binlog + +import ( + "config" + "encoding/json" + "io/ioutil" + "os" + "path/filepath" + "xbase/sync2" + + "github.com/xelabs/go-mysqlstack/xlog" +) + +type info struct { + Binlog string `json:"binlog"` + Timestamp int64 `json:"gtid"` +} + +// Info tuple. +type Info struct { + log *xlog.Log + file *os.File + binDir string + infoFile string + currTs sync2.AtomicInt64 + currBin sync2.AtomicString +} + +// NewInfo returns info tuple. +func NewInfo(log *xlog.Log, conf *config.BinlogConfig, fileName string) *Info { + return &Info{ + log: log, + binDir: conf.LogDir, + infoFile: filepath.Join(conf.LogDir, fileName), + } +} + +// Init used to init the relay. +func (inf *Info) Init() error { + f, err := os.OpenFile(inf.infoFile, os.O_WRONLY|os.O_CREATE, os.ModePerm) + if err != nil { + return err + } + inf.file = f + return nil +} + +// Sync used to sync the ts to the relay file. +func (inf *Info) Sync(binlog string, ts int64) error { + info := &info{Binlog: binlog, Timestamp: ts} + jsons, err := info.MarshalJSON() + if err != nil { + return err + } + inf.file.Truncate(0) + _, err = inf.file.WriteAt(jsons, 0) + inf.currTs.Set(ts) + inf.currBin.Set(binlog) + return err +} + +// ReadTs used to get the ts from the relay file. +func (inf *Info) ReadTs() (int64, error) { + info := &info{} + buf, err := ioutil.ReadFile(inf.infoFile) + if err != nil { + return 0, err + } + + if len(buf) > 0 { + err = json.Unmarshal(buf, info) + if err != nil { + return 0, err + } + } + return info.Timestamp, nil +} + +// Close used to close the file of relay. 
+func (inf *Info) Close() { + log := inf.log + inf.file.Sync() + inf.file.Close() + log.Info("info.close.last[binlog:%v, ts:%v]", inf.currBin.Get(), inf.currTs.Get()) +} diff --git a/src/binlog/info_easyjson.go b/src/binlog/info_easyjson.go new file mode 100644 index 00000000..4203fb74 --- /dev/null +++ b/src/binlog/info_easyjson.go @@ -0,0 +1,84 @@ +// Code generated by easyjson for marshaling/unmarshaling. DO NOT EDIT. + +package binlog + +import ( + json "encoding/json" + easyjson "github.com/mailru/easyjson" + jlexer "github.com/mailru/easyjson/jlexer" + jwriter "github.com/mailru/easyjson/jwriter" +) + +// suppress unused package warning +var ( + _ *json.RawMessage + _ *jlexer.Lexer + _ *jwriter.Writer + _ easyjson.Marshaler +) + +func easyjsonDdc53814DecodeBinlog(in *jlexer.Lexer, out *info) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeString() + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "binlog": + out.Binlog = string(in.String()) + case "gtid": + out.Timestamp = int64(in.Int64()) + default: + in.SkipRecursive() + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} +func easyjsonDdc53814EncodeBinlog(out *jwriter.Writer, in info) { + out.RawByte('{') + first := true + _ = first + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"binlog\":") + out.String(string(in.Binlog)) + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"gtid\":") + out.Int64(int64(in.Timestamp)) + out.RawByte('}') +} + +// MarshalJSON supports json.Marshaler interface +func (v info) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + easyjsonDdc53814EncodeBinlog(&w, v) + return w.Buffer.BuildBytes(), w.Error +} + +// UnmarshalJSON supports json.Unmarshaler interface +func (v *info) UnmarshalJSON(data []byte) error { + r := 
jlexer.Lexer{Data: data} + easyjsonDdc53814DecodeBinlog(&r, v) + return r.Error() +} diff --git a/src/binlog/info_test.go b/src/binlog/info_test.go new file mode 100644 index 00000000..aca662fe --- /dev/null +++ b/src/binlog/info_test.go @@ -0,0 +1,63 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package binlog + +import ( + "config" + "fmt" + "os" + "testing" + "time" + + "github.com/fortytw2/leaktest" + "github.com/stretchr/testify/assert" + "github.com/xelabs/go-mysqlstack/xlog" +) + +func TestInfo(t *testing.T) { + defer leaktest.Check(t)() + os.RemoveAll(mockDir) + os.MkdirAll(mockDir, os.ModePerm) + + log := xlog.NewStdLog(xlog.Level(xlog.INFO)) + conf := &config.BinlogConfig{ + MaxSize: 102400, + LogDir: mockDir, + } + + ts := time.Now().UnixNano() + { + relay := NewInfo(log, conf, "relay-log.info") + err := relay.Init() + assert.Nil(t, err) + defer relay.Close() + + n := 100000 + now := time.Now() + for i := 0; i < n; i++ { + ts = time.Now().UnixNano() + relay.Sync("xx", ts) + } + took := time.Since(now) + fmt.Printf(" LOOP\t%v COST %v, avg:%v/s\n", n, took, (int64(n)/(took.Nanoseconds()/1e6))*1000) + + ts1, _ := relay.ReadTs() + assert.Equal(t, ts, ts1) + } + + // + { + relay := NewInfo(log, conf, "relay-log.info") + err := relay.Init() + assert.Nil(t, err) + defer relay.Close() + ts1, _ := relay.ReadTs() + assert.Equal(t, ts1, ts) + } +} diff --git a/src/binlog/io.go b/src/binlog/io.go new file mode 100644 index 00000000..aaed224e --- /dev/null +++ b/src/binlog/io.go @@ -0,0 +1,118 @@ +/* + * Radon + * + * Copyright (c) 2017 QingCloud.com. + * Code is licensed under the GPLv3. + * + */ + +package binlog + +import ( + "config" + "os" + "sync" + "time" + "xbase" + + "github.com/xelabs/go-mysqlstack/common" + "github.com/xelabs/go-mysqlstack/xlog" +) + +var ( + binlogInfoFile = "bin-log.info" +) + +// IOWorker tuple. 
+type IOWorker struct { + log *xlog.Log + info *Info + rfile xbase.RotateFile + binDir string + maxSize int + queue chan *Event + wg sync.WaitGroup +} + +// NewIOWorker creates the new IOWorker. +func NewIOWorker(log *xlog.Log, conf *config.BinlogConfig) *IOWorker { + return &IOWorker{ + log: log, + binDir: conf.LogDir, + maxSize: conf.MaxSize, + queue: make(chan *Event, 1), + info: NewInfo(log, conf, binlogInfoFile), + rfile: xbase.NewRotateFile(conf.LogDir, prefix, extension, conf.MaxSize), + } +} + +// GTID returns the last event Timestamp. +func (io *IOWorker) GTID() int64 { + log := io.log + ts, err := io.info.ReadTs() + if err != nil { + log.Error("ioworker.bin.log.info.read.ts.error:%v", err) + return 0 + } + return ts +} + +// Init used to create the log dir. +func (io *IOWorker) Init() error { + log := io.log + log.Info("binlog.ioworker.init.bindir[%v]", io.binDir) + if err := os.MkdirAll(io.binDir, 0744); err != nil { + return err + } + if err := io.info.Init(); err != nil { + return err + } + + io.wg.Add(1) + go func(io *IOWorker) { + defer io.wg.Done() + io.eventConsumer() + }(io) + log.Info("binlog.ioworker.init.done") + return nil +} + +func (io *IOWorker) eventConsumer() { + for e := range io.queue { + io.writeEvent(e) + } +} + +func (io *IOWorker) writeEvent(e *Event) { + log := io.log + datas := packEventv1(e) + + buf := common.NewBuffer(256) + buf.WriteU32(uint32(len(datas))) + buf.WriteBytes(datas) + if _, err := io.rfile.Write(buf.Datas()); err != nil { + log.Panic("binlog.ioworker.write.event[query:%v].error:%v", e.Query, err) + } + io.info.Sync(io.rfile.Name(), int64(e.Timestamp)) +} + +// LogEvent used to write the query to binary log. +func (io *IOWorker) LogEvent(typ string, schema string, query string) { + io.queue <- &Event{ + Type: typ, + Schema: schema, + Query: query, + Timestamp: uint64(time.Now().UTC().UnixNano()), + } +} + +// Close used to close the io worker. 
+func (io *IOWorker) Close() { + io.log.Info("ioworker.prepare.to.close") + close(io.queue) + io.wg.Wait() + io.rfile.Sync() + io.rfile.Close() + io.info.Close() + io.log.Info("ioworker.closed") +} diff --git a/src/binlog/io_test.go b/src/binlog/io_test.go new file mode 100644 index 00000000..58f18bb3 --- /dev/null +++ b/src/binlog/io_test.go @@ -0,0 +1,98 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package binlog + +import ( + "config" + "fmt" + "os" + "sync" + "testing" + "time" + + "github.com/fortytw2/leaktest" + "github.com/stretchr/testify/assert" + "github.com/xelabs/go-mysqlstack/xlog" +) + +func TestIOWorker(t *testing.T) { + defer leaktest.Check(t)() + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + conf := &config.BinlogConfig{ + MaxSize: 102400, + LogDir: "/tmp/radon/test/binlog", + } + + os.RemoveAll(conf.LogDir) + ioworker := NewIOWorker(log, conf) + err := ioworker.Init() + assert.Nil(t, err) + defer ioworker.Close() + + n := 10000 + schema := "radon" + for i := 0; i < n; i++ { + query := "select a,b,cd from table1 where a=b and c=d and e=d group by id order\n by desc" + ioworker.LogEvent("SELECT", schema, query) + } +} + +func TestIOWorkerMultiThread(t *testing.T) { + defer leaktest.Check(t)() + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + conf := &config.BinlogConfig{ + MaxSize: 1024 * 1024, + LogDir: "/tmp/radon/test/binlog", + } + os.RemoveAll(conf.LogDir) + ioworker := NewIOWorker(log, conf) + err := ioworker.Init() + assert.Nil(t, err) + defer ioworker.Close() + + schema := "radon" + var wait sync.WaitGroup + for k := 0; k < 10; k++ { + wait.Add(1) + go func(ioworker *IOWorker) { + n := 10000 + for i := 0; i < n; i++ { + query := "select a,b,cd from table1 where a=b and c=d and e=d group by id order\n by desc" + ioworker.LogEvent("SELECT", schema, query) + } + wait.Done() + }(ioworker) + } + wait.Wait() +} + +func TestIOWorkerBench(t *testing.T) { + log := 
xlog.NewStdLog(xlog.Level(xlog.PANIC))
+	conf := &config.BinlogConfig{
+		MaxSize: 1024 * 1024 * 100,
+		LogDir:  "/tmp/radon/test/binlog",
+	}
+	os.RemoveAll(conf.LogDir)
+	ioworker := NewIOWorker(log, conf)
+	err := ioworker.Init()
+	assert.Nil(t, err)
+	defer ioworker.Close()
+
+	{
+		N := 100000
+		schema := "radon"
+		now := time.Now()
+		for i := 0; i < N; i++ {
+			query := "select a,b,cd from table1 where a=b and c=d and e=d group by id order\n by desc"
+			ioworker.LogEvent("SELECT", schema, query)
+		}
+		took := time.Since(now)
+		fmt.Printf(" LOOP\t%v COST %v, avg:%v/s\n", N, took, (int64(N)/(took.Nanoseconds()/1e6))*1000)
+	}
+}
diff --git a/src/binlog/mock.go b/src/binlog/mock.go
new file mode 100644
index 00000000..5092a342
--- /dev/null
+++ b/src/binlog/mock.go
+/*
+ * Radon
+ *
+ * Copyright (c) 2017 QingCloud.com.
+ * Code is licensed under the GPLv3.
+ *
+ */
+
+package binlog
+
+import (
+	"errors"
+	"xbase"
+)
+
+var (
+	_ xbase.RotateFile = &mockRotateFile{}
+)
+
+// mockRotateFile is a test double for xbase.RotateFile: Write and the
+// Get*LogInfo methods always fail so tests can exercise error paths.
+type mockRotateFile struct {
+}
+
+func (mf *mockRotateFile) Write(b []byte) (int, error) {
+	return 0, errors.New("mock.rfile.write.error")
+}
+
+func (mf *mockRotateFile) Sync() error {
+	return nil
+}
+
+func (mf *mockRotateFile) Close() {
+}
+
+func (mf *mockRotateFile) Name() string {
+	return ""
+}
+
+func (mf *mockRotateFile) GetOldLogInfos() ([]xbase.LogInfo, error) {
+	return nil, errors.New("mock.rfile.GetOldLogInfos.error")
+}
+
+func (mf *mockRotateFile) GetNextLogInfo(logName string) (xbase.LogInfo, error) {
+	// Error message names this method (was a copy-paste of GetOldLogInfos).
+	return xbase.LogInfo{}, errors.New("mock.rfile.GetNextLogInfo.error")
+}
+
+func (mf *mockRotateFile) GetCurrLogInfo(ts int64) (xbase.LogInfo, error) {
+	return xbase.LogInfo{}, errors.New("mock.rfile.GetCurrLogInfo.error")
+}
diff --git a/src/binlog/sql.go b/src/binlog/sql.go
new file mode 100644
index 00000000..386ec622
--- /dev/null
+++ b/src/binlog/sql.go
+/*
+ * Radon
+ *
+ * Copyright (c) 2017 QingCloud.com.
+ * Code is licensed under the GPLv3. + * + */ + +package binlog + +import ( + "config" + "io" + "os" + "path" + "time" + "xbase" + "xbase/sync2" + + "github.com/xelabs/go-mysqlstack/common" + "github.com/xelabs/go-mysqlstack/xlog" +) + +// SQLWorker tuple. +type SQLWorker struct { + log *xlog.Log + rfile xbase.RotateFile + + // gtid is timestamp, + // (time.Now().UTC().UnixNano(), format as '1514254947594569594' + binDir string + currFile *os.File + id int64 + seekTimestamp int64 + stop sync2.AtomicBool + currPos sync2.AtomicInt64 + currTimestamp sync2.AtomicInt64 + currBinName sync2.AtomicString +} + +// NewSQLWorker creates the new SQLWorker. +func NewSQLWorker(log *xlog.Log, conf *config.BinlogConfig, ts int64) *SQLWorker { + return &SQLWorker{ + log: log, + binDir: conf.LogDir, + seekTimestamp: ts, + rfile: xbase.NewRotateFile(conf.LogDir, prefix, extension, conf.MaxSize), + } +} + +func (sql *SQLWorker) close() { + sql.stop.Set(true) + if sql.currFile != nil { + sql.currFile.Close() + } + sql.rfile.Close() + sql.currPos.Set(0) + sql.currBinName.Set("") + sql.log.Info("sqlworker.closed") +} + +// RelayName returns the current binlog which are read. +func (sql *SQLWorker) RelayName() string { + return sql.currBinName.Get() +} + +// RelayPosition returns the current binlog position which are read. +func (sql *SQLWorker) RelayPosition() int64 { + return sql.currPos.Get() +} + +// RelayGTID returns the last event timestamp have read. +func (sql *SQLWorker) RelayGTID() int64 { + return sql.currTimestamp.Get() +} + +// SeekGTID returns the timestamp which we started. +func (sql *SQLWorker) SeekGTID() int64 { + return sql.seekTimestamp +} + +func (sql *SQLWorker) setID(id int64) { + sql.id = id +} + +func (sql *SQLWorker) readOneEvent() (*Event, error) { + // No any binlog files in binlog dir, we return the EOF error. + if sql.currBinName.Get() == "" { + return nil, io.EOF + } + + pos := sql.currPos.Get() + // 1. Read the event length datas, 4bytes. 
+ lenDatas := make([]byte, 4) + _, err := sql.currFile.ReadAt(lenDatas, sql.currPos.Get()) + if err != nil { + return nil, err + } + + buf := common.ReadBuffer(lenDatas) + len, err := buf.ReadU32() + if err != nil { + return nil, err + } + + // 2. Read the event datas. + datas := make([]byte, len) + _, err = sql.currFile.ReadAt(datas, sql.currPos.Get()+4) + if err != nil { + return nil, err + } + + // 3. Unpack the event. + event, err := unpackEvent(datas) + if err != nil { + return nil, err + } + + // Set the position at last. + sql.currPos.Add(4) + sql.currPos.Add(int64(len)) + endLogPos := sql.currPos.Get() + + event.Pos = pos + event.LogName = sql.currBinName.Get() + event.EndLogPos = endLogPos + return event, nil +} + +func (sql *SQLWorker) seekToEvent(ts int64) error { + prevPos := sql.currPos.Get() + for !sql.stop.Get() { + event, err := sql.readOneEvent() + if err != nil { + // We have got the end of the current binlog. + if err == io.EOF { + return nil + } + return err + } + // Find the first larger event, we should stop. + if event.Timestamp > uint64(ts) { + // Reset the postion to the previous. + sql.currPos.Set(prevPos) + return nil + } + // Reset the position. + prevPos = sql.currPos.Get() + } + return nil +} + +// Init used to init the sql current position and seek to the right event. 
+func (sql *SQLWorker) Init() error { + log := sql.log + if sql.currBinName.Get() == "" { + currLogInfo, err := sql.rfile.GetCurrLogInfo(sql.seekTimestamp) + if err != nil { + log.Error("binlog.sql.init.get.current[seekts:%v].loginfo.error:%v", sql.seekTimestamp, err) + return err + } + + if currLogInfo.Name != "" { + log.Info("sqlworker.init.currlog[%v].seekts[%v, %v]", currLogInfo.Name, sql.seekTimestamp, time.Unix(0, sql.seekTimestamp)) + file, err := os.Open(path.Join(sql.binDir, currLogInfo.Name)) + if err != nil { + return err + } + sql.currPos.Set(0) + sql.currFile = file + sql.currBinName.Set(currLogInfo.Name) + return sql.seekToEvent(sql.seekTimestamp) + } + } + return nil +} + +func (sql *SQLWorker) checkNextFileExists() bool { + log := sql.log + logInfo, err := sql.rfile.GetNextLogInfo(sql.currBinName.Get()) + if err != nil { + log.Error("binlog.sql.get.next.log.curr[%s].error:%v", sql.currBinName.Get(), err) + return false + } + + if logInfo.Name == "" { + return false + } + + if logInfo.Name != sql.currBinName.Get() { + // Here, we make sure the next file is exists, but we check that whether a new write(stale) to the sql.currBinName binlog file. + if sql.currBinName.Get() != "" { + fileInfo, err := os.Lstat(path.Join(sql.binDir, sql.currBinName.Get())) + if err != nil { + log.Error("binlog.sql.check.next.file.stat[%s].error:%v", sql.currBinName.Get(), err) + return false + } + size := fileInfo.Size() + if sql.currPos.Get() < size { + log.Warning("binlog.sql.found.stale.write.size[%v].currpos[%v]", size, sql.currPos.Get()) + return false + } + } + + // Rotate to the next binlog file. + file, err := os.Open(path.Join(sql.binDir, logInfo.Name)) + if err != nil { + log.Error("binlog.sql.check.next.file.error:%v", err) + return false + } + sql.currFile.Close() + sql.currFile = file + sql.currPos.Set(0) + sql.currBinName.Set(logInfo.Name) + return true + } + // We don't have next the binlog file. + return false +} + +// NextEvent used to read the next event. 
+// If we get the end of the current binlog file and don't have next binlog, just returns (nil,nil). +func (sql *SQLWorker) NextEvent() (*Event, error) { + event, err := sql.readOneEvent() + if err != nil { + if err == io.EOF { + if !sql.checkNextFileExists() { + return nil, nil + } + // Changed to the next binlog file, read next event from the new file. + return sql.NextEvent() + } + return nil, err + } + sql.currTimestamp.Set(int64(event.Timestamp)) + return event, nil +} diff --git a/src/binlog/sql_test.go b/src/binlog/sql_test.go new file mode 100644 index 00000000..ef17f3a9 --- /dev/null +++ b/src/binlog/sql_test.go @@ -0,0 +1,331 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package binlog + +import ( + "config" + "math/rand" + "os" + "sync" + "testing" + "time" + + "github.com/fortytw2/leaktest" + "github.com/stretchr/testify/assert" + "github.com/xelabs/go-mysqlstack/xlog" +) + +var ( + mockDir = "/tmp/radon/test/binlog" +) + +func TestSQLWorker(t *testing.T) { + os.RemoveAll(mockDir) + defer leaktest.Check(t)() + + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + conf := &config.BinlogConfig{ + MaxSize: 102400, + LogDir: mockDir, + } + + ts := time.Now().UnixNano() + ioworker := NewIOWorker(log, conf) + err := ioworker.Init() + assert.Nil(t, err) + defer ioworker.Close() + + n := 10000 + schema := "radon" + for i := 0; i < n; i++ { + query := "select a,b,cd from table1 where a=b and c=d and e=d group by id order\n by desc" + ioworker.LogEvent("SELECT", schema, query) + } + time.Sleep(time.Second) + + sqlworker := NewSQLWorker(log, conf, ts) + err = sqlworker.Init() + assert.Nil(t, err) + defer sqlworker.close() + + got := 0 + for { + event, err := sqlworker.NextEvent() + if err != nil { + break + } + if event == nil { + break + } + got++ + } + assert.Equal(t, n, got) +} + +func TestSQLWorkerInitError(t *testing.T) { + os.RemoveAll(mockDir) + defer leaktest.Check(t)() + + log := 
xlog.NewStdLog(xlog.Level(xlog.PANIC)) + conf := &config.BinlogConfig{ + MaxSize: 102400, + LogDir: mockDir, + } + + sqlworker := NewSQLWorker(log, conf, 0) + sqlworker.rfile = &mockRotateFile{} + err := sqlworker.Init() + assert.NotNil(t, err) + defer sqlworker.close() + + // For mock.go code coverage. + { + sqlworker.rfile.Write([]byte{0x00}) + sqlworker.rfile.Sync() + sqlworker.rfile.GetOldLogInfos() + sqlworker.rfile.GetNextLogInfo("") + } +} + +func TestSQLWorkerNoAnyBinlogFiles(t *testing.T) { + os.RemoveAll(mockDir) + defer leaktest.Check(t)() + + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + conf := &config.BinlogConfig{ + MaxSize: 102400, + LogDir: mockDir, + } + + ts := time.Now().UnixNano() + ioworker := NewIOWorker(log, conf) + err := ioworker.Init() + assert.Nil(t, err) + defer ioworker.Close() + + sqlworker := NewSQLWorker(log, conf, ts) + err = sqlworker.Init() + assert.Nil(t, err) + defer sqlworker.close() + + // Writes events. + n := 100 + schema := "radon" + for i := 0; i < n; i++ { + query := "select a,b,cd from table1 where a=b and c=d and e=d group by id order\n by desc" + ioworker.LogEvent("SELECT", schema, query) + } + time.Sleep(500 * time.Millisecond) + + // Reads events. 
+ got := 0 + for { + event, err := sqlworker.NextEvent() + if err != nil { + break + } + if event == nil { + break + } + got++ + } + assert.Equal(t, n, got) +} + +func TestSQLWorkerSeekEvent(t *testing.T) { + os.RemoveAll(mockDir) + defer leaktest.Check(t)() + + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + conf := &config.BinlogConfig{ + MaxSize: 102400, + LogDir: mockDir, + } + os.RemoveAll(conf.LogDir) + + ioworker := NewIOWorker(log, conf) + err := ioworker.Init() + assert.Nil(t, err) + + n := 10 + schema := "radon" + for i := 0; i < n; i++ { + query := "select a,b,cd from table1 where a=b and c=d and e=d group by id order\n by desc" + ioworker.LogEvent("SELECT", schema, query) + } + ts := time.Now().UnixNano() + ioworker.Close() + time.Sleep(time.Second) + + { + sqlworker := NewSQLWorker(log, conf, ts) + err = sqlworker.Init() + assert.Nil(t, err) + defer sqlworker.close() + } + + { + ts = time.Now().UnixNano() + sqlworker := NewSQLWorker(log, conf, ts) + err = sqlworker.Init() + assert.Nil(t, err) + defer sqlworker.close() + } +} + +func TestSQLWorkerAndIOWorkerAsync(t *testing.T) { + os.RemoveAll(mockDir) + defer leaktest.Check(t)() + + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + conf := &config.BinlogConfig{ + MaxSize: 1024 * 1024, + LogDir: mockDir, + } + + var wg sync.WaitGroup + writeDone := make(chan bool) + os.RemoveAll(conf.LogDir) + ts := time.Now().UnixNano() + + wg.Add(1) + writes := 100000 + go func() { + defer wg.Done() + ioworker := NewIOWorker(log, conf) + err := ioworker.Init() + assert.Nil(t, err) + defer ioworker.Close() + + schema := "radon" + sleepParts := writes - writes/10 + for i := 0; i < writes; i++ { + query := "select a,b,cd from table1 where a=b and c=d and e=d group by id order\n by desc" + ioworker.LogEvent("SELECT", schema, query) + if i > sleepParts { + time.Sleep(time.Duration(rand.Intn(500)) * time.Microsecond) + } + } + writeDone <- true + }() + time.Sleep(time.Second) + + wg.Add(1) + reads := 0 + go func() { + defer 
wg.Done() + sqlworker := NewSQLWorker(log, conf, ts) + err := sqlworker.Init() + assert.Nil(t, err) + defer sqlworker.close() + + done := false + for !done { + select { + case <-writeDone: + done = true + default: + } + event, err := sqlworker.NextEvent() + assert.Nil(t, err) + if event == nil { + time.Sleep(time.Duration(rand.Intn(100)) * time.Microsecond) + } else { + reads++ + } + //log.Info("read.event:%+v", event) + } + }() + wg.Wait() + assert.Equal(t, writes, reads) +} + +func TestSQLWorkerSeekFromSecond(t *testing.T) { + os.RemoveAll(mockDir) + defer leaktest.Check(t)() + + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + conf := &config.BinlogConfig{ + MaxSize: 1024 * 1024, + LogDir: mockDir, + } + + os.RemoveAll(conf.LogDir) + ts := time.Now().UnixNano() + writes := 100 + { + ioworker := NewIOWorker(log, conf) + err := ioworker.Init() + assert.Nil(t, err) + defer ioworker.Close() + + schema := "radon" + for i := 0; i < writes; i++ { + query := "select a,b,cd from table1 where a=b and c=d and e=d group by id order\n by desc" + ioworker.LogEvent("SELECT", schema, query) + if i == 0 { + ts = time.Now().UnixNano() + } + } + } + + reads := 0 + { + sqlworker := NewSQLWorker(log, conf, ts) + err := sqlworker.Init() + assert.Nil(t, err) + defer sqlworker.close() + + for { + event, err := sqlworker.NextEvent() + assert.Nil(t, err) + if event == nil { + break + } else { + reads++ + } + } + } + assert.True(t, reads <= (writes-1)) +} + +func TestSQLWorkerStaleWrite(t *testing.T) { + os.RemoveAll(mockDir) + defer leaktest.Check(t)() + + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + conf := &config.BinlogConfig{ + MaxSize: 1024, + LogDir: mockDir, + } + + ts := time.Now().UnixNano() + ioworker := NewIOWorker(log, conf) + err := ioworker.Init() + assert.Nil(t, err) + defer ioworker.Close() + + n := 256 + schema := "radon" + for i := 0; i < n; i++ { + query := "select a,b,cd from table1 where a=b and c=d and e=d group by id order\n by desc" + ioworker.LogEvent("SELECT", 
schema, query) + } + time.Sleep(time.Second) + + sqlworker := NewSQLWorker(log, conf, ts) + err = sqlworker.Init() + assert.Nil(t, err) + defer sqlworker.close() + + _, err = sqlworker.NextEvent() + assert.Nil(t, err) + exists := sqlworker.checkNextFileExists() + assert.False(t, exists) +} diff --git a/src/build/info.go b/src/build/info.go new file mode 100644 index 00000000..aa7cc0c4 --- /dev/null +++ b/src/build/info.go @@ -0,0 +1,41 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package build + +import ( + "fmt" + "runtime" +) + +var ( + tag = "unknown" // tag of this build + git string // git hash + time string // build time + platform = fmt.Sprintf("%s %s", runtime.GOOS, runtime.GOARCH) +) + +// Info tuple. +type Info struct { + Tag string + Time string + Git string + GoVersion string + Platform string +} + +// GetInfo returns the info. +func GetInfo() Info { + return Info{ + GoVersion: runtime.Version(), + Tag: tag, + Time: time, + Git: git, + Platform: platform, + } +} diff --git a/src/build/ldflags.sh b/src/build/ldflags.sh new file mode 100755 index 00000000..a5033c82 --- /dev/null +++ b/src/build/ldflags.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env sh + +set -eu + +cd "$(dirname "${0}")/.." + +echo '-X "build.tag='$(git describe --tags)'"' \ + '-X "build.time='$(date -u '+%Y/%m/%d %H:%M:%S')'"' \ + '-X "build.git='$(git rev-parse --short HEAD)'"' diff --git a/src/cli/cli.go b/src/cli/cli.go new file mode 100644 index 00000000..b4e9fb61 --- /dev/null +++ b/src/cli/cli.go @@ -0,0 +1,46 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. 
+ * + */ + +package main + +import ( + "cli/cmd" + "fmt" + "os" + + "github.com/spf13/cobra" +) + +const ( + cliName = "radoncli" + cliDescription = "A simple command line client for radon" +) + +var ( + rootCmd = &cobra.Command{ + Use: cliName, + Short: cliDescription, + SuggestFor: []string{"radoncli"}, + } +) + +func init() { + rootCmd.AddCommand(cmd.NewVersionCommand()) + rootCmd.AddCommand(cmd.NewReadonlyCommand()) + rootCmd.AddCommand(cmd.NewTwopcCommand()) + rootCmd.AddCommand(cmd.NewDebugCommand()) + rootCmd.AddCommand(cmd.NewRelayCommand()) + rootCmd.AddCommand(cmd.NewBackupCommand()) +} + +func main() { + if err := rootCmd.Execute(); err != nil { + fmt.Println(err) + os.Exit(-1) + } +} diff --git a/src/cli/cmd/backup.go b/src/cli/cmd/backup.go new file mode 100644 index 00000000..fa6b07dd --- /dev/null +++ b/src/cli/cmd/backup.go @@ -0,0 +1,92 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package cmd + +import ( + "encoding/json" + "fmt" + "xbase" + + "github.com/spf13/cobra" + streamer "github.com/xelabs/go-mydumper/src/common" +) + +var ( + database = "" + radonPort = 3306 + backupEngine = "tokudb" +) + +func NewBackupCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "backup", + Short: "rebuild the backup datas", + } + cmd.AddCommand(NewBackupRebuildCommand()) + return cmd +} + +func NewBackupRebuildCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "rebuild", + Example: "rebuild --database=DB", + Run: backupRebuildCommand, + } + cmd.PersistentFlags().IntVar(&radonPort, "radon-port", 3306, "--radon-port=[port]") + cmd.PersistentFlags().StringVar(&backupEngine, "backup-engine", "tokudb", "--backup-engine=[engine]") + cmd.PersistentFlags().StringVar(&database, "database", "", "--database=[db]") + return cmd +} + +func backupRebuildCommand(cmd *cobra.Command, args []string) { + if database == "" { + log.Panicf("database.cant.be.null") + } + + // First to stop the relay. 
+ url := "http://127.0.0.1:8080/v1/relay/stop" + setRelay(url) + log.Info("backup.rebuild.stop.the.relay...") + + // Get the backup address/user/pwd. + type backupConfig struct { + Address string `json:"address"` + User string `json:"user"` + Password string `json:"password"` + } + url = "http://127.0.0.1:8080/v1/radon/backupconfig" + body, err := xbase.HTTPGet(url) + if err != nil { + log.Panic("backup.rebuild.get.backup.config.error:%v", err) + } + log.Info("get.the.backup.config:%v", body) + + backConf := &backupConfig{} + err = json.Unmarshal([]byte(body), backConf) + if err != nil { + log.Panic("backup.rebuild.unmarshal.config[%s].error:%v", body, err) + } + + streamArgs := &streamer.Args{ + User: "root", + Password: "", + Address: fmt.Sprintf("127.0.0.1:%d", radonPort), + ToUser: backConf.User, + ToPassword: backConf.Password, + ToAddress: backConf.Address, + ToEngine: backupEngine, + Database: database, + ToDatabase: database, + Threads: 32, + StmtSize: 1000000, + IntervalMs: 10 * 1000, + OverwriteTables: true, + } + streamer.Streamer(log, streamArgs) +} diff --git a/src/cli/cmd/debug.go b/src/cli/cmd/debug.go new file mode 100644 index 00000000..b57c2874 --- /dev/null +++ b/src/cli/cmd/debug.go @@ -0,0 +1,81 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. 
+ *
+ */
+
+package cmd
+
+import (
+	"fmt"
+	"xbase"
+
+	"github.com/spf13/cobra"
+)
+
+func NewDebugCommand() *cobra.Command {
+	cmd := &cobra.Command{
+		Use:   "debug",
+		Short: "show radon config, including configz/backendz/schemaz",
+	}
+	cmd.AddCommand(NewDebugConfigzCommand())
+	cmd.AddCommand(NewDebugBackendzCommand())
+	cmd.AddCommand(NewDebugSchemazCommand())
+	return cmd
+}
+
+func NewDebugConfigzCommand() *cobra.Command {
+	cmd := &cobra.Command{
+		Use:   "configz",
+		Short: "show radon configz",
+		Run:   debugConfigzCommand,
+	}
+	return cmd
+}
+
+func debugConfigzCommand(cmd *cobra.Command, args []string) {
+	configzUrl := "http://127.0.0.1:8080/v1/debug/configz"
+	resp, err := xbase.HTTPGet(configzUrl)
+	if err != nil {
+		log.Panicf("error:%+v", err)
+	}
+	// Print, not Printf: resp is not a format string and may contain '%'.
+	fmt.Print(resp)
+}
+
+func NewDebugBackendzCommand() *cobra.Command {
+	cmd := &cobra.Command{
+		Use:   "backendz",
+		Short: "show radon backendz",
+		Run:   debugBackendzCommand,
+	}
+	return cmd
+}
+
+func debugBackendzCommand(cmd *cobra.Command, args []string) {
+	backendzUrl := "http://127.0.0.1:8080/v1/debug/backendz"
+	resp, err := xbase.HTTPGet(backendzUrl)
+	if err != nil {
+		log.Panicf("error:%+v", err)
+	}
+	// Print, not Printf: resp is not a format string and may contain '%'.
+	fmt.Print(resp)
+}
+
+func NewDebugSchemazCommand() *cobra.Command {
+	cmd := &cobra.Command{
+		Use:   "schemaz",
+		Short: "show radon schemaz",
+		Run:   debugSchemazCommand,
+	}
+	return cmd
+}
+
+func debugSchemazCommand(cmd *cobra.Command, args []string) {
+	schemazUrl := "http://127.0.0.1:8080/v1/debug/schemaz"
+	resp, err := xbase.HTTPGet(schemazUrl)
+	if err != nil {
+		log.Panicf("error:%+v", err)
+	}
+	// Print, not Printf: resp is not a format string and may contain '%'.
+	fmt.Print(resp)
+}
diff --git a/src/cli/cmd/debug_test.go b/src/cli/cmd/debug_test.go
new file mode 100644
index 00000000..348afc60
--- /dev/null
+++ b/src/cli/cmd/debug_test.go
+/*
+ * Radon
+ *
+ * Copyright 2018 The Radon Authors.
+ * Code is licensed under the GPLv3.
+ * + */ + +package cmd + +import ( + "ctl" + "proxy" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/xelabs/go-mysqlstack/driver" + "github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes" + "github.com/xelabs/go-mysqlstack/xlog" +) + +func TestCmdDebugConfigz(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.INFO)) + _, proxy, cleanup := proxy.MockProxy(log) + defer cleanup() + + admin := ctl.NewAdmin(log, proxy) + admin.Start() + defer admin.Stop() + time.Sleep(100) + + { + cmd := NewDebugCommand() + cmd.Flags().String("configz", "", "") + debugConfigzCommand(cmd, nil) + } +} + +func TestCmdDebugBackendz(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.INFO)) + _, proxy, cleanup := proxy.MockProxy(log) + defer cleanup() + + admin := ctl.NewAdmin(log, proxy) + admin.Start() + defer admin.Stop() + time.Sleep(100) + + { + cmd := NewDebugCommand() + cmd.Flags().String("backendz", "", "") + debugBackendzCommand(cmd, nil) + } +} + +func TestCmdDebugSchemaz(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.INFO)) + fakedbs, proxy, cleanup := proxy.MockProxy(log) + defer cleanup() + address := proxy.Address() + + admin := ctl.NewAdmin(log, proxy) + admin.Start() + defer admin.Stop() + time.Sleep(100) + + // fakedbs. + { + fakedbs.AddQueryPattern("create table .*", &sqltypes.Result{}) + } + + // create test table. + { + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "create table test.t1(id int, b int) partition by hash(id)" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } + + { + cmd := NewDebugCommand() + cmd.Flags().String("schemaz", "", "") + debugSchemazCommand(cmd, nil) + } +} diff --git a/src/cli/cmd/flags.go b/src/cli/cmd/flags.go new file mode 100644 index 00000000..ef4d34b5 --- /dev/null +++ b/src/cli/cmd/flags.go @@ -0,0 +1,37 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. 
+ * + */ + +package cmd + +import ( + "bytes" + + "github.com/spf13/cobra" + "github.com/xelabs/go-mysqlstack/xlog" +) + +var ( + log = xlog.NewStdLog(xlog.Level(xlog.INFO)) + localFlags = LocalFlags{} +) + +// LocalFlags are flags that defined for local. +type LocalFlags struct { + gtid int64 + maxWorkers int + parallelType int +} + +func executeCommand(root *cobra.Command, args ...string) (output string, err error) { + buf := new(bytes.Buffer) + root.SetOutput(buf) + root.SetArgs(args) + + _, err = root.ExecuteC() + return buf.String(), err +} diff --git a/src/cli/cmd/readonly.go b/src/cli/cmd/readonly.go new file mode 100644 index 00000000..00e45edf --- /dev/null +++ b/src/cli/cmd/readonly.go @@ -0,0 +1,76 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package cmd + +import ( + "net/http" + "xbase" + + "github.com/spf13/cobra" +) + +func NewReadonlyCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "readonly", + Short: "disable/enable radon to readonly", + } + cmd.AddCommand(NewReadonlyEnableCommand()) + cmd.AddCommand(NewReadonlyDisableCommand()) + return cmd +} + +func setReadonly(url string, readonly bool) { + type request struct { + ReadOnly bool `json:"readonly"` + } + + req := &request{ + ReadOnly: readonly, + } + resp, cleanup, err := xbase.HTTPPut(url, &req) + defer cleanup() + + if err != nil { + log.Panicf("error:%+v", err) + } + + if resp == nil || resp.StatusCode != http.StatusOK { + log.Panicf("radoncli.set.readonly.to.[%v].url[%s].response.error:%+s", readonly, url, xbase.HTTPReadBody(resp)) + } +} + +// enable readonly. +func NewReadonlyEnableCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "enable", + Short: "enable radon to readonly", + Run: readonlyEnableCommand, + } + return cmd +} + +func readonlyEnableCommand(cmd *cobra.Command, args []string) { + readonlyUrl := "http://127.0.0.1:8080/v1/radon/readonly" + setReadonly(readonlyUrl, true) +} + +// disable readonly. 
+func NewReadonlyDisableCommand() *cobra.Command {
+	cmd := &cobra.Command{
+		Use:   "disable",
+		Short: "disable radon readonly",
+		Run:   readonlyDisableCommand,
+	}
+	return cmd
+}
+
+func readonlyDisableCommand(cmd *cobra.Command, args []string) {
+	readonlyUrl := "http://127.0.0.1:8080/v1/radon/readonly"
+	setReadonly(readonlyUrl, false)
+}
diff --git a/src/cli/cmd/readonly_test.go b/src/cli/cmd/readonly_test.go
new file mode 100644
index 00000000..871527c7
--- /dev/null
+++ b/src/cli/cmd/readonly_test.go
@@ -0,0 +1,43 @@
+/*
+ * Radon
+ *
+ * Copyright 2018 The Radon Authors.
+ * Code is licensed under the GPLv3.
+ *
+ */
+
+package cmd
+
+import (
+	"ctl"
+	"proxy"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestCmdReadOnly(t *testing.T) {
+	_, proxy, cleanup := proxy.MockProxy(log)
+	defer cleanup()
+
+	admin := ctl.NewAdmin(log, proxy)
+	admin.Start()
+	defer admin.Stop()
+	time.Sleep(100 * time.Millisecond) // was Sleep(100): a bare 100 is 100ns, too short for the admin server to start
+
+	// enable.
+	{
+		cmd := NewReadonlyCommand()
+		_, err := executeCommand(cmd, "enable")
+		assert.Nil(t, err)
+	}
+
+	// disable.
+	{
+		cmd := NewReadonlyCommand()
+		_, err := executeCommand(cmd, "disable")
+		assert.Nil(t, err)
+	}
+
+}
diff --git a/src/cli/cmd/relay.go b/src/cli/cmd/relay.go
new file mode 100644
index 00000000..9f58705f
--- /dev/null
+++ b/src/cli/cmd/relay.go
@@ -0,0 +1,244 @@
+/*
+ * Radon
+ *
+ * Copyright 2018 The Radon Authors.
+ * Code is licensed under the GPLv3.
+ *
+ */
+
+package cmd
+
+import (
+	"fmt"
+	"net/http"
+	"time"
+	"xbase"
+
+	"github.com/spf13/cobra"
+)
+
+func NewRelayCommand() *cobra.Command {
+	cmd := &cobra.Command{
+		Use:   "relay",
+		Short: "show/enable/disable relay worker",
+	}
+	cmd.AddCommand(NewRelayStatusCommand())
+	cmd.AddCommand(NewRelayInfosCommand())
+	cmd.AddCommand(NewRelayStartCommand())
+	cmd.AddCommand(NewRelayStopCommand())
+	cmd.AddCommand(NewRelayParallelTypeCommand())
+	cmd.AddCommand(NewRelayResetCommand())
+	cmd.AddCommand(NewRelayResetToNowCommand())
+	cmd.AddCommand(NewRelayMaxWorkersCommand())
+	cmd.AddCommand(NewRelayNowCommand())
+	return cmd
+}
+
+func NewRelayStatusCommand() *cobra.Command {
+	cmd := &cobra.Command{
+		Use:   "status",
+		Short: "show relay status",
+		Run:   relayStatusCommand,
+	}
+	return cmd
+}
+
+func relayStatusCommand(cmd *cobra.Command, args []string) {
+	relayUrl := "http://127.0.0.1:8080/v1/relay/status"
+	resp, err := xbase.HTTPGet(relayUrl)
+	if err != nil {
+		log.Panicf("error:%+v", err)
+	}
+	fmt.Print(resp) // Print, not Printf: resp is data; any '%' in it would be misparsed as a format verb
+}
+
+func NewRelayInfosCommand() *cobra.Command {
+	cmd := &cobra.Command{
+		Use:   "infos",
+		Short: "show relay all infos",
+		Run:   relayInfosCommand,
+	}
+	return cmd
+}
+
+func relayInfosCommand(cmd *cobra.Command, args []string) {
+	relayUrl := "http://127.0.0.1:8080/v1/relay/infos"
+	resp, err := xbase.HTTPGet(relayUrl)
+	if err != nil {
+		log.Panicf("error:%+v", err)
+	}
+	fmt.Print(resp) // Print, not Printf: resp is data, not a format string
+}
+
+func setRelay(url string) {
+	resp, cleanup, err := xbase.HTTPPut(url, nil)
+	defer cleanup() // NOTE(review): deferred before the err check — assumes cleanup is non-nil even on error; confirm xbase.HTTPPut's contract
+
+	if err != nil {
+		log.Panicf("error:%+v", err)
+	}
+
+	if resp == nil || resp.StatusCode != http.StatusOK {
+		log.Panicf("radoncli.set.relay.url[%s].response.error:%+s", url, xbase.HTTPReadBody(resp))
+	}
+}
+
+func NewRelayStartCommand() *cobra.Command {
+	cmd := &cobra.Command{
+		Use:   "start",
+		Short: "start the relay worker",
+		Run:   relayStartCommand,
+	}
+	return cmd
+}
+
+func relayStartCommand(cmd *cobra.Command, args []string) {
+	relayUrl := 
"http://127.0.0.1:8080/v1/relay/start" + setRelay(relayUrl) +} + +func NewRelayStopCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "stop", + Short: "stop the relay worker", + Run: relayStopCommand, + } + return cmd +} + +func relayStopCommand(cmd *cobra.Command, args []string) { + relayUrl := "http://127.0.0.1:8080/v1/relay/stop" + setRelay(relayUrl) +} + +func setParallelType(url string, t int32) { + type request struct { + Type int32 `json:"type"` + } + + req := &request{ + Type: t, + } + resp, cleanup, err := xbase.HTTPPut(url, &req) + defer cleanup() + + if err != nil { + log.Panicf("error:%+v", err) + } + + if resp == nil || resp.StatusCode != http.StatusOK { + log.Panicf("radoncli.set.parallel.type.to.[%v].url[%s].response.error:%+s", t, url, xbase.HTTPReadBody(resp)) + } +} + +func NewRelayParallelTypeCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "paralleltype", + Short: "parallel type, 0:turn off parallel relay, 1:same events type can parallel(default), 2:all events type can parallel", + Run: relayParallelTypeCommand, + } + cmd.Flags().IntVar(&localFlags.parallelType, "type", 1, "") + return cmd +} + +func relayParallelTypeCommand(cmd *cobra.Command, args []string) { + url := "http://127.0.0.1:8080/v1/relay/paralleltype" + setParallelType(url, int32(localFlags.parallelType)) +} + +func relayResetGTID(gtid int64) { + if gtid < 1514254947594569594 { + log.Panicf("gtid[%v].less.than[1514254947594569594].should.be.UTC().UnixNano()", gtid) + } + + relayUrl := "http://127.0.0.1:8080/v1/relay/reset" + type request struct { + GTID int64 `json:"gtid"` + } + + req := &request{ + GTID: gtid, + } + resp, cleanup, err := xbase.HTTPPost(relayUrl, &req) + defer cleanup() + + if err != nil { + log.Panicf("error:%+v", err) + } + + if resp == nil || resp.StatusCode != http.StatusOK { + log.Panicf("radoncli.set.relay.to.[%v].url[%s].response.error:%+s", req, relayUrl, xbase.HTTPReadBody(resp)) + } + log.Info("reset.relay.gtid.to[%v]", gtid) +} + +func 
NewRelayResetCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "reset", + Short: "reset the relay worker GTID", + Run: relayResetCommand, + } + cmd.Flags().Int64Var(&localFlags.gtid, "gtid", 0, "--gtid=[timestamp(UTC().UnixNano())]") + return cmd +} + +func relayResetCommand(cmd *cobra.Command, args []string) { + relayResetGTID(localFlags.gtid) +} + +func NewRelayResetToNowCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "resettonow", + Short: "reset the relay worker GTID to time.NOW().UTC().UnixNano()", + Run: relayResetToNowCommand, + } + return cmd +} + +func relayResetToNowCommand(cmd *cobra.Command, args []string) { + relayResetGTID(time.Now().UTC().UnixNano()) +} + +func NewRelayMaxWorkersCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "workers", + Short: "Set the max relay parallel workers", + Run: relayMaxWorkersCommand, + } + cmd.Flags().IntVar(&localFlags.maxWorkers, "max", 0, "--max=[1, 1024]") + return cmd +} + +func relayMaxWorkersCommand(cmd *cobra.Command, args []string) { + relayUrl := "http://127.0.0.1:8080/v1/relay/workers" + type request struct { + Workers int `json:"workers"` + } + + req := &request{ + Workers: localFlags.maxWorkers, + } + resp, cleanup, err := xbase.HTTPPost(relayUrl, &req) + defer cleanup() + + if err != nil { + log.Panicf("error:%+v", err) + } + + if resp == nil || resp.StatusCode != http.StatusOK { + log.Panicf("radoncli.relay.set.max.parallel.worker.to.[%v].url[%s].response.error:%+s", req, relayUrl, xbase.HTTPReadBody(resp)) + } +} + +func NewRelayNowCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "now", + Short: "returns the Now().UTC().UnixNano()", + Run: relayNowCommand, + } + return cmd +} + +func relayNowCommand(cmd *cobra.Command, args []string) { + log.Info("Now().UTC().UnixNano():%v", time.Now().UTC().UnixNano()) +} diff --git a/src/cli/cmd/relay_test.go b/src/cli/cmd/relay_test.go new file mode 100644 index 00000000..47e906bd --- /dev/null +++ b/src/cli/cmd/relay_test.go @@ 
-0,0 +1,159 @@
+/*
+ * Radon
+ *
+ * Copyright 2018 The Radon Authors.
+ * Code is licensed under the GPLv3.
+ *
+ */
+
+package cmd
+
+import (
+	"ctl"
+	"fmt"
+	"proxy"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestCmdRelayStatus(t *testing.T) {
+	_, proxy, cleanup := proxy.MockProxyWithBackup(log)
+	defer cleanup()
+
+	admin := ctl.NewAdmin(log, proxy)
+	admin.Start()
+	defer admin.Stop()
+	time.Sleep(200 * time.Millisecond) // was Sleep(200): a bare 200 is 200ns, too short for the admin server to start
+
+	{
+		cmd := NewRelayCommand()
+		_, err := executeCommand(cmd, "status")
+		assert.Nil(t, err)
+	}
+}
+
+func TestCmdRelayInfos(t *testing.T) {
+	_, proxy, cleanup := proxy.MockProxyWithBackup(log)
+	defer cleanup()
+
+	admin := ctl.NewAdmin(log, proxy)
+	admin.Start()
+	defer admin.Stop()
+	time.Sleep(200 * time.Millisecond)
+
+	{
+		cmd := NewRelayCommand()
+		_, err := executeCommand(cmd, "infos")
+		assert.Nil(t, err)
+	}
+}
+
+func TestCmdRelayStart(t *testing.T) {
+	_, proxy, cleanup := proxy.MockProxyWithBackup(log)
+	defer cleanup()
+
+	admin := ctl.NewAdmin(log, proxy)
+	admin.Start()
+	defer admin.Stop()
+	time.Sleep(200 * time.Millisecond)
+
+	{
+		cmd := NewRelayCommand()
+		_, err := executeCommand(cmd, "start")
+		assert.Nil(t, err)
+	}
+}
+
+func TestCmdRelayStop(t *testing.T) {
+	_, proxy, cleanup := proxy.MockProxyWithBackup(log)
+	defer cleanup()
+
+	admin := ctl.NewAdmin(log, proxy)
+	admin.Start()
+	defer admin.Stop()
+	time.Sleep(200 * time.Millisecond)
+
+	{
+		cmd := NewRelayCommand()
+		_, err := executeCommand(cmd, "stop")
+		assert.Nil(t, err)
+	}
+}
+
+func TestCmdRelayParallelType(t *testing.T) {
+	_, proxy, cleanup := proxy.MockProxyWithBackup(log)
+	defer cleanup()
+
+	admin := ctl.NewAdmin(log, proxy)
+	admin.Start()
+	defer admin.Stop()
+	time.Sleep(200 * time.Millisecond)
+
+	{
+		cmd := NewRelayCommand()
+		for i := 0; i < 50; i++ {
+			_, err := executeCommand(cmd, "paralleltype", "--type", fmt.Sprintf("%d", (i%5)))
+			assert.Nil(t, err)
+		}
+	}
+}
+
+func TestCmdRelayReset(t *testing.T) {
+	_, proxy, cleanup := proxy.MockProxyWithBackup(log)
+	defer cleanup()
+
+	admin := ctl.NewAdmin(log, proxy)
+	admin.Start()
+	defer admin.Stop()
+	time.Sleep(200 * time.Millisecond)
+
+	{
+		cmd := NewRelayCommand()
+		_, err := executeCommand(cmd, "stop")
+		assert.Nil(t, err)
+		_, err = executeCommand(cmd, "reset", "--gtid", "1514254947594569595")
+		assert.Nil(t, err)
+	}
+}
+
+func TestCmdRelayResetToNow(t *testing.T) {
+	_, proxy, cleanup := proxy.MockProxyWithBackup(log)
+	defer cleanup()
+
+	admin := ctl.NewAdmin(log, proxy)
+	admin.Start()
+	defer admin.Stop()
+	time.Sleep(200 * time.Millisecond)
+
+	{
+		cmd := NewRelayCommand()
+		_, err := executeCommand(cmd, "stop")
+		assert.Nil(t, err)
+		_, err = executeCommand(cmd, "resettonow")
+		assert.Nil(t, err)
+	}
+}
+
+func TestCmdRelayMaxWorkers(t *testing.T) {
+	_, proxy, cleanup := proxy.MockProxyWithBackup(log)
+	defer cleanup()
+
+	admin := ctl.NewAdmin(log, proxy)
+	admin.Start()
+	defer admin.Stop()
+	time.Sleep(200 * time.Millisecond)
+
+	{
+		cmd := NewRelayCommand()
+		_, err := executeCommand(cmd, "workers", "--max", "111")
+		assert.Nil(t, err)
+	}
+	time.Sleep(200 * time.Millisecond)
+	{
+		cmd := NewRelayCommand()
+		_, err := executeCommand(cmd, "workers", "--max", "1")
+		assert.Nil(t, err)
+	}
+}
diff --git a/src/cli/cmd/twopc.go b/src/cli/cmd/twopc.go
new file mode 100644
index 00000000..1cecc1a8
--- /dev/null
+++ b/src/cli/cmd/twopc.go
@@ -0,0 +1,76 @@
+/*
+ * Radon
+ *
+ * Copyright 2018 The Radon Authors.
+ * Code is licensed under the GPLv3.
+ * + */ + +package cmd + +import ( + "net/http" + "xbase" + + "github.com/spf13/cobra" +) + +func NewTwopcCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "twopc", + Short: "disable/enable radon to twopc", + } + cmd.AddCommand(NewTwopcEnableCommand()) + cmd.AddCommand(NewTwopcDisableCommand()) + return cmd +} + +func setTwopc(url string, twopc bool) { + type request struct { + Twopc bool `json:"twopc"` + } + + req := &request{ + Twopc: twopc, + } + resp, cleanup, err := xbase.HTTPPut(url, &req) + defer cleanup() + + if err != nil { + log.Panicf("error:%+v", err) + } + + if resp == nil || resp.StatusCode != http.StatusOK { + log.Panicf("radoncli.set.twopc.to.[%v].url[%s].response.error:%+s", twopc, url, xbase.HTTPReadBody(resp)) + } +} + +// enable twopc. +func NewTwopcEnableCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "enable", + Short: "enable radon to twopc", + Run: twopcEnableCommand, + } + return cmd +} + +func twopcEnableCommand(cmd *cobra.Command, args []string) { + twopcUrl := "http://127.0.0.1:8080/v1/radon/twopc" + setTwopc(twopcUrl, true) +} + +// disable twopc. +func NewTwopcDisableCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "disable", + Short: "disable radon twopc", + Run: twopcDisableCommand, + } + return cmd +} + +func twopcDisableCommand(cmd *cobra.Command, args []string) { + twopcUrl := "http://127.0.0.1:8080/v1/radon/twopc" + setTwopc(twopcUrl, false) +} diff --git a/src/cli/cmd/twopc_test.go b/src/cli/cmd/twopc_test.go new file mode 100644 index 00000000..d5e7df34 --- /dev/null +++ b/src/cli/cmd/twopc_test.go @@ -0,0 +1,41 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. 
+ *
+ */
+
+package cmd
+
+import (
+	"ctl"
+	"proxy"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestCmdTwopc(t *testing.T) {
+	_, proxy, cleanup := proxy.MockProxy(log)
+	defer cleanup()
+
+	admin := ctl.NewAdmin(log, proxy)
+	admin.Start()
+	defer admin.Stop()
+	time.Sleep(100 * time.Millisecond) // was Sleep(100): a bare 100 is 100ns, too short for the admin server to start
+
+	// enable.
+	{
+		cmd := NewTwopcCommand()
+		_, err := executeCommand(cmd, "enable")
+		assert.Nil(t, err)
+	}
+	// disable.
+	{
+		cmd := NewTwopcCommand()
+		_, err := executeCommand(cmd, "disable")
+		assert.Nil(t, err)
+	}
+}
diff --git a/src/cli/cmd/version.go b/src/cli/cmd/version.go
new file mode 100644
index 00000000..3ad09d96
--- /dev/null
+++ b/src/cli/cmd/version.go
@@ -0,0 +1,31 @@
+/*
+ * Radon
+ *
+ * Copyright 2018 The Radon Authors.
+ * Code is licensed under the GPLv3.
+ *
+ */
+
+package cmd
+
+import (
+	"build"
+	"fmt"
+
+	"github.com/spf13/cobra"
+)
+
+func NewVersionCommand() *cobra.Command {
+	cmd := &cobra.Command{
+		Use:   "version",
+		Short: "Print the version number of radon client",
+		Run:   versionCommandFn,
+	}
+
+	return cmd
+}
+
+func versionCommandFn(cmd *cobra.Command, args []string) {
+	build := build.GetInfo()
+	fmt.Printf("radoncli:[%+v]\n", build)
+}
diff --git a/src/config/config.go b/src/config/config.go
new file mode 100644
index 00000000..1066603b
--- /dev/null
+++ b/src/config/config.go
@@ -0,0 +1,280 @@
+/*
+ * Radon
+ *
+ * Copyright 2018 The Radon Authors.
+ * Code is licensed under the GPLv3.
+ *
+ */
+
+package config
+
+import (
+	"encoding/json"
+	"io/ioutil"
+	"xbase"
+
+	"github.com/pkg/errors"
+)
+
+// ProxyConfig tuple.
+type ProxyConfig struct { + IPS []string `json:"allowip,omitempty"` + MetaDir string `json:"meta-dir"` + Endpoint string `json:"endpoint"` + TwopcEnable bool `json:"twopc-enable"` + + MaxConnections int `json:"max-connections"` + MaxResultSize int `json:"max-result-size"` + DDLTimeout int `json:"ddl-timeout"` + QueryTimeout int `json:"query-timeout"` + PeerAddress string `json:"peer-address,omitempty"` + BackupDefaultEngine string `json:"backup-default-engine"` +} + +// DefaultProxyConfig returns default proxy config. +func DefaultProxyConfig() *ProxyConfig { + return &ProxyConfig{ + MetaDir: "./radonmeta", + Endpoint: "127.0.0.1:3306", + MaxConnections: 1024, + MaxResultSize: 1024 * 1024 * 1024, // 1GB + DDLTimeout: 10 * 3600 * 1000, // 10hours + QueryTimeout: 5 * 60 * 1000, // 5minutes + PeerAddress: "127.0.0.1:8080", + BackupDefaultEngine: "TokuDB", // Default MySQL storage engine for backup. + } +} + +// UnmarshalJSON interface on ProxyConfig. +func (c *ProxyConfig) UnmarshalJSON(b []byte) error { + type confAlias *ProxyConfig + conf := confAlias(DefaultProxyConfig()) + if err := json.Unmarshal(b, conf); err != nil { + return err + } + *c = ProxyConfig(*conf) + return nil +} + +// AuditConfig tuple. +type AuditConfig struct { + Mode string `json:"mode"` + LogDir string `json:"audit-dir"` + MaxSize int `json:"max-size"` + ExpireHours int `json:"expire-hours"` +} + +// DefaultAuditConfig returns default audit config. +func DefaultAuditConfig() *AuditConfig { + return &AuditConfig{ + Mode: "N", + LogDir: "/tmp/auditlog", + MaxSize: 1024 * 1024 * 256, // 256MB + ExpireHours: 1, // 1hours + } +} + +// UnmarshalJSON interface on AuditConfig. +func (c *AuditConfig) UnmarshalJSON(b []byte) error { + type confAlias *AuditConfig + conf := confAlias(DefaultAuditConfig()) + if err := json.Unmarshal(b, conf); err != nil { + return err + } + *c = AuditConfig(*conf) + return nil +} + +// BinlogConfig tuple. 
+type BinlogConfig struct { + LogDir string `json:"binlog-dir"` + MaxSize int `json:"max-size"` + RelayWorkers int `json:"relay-workers"` + RelayWaitMs int `json:"relay-wait-ms"` + EnableBinlog bool `json:"enable-binlog"` + EnableRelay bool `json:"enable-relay"` + // type=0, turn off the parallel. + // type=1, same events type can parallel(default). + // type=2, all events type can parallel. + ParallelType int `json:"parallel-type"` +} + +// DefaultBinlogConfig returns default binlog config. +func DefaultBinlogConfig() *BinlogConfig { + return &BinlogConfig{ + LogDir: "/tmp/binlog", + MaxSize: 1024 * 1024 * 128, // 128MB + RelayWorkers: 32, + RelayWaitMs: 5000, + ParallelType: 1, + } +} + +// UnmarshalJSON interface on BinlogConfig. +func (c *BinlogConfig) UnmarshalJSON(b []byte) error { + type confAlias *BinlogConfig + conf := confAlias(DefaultBinlogConfig()) + if err := json.Unmarshal(b, conf); err != nil { + return err + } + *c = BinlogConfig(*conf) + return nil +} + +// LogConfig tuple. +type LogConfig struct { + Level string `json:"level"` +} + +// DefaultLogConfig returns default log config. +func DefaultLogConfig() *LogConfig { + return &LogConfig{ + Level: "ERROR", + } +} + +// UnmarshalJSON interface on LogConfig. +func (c *LogConfig) UnmarshalJSON(b []byte) error { + type confAlias *LogConfig + conf := confAlias(DefaultLogConfig()) + if err := json.Unmarshal(b, conf); err != nil { + return err + } + *c = LogConfig(*conf) + return nil +} + +// BackendConfig tuple. +type BackendConfig struct { + Name string `json:"name"` + Address string `json:"address"` + User string `json:"user"` + Password string `json:"password"` + DBName string `json:"database"` + Charset string `json:"charset"` + MaxConnections int `json:"max-connections"` +} + +// BackendsConfig tuple. +type BackendsConfig struct { + Backup *BackendConfig `json:"backup"` + Backends []*BackendConfig `json:"backends"` +} + +// PartitionConfig tuple. 
+type PartitionConfig struct { + Table string `json:"table"` + Segment string `json:"segment"` + Backend string `json:"backend"` +} + +// TableConfig tuple. +type TableConfig struct { + Name string `json:"name"` + ShardType string `json:"shardtype"` + ShardKey string `json:"shardkey"` + Partitions []*PartitionConfig `json:"partitions"` +} + +// SchemaConfig tuple. +type SchemaConfig struct { + DB string `json:"database"` + Tables []*TableConfig `json:"tables"` +} + +// RouterConfig tuple. +type RouterConfig struct { + Slots int `json:"slots-readonly"` + Blocks int `json:"blocks-readonly"` +} + +// DefaultRouterConfig returns the default router config. +func DefaultRouterConfig() *RouterConfig { + return &RouterConfig{ + Slots: 4096, + Blocks: 128, + } +} + +// UnmarshalJSON interface on RouterConfig. +func (c *RouterConfig) UnmarshalJSON(b []byte) error { + type confAlias *RouterConfig + conf := confAlias(DefaultRouterConfig()) + if err := json.Unmarshal(b, conf); err != nil { + return err + } + *c = RouterConfig(*conf) + return nil +} + +// Config tuple. +type Config struct { + Proxy *ProxyConfig `json:"proxy"` + Audit *AuditConfig `json:"audit"` + Router *RouterConfig `json:"router"` + Binlog *BinlogConfig `json:"binlog"` + Log *LogConfig `json:"log"` +} + +func checkConfig(conf *Config) { + if conf.Proxy == nil { + conf.Proxy = DefaultProxyConfig() + } + + if conf.Binlog == nil { + conf.Binlog = DefaultBinlogConfig() + } + + if conf.Audit == nil { + conf.Audit = DefaultAuditConfig() + } + + if conf.Router == nil { + conf.Router = DefaultRouterConfig() + } + + if conf.Log == nil { + conf.Log = DefaultLogConfig() + } +} + +// LoadConfig used to load the config from file. 
+func LoadConfig(path string) (*Config, error) { + data, err := ioutil.ReadFile(path) + if err != nil { + return nil, errors.WithStack(err) + } + conf := &Config{} + if err := json.Unmarshal([]byte(data), conf); err != nil { + return nil, errors.WithStack(err) + } + checkConfig(conf) + return conf, nil +} + +// ReadTableConfig used to read the table config from the data. +func ReadTableConfig(data string) (*TableConfig, error) { + conf := &TableConfig{} + if err := json.Unmarshal([]byte(data), conf); err != nil { + return nil, errors.WithStack(err) + } + return conf, nil +} + +// ReadBackendsConfig used to read the backend config from the data. +func ReadBackendsConfig(data string) (*BackendsConfig, error) { + conf := &BackendsConfig{} + if err := json.Unmarshal([]byte(data), conf); err != nil { + return nil, errors.WithStack(err) + } + return conf, nil +} + +// WriteConfig used to write the conf to file. +func WriteConfig(path string, conf interface{}) error { + b, err := json.MarshalIndent(conf, "", "\t") + if err != nil { + return errors.WithStack(err) + } + return xbase.WriteFile(path, b) +} diff --git a/src/config/config_test.go b/src/config/config_test.go new file mode 100644 index 00000000..b8c20315 --- /dev/null +++ b/src/config/config_test.go @@ -0,0 +1,312 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. 
+ * + */ + +package config + +import ( + "io/ioutil" + _ "log" + "os" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestWriteConfig(t *testing.T) { + conf := &Config{ + Proxy: MockProxyConfig, + Log: MockLogConfig, + Audit: DefaultAuditConfig(), + Binlog: DefaultBinlogConfig(), + Router: DefaultRouterConfig(), + } + + path := "/tmp/radon.test.config.json" + os.Remove(path) + err := WriteConfig(path, conf) + assert.Nil(t, err) + + want, err := LoadConfig(path) + assert.Nil(t, err) + assert.Equal(t, want, conf) + os.Remove(path) +} + +func TestLoadConfig(t *testing.T) { + path := "/tmp/radon.test.config.json" + { + _, err := LoadConfig(path) + assert.NotNil(t, err) + } + + { + mockProxyConfig := &ProxyConfig{ + TwopcEnable: true, + Endpoint: ":5566", + MaxConnections: 1024, + MetaDir: "/tmp/radonmeta", + PeerAddress: ":8080", + BackupDefaultEngine: "TokuDB", + } + conf := &Config{ + Proxy: mockProxyConfig, + Audit: DefaultAuditConfig(), + Router: DefaultRouterConfig(), + Binlog: DefaultBinlogConfig(), + Log: MockLogConfig, + } + + path := "/tmp/radon.test.config.json" + err := WriteConfig(path, conf) + assert.Nil(t, err) + want, err := LoadConfig(path) + assert.Nil(t, err) + assert.Equal(t, want, conf) + } + + { + mockProxyConfig := &ProxyConfig{ + Endpoint: ":5566", + MaxConnections: 1024, + MetaDir: "/tmp/radonmeta", + PeerAddress: ":8080", + BackupDefaultEngine: "TokuDB", + } + + conf := &Config{ + Proxy: mockProxyConfig, + Log: MockLogConfig, + } + path := "/tmp/radon.test.config.json" + err := WriteConfig(path, conf) + assert.Nil(t, err) + { + want := &Config{ + Proxy: MockProxyConfig, + Log: MockLogConfig, + Audit: DefaultAuditConfig(), + Binlog: DefaultBinlogConfig(), + Router: DefaultRouterConfig(), + } + got, err := LoadConfig(path) + assert.Nil(t, err) + assert.Equal(t, want, got) + } + } + + { + want := &Config{ + Proxy: MockProxyConfig, + Log: MockLogConfig, + Audit: DefaultAuditConfig(), + Router: DefaultRouterConfig(), + Binlog: 
DefaultBinlogConfig(), + } + + path := "/tmp/radon.test.config.json" + err := WriteConfig(path, want) + assert.Nil(t, err) + got, err := LoadConfig(path) + assert.Nil(t, err) + assert.Equal(t, want, got) + } +} + +func TestWriteLoadConfig(t *testing.T) { + conf := &Config{ + Proxy: MockProxyConfig, + Log: MockLogConfig, + } + + path := "/tmp/radon.test.config.json" + os.Remove(path) + err := WriteConfig(path, conf) + assert.Nil(t, err) + + { + conf, err := LoadConfig(path) + assert.Nil(t, err) + want := &Config{ + Proxy: MockProxyConfig, + Log: MockLogConfig, + Audit: DefaultAuditConfig(), + Router: DefaultRouterConfig(), + Binlog: DefaultBinlogConfig(), + } + got := conf + assert.Equal(t, want, got) + } +} + +func TestReadBackendsConfig(t *testing.T) { + data := `{ + "backends": [ + { + "name": "backend1", + "address": "127.0.0.1:3304", + "user": "root", + "password": "", + "max-connections": 1024 + } + ] +}` + + backend, err := ReadBackendsConfig(data) + assert.Nil(t, err) + want := &BackendsConfig{Backends: MockBackends} + got := backend + assert.Equal(t, want, got) +} + +func TestReadBackendsConfig1(t *testing.T) { + // backup is nil. + { + data := `{ + "backends": [ + { + "name": "backend1", + "address": "127.0.0.1:3304", + "user": "root", + "password": "", + "max-connections": 1024 + } + ] +}` + + backend, err := ReadBackendsConfig(data) + assert.Nil(t, err) + assert.Nil(t, backend.Backup) + } + + // backup is not nil. 
+ { + data := `{ + "backup": + { + "name": "backupnode", + "address": "127.0.0.1:3304", + "user": "root", + "password": "", + "max-connections": 1024 + }, + "backends": [ + { + "name": "backend1", + "address": "127.0.0.1:3304", + "user": "root", + "password": "", + "max-connections": 1024 + } + ] +}` + + backend, err := ReadBackendsConfig(data) + assert.Nil(t, err) + want := MockBackup + got := backend.Backup + assert.Equal(t, want, got) + } +} + +func TestReadTableConfig(t *testing.T) { + data := `{ + "name": "A", + "shardtype": "", + "shardkey": "id", + "partitions": [ + { + "table": "A1", + "segment": "0-2", + "backend": "backend1" + }, + { + "table": "A2", + "segment": "2-4", + "backend": "backend1" + }, + { + "table": "A3", + "segment": "4-8", + "backend": "backend2" + }, + { + "table": "A4", + "segment": "8-16", + "backend": "backend2" + } + ] +}` + + table, err := ReadTableConfig(data) + assert.Nil(t, err) + want := MockTablesConfig[0] + got := table + assert.Equal(t, want, got) +} + +func TestRouterConfigUnmarshalJSON(t *testing.T) { + path := "/tmp/radon.test.config.json" + + // All nil. + { + os.Remove(path) + data := `{}` + err := ioutil.WriteFile(path, []byte(data), 0644) + assert.Nil(t, err) + got, err := LoadConfig(path) + assert.Nil(t, err) + want := &Config{ + Proxy: DefaultProxyConfig(), + Router: DefaultRouterConfig(), + Audit: DefaultAuditConfig(), + Binlog: DefaultBinlogConfig(), + Log: DefaultLogConfig(), + } + assert.Equal(t, want, got) + } + + // Default UnmarshalJSON. 
+ { + os.Remove(path) + data := `{ + "proxy": { + "endpoint": ":5566", + "twopc-enable": false, + "max-connections": 1024 + }, + "audit": { + "mode": "N", + "expire-hours": 1 + }, + "router": { + "blocks-readonly": 128 + }, + "binlog": { + "binlog-dir": "/tmp/binlog" + }, + "log": { + "level": "ERROR" + } +}` + err := ioutil.WriteFile(path, []byte(data), 0644) + assert.Nil(t, err) + got, err := LoadConfig(path) + assert.Nil(t, err) + + proxy := DefaultProxyConfig() + proxy.Endpoint = ":5566" + want := &Config{ + Proxy: proxy, + Router: DefaultRouterConfig(), + Audit: DefaultAuditConfig(), + Binlog: DefaultBinlogConfig(), + Log: DefaultLogConfig(), + } + assert.Equal(t, want, got) + } +} diff --git a/src/config/mock.go b/src/config/mock.go new file mode 100644 index 00000000..5d6b754f --- /dev/null +++ b/src/config/mock.go @@ -0,0 +1,108 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package config + +var ( + // MockSchemaConfig config. + MockSchemaConfig = &SchemaConfig{ + DB: "sbtest", + Tables: MockTablesConfig, + } + + // MockTablesConfig config. + MockTablesConfig = []*TableConfig{ + &TableConfig{ + Name: "A", + ShardKey: "id", + Partitions: MockPartitionAConfig, + }, + &TableConfig{ + Name: "B", + ShardKey: "id", + Partitions: MockPartitionBConfig, + }, + } + + // MockPartitionAConfig config. + MockPartitionAConfig = []*PartitionConfig{ + &PartitionConfig{ + Table: "A1", + Segment: "0-2", + Backend: "backend1", + }, + &PartitionConfig{ + Table: "A2", + Segment: "2-4", + Backend: "backend1", + }, + &PartitionConfig{ + Table: "A3", + Segment: "4-8", + Backend: "backend2", + }, + &PartitionConfig{ + Table: "A4", + Segment: "8-16", + Backend: "backend2", + }, + } + + // MockPartitionBConfig config. 
+ MockPartitionBConfig = []*PartitionConfig{ + &PartitionConfig{ + Table: "B1", + Segment: "0-4", + Backend: "backend2", + }, + &PartitionConfig{ + Table: "B2", + Segment: "4-8", + Backend: "backend1", + }, + &PartitionConfig{ + Table: "B3", + Segment: "8-16", + Backend: "backend2", + }, + } + + // MockBackends config. + MockBackends = []*BackendConfig{ + &BackendConfig{ + Name: "backend1", + Address: "127.0.0.1:3304", + User: "root", + Password: "", + MaxConnections: 1024, + }, + } + + // MockBackup config. + MockBackup = &BackendConfig{ + Name: "backupnode", + Address: "127.0.0.1:3304", + User: "root", + Password: "", + MaxConnections: 1024, + } + + // MockProxyConfig config. + MockProxyConfig = &ProxyConfig{ + Endpoint: ":5566", + MaxConnections: 1024, + MetaDir: "/tmp/radonmeta", + PeerAddress: ":8080", + BackupDefaultEngine: "TokuDB", + } + + // MockLogConfig config. + MockLogConfig = &LogConfig{ + Level: "DEBUG", + } +) diff --git a/src/config/version.go b/src/config/version.go new file mode 100644 index 00000000..7c3e8956 --- /dev/null +++ b/src/config/version.go @@ -0,0 +1,56 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package config + +import ( + "encoding/json" + "io/ioutil" + "path" + "time" + "xbase" + + "github.com/pkg/errors" +) + +const ( + // versionJSONFile version file name. + versionJSONFile = "version.json" +) + +// Version tuple. +type Version struct { + Ts int64 `json:"version"` +} + +// UpdateVersion used to update the config version of the file. +func UpdateVersion(metadir string) error { + name := path.Join(metadir, versionJSONFile) + version := &Version{ + Ts: time.Now().UnixNano(), + } + b, err := json.Marshal(version) + if err != nil { + return errors.WithStack(err) + } + return xbase.WriteFile(name, b) +} + +// ReadVersion used to read the config version from the file. 
+func ReadVersion(metadir string) int64 { + name := path.Join(metadir, versionJSONFile) + version := &Version{} + data, err := ioutil.ReadFile(name) + if err != nil { + return 0 + } + if err := json.Unmarshal([]byte(data), version); err != nil { + return 0 + } + return version.Ts +} diff --git a/src/config/version_test.go b/src/config/version_test.go new file mode 100644 index 00000000..fd0ce8e6 --- /dev/null +++ b/src/config/version_test.go @@ -0,0 +1,49 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package config + +import ( + "os" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestVersion(t *testing.T) { + metadir := "/tmp/" + defer os.RemoveAll("/tmp/version.json") + + // Update version. + { + err := UpdateVersion(metadir) + assert.Nil(t, err) + } + + // Read version. + { + ver := ReadVersion(metadir) + assert.True(t, ver > 1501750907829399355) + } +} + +func TestVersionError(t *testing.T) { + metadir := "/" + + // Update version. + { + err := UpdateVersion(metadir) + assert.NotNil(t, err) + } + + // Read version. + { + ver := ReadVersion(metadir) + assert.Equal(t, int64(0), ver) + } +} diff --git a/src/ctl/admin.go b/src/ctl/admin.go new file mode 100644 index 00000000..a5215db2 --- /dev/null +++ b/src/ctl/admin.go @@ -0,0 +1,65 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package ctl + +import ( + "context" + "log" + "net/http" + _ "net/http/pprof" + "proxy" + + "github.com/ant0ine/go-json-rest/rest" + "github.com/xelabs/go-mysqlstack/xlog" +) + +func init() { + go func() { + log.Println(http.ListenAndServe(":6060", nil)) + }() +} + +type Admin struct { + log *xlog.Log + proxy *proxy.Proxy + server *http.Server +} + +func NewAdmin(log *xlog.Log, proxy *proxy.Proxy) *Admin { + return &Admin{ + log: log, + proxy: proxy, + } +} + +// Start starts http server. 
+func (admin *Admin) Start() { + api := rest.NewApi() + router, err := admin.NewRouter() + if err != nil { + panic(err) + } + + api.SetApp(router) + handlers := api.MakeHandler() + admin.server = &http.Server{Addr: ":8080", Handler: handlers} + go func() { + log := admin.log + log.Info("http.server.start[%v]...", ":8080") + if err := admin.server.ListenAndServe(); err != http.ErrServerClosed { + log.Panic("%v", err) + } + }() +} + +func (admin *Admin) Stop() { + log := admin.log + admin.server.Shutdown(context.Background()) + log.Info("http.server.gracefully.stop") +} diff --git a/src/ctl/router.go b/src/ctl/router.go new file mode 100644 index 00000000..60e20f11 --- /dev/null +++ b/src/ctl/router.go @@ -0,0 +1,75 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package ctl + +import ( + "ctl/v1" + + "github.com/ant0ine/go-json-rest/rest" +) + +func (admin *Admin) NewRouter() (rest.App, error) { + log := admin.log + proxy := admin.proxy + + return rest.MakeRouter( + // radon + rest.Post("/v1/radon/explain", v1.ExplainHandler(log, proxy)), + rest.Put("/v1/radon/config", v1.RadonConfigHandler(log, proxy)), + rest.Get("/v1/radon/ping", v1.PingHandler(log, proxy)), + rest.Put("/v1/radon/readonly", v1.ReadonlyHandler(log, proxy)), + rest.Put("/v1/radon/twopc", v1.TwopcHandler(log, proxy)), + rest.Put("/v1/radon/throttle", v1.ThrottleHandler(log, proxy)), + rest.Post("/v1/radon/backend", v1.AddBackendHandler(log, proxy)), + rest.Delete("/v1/radon/backend/:name", v1.RemoveBackendHandler(log, proxy)), + rest.Post("/v1/radon/backup", v1.AddBackupHandler(log, proxy)), + rest.Get("/v1/radon/backupconfig", v1.BackupConfigHandler(log, proxy)), + rest.Get("/v1/radon/restapiaddress", v1.RestApiAddressHandler(log, proxy)), + rest.Delete("/v1/radon/backup/:name", v1.RemoveBackupHandler(log, proxy)), + rest.Get("/v1/radon/status", v1.StatusHandler(log, proxy)), + + // user + rest.Post("/v1/user/add", 
v1.CreateUserHandler(log, proxy)), + rest.Post("/v1/user/update", v1.AlterUserHandler(log, proxy)), + rest.Post("/v1/user/remove", v1.DropUserHandler(log, proxy)), + + // shard + rest.Get("/v1/shard/shardz", v1.ShardzHandler(log, proxy)), + rest.Get("/v1/shard/balanceadvice", v1.ShardBalanceAdviceHandler(log, proxy)), + rest.Post("/v1/shard/shift", v1.ShardRuleShiftHandler(log, proxy)), + rest.Post("/v1/shard/reload", v1.ShardReLoadHandler(log, proxy)), + + // meta + rest.Get("/v1/meta/versions", v1.VersionzHandler(log, proxy)), + rest.Get("/v1/meta/versioncheck", v1.VersionCheckHandler(log, proxy)), + rest.Get("/v1/meta/metas", v1.MetazHandler(log, proxy)), + + // peer + rest.Get("/v1/peer/peerz", v1.PeerzHandler(log, proxy)), + rest.Post("/v1/peer/add", v1.AddPeerHandler(log, proxy)), + rest.Post("/v1/peer/remove", v1.RemovePeerHandler(log, proxy)), + + // relay + rest.Get("/v1/relay/status", v1.RelayStatusHandler(log, proxy)), + rest.Get("/v1/relay/infos", v1.RelayInfosHandler(log, proxy)), + rest.Put("/v1/relay/start", v1.RelayStartHandler(log, proxy)), + rest.Put("/v1/relay/stop", v1.RelayStopHandler(log, proxy)), + rest.Put("/v1/relay/paralleltype", v1.RelayParallelTypeHandler(log, proxy)), + rest.Post("/v1/relay/reset", v1.RelayResetHandler(log, proxy)), + rest.Post("/v1/relay/workers", v1.RelayWorkersHandler(log, proxy)), + + // debug + rest.Get("/v1/debug/processlist", v1.ProcesslistHandler(log, proxy)), + rest.Get("/v1/debug/queryz/:limit", v1.QueryzHandler(log, proxy)), + rest.Get("/v1/debug/txnz/:limit", v1.TxnzHandler(log, proxy)), + rest.Get("/v1/debug/configz", v1.ConfigzHandler(log, proxy)), + rest.Get("/v1/debug/backendz", v1.BackendzHandler(log, proxy)), + rest.Get("/v1/debug/schemaz", v1.SchemazHandler(log, proxy)), + ) +} diff --git a/src/ctl/v1/backend.go b/src/ctl/v1/backend.go new file mode 100644 index 00000000..d9c9fdbc --- /dev/null +++ b/src/ctl/v1/backend.go @@ -0,0 +1,186 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. 
+ * Code is licensed under the GPLv3. + * + */ + +package v1 + +import ( + "config" + "net/http" + "proxy" + + "github.com/ant0ine/go-json-rest/rest" + "github.com/xelabs/go-mysqlstack/xlog" +) + +type backendParams struct { + Name string `json:"name"` + Address string `json:"address"` + User string `json:"user"` + Password string `json:"password"` + MaxConnections int `json:"max-connections"` +} + +// AddBackendHandler impl. +func AddBackendHandler(log *xlog.Log, proxy *proxy.Proxy) rest.HandlerFunc { + f := func(w rest.ResponseWriter, r *rest.Request) { + addBackendHandler(log, proxy, w, r) + } + return f +} + +func addBackendHandler(log *xlog.Log, proxy *proxy.Proxy, w rest.ResponseWriter, r *rest.Request) { + scatter := proxy.Scatter() + p := backendParams{} + err := r.DecodeJsonPayload(&p) + if err != nil { + log.Error("api.v1.add.backend.error:%+v", err) + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + conf := &config.BackendConfig{ + Name: p.Name, + Address: p.Address, + User: p.User, + Password: p.Password, + Charset: "utf8", + MaxConnections: p.MaxConnections, + } + log.Warning("api.v1.add[from:%v].backend[%+v]", r.RemoteAddr, conf) + + if err := scatter.Add(conf); err != nil { + log.Error("api.v1.add.backend[%+v].error:%+v", conf, err) + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + if err := scatter.FlushConfig(); err != nil { + log.Error("api.v1.add.backend.flush.config.error:%+v", err) + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } +} + +// RemoveBackendHandler impl. 
+func RemoveBackendHandler(log *xlog.Log, proxy *proxy.Proxy) rest.HandlerFunc { + f := func(w rest.ResponseWriter, r *rest.Request) { + removeBackendHandler(log, proxy, w, r) + } + return f +} + +func removeBackendHandler(log *xlog.Log, proxy *proxy.Proxy, w rest.ResponseWriter, r *rest.Request) { + scatter := proxy.Scatter() + backend := r.PathParam("name") + conf := &config.BackendConfig{ + Name: backend, + } + log.Warning("api.v1.remove[from:%v].backend[%+v]", r.RemoteAddr, conf) + + if err := scatter.Remove(conf); err != nil { + log.Error("api.v1.remove.backend[%+v].error:%+v", conf, err) + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + if err := scatter.FlushConfig(); err != nil { + log.Error("api.v1.remove.backend.flush.config.error:%+v", err) + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } +} + +// AddBackupHandler impl. +func AddBackupHandler(log *xlog.Log, proxy *proxy.Proxy) rest.HandlerFunc { + f := func(w rest.ResponseWriter, r *rest.Request) { + addBackupHandler(log, proxy, w, r) + } + return f +} + +func addBackupHandler(log *xlog.Log, proxy *proxy.Proxy, w rest.ResponseWriter, r *rest.Request) { + scatter := proxy.Scatter() + p := backendParams{} + err := r.DecodeJsonPayload(&p) + if err != nil { + log.Error("api.v1.add.backend.error:%+v", err) + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + conf := &config.BackendConfig{ + Name: p.Name, + Address: p.Address, + User: p.User, + Password: p.Password, + Charset: "utf8", + MaxConnections: p.MaxConnections, + } + log.Warning("api.v1.add[from:%v].backup[%+v]", r.RemoteAddr, conf) + + if err := scatter.AddBackup(conf); err != nil { + log.Error("api.v1.add.backup[%+v].error:%+v", conf, err) + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + if err := scatter.FlushConfig(); err != nil { + log.Error("api.v1.add.backup.flush.config.error:%+v", err) + rest.Error(w, err.Error(), 
http.StatusInternalServerError) + return + } +} + +// RemoveBackupHandler impl. +func RemoveBackupHandler(log *xlog.Log, proxy *proxy.Proxy) rest.HandlerFunc { + f := func(w rest.ResponseWriter, r *rest.Request) { + removeBackupHandler(log, proxy, w, r) + } + return f +} + +func removeBackupHandler(log *xlog.Log, proxy *proxy.Proxy, w rest.ResponseWriter, r *rest.Request) { + scatter := proxy.Scatter() + backend := r.PathParam("name") + conf := &config.BackendConfig{ + Name: backend, + } + log.Warning("api.v1.remove[from:%v].backup[%+v]", r.RemoteAddr, conf) + + if err := scatter.RemoveBackup(conf); err != nil { + log.Error("api.v1.remove.backup[%+v].error:%+v", conf, err) + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + if err := scatter.FlushConfig(); err != nil { + log.Error("api.v1.remove.backup.flush.config.error:%+v", err) + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } +} + +// BackupConfigHandler impl. +func BackupConfigHandler(log *xlog.Log, proxy *proxy.Proxy) rest.HandlerFunc { + f := func(w rest.ResponseWriter, r *rest.Request) { + type resp struct { + Address string `json:"address"` + User string `json:"user"` + Password string `json:"password"` + } + + conf := proxy.Scatter().BackupConfig() + rsp := &resp{ + Address: conf.Address, + User: conf.User, + Password: conf.Password, + } + w.WriteJson(rsp) + } + return f +} diff --git a/src/ctl/v1/backend_test.go b/src/ctl/v1/backend_test.go new file mode 100644 index 00000000..555e5ac7 --- /dev/null +++ b/src/ctl/v1/backend_test.go @@ -0,0 +1,232 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. 
+ * + */ + +package v1 + +import ( + "proxy" + "testing" + + "github.com/ant0ine/go-json-rest/rest" + "github.com/ant0ine/go-json-rest/rest/test" + "github.com/xelabs/go-mysqlstack/xlog" +) + +func TestCtlV1BackendAdd(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + _, proxy, cleanup := proxy.MockProxy(log) + defer cleanup() + + // server + api := rest.NewApi() + router, _ := rest.MakeRouter( + rest.Post("/v1/radon/backend", AddBackendHandler(log, proxy)), + ) + api.SetApp(router) + handler := api.MakeHandler() + + { + p := &backendParams{ + Name: "backend6", + Address: "192.168.0.1:3306", + User: "mock", + Password: "pwd", + MaxConnections: 1024, + } + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("POST", "http://localhost/v1/radon/backend", p)) + recorded.CodeIs(200) + } +} + +func TestCtlV1BackendAddError(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + _, proxy, cleanup := proxy.MockProxy(log) + defer cleanup() + + // server + api := rest.NewApi() + router, _ := rest.MakeRouter( + rest.Post("/v1/radon/backend", AddBackendHandler(log, proxy)), + ) + api.SetApp(router) + handler := api.MakeHandler() + + // 500. 
+ { + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("POST", "http://localhost/v1/radon/backend", nil)) + recorded.CodeIs(500) + } + + { + p := &backendParams{ + Name: "backend1", + Address: "192.168.0.1:3306", + User: "mock", + Password: "pwd", + MaxConnections: 1024, + } + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("POST", "http://localhost/v1/radon/backend", p)) + recorded.CodeIs(500) + } +} + +func TestCtlV1BackendRemove(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + _, proxy, cleanup := proxy.MockProxy(log) + defer cleanup() + + // server + api := rest.NewApi() + router, _ := rest.MakeRouter( + rest.Delete("/v1/radon/backend/:name", RemoveBackendHandler(log, proxy)), + ) + api.SetApp(router) + handler := api.MakeHandler() + + { + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("DELETE", "http://localhost/v1/radon/backend/backend1", nil)) + recorded.CodeIs(200) + } +} + +func TestCtlV1BackendRemoveError(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + _, proxy, cleanup := proxy.MockProxy(log) + defer cleanup() + + // server + api := rest.NewApi() + router, _ := rest.MakeRouter( + rest.Delete("/v1/radon/backend/:name", RemoveBackendHandler(log, proxy)), + ) + api.SetApp(router) + handler := api.MakeHandler() + + // 404. 
+ { + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("DELETE", "http://localhost/v1/radon/backend/xx", nil)) + recorded.CodeIs(500) + } +} + +// backup +func TestCtlV1BackupAdd(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + _, proxy, cleanup := proxy.MockProxy(log) + defer cleanup() + + // server + api := rest.NewApi() + router, _ := rest.MakeRouter( + rest.Post("/v1/radon/backup", AddBackupHandler(log, proxy)), + ) + api.SetApp(router) + handler := api.MakeHandler() + + { + p := &backendParams{ + Name: "backupnode", + Address: "192.168.0.1:3306", + User: "mock", + Password: "pwd", + MaxConnections: 1024, + } + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("POST", "http://localhost/v1/radon/backup", p)) + recorded.CodeIs(200) + } +} + +func TestCtlV1BackupAddError(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + _, proxy, cleanup := proxy.MockProxyWithBackup(log) + defer cleanup() + + // server + api := rest.NewApi() + router, _ := rest.MakeRouter( + rest.Post("/v1/radon/backup", AddBackupHandler(log, proxy)), + ) + api.SetApp(router) + handler := api.MakeHandler() + + // 500. 
+ { + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("POST", "http://localhost/v1/radon/backup", nil)) + recorded.CodeIs(500) + } + + { + p := &backendParams{ + Name: "backupnode", + Address: "192.168.0.1:3306", + User: "mock", + Password: "pwd", + MaxConnections: 1024, + } + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("POST", "http://localhost/v1/radon/backup", p)) + recorded.CodeIs(500) + } +} + +func TestCtlV1BackupRemove(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + _, proxy, cleanup := proxy.MockProxyWithBackup(log) + defer cleanup() + + // server + api := rest.NewApi() + router, _ := rest.MakeRouter( + rest.Delete("/v1/radon/backup/:name", RemoveBackupHandler(log, proxy)), + ) + api.SetApp(router) + handler := api.MakeHandler() + + { + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("DELETE", "http://localhost/v1/radon/backup/backend4", nil)) + recorded.CodeIs(200) + } +} + +func TestCtlV1BackupRemoveError(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + _, proxy, cleanup := proxy.MockProxy(log) + defer cleanup() + + // server + api := rest.NewApi() + router, _ := rest.MakeRouter( + rest.Delete("/v1/radon/backup/:name", RemoveBackupHandler(log, proxy)), + ) + api.SetApp(router) + handler := api.MakeHandler() + + // 404. 
+ { + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("DELETE", "http://localhost/v1/radon/backup/xx", nil)) + recorded.CodeIs(500) + } +} + +func TestCtlV1BackupConfig(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + _, proxy, cleanup := proxy.MockProxyWithBackup(log) + defer cleanup() + + // server + api := rest.NewApi() + router, _ := rest.MakeRouter( + rest.Get("/v1/radon/backupconfig", BackupConfigHandler(log, proxy)), + ) + api.SetApp(router) + handler := api.MakeHandler() + + { + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("GET", "http://localhost/v1/radon/backupconfig", nil)) + recorded.CodeIs(200) + } +} diff --git a/src/ctl/v1/backendz.go b/src/ctl/v1/backendz.go new file mode 100644 index 00000000..be26d1ed --- /dev/null +++ b/src/ctl/v1/backendz.go @@ -0,0 +1,29 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package v1 + +import ( + "proxy" + + "github.com/ant0ine/go-json-rest/rest" + "github.com/xelabs/go-mysqlstack/xlog" +) + +// BackendzHandler impl. +func BackendzHandler(log *xlog.Log, proxy *proxy.Proxy) rest.HandlerFunc { + f := func(w rest.ResponseWriter, r *rest.Request) { + backendzHandler(log, proxy, w, r) + } + return f +} + +func backendzHandler(log *xlog.Log, proxy *proxy.Proxy, w rest.ResponseWriter, r *rest.Request) { + scatter := proxy.Scatter() + w.WriteJson(scatter.BackendConfigsClone()) +} diff --git a/src/ctl/v1/backendz_test.go b/src/ctl/v1/backendz_test.go new file mode 100644 index 00000000..c8f870eb --- /dev/null +++ b/src/ctl/v1/backendz_test.go @@ -0,0 +1,48 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. 
+ * + */ + +package v1 + +import ( + "proxy" + "strings" + "testing" + + "github.com/ant0ine/go-json-rest/rest" + "github.com/ant0ine/go-json-rest/rest/test" + "github.com/stretchr/testify/assert" + "github.com/xelabs/go-mysqlstack/xlog" +) + +func TestCtlV1Backendz(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + _, proxy, cleanup := proxy.MockProxy(log) + defer cleanup() + + // server + api := rest.NewApi() + router, _ := rest.MakeRouter( + rest.Post("/v1/radon/backend", AddBackendHandler(log, proxy)), + ) + api.SetApp(router) + { + api := rest.NewApi() + router, _ := rest.MakeRouter( + rest.Get("/v1/debug/backendz", BackendzHandler(log, proxy)), + ) + api.SetApp(router) + handler := api.MakeHandler() + + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("GET", "http://localhost/v1/debug/backendz", nil)) + recorded.CodeIs(200) + + got := recorded.Recorder.Body.String() + log.Debug(got) + assert.True(t, strings.Contains(got, "backend4")) + } +} diff --git a/src/ctl/v1/configz.go b/src/ctl/v1/configz.go new file mode 100644 index 00000000..87931163 --- /dev/null +++ b/src/ctl/v1/configz.go @@ -0,0 +1,28 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package v1 + +import ( + "proxy" + + "github.com/ant0ine/go-json-rest/rest" + "github.com/xelabs/go-mysqlstack/xlog" +) + +// ConfigzHandler impl. +func ConfigzHandler(log *xlog.Log, proxy *proxy.Proxy) rest.HandlerFunc { + f := func(w rest.ResponseWriter, r *rest.Request) { + configzHandler(log, proxy, w, r) + } + return f +} + +func configzHandler(log *xlog.Log, proxy *proxy.Proxy, w rest.ResponseWriter, r *rest.Request) { + w.WriteJson(proxy.Config()) +} diff --git a/src/ctl/v1/configz_test.go b/src/ctl/v1/configz_test.go new file mode 100644 index 00000000..fbc88113 --- /dev/null +++ b/src/ctl/v1/configz_test.go @@ -0,0 +1,59 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. 
+ * + */ + +package v1 + +import ( + "proxy" + "strings" + "testing" + + "github.com/ant0ine/go-json-rest/rest" + "github.com/ant0ine/go-json-rest/rest/test" + "github.com/stretchr/testify/assert" + "github.com/xelabs/go-mysqlstack/driver" + "github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes" + "github.com/xelabs/go-mysqlstack/xlog" +) + +func TestCtlV1Configz(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedbs, proxy, cleanup := proxy.MockProxy(log) + defer cleanup() + address := proxy.Address() + + // fakedbs. + { + fakedbs.AddQueryPattern("create table .*", &sqltypes.Result{}) + } + // create test table. + { + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "create table test.t1(id int, b int) partition by hash(id)" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } + + { + api := rest.NewApi() + router, _ := rest.MakeRouter( + rest.Get("/v1/debug/configz", ConfigzHandler(log, proxy)), + ) + api.SetApp(router) + handler := api.MakeHandler() + + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("GET", "http://localhost/v1/debug/configz", nil)) + recorded.CodeIs(200) + + body := recorded.Recorder.Body.String() + log.Debug("----%s", body) + got := strings.Contains(body, "twopc-enable") + assert.True(t, got) + } +} diff --git a/src/ctl/v1/explain.go b/src/ctl/v1/explain.go new file mode 100644 index 00000000..83d78ed0 --- /dev/null +++ b/src/ctl/v1/explain.go @@ -0,0 +1,68 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package v1 + +import ( + "net/http" + "optimizer" + "proxy" + + "github.com/ant0ine/go-json-rest/rest" + "github.com/xelabs/go-mysqlstack/xlog" + + "github.com/xelabs/go-mysqlstack/sqlparser" +) + +type explainParams struct { + Query string `json:"query"` +} + +// ExplainHandler impl. 
+func ExplainHandler(log *xlog.Log, proxy *proxy.Proxy) rest.HandlerFunc { + f := func(w rest.ResponseWriter, r *rest.Request) { + explainHandler(log, proxy, w, r) + } + return f +} + +func explainHandler(log *xlog.Log, proxy *proxy.Proxy, w rest.ResponseWriter, r *rest.Request) { + type resp struct { + Msg string + } + p := explainParams{} + err := r.DecodeJsonPayload(&p) + if err != nil { + log.Error("api.v1.explain.error:%+v", err) + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + rsp := &resp{} + router := proxy.Router() + query := p.Query + node, err := sqlparser.Parse(query) + if err != nil { + log.Error("ctl.v1.explain[%s].parser.error:%+v", query, err) + rsp.Msg = err.Error() + w.WriteJson(rsp) + return + } + simOptimizer := optimizer.NewSimpleOptimizer(log, "", query, node, router) + planTree, err := simOptimizer.BuildPlanTree() + if err != nil { + log.Error("ctl.v1.explain[%s].build.plan.error:%+v", query, err) + rsp.Msg = err.Error() + w.WriteJson(rsp) + return + } + if len(planTree.Plans()) > 0 { + rsp.Msg = planTree.Plans()[0].JSON() + w.WriteJson(rsp) + } +} diff --git a/src/ctl/v1/explain_test.go b/src/ctl/v1/explain_test.go new file mode 100644 index 00000000..016e6f99 --- /dev/null +++ b/src/ctl/v1/explain_test.go @@ -0,0 +1,58 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package v1 + +import ( + "proxy" + "testing" + + "github.com/ant0ine/go-json-rest/rest" + "github.com/ant0ine/go-json-rest/rest/test" + "github.com/stretchr/testify/assert" + "github.com/xelabs/go-mysqlstack/driver" + "github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes" + "github.com/xelabs/go-mysqlstack/xlog" +) + +func TestCtlV1Explain(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedbs, proxy, cleanup := proxy.MockProxy(log) + defer cleanup() + address := proxy.Address() + + // fakedbs. 
+ { + fakedbs.AddQueryPattern("create table .*", &sqltypes.Result{}) + } + + // create test table. + { + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "create table test.t1(id int, b int) partition by hash(id)" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } + + { + // server + api := rest.NewApi() + router, _ := rest.MakeRouter( + rest.Post("/v1/radon/explain", ExplainHandler(log, proxy)), + ) + api.SetApp(router) + handler := api.MakeHandler() + + p := &explainParams{ + Query: "select id, k, avg, c, count from test.t1 group by id order by c limit 1", + } + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("POST", "http://localhost/v1/radon/explain", p)) + recorded.CodeIs(200) + } +} diff --git a/src/ctl/v1/meta.go b/src/ctl/v1/meta.go new file mode 100644 index 00000000..6401a3b5 --- /dev/null +++ b/src/ctl/v1/meta.go @@ -0,0 +1,75 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package v1 + +import ( + "config" + "net/http" + "proxy" + + "github.com/ant0ine/go-json-rest/rest" + "github.com/xelabs/go-mysqlstack/xlog" +) + +// VersionzHandler impl. +func VersionzHandler(log *xlog.Log, proxy *proxy.Proxy) rest.HandlerFunc { + f := func(w rest.ResponseWriter, r *rest.Request) { + versionzHandler(log, proxy, w, r) + } + return f +} + +func versionzHandler(log *xlog.Log, proxy *proxy.Proxy, w rest.ResponseWriter, r *rest.Request) { + syncer := proxy.Syncer() + version := &config.Version{ + Ts: syncer.MetaVersion(), + } + w.WriteJson(version) +} + +type versionCheck struct { + Latest bool `json:"latest"` + Peers []string `json:"peers"` +} + +// VersionCheckHandler impl. 
+func VersionCheckHandler(log *xlog.Log, proxy *proxy.Proxy) rest.HandlerFunc { + f := func(w rest.ResponseWriter, r *rest.Request) { + versionCheckHandler(log, proxy, w, r) + } + return f +} + +func versionCheckHandler(log *xlog.Log, proxy *proxy.Proxy, w rest.ResponseWriter, r *rest.Request) { + syncer := proxy.Syncer() + latest, peers := syncer.MetaVersionCheck() + check := &versionCheck{ + Latest: latest, + Peers: peers, + } + w.WriteJson(check) +} + +// MetazHandler impl. +func MetazHandler(log *xlog.Log, proxy *proxy.Proxy) rest.HandlerFunc { + f := func(w rest.ResponseWriter, r *rest.Request) { + metazHandler(log, proxy, w, r) + } + return f +} + +func metazHandler(log *xlog.Log, proxy *proxy.Proxy, w rest.ResponseWriter, r *rest.Request) { + sync := proxy.Syncer() + meta, err := sync.MetaJSON() + if err != nil { + log.Error("api.v1.radon.flush.config.error:%+v", err) + rest.Error(w, err.Error(), http.StatusInternalServerError) + } + w.WriteJson(meta) +} diff --git a/src/ctl/v1/meta_test.go b/src/ctl/v1/meta_test.go new file mode 100644 index 00000000..17772c66 --- /dev/null +++ b/src/ctl/v1/meta_test.go @@ -0,0 +1,132 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package v1 + +import ( + "proxy" + "strings" + "testing" + + "github.com/ant0ine/go-json-rest/rest" + "github.com/ant0ine/go-json-rest/rest/test" + "github.com/stretchr/testify/assert" + "github.com/xelabs/go-mysqlstack/driver" + "github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes" + "github.com/xelabs/go-mysqlstack/xlog" +) + +func TestCtlV1Versionz(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedbs, proxy, cleanup := proxy.MockProxy(log) + defer cleanup() + address := proxy.Address() + + // fakedbs. + { + fakedbs.AddQueryPattern("create table .*", &sqltypes.Result{}) + } + + // create test table. 
+ { + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "create table test.t1(id int, b int) partition by hash(id)" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } + + { + api := rest.NewApi() + router, _ := rest.MakeRouter( + rest.Get("/v1/meta/versions", VersionzHandler(log, proxy)), + ) + api.SetApp(router) + handler := api.MakeHandler() + + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("GET", "http://localhost/v1/meta/versions", nil)) + recorded.CodeIs(200) + + got := strings.Contains(recorded.Recorder.Body.String(), "version") + assert.True(t, got) + } +} + +func TestCtlV1VersionCheck(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedbs, proxy, cleanup := proxy.MockProxy(log) + defer cleanup() + address := proxy.Address() + + // fakedbs. + { + fakedbs.AddQueryPattern("create table .*", &sqltypes.Result{}) + } + + // create test table. + { + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "create table test.t1(id int, b int) partition by hash(id)" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } + + { + api := rest.NewApi() + router, _ := rest.MakeRouter( + rest.Get("/v1/meta/versioncheck", VersionCheckHandler(log, proxy)), + ) + api.SetApp(router) + handler := api.MakeHandler() + + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("GET", "http://localhost/v1/meta/versioncheck", nil)) + recorded.CodeIs(200) + + want := "{\"latest\":true,\"peers\":[\"127.0.0.1:8080\"]}" + got := recorded.Recorder.Body.String() + assert.Equal(t, want, got) + } +} + +func TestCtlV1Metaz(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedbs, proxy, cleanup := proxy.MockProxy(log) + defer cleanup() + address := proxy.Address() + + // fakedbs. + { + fakedbs.AddQueryPattern("create table .*", &sqltypes.Result{}) + } + + // create test table. 
+ { + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "create table test.t1(id int, b int) partition by hash(id)" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } + + { + api := rest.NewApi() + router, _ := rest.MakeRouter( + rest.Get("/v1/meta/metas", MetazHandler(log, proxy)), + ) + api.SetApp(router) + handler := api.MakeHandler() + + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("GET", "http://localhost/v1/meta/metas", nil)) + recorded.CodeIs(200) + + prefix := "{\"metas\":{\"backend.json\":\"{\\n\\t\\\"backup\\\": null,\\n\\t\\\"backends" + got := strings.HasPrefix(recorded.Recorder.Body.String(), prefix) + assert.True(t, got) + } +} diff --git a/src/ctl/v1/peer.go b/src/ctl/v1/peer.go new file mode 100644 index 00000000..f31885ea --- /dev/null +++ b/src/ctl/v1/peer.go @@ -0,0 +1,86 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package v1 + +import ( + "net/http" + "proxy" + + "github.com/ant0ine/go-json-rest/rest" + "github.com/xelabs/go-mysqlstack/xlog" +) + +type peerParams struct { + Address string `json:"address"` +} + +// AddPeerHandler impl. +func AddPeerHandler(log *xlog.Log, proxy *proxy.Proxy) rest.HandlerFunc { + f := func(w rest.ResponseWriter, r *rest.Request) { + addPeerHandler(log, proxy, w, r) + } + return f +} + +func addPeerHandler(log *xlog.Log, proxy *proxy.Proxy, w rest.ResponseWriter, r *rest.Request) { + syncer := proxy.Syncer() + p := peerParams{} + err := r.DecodeJsonPayload(&p) + if err != nil { + log.Error("api.v1.add.peer.error:%+v", err) + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } + log.Warning("api.v1.add.peer[%+v].from[%v]", p, r.RemoteAddr) + + if err := syncer.AddPeer(p.Address); err != nil { + log.Error("api.v1.add.peer[%+v].error:%+v", p, err) + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } +} + +// RemovePeerHandler impl. 
+func RemovePeerHandler(log *xlog.Log, proxy *proxy.Proxy) rest.HandlerFunc { + f := func(w rest.ResponseWriter, r *rest.Request) { + removePeerHandler(log, proxy, w, r) + } + return f +} + +func removePeerHandler(log *xlog.Log, proxy *proxy.Proxy, w rest.ResponseWriter, r *rest.Request) { + syncer := proxy.Syncer() + p := peerParams{} + err := r.DecodeJsonPayload(&p) + if err != nil { + log.Error("api.v1.remove.peer.error:%+v", err) + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } + log.Warning("api.v1.remove.peer[%+v].from[%v]", p, r.RemoteAddr) + + if err := syncer.RemovePeer(p.Address); err != nil { + log.Error("api.v1.remove.peer[%+v].error:%+v", p, err) + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } +} + +// PeerzHandler impl. +func PeerzHandler(log *xlog.Log, proxy *proxy.Proxy) rest.HandlerFunc { + f := func(w rest.ResponseWriter, r *rest.Request) { + peerzHandler(log, proxy, w, r) + } + return f +} + +func peerzHandler(log *xlog.Log, proxy *proxy.Proxy, w rest.ResponseWriter, r *rest.Request) { + syncer := proxy.Syncer() + w.WriteJson(syncer.Peers()) +} diff --git a/src/ctl/v1/peer_test.go b/src/ctl/v1/peer_test.go new file mode 100644 index 00000000..6001e170 --- /dev/null +++ b/src/ctl/v1/peer_test.go @@ -0,0 +1,128 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. 
+ * + */ + +package v1 + +import ( + "proxy" + "testing" + + "github.com/ant0ine/go-json-rest/rest" + "github.com/ant0ine/go-json-rest/rest/test" + "github.com/stretchr/testify/assert" + "github.com/xelabs/go-mysqlstack/xlog" +) + +func TestCtlV1PeerAdd(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + _, proxy, cleanup := proxy.MockProxy(log) + defer cleanup() + + // server + api := rest.NewApi() + router, _ := rest.MakeRouter( + rest.Post("/v1/peer/add", AddPeerHandler(log, proxy)), + ) + api.SetApp(router) + handler := api.MakeHandler() + + { + p := &peerParams{ + Address: "192.168.0.1:3306", + } + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("POST", "http://localhost/v1/peer/add", p)) + recorded.CodeIs(200) + } +} + +func TestCtlV1PeerAddError(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + _, proxy, cleanup := proxy.MockProxy(log) + defer cleanup() + + // server + api := rest.NewApi() + router, _ := rest.MakeRouter( + rest.Post("/v1/peer/add", AddPeerHandler(log, proxy)), + ) + api.SetApp(router) + handler := api.MakeHandler() + + // 500. 
+ { + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("POST", "http://localhost/v1/peer/add", nil)) + recorded.CodeIs(500) + } + + { + p := &peerParams{} + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("POST", "http://localhost/v1/peer/add", p)) + recorded.CodeIs(500) + } +} + +func TestCtlV1PeerRemove(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + _, proxy, cleanup := proxy.MockProxy(log) + defer cleanup() + + // server + api := rest.NewApi() + router, _ := rest.MakeRouter( + rest.Post("/v1/peer/remove", RemovePeerHandler(log, proxy)), + ) + api.SetApp(router) + handler := api.MakeHandler() + + p := &peerParams{ + Address: "192.168.0.1:3306", + } + { + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("POST", "http://localhost/v1/peer/remove", p)) + recorded.CodeIs(200) + } +} + +func TestCtlV1Peers(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + _, proxy, cleanup := proxy.MockProxy(log) + defer cleanup() + + // server + api := rest.NewApi() + router, _ := rest.MakeRouter( + rest.Post("/v1/peer/add", AddPeerHandler(log, proxy)), + ) + api.SetApp(router) + handler := api.MakeHandler() + + { + p := &peerParams{ + Address: "192.168.0.1:3306", + } + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("POST", "http://localhost/v1/peer/add", p)) + recorded.CodeIs(200) + } + + { + api := rest.NewApi() + router, _ := rest.MakeRouter( + rest.Get("/v1/peer/peerz", PeerzHandler(log, proxy)), + ) + api.SetApp(router) + handler := api.MakeHandler() + + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("GET", "http://localhost/v1/peer/peerz", nil)) + recorded.CodeIs(200) + + want := "[\"127.0.0.1:8080\",\"192.168.0.1:3306\"]" + got := recorded.Recorder.Body.String() + log.Debug(got) + assert.Equal(t, want, got) + } +} diff --git a/src/ctl/v1/ping.go b/src/ctl/v1/ping.go new file mode 100644 index 00000000..5badda3d --- /dev/null +++ b/src/ctl/v1/ping.go @@ -0,0 +1,33 @@ +/* 
+ * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package v1 + +import ( + "net/http" + "proxy" + + "github.com/ant0ine/go-json-rest/rest" + "github.com/xelabs/go-mysqlstack/xlog" +) + +// PingHandler impl. +func PingHandler(log *xlog.Log, proxy *proxy.Proxy) rest.HandlerFunc { + f := func(w rest.ResponseWriter, r *rest.Request) { + pingHandler(log, proxy, w, r) + } + return f +} + +func pingHandler(log *xlog.Log, proxy *proxy.Proxy, w rest.ResponseWriter, r *rest.Request) { + spanner := proxy.Spanner() + if _, err := spanner.ExecuteScatter("select 1"); err != nil { + log.Error("api.v1.ping.error:%+v", err) + rest.Error(w, err.Error(), http.StatusServiceUnavailable) + } +} diff --git a/src/ctl/v1/ping_test.go b/src/ctl/v1/ping_test.go new file mode 100644 index 00000000..27a97395 --- /dev/null +++ b/src/ctl/v1/ping_test.go @@ -0,0 +1,76 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package v1 + +import ( + "errors" + "proxy" + "testing" + + "github.com/ant0ine/go-json-rest/rest" + "github.com/ant0ine/go-json-rest/rest/test" + "github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes" + "github.com/xelabs/go-mysqlstack/xlog" +) + +func TestCtlV1Ping(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedbs, proxy, cleanup := proxy.MockProxy(log) + defer cleanup() + + // fakedbs. 
+ { + fakedbs.AddQueryPattern("select .*", &sqltypes.Result{}) + } + + { + // server + api := rest.NewApi() + router, _ := rest.MakeRouter( + rest.Get("/v1/radon/ping", PingHandler(log, proxy)), + ) + api.SetApp(router) + handler := api.MakeHandler() + + // client + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("GET", "http://localhost/v1/radon/ping", nil)) + recorded.CodeIs(200) + } +} + +func TestCtlV1PingError(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedbs, proxy, cleanup := proxy.MockProxy(log) + defer cleanup() + + // fakedbs. + { + fakedbs.AddQueryError("select 1", errors.New("mock.ping.error")) + } + + // server + api := rest.NewApi() + router, _ := rest.MakeRouter( + rest.Get("/v1/radon/ping", PingHandler(log, proxy)), + ) + api.SetApp(router) + handler := api.MakeHandler() + + // 405. + { + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("POST", "http://localhost/v1/radon/ping", nil)) + recorded.CodeIs(405) + } + + // 503. + { + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("GET", "http://localhost/v1/radon/ping", nil)) + recorded.CodeIs(503) + } +} diff --git a/src/ctl/v1/processlist.go b/src/ctl/v1/processlist.go new file mode 100644 index 00000000..7d753361 --- /dev/null +++ b/src/ctl/v1/processlist.go @@ -0,0 +1,55 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package v1 + +import ( + "proxy" + + "github.com/ant0ine/go-json-rest/rest" + "github.com/xelabs/go-mysqlstack/xlog" +) + +// ProcesslistHandler impl. 
+func ProcesslistHandler(log *xlog.Log, proxy *proxy.Proxy) rest.HandlerFunc { + f := func(w rest.ResponseWriter, r *rest.Request) { + processlistHandler(log, proxy, w, r) + } + return f +} + +func processlistHandler(log *xlog.Log, proxy *proxy.Proxy, w rest.ResponseWriter, r *rest.Request) { + type processlist struct { + ID uint32 `json:"id"` + User string `json:"user"` + Host string `json:"host"` + DB string `json:"db"` + Command string `json:"command"` + Time uint32 `json:"time"` + State string `json:"state"` + Info string `json:"info"` + } + + var rsp []processlist + sessions := proxy.Sessions() + rows := sessions.Snapshot() + for _, sr := range rows { + r := processlist{ + ID: sr.ID, + User: sr.User, + Host: sr.Host, + DB: sr.DB, + Command: sr.Command, + Time: sr.Time, + State: sr.State, + Info: sr.Info, + } + rsp = append(rsp, r) + } + w.WriteJson(rsp) +} diff --git a/src/ctl/v1/processlist_test.go b/src/ctl/v1/processlist_test.go new file mode 100644 index 00000000..61e16ac5 --- /dev/null +++ b/src/ctl/v1/processlist_test.go @@ -0,0 +1,86 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package v1 + +import ( + "proxy" + "strings" + "sync" + "testing" + "time" + + "github.com/ant0ine/go-json-rest/rest" + "github.com/ant0ine/go-json-rest/rest/test" + "github.com/stretchr/testify/assert" + "github.com/xelabs/go-mysqlstack/driver" + "github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes" + "github.com/xelabs/go-mysqlstack/xlog" +) + +func TestCtlV1Processlist(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedbs, proxy, cleanup := proxy.MockProxy(log) + defer cleanup() + address := proxy.Address() + + // fakedbs. + { + fakedbs.AddQueryPattern("create table .*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("select .*", &sqltypes.Result{}) + fakedbs.AddQueryDelay("select * from test.t1_0000 as t1", &sqltypes.Result{}, 1000) + } + + // create test table. 
+ { + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "create table test.t1(id int, b int) partition by hash(id)" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } + + var wg sync.WaitGroup + { + wg.Add(2) + go func() { + defer wg.Done() + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "select * from test.t1" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + }() + go func() { + defer wg.Done() + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "select * from test.t1" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + }() + } + time.Sleep(time.Millisecond * 100) + + { + api := rest.NewApi() + router, _ := rest.MakeRouter( + rest.Get("/v1/debug/processlist", ProcesslistHandler(log, proxy)), + ) + api.SetApp(router) + handler := api.MakeHandler() + + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("GET", "http://localhost/v1/debug/processlist", nil)) + recorded.CodeIs(200) + + got := recorded.Recorder.Body.String() + log.Debug(got) + assert.True(t, strings.Contains(got, "select * from test.t1")) + } + wg.Wait() +} diff --git a/src/ctl/v1/queryz.go b/src/ctl/v1/queryz.go new file mode 100644 index 00000000..4d36ca6a --- /dev/null +++ b/src/ctl/v1/queryz.go @@ -0,0 +1,61 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package v1 + +import ( + "proxy" + "strconv" + "time" + + "github.com/ant0ine/go-json-rest/rest" + "github.com/xelabs/go-mysqlstack/xlog" +) + +// QueryzHandler impl. 
+func QueryzHandler(log *xlog.Log, proxy *proxy.Proxy) rest.HandlerFunc { + f := func(w rest.ResponseWriter, r *rest.Request) { + queryzHandler(log, proxy, w, r) + } + return f +} + +func queryzHandler(log *xlog.Log, proxy *proxy.Proxy, w rest.ResponseWriter, r *rest.Request) { + type query struct { + ConnID uint64 `json:"connID"` + Host string `json:"host"` + Start time.Time `json:"start"` + Duration time.Duration `json:"duration"` + Color string `json:"color"` + Query string `json:"query"` + } + + limit := 100 + if v, err := strconv.Atoi(r.PathParam("limit")); err == nil { + limit = v + } + + var rsp []query + scatter := proxy.Scatter() + rows := scatter.Queryz().GetQueryzRows() + for i, row := range rows { + if i >= limit { + break + } + r := query{ + ConnID: uint64(row.ConnID), + Host: row.Address, + Start: row.Start, + Duration: row.Duration, + Color: row.Color, + Query: row.Query, + } + rsp = append(rsp, r) + } + w.WriteJson(rsp) +} diff --git a/src/ctl/v1/queryz_test.go b/src/ctl/v1/queryz_test.go new file mode 100644 index 00000000..57ca9410 --- /dev/null +++ b/src/ctl/v1/queryz_test.go @@ -0,0 +1,81 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package v1 + +import ( + "proxy" + "strings" + "sync" + "testing" + "time" + + "github.com/ant0ine/go-json-rest/rest" + "github.com/ant0ine/go-json-rest/rest/test" + "github.com/stretchr/testify/assert" + "github.com/xelabs/go-mysqlstack/driver" + "github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes" + "github.com/xelabs/go-mysqlstack/xlog" +) + +func TestCtlV1Queryz(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedbs, proxy, cleanup := proxy.MockProxy(log) + defer cleanup() + address := proxy.Address() + + // fakedbs. 
+ { + fakedbs.AddQueryPattern("create table .*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("select .*", &sqltypes.Result{}) + fakedbs.AddQueryDelay("select * from test.t1_0014 as t1", &sqltypes.Result{}, 1000) + } + + // create test table. + { + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "create table test.t1(id int, b int) partition by hash(id)" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } + + var wg sync.WaitGroup + { + n := 2 + wg.Add(n) + for i := 0; i < n; i++ { + go func() { + defer wg.Done() + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "select * from test.t1" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + }() + } + } + time.Sleep(time.Millisecond * 100) + + { + api := rest.NewApi() + router, _ := rest.MakeRouter( + rest.Get("/v1/debug/queryz/:limit", QueryzHandler(log, proxy)), + ) + api.SetApp(router) + handler := api.MakeHandler() + + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("GET", "http://localhost/v1/debug/queryz/3", nil)) + recorded.CodeIs(200) + + got := recorded.Recorder.Body.String() + log.Debug(got) + assert.True(t, strings.Contains(got, "connID")) + } + wg.Wait() +} diff --git a/src/ctl/v1/radon.go b/src/ctl/v1/radon.go new file mode 100644 index 00000000..d8570604 --- /dev/null +++ b/src/ctl/v1/radon.go @@ -0,0 +1,182 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. 
+ * + */ + +package v1 + +import ( + "net/http" + "proxy" + + "github.com/ant0ine/go-json-rest/rest" + "github.com/xelabs/go-mysqlstack/xlog" +) + +type radonParams struct { + MaxConnections *int `json:"max-connections"` + MaxResultSize *int `json:"max-result-size"` + DDLTimeout *int `json:"ddl-timeout"` + QueryTimeout *int `json:"query-timeout"` + TwoPCEnable *bool `json:"twopc-enable"` + AllowIP []string `json:"allowip,omitempty"` + AuditMode *string `json:"audit-mode"` +} + +// RadonConfigHandler impl. +func RadonConfigHandler(log *xlog.Log, proxy *proxy.Proxy) rest.HandlerFunc { + f := func(w rest.ResponseWriter, r *rest.Request) { + radonConfigHandler(log, proxy, w, r) + } + return f +} + +func radonConfigHandler(log *xlog.Log, proxy *proxy.Proxy, w rest.ResponseWriter, r *rest.Request) { + p := radonParams{} + err := r.DecodeJsonPayload(&p) + if err != nil { + log.Error("api.v1.radon.config.error:%+v", err) + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + log.Warning("api.v1.radon[from:%v].body:%+v", r.RemoteAddr, p) + if p.MaxConnections != nil { + proxy.SetMaxConnections(*p.MaxConnections) + } + if p.MaxResultSize != nil { + proxy.SetMaxResultSize(*p.MaxResultSize) + } + if p.DDLTimeout != nil { + proxy.SetDDLTimeout(*p.DDLTimeout) + } + if p.QueryTimeout != nil { + proxy.SetQueryTimeout(*p.QueryTimeout) + } + if p.TwoPCEnable != nil { + proxy.SetTwoPC(*p.TwoPCEnable) + } + proxy.SetAllowIP(p.AllowIP) + if p.AuditMode != nil { + proxy.SetAuditMode(*p.AuditMode) + } + + // reset the allow ip table list. + proxy.IPTable().Refresh() + + // write to file. + if err := proxy.FlushConfig(); err != nil { + log.Error("api.v1.radon.flush.config.error:%+v", err) + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } +} + +type readonlyParams struct { + ReadOnly bool `json:"readonly"` +} + +// ReadonlyHandler impl. 
+func ReadonlyHandler(log *xlog.Log, proxy *proxy.Proxy) rest.HandlerFunc { + f := func(w rest.ResponseWriter, r *rest.Request) { + readonlyHandler(log, proxy, w, r) + } + return f +} + +func readonlyHandler(log *xlog.Log, proxy *proxy.Proxy, w rest.ResponseWriter, r *rest.Request) { + p := readonlyParams{} + err := r.DecodeJsonPayload(&p) + if err != nil { + log.Error("api.v1.readonly.error:%+v", err) + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + log.Warning("api.v1.readonly[from:%v].body:%+v", r.RemoteAddr, p) + proxy.SetReadOnly(p.ReadOnly) +} + +type twopcParams struct { + Twopc bool `json:"twopc"` +} + +// TwopcHandler impl. +func TwopcHandler(log *xlog.Log, proxy *proxy.Proxy) rest.HandlerFunc { + f := func(w rest.ResponseWriter, r *rest.Request) { + twopcHandler(log, proxy, w, r) + } + return f +} + +func twopcHandler(log *xlog.Log, proxy *proxy.Proxy, w rest.ResponseWriter, r *rest.Request) { + p := twopcParams{} + err := r.DecodeJsonPayload(&p) + if err != nil { + log.Error("api.v1.twopc.error:%+v", err) + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + log.Warning("api.v1.twopc[from:%v].body:%+v", r.RemoteAddr, p) + proxy.SetTwoPC(p.Twopc) +} + +type throttleParams struct { + Limits int `json:"limits"` +} + +// ThrottleHandler impl. +func ThrottleHandler(log *xlog.Log, proxy *proxy.Proxy) rest.HandlerFunc { + f := func(w rest.ResponseWriter, r *rest.Request) { + throttleHandler(log, proxy, w, r) + } + return f +} + +func throttleHandler(log *xlog.Log, proxy *proxy.Proxy, w rest.ResponseWriter, r *rest.Request) { + p := throttleParams{} + err := r.DecodeJsonPayload(&p) + if err != nil { + log.Error("api.v1.radon.throttle.error:%+v", err) + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + log.Warning("api.v1.radon.throttle[from:%v].body:%+v", r.RemoteAddr, p) + proxy.SetThrottle(p.Limits) +} + +// StatusHandler impl. 
+func StatusHandler(log *xlog.Log, proxy *proxy.Proxy) rest.HandlerFunc { + f := func(w rest.ResponseWriter, r *rest.Request) { + statusHandler(log, proxy, w, r) + } + return f +} + +func statusHandler(log *xlog.Log, proxy *proxy.Proxy, w rest.ResponseWriter, r *rest.Request) { + spanner := proxy.Spanner() + type status struct { + ReadOnly bool `json:"readonly"` + } + statuz := &status{ + ReadOnly: spanner.ReadOnly(), + } + w.WriteJson(statuz) +} + +// RestApiAddressHandler impl. +func RestApiAddressHandler(log *xlog.Log, proxy *proxy.Proxy) rest.HandlerFunc { + f := func(w rest.ResponseWriter, r *rest.Request) { + type resp struct { + Addr string `json:"address"` + } + rsp := &resp{Addr: proxy.PeerAddress()} + w.WriteJson(rsp) + } + return f +} diff --git a/src/ctl/v1/radon_test.go b/src/ctl/v1/radon_test.go new file mode 100644 index 00000000..bb7fd948 --- /dev/null +++ b/src/ctl/v1/radon_test.go @@ -0,0 +1,340 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. 
+ * + */ + +package v1 + +import ( + "proxy" + "testing" + + "github.com/ant0ine/go-json-rest/rest" + "github.com/ant0ine/go-json-rest/rest/test" + "github.com/stretchr/testify/assert" + "github.com/xelabs/go-mysqlstack/driver" + "github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes" + "github.com/xelabs/go-mysqlstack/xlog" +) + +func TestCtlV1RadonConfig(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + _, proxy, cleanup := proxy.MockProxy(log) + defer cleanup() + + { + // server + api := rest.NewApi() + router, _ := rest.MakeRouter( + rest.Put("/v1/radon/config", RadonConfigHandler(log, proxy)), + ) + api.SetApp(router) + handler := api.MakeHandler() + + type radonParams1 struct { + MaxConnections int `json:"max-connections"` + DDLTimeout int `json:"ddl-timeout"` + QueryTimeout int `json:"query-timeout"` + TwoPCEnable bool `json:"twopc-enable"` + AllowIP []string `json:"allowip,omitempty"` + AuditMode string `json:"audit-mode"` + } + + // 200. + { + // client + p := &radonParams1{ + MaxConnections: 1023, + QueryTimeout: 33, + TwoPCEnable: true, + AllowIP: []string{"127.0.0.1", "127.0.0.2"}, + AuditMode: "A", + } + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("PUT", "http://localhost/v1/radon/config", p)) + recorded.CodeIs(200) + + radonConf := proxy.Config() + assert.Equal(t, 1023, radonConf.Proxy.MaxConnections) + assert.Equal(t, 1073741824, radonConf.Proxy.MaxResultSize) + assert.Equal(t, 0, radonConf.Proxy.DDLTimeout) + assert.Equal(t, 33, radonConf.Proxy.QueryTimeout) + assert.Equal(t, true, radonConf.Proxy.TwopcEnable) + assert.Equal(t, []string{"127.0.0.1", "127.0.0.2"}, radonConf.Proxy.IPS) + assert.Equal(t, "A", radonConf.Audit.Mode) + } + + // Unset AllowIP. 
+ { + // client + p := &radonParams1{ + MaxConnections: 1023, + QueryTimeout: 33, + TwoPCEnable: true, + AuditMode: "A", + } + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("PUT", "http://localhost/v1/radon/config", p)) + recorded.CodeIs(200) + + radonConf := proxy.Config() + assert.Equal(t, 1023, radonConf.Proxy.MaxConnections) + assert.Equal(t, 1073741824, radonConf.Proxy.MaxResultSize) + assert.Equal(t, 0, radonConf.Proxy.DDLTimeout) + assert.Equal(t, 33, radonConf.Proxy.QueryTimeout) + assert.Equal(t, true, radonConf.Proxy.TwopcEnable) + assert.Nil(t, radonConf.Proxy.IPS) + assert.Equal(t, "A", radonConf.Audit.Mode) + } + } +} + +func TestCtlV1RadonConfigError(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + _, proxy, cleanup := proxy.MockProxy(log) + defer cleanup() + + { + // server + api := rest.NewApi() + router, _ := rest.MakeRouter( + rest.Put("/v1/radon/config", RadonConfigHandler(log, proxy)), + ) + api.SetApp(router) + handler := api.MakeHandler() + + // 405. + { + p := &radonParams{} + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("POST", "http://localhost/v1/radon/config", p)) + recorded.CodeIs(405) + } + + // 500. + { + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("PUT", "http://localhost/v1/radon/config", nil)) + recorded.CodeIs(500) + } + } +} + +func TestCtlV1RadonReadOnly(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + _, proxy, cleanup := proxy.MockProxy(log) + defer cleanup() + + { + // server + api := rest.NewApi() + router, _ := rest.MakeRouter( + rest.Put("/v1/radon/readonly", ReadonlyHandler(log, proxy)), + ) + api.SetApp(router) + handler := api.MakeHandler() + + // 200. 
+		{
+			// client
+			p := &readonlyParams{
+				ReadOnly: true,
+			}
+			recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("PUT", "http://localhost/v1/radon/readonly", p))
+			recorded.CodeIs(200)
+		}
+	}
+}
+
+func TestCtlV1ReadOnlyError(t *testing.T) {
+	log := xlog.NewStdLog(xlog.Level(xlog.PANIC))
+	_, proxy, cleanup := proxy.MockProxy(log)
+	defer cleanup()
+
+	{
+		// server
+		api := rest.NewApi()
+		router, _ := rest.MakeRouter(
+			rest.Put("/v1/radon/readonly", ReadonlyHandler(log, proxy)),
+		)
+		api.SetApp(router)
+		handler := api.MakeHandler()
+
+		// 405.
+		{
+			p := &readonlyParams{}
+			recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("POST", "http://localhost/v1/radon/readonly", p))
+			recorded.CodeIs(405)
+		}
+
+		// 500.
+		{
+			recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("PUT", "http://localhost/v1/radon/readonly", nil))
+			recorded.CodeIs(500)
+		}
+	}
+}
+
+func TestCtlV1RadonTwopc(t *testing.T) {
+	log := xlog.NewStdLog(xlog.Level(xlog.PANIC))
+	_, proxy, cleanup := proxy.MockProxy(log)
+	defer cleanup()
+
+	{
+		// server
+		api := rest.NewApi()
+		router, _ := rest.MakeRouter(
+			rest.Put("/v1/radon/twopc", TwopcHandler(log, proxy)),
+		)
+		api.SetApp(router)
+		handler := api.MakeHandler()
+
+		// 200.
+		{
+			// client
+			p := &twopcParams{
+				Twopc: true,
+			}
+			recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("PUT", "http://localhost/v1/radon/twopc", p))
+			recorded.CodeIs(200)
+		}
+	}
+}
+
+func TestCtlV1TwopcError(t *testing.T) {
+	log := xlog.NewStdLog(xlog.Level(xlog.PANIC))
+	_, proxy, cleanup := proxy.MockProxy(log)
+	defer cleanup()
+
+	{
+		// server
+		// BUGFIX: this test previously registered ReadonlyHandler for the
+		// twopc route (copy-paste from TestCtlV1ReadOnlyError), so the twopc
+		// error paths were never exercised. Use TwopcHandler here.
+		api := rest.NewApi()
+		router, _ := rest.MakeRouter(
+			rest.Put("/v1/radon/twopc", TwopcHandler(log, proxy)),
+		)
+		api.SetApp(router)
+		handler := api.MakeHandler()
+
+		// 405.
+		{
+			p := &twopcParams{}
+			recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("POST", "http://localhost/v1/radon/twopc", p))
+			recorded.CodeIs(405)
+		}
+
+		// 500.
+ { + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("PUT", "http://localhost/v1/radon/twopc", nil)) + recorded.CodeIs(500) + } + } +} + +func TestCtlV1RadonThrottle(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + _, proxy, cleanup := proxy.MockProxy(log) + defer cleanup() + + { + // server + api := rest.NewApi() + router, _ := rest.MakeRouter( + rest.Put("/v1/radon/throttle", ThrottleHandler(log, proxy)), + ) + api.SetApp(router) + handler := api.MakeHandler() + + // 200. + { + // client + p := &throttleParams{ + Limits: 100, + } + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("PUT", "http://localhost/v1/radon/throttle", p)) + recorded.CodeIs(200) + } + } +} + +func TestCtlV1RadonThrottleError(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + _, proxy, cleanup := proxy.MockProxy(log) + defer cleanup() + + { + // server + api := rest.NewApi() + router, _ := rest.MakeRouter( + rest.Put("/v1/radon/throttle", ThrottleHandler(log, proxy)), + ) + api.SetApp(router) + handler := api.MakeHandler() + + // 405. + { + p := &throttleParams{} + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("POST", "http://localhost/v1/radon/throttle", p)) + recorded.CodeIs(405) + } + + // 500. + { + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("PUT", "http://localhost/v1/radon/throttle", nil)) + recorded.CodeIs(500) + } + } +} + +func TestCtlV1RadonStatus(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedbs, proxy, cleanup := proxy.MockProxy(log) + defer cleanup() + address := proxy.Address() + + // fakedbs. + { + fakedbs.AddQueryPattern("create table .*", &sqltypes.Result{}) + } + + // create test table. 
+ { + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "create table test.t1(id int, b int) partition by hash(id)" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } + + { + api := rest.NewApi() + router, _ := rest.MakeRouter( + rest.Get("/v1/radon/status", StatusHandler(log, proxy)), + ) + api.SetApp(router) + handler := api.MakeHandler() + + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("GET", "http://localhost/v1/radon/status", nil)) + recorded.CodeIs(200) + + want := "{\"readonly\":false}" + got := recorded.Recorder.Body.String() + assert.Equal(t, want, got) + } +} + +func TestCtlV1RadonApiAddress(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + _, proxy, cleanup := proxy.MockProxy(log) + defer cleanup() + + { + api := rest.NewApi() + router, _ := rest.MakeRouter( + rest.Get("/v1/radon/restapiaddress", RestApiAddressHandler(log, proxy)), + ) + api.SetApp(router) + handler := api.MakeHandler() + + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("GET", "http://localhost/v1/radon/restapiaddress", nil)) + recorded.CodeIs(200) + } +} diff --git a/src/ctl/v1/relay.go b/src/ctl/v1/relay.go new file mode 100644 index 00000000..fa20b7af --- /dev/null +++ b/src/ctl/v1/relay.go @@ -0,0 +1,186 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package v1 + +import ( + "fmt" + "net/http" + "proxy" + "time" + + "github.com/ant0ine/go-json-rest/rest" + "github.com/xelabs/go-mysqlstack/xlog" +) + +// RelayStatusHandler impl. 
+func RelayStatusHandler(log *xlog.Log, proxy *proxy.Proxy) rest.HandlerFunc {
+	// Wrap the worker function so the REST router gets a plain HandlerFunc.
+	f := func(w rest.ResponseWriter, r *rest.Request) {
+		relayStatusHandler(log, proxy, w, r)
+	}
+	return f
+}
+
+// relayStatusHandler reports the backup relay's runtime state as JSON:
+// whether it is running, worker counts, GTID positions and replication lag.
+func relayStatusHandler(log *xlog.Log, proxy *proxy.Proxy, w rest.ResponseWriter, r *rest.Request) {
+	// status is the response shape for GET /v1/relay/status.
+	type status struct {
+		Status          bool   `json:"status"`
+		MaxWorkers      int32  `json:"max-workers"`
+		ParallelWorkers int32  `json:"parallel-workers"`
+		SecondBehinds   int64  `json:"second-behinds"`
+		ParallelType    int32  `json:"parallel-type"`
+		RelayBinlog     string `json:"relay-binlog"`
+		RelayGTID       int64  `json:"relay-gtid"`
+		RestartGTID     int64  `json:"restart-gtid"`
+		Rates           string `json:"rates"`
+	}
+
+	spanner := proxy.Spanner()
+	bin := proxy.Binlog()
+	backupRelay := spanner.BackupRelay()
+	rsp := &status{
+		Status:          backupRelay.RelayStatus(),
+		MaxWorkers:      backupRelay.MaxWorkers(),
+		ParallelWorkers: backupRelay.ParallelWorkers(),
+		// GTIDs are UTC UnixNano timestamps (see relayResetHandler's check),
+		// so the binlog/relay gap divided by time.Second is seconds of lag.
+		SecondBehinds: (bin.LastGTID() - backupRelay.RelayGTID()) / int64(time.Second),
+		ParallelType:  backupRelay.ParallelType(),
+		RelayBinlog:   backupRelay.RelayBinlog(),
+		RelayGTID:     backupRelay.RelayGTID(),
+		RestartGTID:   backupRelay.RestartGTID(),
+		Rates:         backupRelay.RelayRates(),
+	}
+	w.WriteJson(rsp)
+}
+
+// RelayInfosHandler impl. Returns the binlog's relay infos as JSON.
+func RelayInfosHandler(log *xlog.Log, proxy *proxy.Proxy) rest.HandlerFunc {
+	f := func(w rest.ResponseWriter, r *rest.Request) {
+		relayInfosHandler(log, proxy, w, r)
+	}
+	return f
+}
+
+func relayInfosHandler(log *xlog.Log, proxy *proxy.Proxy, w rest.ResponseWriter, r *rest.Request) {
+	bin := proxy.Binlog()
+	rsp := bin.RelayInfos()
+	w.WriteJson(rsp)
+}
+
+// RelayStartHandler impl. Starts the backup relay worker; the request
+// origin is logged at warning level since this mutates server state.
+func RelayStartHandler(log *xlog.Log, proxy *proxy.Proxy) rest.HandlerFunc {
+	f := func(w rest.ResponseWriter, r *rest.Request) {
+		log.Warning("api.v1.relay.start[from:%v]", r.RemoteAddr)
+		backupRelay := proxy.Spanner().BackupRelay()
+		backupRelay.StartRelayWorker()
+	}
+	return f
+}
+
+// RelayStopHandler impl.
+func RelayStopHandler(log *xlog.Log, proxy *proxy.Proxy) rest.HandlerFunc {
+	// Stops the backup relay worker; the request origin is logged at
+	// warning level since this mutates server state.
+	f := func(w rest.ResponseWriter, r *rest.Request) {
+		log.Warning("api.v1.relay.stop[from:%v]", r.RemoteAddr)
+		backupRelay := proxy.Spanner().BackupRelay()
+		backupRelay.StopRelayWorker()
+	}
+	return f
+}
+
+// parallelTypeParams is the JSON body for the parallel-type endpoint.
+type parallelTypeParams struct {
+	Type int32 `json:"type"`
+}
+
+// RelayParallelTypeHandler impl. Sets the relay's parallel-replay type.
+func RelayParallelTypeHandler(log *xlog.Log, proxy *proxy.Proxy) rest.HandlerFunc {
+	f := func(w rest.ResponseWriter, r *rest.Request) {
+		relayParallelTypeHandler(log, proxy, w, r)
+	}
+	return f
+}
+
+func relayParallelTypeHandler(log *xlog.Log, proxy *proxy.Proxy, w rest.ResponseWriter, r *rest.Request) {
+	p := parallelTypeParams{}
+	err := r.DecodeJsonPayload(&p)
+	if err != nil {
+		log.Error("api.v1.relay.parallel.type.error:%+v", err)
+		rest.Error(w, err.Error(), http.StatusInternalServerError)
+		return
+	}
+
+	// NOTE(review): p.Type is passed through unvalidated; the relay backend
+	// is assumed to tolerate arbitrary type values — confirm with BackupRelay.
+	log.Warning("api.v1.relay.parallel.type.body:%+v", p)
+	backupRelay := proxy.Spanner().BackupRelay()
+	backupRelay.SetParallelType(p.Type)
+}
+
+// RelayResetHandler impl.
+func RelayResetHandler(log *xlog.Log, proxy *proxy.Proxy) rest.HandlerFunc { + f := func(w rest.ResponseWriter, r *rest.Request) { + relayResetHandler(log, proxy, w, r) + } + return f +} + +type resetParams struct { + GTID int64 `json:"gtid"` +} + +func relayResetHandler(log *xlog.Log, proxy *proxy.Proxy, w rest.ResponseWriter, r *rest.Request) { + backupRelay := proxy.Spanner().BackupRelay() + if backupRelay.RelayStatus() { + msg := "api.v1.relay.is.running.cant.reset.gitd.please.stop.first:relay stop" + log.Error(msg) + rest.Error(w, msg, http.StatusInternalServerError) + return + } + + p := resetParams{} + err := r.DecodeJsonPayload(&p) + if err != nil { + log.Error("api.v1.relay.reset.error:%+v", err) + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } + if p.GTID < 1514254947594569594 { + msg := fmt.Sprintf("api.v1.relay.gtid[%v].less.than[1514254947594569594].should.be.UTC().UnixNano()", p.GTID) + log.Error(msg) + rest.Error(w, msg, http.StatusInternalServerError) + } + + log.Warning("api.v1.relay.reset[from:%v].gtid:%+v", r.RemoteAddr, p) + backupRelay.ResetRelayWorker(p.GTID) +} + +// RelayWorkersHandler impl. 
+func RelayWorkersHandler(log *xlog.Log, proxy *proxy.Proxy) rest.HandlerFunc {
+	f := func(w rest.ResponseWriter, r *rest.Request) {
+		relayWorkersHandler(log, proxy, w, r)
+	}
+	return f
+}
+
+// workersParams is the JSON body for the relay-workers endpoint.
+type workersParams struct {
+	Workers int `json:"workers"`
+}
+
+// relayWorkersHandler sets the relay's max worker count; the value must
+// lie in [1, 1024], otherwise a 500 is returned and nothing changes.
+func relayWorkersHandler(log *xlog.Log, proxy *proxy.Proxy, w rest.ResponseWriter, r *rest.Request) {
+	p := workersParams{}
+	err := r.DecodeJsonPayload(&p)
+	if err != nil {
+		log.Error("api.v1.relay.workers.error:%+v", err)
+		rest.Error(w, err.Error(), http.StatusInternalServerError)
+		return
+	}
+	if p.Workers < 1 || p.Workers > 1024 {
+		msg := fmt.Sprintf("api.v1.relay.workers[%v].not.in[1, 1024]", p.Workers)
+		log.Error(msg)
+		rest.Error(w, msg, http.StatusInternalServerError)
+		// BUGFIX: the original fell through and still applied the
+		// out-of-range value via SetMaxWorkers after reporting the error.
+		return
+	}
+
+	backupRelay := proxy.Spanner().BackupRelay()
+	log.Warning("api.v1.relay.set.max.worker.from[%v].to[%v]", backupRelay.MaxWorkers(), p.Workers)
+	backupRelay.SetMaxWorkers(int32(p.Workers))
+}
diff --git a/src/ctl/v1/relay_test.go b/src/ctl/v1/relay_test.go
new file mode 100644
index 00000000..846c6056
--- /dev/null
+++ b/src/ctl/v1/relay_test.go
@@ -0,0 +1,352 @@
+/*
+ * Radon
+ *
+ * Copyright 2018 The Radon Authors.
+ * Code is licensed under the GPLv3.
+ *
+ */
+
+package v1
+
+import (
+	"proxy"
+	"strings"
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/ant0ine/go-json-rest/rest"
+	"github.com/ant0ine/go-json-rest/rest/test"
+	"github.com/stretchr/testify/assert"
+	"github.com/xelabs/go-mysqlstack/driver"
+	"github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes"
+	"github.com/xelabs/go-mysqlstack/xlog"
+)
+
+func TestCtlV1Relay(t *testing.T) {
+	log := xlog.NewStdLog(xlog.Level(xlog.ERROR))
+	fakedbs, proxy, cleanup := proxy.MockProxyWithBackup(log)
+	defer cleanup()
+	address := proxy.Address()
+
+	// fakedbs.
+	{
+		fakedbs.AddQueryPattern("create table .*", &sqltypes.Result{})
+		fakedbs.AddQueryPattern("insert .*", &sqltypes.Result{})
+	}
+
+	// create test table.
+ { + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "create table test.t1(id int, b int) partition by hash(id)" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } + + var wg sync.WaitGroup + { + n := 1000 + wg.Add(1) + go func() { + defer wg.Done() + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + for i := 0; i < n; i++ { + query := "insert into test.t1(id, b)values(1,1)" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } + }() + wg.Add(1) + go func() { + defer wg.Done() + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + for i := 0; i < n; i++ { + query := "insert into test.t1(id, b)values(2,2)" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } + }() + } + wg.Wait() + time.Sleep(time.Second) + + // Relay status. + { + api := rest.NewApi() + router, _ := rest.MakeRouter( + rest.Get("/v1/relay/status", RelayStatusHandler(log, proxy)), + ) + api.SetApp(router) + handler := api.MakeHandler() + + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("GET", "http://localhost/v1/relay/status", nil)) + recorded.CodeIs(200) + + got := recorded.Recorder.Body.String() + log.Debug(got) + assert.True(t, strings.Contains(got, "true")) + } + + // Stop relay worker. + { + api := rest.NewApi() + router, _ := rest.MakeRouter( + rest.Put("/v1/relay/stop", RelayStopHandler(log, proxy)), + ) + api.SetApp(router) + handler := api.MakeHandler() + + { + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("PUT", "http://localhost/v1/relay/stop", nil)) + recorded.CodeIs(200) + } + } + + // Relay status. 
+ { + api := rest.NewApi() + router, _ := rest.MakeRouter( + rest.Get("/v1/relay/status", RelayStatusHandler(log, proxy)), + ) + api.SetApp(router) + handler := api.MakeHandler() + + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("GET", "http://localhost/v1/relay/status", nil)) + recorded.CodeIs(200) + + got := recorded.Recorder.Body.String() + log.Info(got) + assert.True(t, strings.Contains(got, "false")) + } + // Start relay worker. + { + api := rest.NewApi() + router, _ := rest.MakeRouter( + rest.Put("/v1/relay/start", RelayStartHandler(log, proxy)), + ) + api.SetApp(router) + handler := api.MakeHandler() + + { + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("PUT", "http://localhost/v1/relay/start", nil)) + recorded.CodeIs(200) + } + } + + // Relay status. + { + api := rest.NewApi() + router, _ := rest.MakeRouter( + rest.Get("/v1/relay/status", RelayStatusHandler(log, proxy)), + ) + api.SetApp(router) + handler := api.MakeHandler() + + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("GET", "http://localhost/v1/relay/status", nil)) + recorded.CodeIs(200) + + got := recorded.Recorder.Body.String() + log.Info(got) + assert.True(t, strings.Contains(got, "true")) + } + + // Relay infos. + { + api := rest.NewApi() + router, _ := rest.MakeRouter( + rest.Get("/v1/relay/infos", RelayInfosHandler(log, proxy)), + ) + api.SetApp(router) + handler := api.MakeHandler() + + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("GET", "http://localhost/v1/relay/infos", nil)) + recorded.CodeIs(200) + + got := recorded.Recorder.Body.String() + log.Info(got) + assert.True(t, strings.Contains(got, "SecondBehinds")) + } + + // Relay set max workers. 
+ { + api := rest.NewApi() + router, _ := rest.MakeRouter( + rest.Post("/v1/relay/workers", RelayWorkersHandler(log, proxy)), + ) + api.SetApp(router) + handler := api.MakeHandler() + + // client + p := &workersParams{ + Workers: 1, + } + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("POST", "http://localhost/v1/relay/workers", p)) + recorded.CodeIs(200) + } + // Relay set max workers error. + { + api := rest.NewApi() + router, _ := rest.MakeRouter( + rest.Post("/v1/relay/workers", RelayWorkersHandler(log, proxy)), + ) + api.SetApp(router) + handler := api.MakeHandler() + + // client + p := &workersParams{ + Workers: 0, + } + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("POST", "http://localhost/v1/relay/workers", p)) + recorded.CodeIs(500) + } +} + +func TestCtlV1RelayReset(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.ERROR)) + fakedbs, proxy, cleanup := proxy.MockProxyWithBackup(log) + defer cleanup() + address := proxy.Address() + + // fakedbs. + { + fakedbs.AddQueryPattern("create table .*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("insert .*", &sqltypes.Result{}) + } + + // create test table. + { + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "create table test.t1(id int, b int) partition by hash(id)" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } + + time.Sleep(time.Second) + + // Stop relay worker. + { + api := rest.NewApi() + router, _ := rest.MakeRouter( + rest.Put("/v1/relay/stop", RelayStopHandler(log, proxy)), + ) + api.SetApp(router) + handler := api.MakeHandler() + + { + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("PUT", "http://localhost/v1/relay/stop", nil)) + recorded.CodeIs(200) + } + } + + // Relay reset GTID. 
+ { + api := rest.NewApi() + router, _ := rest.MakeRouter( + rest.Post("/v1/relay/reset", RelayResetHandler(log, proxy)), + ) + api.SetApp(router) + handler := api.MakeHandler() + + // client + p := &resetParams{ + GTID: time.Now().UTC().UnixNano(), + } + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("POST", "http://localhost/v1/relay/reset", p)) + recorded.CodeIs(200) + } + + // Relay reset GTID error. + { + api := rest.NewApi() + router, _ := rest.MakeRouter( + rest.Post("/v1/relay/reset", RelayResetHandler(log, proxy)), + ) + api.SetApp(router) + handler := api.MakeHandler() + + // client + p := &resetParams{ + GTID: 2017, + } + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("POST", "http://localhost/v1/relay/reset", p)) + recorded.CodeIs(500) + } + // Start relay worker. + { + api := rest.NewApi() + router, _ := rest.MakeRouter( + rest.Put("/v1/relay/start", RelayStartHandler(log, proxy)), + ) + api.SetApp(router) + handler := api.MakeHandler() + + { + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("PUT", "http://localhost/v1/relay/start", nil)) + recorded.CodeIs(200) + } + } + + // Relay reset GTID. + { + api := rest.NewApi() + router, _ := rest.MakeRouter( + rest.Post("/v1/relay/reset", RelayResetHandler(log, proxy)), + ) + api.SetApp(router) + handler := api.MakeHandler() + + // client + p := &resetParams{ + GTID: time.Now().UTC().UnixNano(), + } + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("POST", "http://localhost/v1/relay/reset", p)) + recorded.CodeIs(500) + } + +} + +func TestCtlV1RelayParallelType(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.ERROR)) + fakedbs, proxy, cleanup := proxy.MockProxyWithBackup(log) + defer cleanup() + address := proxy.Address() + + // fakedbs. + { + fakedbs.AddQueryPattern("create table .*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("insert .*", &sqltypes.Result{}) + } + + // create test table. 
+ { + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "create table test.t1(id int, b int) partition by hash(id)" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } + + time.Sleep(time.Second) + + // Enable. + { + api := rest.NewApi() + router, _ := rest.MakeRouter( + rest.Put("/v1/relay/paralleltype", RelayParallelTypeHandler(log, proxy)), + ) + api.SetApp(router) + handler := api.MakeHandler() + + for i := 0; i < 100; i++ { + p := ¶llelTypeParams{ + Type: int32(i % 5), + } + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("PUT", "http://localhost/v1/relay/paralleltype", p)) + recorded.CodeIs(200) + } + } +} diff --git a/src/ctl/v1/schemaz.go b/src/ctl/v1/schemaz.go new file mode 100644 index 00000000..7e02ab33 --- /dev/null +++ b/src/ctl/v1/schemaz.go @@ -0,0 +1,28 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package v1 + +import ( + "proxy" + + "github.com/ant0ine/go-json-rest/rest" + "github.com/xelabs/go-mysqlstack/xlog" +) + +// SchemazHandler impl. +func SchemazHandler(log *xlog.Log, proxy *proxy.Proxy) rest.HandlerFunc { + f := func(w rest.ResponseWriter, r *rest.Request) { + schemazHandler(log, proxy, w, r) + } + return f +} + +func schemazHandler(log *xlog.Log, proxy *proxy.Proxy, w rest.ResponseWriter, r *rest.Request) { + w.WriteJson(proxy.Router().Schemas) +} diff --git a/src/ctl/v1/schemaz_test.go b/src/ctl/v1/schemaz_test.go new file mode 100644 index 00000000..f5f66ec8 --- /dev/null +++ b/src/ctl/v1/schemaz_test.go @@ -0,0 +1,59 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. 
+ * + */ + +package v1 + +import ( + "proxy" + "strings" + "testing" + + "github.com/ant0ine/go-json-rest/rest" + "github.com/ant0ine/go-json-rest/rest/test" + "github.com/stretchr/testify/assert" + "github.com/xelabs/go-mysqlstack/driver" + "github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes" + "github.com/xelabs/go-mysqlstack/xlog" +) + +func TestCtlV1Schemaz(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedbs, proxy, cleanup := proxy.MockProxy(log) + defer cleanup() + address := proxy.Address() + + // fakedbs. + { + fakedbs.AddQueryPattern("create table .*", &sqltypes.Result{}) + } + // create test table. + { + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "create table test.t1(id int, b int) partition by hash(id)" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } + + { + api := rest.NewApi() + router, _ := rest.MakeRouter( + rest.Get("/v1/debug/schemaz", ConfigzHandler(log, proxy)), + ) + api.SetApp(router) + handler := api.MakeHandler() + + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("GET", "http://localhost/v1/debug/schemaz", nil)) + recorded.CodeIs(200) + + body := recorded.Recorder.Body.String() + log.Debug("----%s", body) + got := strings.Contains(body, "twopc-enable") + assert.True(t, got) + } +} diff --git a/src/ctl/v1/shard.go b/src/ctl/v1/shard.go new file mode 100644 index 00000000..b908b0d0 --- /dev/null +++ b/src/ctl/v1/shard.go @@ -0,0 +1,311 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package v1 + +import ( + "fmt" + "net/http" + "proxy" + "strconv" + "strings" + + "github.com/ant0ine/go-json-rest/rest" + "github.com/xelabs/go-mysqlstack/xlog" +) + +// ShardzHandler impl. 
+func ShardzHandler(log *xlog.Log, proxy *proxy.Proxy) rest.HandlerFunc { + f := func(w rest.ResponseWriter, r *rest.Request) { + shardzHandler(log, proxy, w, r) + } + return f +} + +func shardzHandler(log *xlog.Log, proxy *proxy.Proxy, w rest.ResponseWriter, r *rest.Request) { + router := proxy.Router() + rulez := router.Rules() + w.WriteJson(rulez) +} + +// ShardBalanceAdviceHandler impl. +func ShardBalanceAdviceHandler(log *xlog.Log, proxy *proxy.Proxy) rest.HandlerFunc { + f := func(w rest.ResponseWriter, r *rest.Request) { + shardBalanceAdviceHandler(log, proxy, w, r) + } + return f +} + +// shardBalanceAdviceHandler used to get the advice who will be transfered. +// The Find algothm as follows: +// 1. first to sync all 'from.databases' to 'to.databases' +// +// 2. find the max datasize backend and min datasize backend. +// 1.1 max-datasize - min.datasize > 1GB +// 1.2 transfer path is: max --> min +// +// 3. find the best table(advice-table) to tansfer: +// 2.1 max.datasize - advice-table-size > min.datasize + advice-table-size +// +// Returns: +// 1. Status:200, Body:null +// 2. Status:503 +// 3. Status:200, Body:JSON +func shardBalanceAdviceHandler(log *xlog.Log, proxy *proxy.Proxy, w rest.ResponseWriter, r *rest.Request) { + scatter := proxy.Scatter() + spanner := proxy.Spanner() + backends := scatter.Backends() + + type backendSize struct { + name string + address string + size float64 + user string + passwd string + } + + // 1.Find the max and min backend. 
+ var max, min backendSize + for _, backend := range backends { + query := "select round((sum(data_length) + sum(index_length)) / 1024/ 1024, 0) as SizeInMB from information_schema.tables" + qr, err := spanner.ExecuteOnThisBackend(backend, query) + if err != nil { + log.Error("api.v1.balance.advice.backend[%s].error:%+v", backend, err) + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + if len(qr.Rows) > 0 { + valStr := string(qr.Rows[0][0].Raw()) + datasize, err := strconv.ParseFloat(valStr, 64) + if err != nil { + log.Error("api.v1.balance.advice.parse.value[%s].error:%+v", valStr, err) + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + if datasize > max.size { + max.name = backend + max.size = datasize + } + + if min.size == 0 { + min.name = backend + min.size = datasize + } + if datasize < min.size { + min.name = backend + min.size = datasize + } + } + } + log.Warning("api.v1.balance.advice.max:[%+v], min:[%+v]", max, min) + + // 2. Try to sync all databases from max.databases to min.databases. 
+ query := "show databases" + qr, err := spanner.ExecuteOnThisBackend(max.name, query) + if err != nil { + log.Error("api.v1.balance.advice.show.databases.from[%+v].error:%+v", max, err) + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + var sysDatabases = map[string]bool{ + "sys": true, + "mysql": true, + "information_schema": true, + "performance_schema": true, + } + for _, row := range qr.Rows { + db := string(row[0].Raw()) + if _, isSystem := sysDatabases[strings.ToLower(db)]; !isSystem { + query1 := fmt.Sprintf("create database if not exists `%s`", db) + if _, err := spanner.ExecuteOnThisBackend(min.name, query1); err != nil { + log.Error("api.v1.balance.advice.create.database[%s].on[%+v].error:%+v", query1, min, err) + rest.Error(w, err.Error(), http.StatusInternalServerError) + } + log.Warning("api.v1.balance.advice.create.database[%s].on[%+v].done", query1, min) + } + } + log.Warning("api.v1.balance.advice.sync.database.done") + + // The differ must big than 256MB. + delta := float64(256) + differ := (max.size - min.size) + if differ < delta { + log.Warning("api.v1.balance.advice.return.nil.since.differ[%+vMB].less.than.%vMB", differ, delta) + w.WriteJson(nil) + return + } + + backendConfs := scatter.BackendConfigsClone() + for _, bconf := range backendConfs { + if bconf.Name == max.name { + max.address = bconf.Address + max.user = bconf.User + max.passwd = bconf.Password + } else if bconf.Name == min.name { + min.address = bconf.Address + min.user = bconf.User + min.passwd = bconf.Password + } + } + + // 3. Find the best table. 
+ query = "SELECT table_schema, table_name, ROUND((SUM(data_length+index_length)) / 1024/ 1024, 0) AS sizeMB FROM information_schema.TABLES GROUP BY table_name HAVING SUM(data_length + index_length)>10485760 ORDER BY (data_length + index_length) DESC" + qr, err = spanner.ExecuteOnThisBackend(max.name, query) + if err != nil { + log.Error("api.v1.balance.advice.get.max[%+v].tables.error:%+v", max, err) + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + var tableSize float64 + var database, table string + for _, row := range qr.Rows { + db := string(row[0].Raw()) + tbl := string(row[1].Raw()) + valStr := string(row[2].Raw()) + tblSize, err := strconv.ParseFloat(valStr, 64) + if err != nil { + log.Error("api.v1.balance.advice.get.tables.parse.value[%s].error:%+v", valStr, err) + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + // Make sure the table is small enough. + if (min.size + tblSize) < (max.size - tblSize) { + // Find the advice table. + database = db + table = tbl + tableSize = tblSize + break + } + } + + // No best. 
+ if database == "" || table == "" { + log.Warning("api.v1.balance.advice.return.nil.since.cant.find.the.best.table") + w.WriteJson(nil) + return + } + + type balanceAdvice struct { + From string `json:"from-address"` + FromDataSize float64 `json:"from-datasize"` + FromUser string `json:"from-user"` + FromPasswd string `json:"from-password"` + To string `json:"to-address"` + ToDataSize float64 `json:"to-datasize"` + ToUser string `json:"to-user"` + ToPasswd string `json:"to-password"` + Database string `json:"database"` + Table string `json:"table"` + TableSize float64 `json:"tablesize"` + } + + advice := balanceAdvice{ + From: max.address, + FromDataSize: max.size, + FromUser: max.user, + FromPasswd: max.passwd, + To: min.address, + ToDataSize: min.size, + ToUser: min.user, + ToPasswd: min.passwd, + Database: database, + Table: table, + TableSize: tableSize, + } + log.Warning("api.v1.balance.advice.return:%+v", advice) + w.WriteJson(advice) +} + +type ruleParams struct { + Database string `json:"database"` + Table string `json:"table"` + FromAddress string `json:"from-address"` + ToAddress string `json:"to-address"` +} + +// ShardRuleShiftHandler used to shift a partition rule to another backend. 
+func ShardRuleShiftHandler(log *xlog.Log, proxy *proxy.Proxy) rest.HandlerFunc { + f := func(w rest.ResponseWriter, r *rest.Request) { + shardRuleShiftHandler(log, proxy, w, r) + } + return f +} + +var sysDBs = []string{"information_schema", "mysql", "performance_schema", "sys"} + +func shardRuleShiftHandler(log *xlog.Log, proxy *proxy.Proxy, w rest.ResponseWriter, r *rest.Request) { + router := proxy.Router() + scatter := proxy.Scatter() + p := ruleParams{} + err := r.DecodeJsonPayload(&p) + if err != nil { + log.Error("api.v1.radon.shard.rule.parse.json.error:%+v", err) + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } + log.Warning("api.v1.radon.shard.rule[from:%v].request:%+v", r.RemoteAddr, p) + + if p.Database == "" || p.Table == "" { + rest.Error(w, "api.v1.shard.rule.request.database.or.table.is.null", http.StatusInternalServerError) + return + } + + for _, sysDB := range sysDBs { + if sysDB == strings.ToLower(p.Database) { + log.Error("api.v1.shard.rule.database[%s].is.system", p.Database) + rest.Error(w, "api.v1.shard.rule.database.can't.be.system.database", http.StatusInternalServerError) + return + } + } + + var fromBackend, toBackend string + backends := scatter.BackendConfigsClone() + for _, backend := range backends { + if backend.Address == p.FromAddress { + fromBackend = backend.Name + } else if backend.Address == p.ToAddress { + toBackend = backend.Name + } + } + + if fromBackend == "" || toBackend == "" { + log.Error("api.v1.shard.rule.fromBackend[%s].or.toBackend[%s].is.NULL", fromBackend, toBackend) + rest.Error(w, "api.v1.shard.rule.backend.NULL", http.StatusInternalServerError) + return + } + + if err := router.PartitionRuleShift(fromBackend, toBackend, p.Database, p.Table); err != nil { + log.Error("api.v1.shard.rule.PartitionRuleShift.error:%+v", err) + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } +} + +// ShardReLoadHandler impl. 
+func ShardReLoadHandler(log *xlog.Log, proxy *proxy.Proxy) rest.HandlerFunc { + f := func(w rest.ResponseWriter, r *rest.Request) { + shardReLoadHandler(log, proxy, w, r) + } + return f +} + +func shardReLoadHandler(log *xlog.Log, proxy *proxy.Proxy, w rest.ResponseWriter, r *rest.Request) { + router := proxy.Router() + log.Warning("api.shard.reload.prepare.from[%v]...", r.RemoteAddr) + if err := router.ReLoad(); err != nil { + log.Error("api.v1.shard.reload.error:%+v", err) + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } + log.Warning("api.shard.reload.done...") +} diff --git a/src/ctl/v1/shard_test.go b/src/ctl/v1/shard_test.go new file mode 100644 index 00000000..f758912b --- /dev/null +++ b/src/ctl/v1/shard_test.go @@ -0,0 +1,873 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package v1 + +import ( + "proxy" + "strings" + "testing" + + "github.com/ant0ine/go-json-rest/rest" + "github.com/ant0ine/go-json-rest/rest/test" + "github.com/stretchr/testify/assert" + "github.com/xelabs/go-mysqlstack/driver" + querypb "github.com/xelabs/go-mysqlstack/sqlparser/depends/query" + "github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes" + "github.com/xelabs/go-mysqlstack/xlog" +) + +func TestCtlV1Shardz(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedbs, proxy, cleanup := proxy.MockProxy(log) + defer cleanup() + address := proxy.Address() + + // fakedbs. + { + fakedbs.AddQueryPattern("create table .*", &sqltypes.Result{}) + } + + // create test table. 
+ { + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "create table test.t1(id int, b int) partition by hash(id)" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } + + { + api := rest.NewApi() + router, _ := rest.MakeRouter( + rest.Get("/v1/shard/shardz", ShardzHandler(log, proxy)), + ) + api.SetApp(router) + handler := api.MakeHandler() + + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("GET", "http://localhost/v1/shard/shardz", nil)) + recorded.CodeIs(200) + + want := "{\"Schemas\":[{\"DB\":\"test\",\"Tables\":[{\"Name\":\"t1\",\"ShardKey\":\"id\",\"Partition\":{\"Segments\":[{\"Table\":\"t1_0000\",\"Backend\":\"backend0\",\"Range\":{\"Start\":0,\"End\":128}},{\"Table\":\"t1_0001\",\"Backend\":\"backend0\",\"Range\":{\"Start\":128,\"End\":256}},{\"Table\":\"t1_0002\",\"Backend\":\"backend0\",\"Range\":{\"Start\":256,\"End\":384}},{\"Table\":\"t1_0003\",\"Backend\":\"backend0\",\"Range\":{\"Start\":384,\"End\":512}},{\"Table\":\"t1_0004\",\"Backend\":\"backend0\",\"Range\":{\"Start\":512,\"End\":640}},{\"Table\":\"t1_0005\",\"Backend\":\"backend0\",\"Range\":{\"Start\":640,\"End\":819}},{\"Table\":\"t1_0006\",\"Backend\":\"backend1\",\"Range\":{\"Start\":819,\"End\":947}},{\"Table\":\"t1_0007\",\"Backend\":\"backend1\",\"Range\":{\"Start\":947,\"End\":1075}},{\"Table\":\"t1_0008\",\"Backend\":\"backend1\",\"Range\":{\"Start\":1075,\"End\":1203}},{\"Table\":\"t1_0009\",\"Backend\":\"backend1\",\"Range\":{\"Start\":1203,\"End\":1331}},{\"Table\":\"t1_0010\",\"Backend\":\"backend1\",\"Range\":{\"Start\":1331,\"End\":1459}},{\"Table\":\"t1_0011\",\"Backend\":\"backend1\",\"Range\":{\"Start\":1459,\"End\":1638}},{\"Table\":\"t1_0012\",\"Backend\":\"backend2\",\"Range\":{\"Start\":1638,\"End\":1766}},{\"Table\":\"t1_0013\",\"Backend\":\"backend2\",\"Range\":{\"Start\":1766,\"End\":1894}},{\"Table\":\"t1_0014\",\"Backend\":\"backend2\",\"Range\":{\"Start\":1894,\"End\":2022}},{\"Table\":\"t1_0
015\",\"Backend\":\"backend2\",\"Range\":{\"Start\":2022,\"End\":2150}},{\"Table\":\"t1_0016\",\"Backend\":\"backend2\",\"Range\":{\"Start\":2150,\"End\":2278}},{\"Table\":\"t1_0017\",\"Backend\":\"backend2\",\"Range\":{\"Start\":2278,\"End\":2457}},{\"Table\":\"t1_0018\",\"Backend\":\"backend3\",\"Range\":{\"Start\":2457,\"End\":2585}},{\"Table\":\"t1_0019\",\"Backend\":\"backend3\",\"Range\":{\"Start\":2585,\"End\":2713}},{\"Table\":\"t1_0020\",\"Backend\":\"backend3\",\"Range\":{\"Start\":2713,\"End\":2841}},{\"Table\":\"t1_0021\",\"Backend\":\"backend3\",\"Range\":{\"Start\":2841,\"End\":2969}},{\"Table\":\"t1_0022\",\"Backend\":\"backend3\",\"Range\":{\"Start\":2969,\"End\":3097}},{\"Table\":\"t1_0023\",\"Backend\":\"backend3\",\"Range\":{\"Start\":3097,\"End\":3276}},{\"Table\":\"t1_0024\",\"Backend\":\"backend4\",\"Range\":{\"Start\":3276,\"End\":3404}},{\"Table\":\"t1_0025\",\"Backend\":\"backend4\",\"Range\":{\"Start\":3404,\"End\":3532}},{\"Table\":\"t1_0026\",\"Backend\":\"backend4\",\"Range\":{\"Start\":3532,\"End\":3660}},{\"Table\":\"t1_0027\",\"Backend\":\"backend4\",\"Range\":{\"Start\":3660,\"End\":3788}},{\"Table\":\"t1_0028\",\"Backend\":\"backend4\",\"Range\":{\"Start\":3788,\"End\":3916}},{\"Table\":\"t1_0029\",\"Backend\":\"backend4\",\"Range\":{\"Start\":3916,\"End\":4096}}]}}]}]}" + got := recorded.Recorder.Body.String() + log.Debug(got) + assert.Equal(t, want, got) + } +} + +func TestCtlV1ShardBalanceAdvice1(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedbs, proxy, cleanup := proxy.MockProxy(log) + defer cleanup() + + rdbs := &sqltypes.Result{ + Fields: []*querypb.Field{ + { + Name: "Databases", + Type: querypb.Type_VARCHAR, + }, + }, + Rows: [][]sqltypes.Value{ + { + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte("test")), + }, + { + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte("information_schema")), + }, + }, + } + + r10 := &sqltypes.Result{ + Fields: []*querypb.Field{ + { + Name: "SizeInMB", + Type: 
querypb.Type_DECIMAL, + }, + }, + Rows: [][]sqltypes.Value{ + { + sqltypes.MakeTrusted(querypb.Type_DECIMAL, []byte("8192")), + }, + }, + } + + r11 := &sqltypes.Result{ + Fields: []*querypb.Field{ + { + Name: "SizeInMB", + Type: querypb.Type_DECIMAL, + }, + }, + Rows: [][]sqltypes.Value{ + { + sqltypes.MakeTrusted(querypb.Type_DECIMAL, []byte("3072")), + }, + }, + } + + r2 := &sqltypes.Result{ + Fields: []*querypb.Field{ + { + Name: "table_schema", + Type: querypb.Type_VARCHAR, + }, + { + Name: "table_name", + Type: querypb.Type_VARCHAR, + }, + { + Name: "sizeMB", + Type: querypb.Type_DECIMAL, + }, + }, + Rows: [][]sqltypes.Value{ + { + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte("test")), + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte("t1_00001")), + sqltypes.MakeTrusted(querypb.Type_DECIMAL, []byte("6144")), + }, + { + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte("test")), + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte("t1_00002")), + sqltypes.MakeTrusted(querypb.Type_DECIMAL, []byte("2048")), + }, + }, + } + + // fakedbs. 
+ { + fakedbs.AddQuery("show databases", rdbs) + fakedbs.AddQuery("create database if not exists `test`", &sqltypes.Result{}) + fakedbs.AddQuerys("select round((sum(data_length) + sum(index_length)) / 1024/ 1024, 0) as sizeinmb from information_schema.tables", r10, r11) + fakedbs.AddQuery("SELECT table_schema, table_name, ROUND((SUM(data_length+index_length)) / 1024/ 1024, 0) AS sizeMB FROM information_schema.TABLES GROUP BY table_name HAVING SUM(data_length + index_length)>10485760 ORDER BY (data_length + index_length) DESC", r2) + } + + { + api := rest.NewApi() + router, _ := rest.MakeRouter( + rest.Get("/v1/shard/balanceadvice", ShardBalanceAdviceHandler(log, proxy)), + ) + api.SetApp(router) + handler := api.MakeHandler() + + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("GET", "http://localhost/v1/shard/balanceadvice", nil)) + recorded.CodeIs(200) + + got := recorded.Recorder.Body.String() + log.Debug(got) + assert.True(t, strings.Contains(got, `"to-datasize":3072,"to-user":"mock","to-password":"pwd","database":"test","table":"t1_00002","tablesize":2048`)) + } +} + +func TestCtlV1ShardBalanceAdviceNoBestDifferTooSmall(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedbs, proxy, cleanup := proxy.MockProxy(log) + defer cleanup() + + rdbs := &sqltypes.Result{ + Fields: []*querypb.Field{ + { + Name: "Databases", + Type: querypb.Type_VARCHAR, + }, + }, + Rows: [][]sqltypes.Value{ + { + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte("test")), + }, + { + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte("information_schema")), + }, + }, + } + + r10 := &sqltypes.Result{ + Fields: []*querypb.Field{ + { + Name: "SizeInMB", + Type: querypb.Type_DECIMAL, + }, + }, + Rows: [][]sqltypes.Value{ + { + sqltypes.MakeTrusted(querypb.Type_DECIMAL, []byte("0")), + }, + }, + } + + r11 := &sqltypes.Result{ + Fields: []*querypb.Field{ + { + Name: "SizeInMB", + Type: querypb.Type_DECIMAL, + }, + }, + Rows: [][]sqltypes.Value{ + { + 
sqltypes.MakeTrusted(querypb.Type_DECIMAL, []byte("255")), + }, + }, + } + + // fakedbs. + { + fakedbs.AddQuery("show databases", rdbs) + fakedbs.AddQuery("create database if not exists `test`", &sqltypes.Result{}) + fakedbs.AddQuerys("select round((sum(data_length) + sum(index_length)) / 1024/ 1024, 0) as sizeinmb from information_schema.tables", r10, r11) + } + + { + api := rest.NewApi() + router, _ := rest.MakeRouter( + rest.Get("/v1/shard/balanceadvice", ShardBalanceAdviceHandler(log, proxy)), + ) + api.SetApp(router) + handler := api.MakeHandler() + + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("GET", "http://localhost/v1/shard/balanceadvice", nil)) + recorded.CodeIs(200) + + got := recorded.Recorder.Body.String() + assert.Equal(t, "null", got) + } +} + +func TestCtlV1ShardBalanceAdviceNoBest(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedbs, proxy, cleanup := proxy.MockProxy(log) + defer cleanup() + + rdbs := &sqltypes.Result{ + Fields: []*querypb.Field{ + { + Name: "Databases", + Type: querypb.Type_VARCHAR, + }, + }, + Rows: [][]sqltypes.Value{ + { + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte("test")), + }, + { + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte("information_schema")), + }, + }, + } + + r10 := &sqltypes.Result{ + Fields: []*querypb.Field{ + { + Name: "SizeInMB", + Type: querypb.Type_DECIMAL, + }, + }, + Rows: [][]sqltypes.Value{ + { + sqltypes.MakeTrusted(querypb.Type_DECIMAL, []byte("8192")), + }, + }, + } + + r11 := &sqltypes.Result{ + Fields: []*querypb.Field{ + { + Name: "SizeInMB", + Type: querypb.Type_DECIMAL, + }, + }, + Rows: [][]sqltypes.Value{ + { + sqltypes.MakeTrusted(querypb.Type_DECIMAL, []byte("4096")), + }, + }, + } + + r2 := &sqltypes.Result{ + Fields: []*querypb.Field{ + { + Name: "table_schema", + Type: querypb.Type_VARCHAR, + }, + { + Name: "table_name", + Type: querypb.Type_VARCHAR, + }, + { + Name: "sizeMB", + Type: querypb.Type_DECIMAL, + }, + }, + Rows: 
[][]sqltypes.Value{ + { + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte("test")), + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte("t1_00001")), + sqltypes.MakeTrusted(querypb.Type_DECIMAL, []byte("6144")), + }, + { + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte("test")), + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte("t1_00002")), + sqltypes.MakeTrusted(querypb.Type_DECIMAL, []byte("2048")), + }, + }, + } + + // fakedbs. + { + fakedbs.AddQuery("show databases", rdbs) + fakedbs.AddQuery("create database if not exists `test`", &sqltypes.Result{}) + fakedbs.AddQuerys("select round((sum(data_length) + sum(index_length)) / 1024/ 1024, 0) as sizeinmb from information_schema.tables", r10, r11) + fakedbs.AddQuery("SELECT table_schema, table_name, ROUND((SUM(data_length+index_length)) / 1024/ 1024, 0) AS sizeMB FROM information_schema.TABLES GROUP BY table_name HAVING SUM(data_length + index_length)>10485760 ORDER BY (data_length + index_length) DESC", r2) + } + + { + api := rest.NewApi() + router, _ := rest.MakeRouter( + rest.Get("/v1/shard/balanceadvice", ShardBalanceAdviceHandler(log, proxy)), + ) + api.SetApp(router) + handler := api.MakeHandler() + + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("GET", "http://localhost/v1/shard/balanceadvice", nil)) + recorded.CodeIs(200) + + got := recorded.Recorder.Body.String() + assert.Equal(t, "null", got) + } +} + +func TestCtlV1ShardRuleShift(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedbs, proxy, cleanup := proxy.MockProxy(log) + defer cleanup() + address := proxy.Address() + scatter := proxy.Scatter() + routei := proxy.Router() + + // fakedbs. + { + fakedbs.AddQueryPattern("create table .*", &sqltypes.Result{}) + } + + // create test table. 
+ { + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "create table test.t1(id int, b int) partition by hash(id)" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } + + { + api := rest.NewApi() + router, _ := rest.MakeRouter( + rest.Post("/v1/shard/shift", ShardRuleShiftHandler(log, proxy)), + ) + api.SetApp(router) + handler := api.MakeHandler() + + var from, to string + backends := scatter.BackendConfigsClone() + for _, backend := range backends { + if backend.Name == "backend0" { + from = backend.Address + } else if backend.Name == "backend1" { + to = backend.Address + } + } + + p := &ruleParams{ + Database: "test", + Table: "t1_0000", + FromAddress: from, + ToAddress: to, + } + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("POST", "http://localhost/v1/shard/shift", p)) + recorded.CodeIs(200) + want := `{ + "Schemas": { + "test": { + "DB": "test", + "Tables": { + "t1": { + "Name": "t1", + "ShardKey": "id", + "Partition": { + "Segments": [ + { + "Table": "t1_0000", + "Backend": "backend1", + "Range": { + "Start": 0, + "End": 128 + } + }, + { + "Table": "t1_0001", + "Backend": "backend0", + "Range": { + "Start": 128, + "End": 256 + } + }, + { + "Table": "t1_0002", + "Backend": "backend0", + "Range": { + "Start": 256, + "End": 384 + } + }, + { + "Table": "t1_0003", + "Backend": "backend0", + "Range": { + "Start": 384, + "End": 512 + } + }, + { + "Table": "t1_0004", + "Backend": "backend0", + "Range": { + "Start": 512, + "End": 640 + } + }, + { + "Table": "t1_0005", + "Backend": "backend0", + "Range": { + "Start": 640, + "End": 819 + } + }, + { + "Table": "t1_0006", + "Backend": "backend1", + "Range": { + "Start": 819, + "End": 947 + } + }, + { + "Table": "t1_0007", + "Backend": "backend1", + "Range": { + "Start": 947, + "End": 1075 + } + }, + { + "Table": "t1_0008", + "Backend": "backend1", + "Range": { + "Start": 1075, + "End": 1203 + } + }, + { + "Table": "t1_0009", + "Backend": 
"backend1", + "Range": { + "Start": 1203, + "End": 1331 + } + }, + { + "Table": "t1_0010", + "Backend": "backend1", + "Range": { + "Start": 1331, + "End": 1459 + } + }, + { + "Table": "t1_0011", + "Backend": "backend1", + "Range": { + "Start": 1459, + "End": 1638 + } + }, + { + "Table": "t1_0012", + "Backend": "backend2", + "Range": { + "Start": 1638, + "End": 1766 + } + }, + { + "Table": "t1_0013", + "Backend": "backend2", + "Range": { + "Start": 1766, + "End": 1894 + } + }, + { + "Table": "t1_0014", + "Backend": "backend2", + "Range": { + "Start": 1894, + "End": 2022 + } + }, + { + "Table": "t1_0015", + "Backend": "backend2", + "Range": { + "Start": 2022, + "End": 2150 + } + }, + { + "Table": "t1_0016", + "Backend": "backend2", + "Range": { + "Start": 2150, + "End": 2278 + } + }, + { + "Table": "t1_0017", + "Backend": "backend2", + "Range": { + "Start": 2278, + "End": 2457 + } + }, + { + "Table": "t1_0018", + "Backend": "backend3", + "Range": { + "Start": 2457, + "End": 2585 + } + }, + { + "Table": "t1_0019", + "Backend": "backend3", + "Range": { + "Start": 2585, + "End": 2713 + } + }, + { + "Table": "t1_0020", + "Backend": "backend3", + "Range": { + "Start": 2713, + "End": 2841 + } + }, + { + "Table": "t1_0021", + "Backend": "backend3", + "Range": { + "Start": 2841, + "End": 2969 + } + }, + { + "Table": "t1_0022", + "Backend": "backend3", + "Range": { + "Start": 2969, + "End": 3097 + } + }, + { + "Table": "t1_0023", + "Backend": "backend3", + "Range": { + "Start": 3097, + "End": 3276 + } + }, + { + "Table": "t1_0024", + "Backend": "backend4", + "Range": { + "Start": 3276, + "End": 3404 + } + }, + { + "Table": "t1_0025", + "Backend": "backend4", + "Range": { + "Start": 3404, + "End": 3532 + } + }, + { + "Table": "t1_0026", + "Backend": "backend4", + "Range": { + "Start": 3532, + "End": 3660 + } + }, + { + "Table": "t1_0027", + "Backend": "backend4", + "Range": { + "Start": 3660, + "End": 3788 + } + }, + { + "Table": "t1_0028", + "Backend": "backend4", + "Range": 
{ + "Start": 3788, + "End": 3916 + } + }, + { + "Table": "t1_0029", + "Backend": "backend4", + "Range": { + "Start": 3916, + "End": 4096 + } + } + ] + } + } + } + } + } +}` + got := routei.JSON() + assert.Equal(t, want, got) + } +} + +func TestCtlV1ShardRuleShiftError(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedbs, proxy, cleanup := proxy.MockProxy(log) + defer cleanup() + address := proxy.Address() + scatter := proxy.Scatter() + + // fakedbs. + { + fakedbs.AddQueryPattern("create table .*", &sqltypes.Result{}) + } + + // create test table. + { + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "create table test.t1(id int, b int) partition by hash(id)" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } + + // database is NULL. + { + api := rest.NewApi() + router, _ := rest.MakeRouter( + rest.Post("/v1/shard/shift", ShardRuleShiftHandler(log, proxy)), + ) + api.SetApp(router) + handler := api.MakeHandler() + + p := &ruleParams{ + Database: "", + Table: "t1_0000", + FromAddress: "", + ToAddress: "", + } + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("POST", "http://localhost/v1/shard/shift", p)) + recorded.CodeIs(500) + + want := "{\"Error\":\"api.v1.shard.rule.request.database.or.table.is.null\"}" + got := recorded.Recorder.Body.String() + assert.Equal(t, want, got) + } + + // database is system. 
+ { + api := rest.NewApi() + router, _ := rest.MakeRouter( + rest.Post("/v1/shard/shift", ShardRuleShiftHandler(log, proxy)), + ) + api.SetApp(router) + handler := api.MakeHandler() + + p := &ruleParams{ + Database: "mysql", + Table: "t1_0000", + FromAddress: "", + ToAddress: "", + } + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("POST", "http://localhost/v1/shard/shift", p)) + recorded.CodeIs(500) + + want := "{\"Error\":\"api.v1.shard.rule.database.can't.be.system.database\"}" + got := recorded.Recorder.Body.String() + assert.Equal(t, want, got) + } + + // from is NULL. + { + api := rest.NewApi() + router, _ := rest.MakeRouter( + rest.Post("/v1/shard/shift", ShardRuleShiftHandler(log, proxy)), + ) + api.SetApp(router) + handler := api.MakeHandler() + + p := &ruleParams{ + Database: "test", + Table: "t1_0000", + FromAddress: "", + ToAddress: "", + } + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("POST", "http://localhost/v1/shard/shift", p)) + recorded.CodeIs(500) + + want := "{\"Error\":\"api.v1.shard.rule.backend.NULL\"}" + got := recorded.Recorder.Body.String() + assert.Equal(t, want, got) + } + + // from equals to. + { + api := rest.NewApi() + router, _ := rest.MakeRouter( + rest.Post("/v1/shard/shift", ShardRuleShiftHandler(log, proxy)), + ) + api.SetApp(router) + handler := api.MakeHandler() + + var from string + backends := scatter.BackendConfigsClone() + for _, backend := range backends { + if backend.Name == "backend0" { + from = backend.Address + break + } + } + + p := &ruleParams{ + Database: "test", + Table: "t1_0000", + FromAddress: from, + ToAddress: from, + } + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("POST", "http://localhost/v1/shard/shift", p)) + recorded.CodeIs(500) + + want := "{\"Error\":\"api.v1.shard.rule.backend.NULL\"}" + got := recorded.Recorder.Body.String() + assert.Equal(t, want, got) + } + + // Tables cant find. 
+ { + api := rest.NewApi() + router, _ := rest.MakeRouter( + rest.Post("/v1/shard/shift", ShardRuleShiftHandler(log, proxy)), + ) + api.SetApp(router) + handler := api.MakeHandler() + + var from, to string + backends := scatter.BackendConfigsClone() + for _, backend := range backends { + if backend.Name == "backend0" { + from = backend.Address + } else if backend.Name == "backend1" { + to = backend.Address + } + } + + p := &ruleParams{ + Database: "test", + Table: "t1_000x", + FromAddress: from, + ToAddress: to, + } + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("POST", "http://localhost/v1/shard/shift", p)) + recorded.CodeIs(500) + + want := "{\"Error\":\"router.rule.change.cant.found.backend[backend0]+table:[t1_000x]\"}" + got := recorded.Recorder.Body.String() + assert.Equal(t, want, got) + } +} + +func TestCtlV1ShardReLoad(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedbs, proxy, cleanup := proxy.MockProxy(log) + defer cleanup() + address := proxy.Address() + + // fakedbs. + { + fakedbs.AddQueryPattern("create table .*", &sqltypes.Result{}) + } + + // create test table. + { + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "create table test.t1(id int, b int) partition by hash(id)" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } + + { + api := rest.NewApi() + router, _ := rest.MakeRouter( + rest.Post("/v1/shard/reload", ShardReLoadHandler(log, proxy)), + ) + api.SetApp(router) + handler := api.MakeHandler() + + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("POST", "http://localhost/v1/shard/reload", nil)) + recorded.CodeIs(200) + } +} + +func TestCtlV1ShardReLoadError(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedbs, proxy, cleanup := proxy.MockProxy(log) + defer cleanup() + address := proxy.Address() + + // fakedbs. + { + fakedbs.AddQueryPattern("create table .*", &sqltypes.Result{}) + } + + // create test table. 
+ { + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "create table test.t1(id int, b int) partition by hash(id)" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } + + { + api := rest.NewApi() + router, _ := rest.MakeRouter( + rest.Post("/v1/shard/reload", ShardReLoadHandler(log, proxy)), + ) + api.SetApp(router) + handler := api.MakeHandler() + + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("GET", "http://localhost/v1/shard/reload", nil)) + recorded.CodeIs(405) + } +} diff --git a/src/ctl/v1/txnz.go b/src/ctl/v1/txnz.go new file mode 100644 index 00000000..706d9fcc --- /dev/null +++ b/src/ctl/v1/txnz.go @@ -0,0 +1,61 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package v1 + +import ( + "proxy" + "strconv" + "time" + + "github.com/ant0ine/go-json-rest/rest" + "github.com/xelabs/go-mysqlstack/xlog" +) + +// TxnzHandler impl. +func TxnzHandler(log *xlog.Log, proxy *proxy.Proxy) rest.HandlerFunc { + f := func(w rest.ResponseWriter, r *rest.Request) { + txnzHandler(log, proxy, w, r) + } + return f +} + +func txnzHandler(log *xlog.Log, proxy *proxy.Proxy, w rest.ResponseWriter, r *rest.Request) { + type txn struct { + TxnID uint64 `json:"txnid"` + Start time.Time `json:"start"` + Duration time.Duration `json:"duration"` + State string `json:"state"` + XaState string `json:"xa-state"` + Color string `json:"color"` + } + + limit := 100 + if v, err := strconv.Atoi(r.PathParam("limit")); err == nil { + limit = v + } + + var rsp []txn + scatter := proxy.Scatter() + rows := scatter.Txnz().GetTxnzRows() + for i, row := range rows { + if i >= limit { + break + } + r := txn{ + TxnID: row.TxnID, + Start: row.Start, + Duration: row.Duration, + State: row.State, + XaState: row.XaState, + Color: row.Color, + } + rsp = append(rsp, r) + } + w.WriteJson(rsp) +} diff --git a/src/ctl/v1/txnz_test.go b/src/ctl/v1/txnz_test.go new file mode 
100644 index 00000000..9f629fc7 --- /dev/null +++ b/src/ctl/v1/txnz_test.go @@ -0,0 +1,87 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package v1 + +import ( + "proxy" + "strings" + "sync" + "testing" + "time" + + "github.com/ant0ine/go-json-rest/rest" + "github.com/ant0ine/go-json-rest/rest/test" + "github.com/stretchr/testify/assert" + "github.com/xelabs/go-mysqlstack/driver" + "github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes" + "github.com/xelabs/go-mysqlstack/xlog" +) + +func TestCtlV1Txnz(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedbs, proxy, cleanup := proxy.MockProxy(log) + defer cleanup() + + address := proxy.Address() + + // fakedbs. + { + fakedbs.AddQueryPattern("create table .*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("select .*", &sqltypes.Result{}) + fakedbs.AddQueryDelay("select * from test.t1_0000 as t1", &sqltypes.Result{}, 1000) + } + + // create test table. + { + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "create table test.t1(id int, b int) partition by hash(id)" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } + + var wg sync.WaitGroup + { + wg.Add(2) + go func() { + defer wg.Done() + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "select * from test.t1" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + }() + go func() { + defer wg.Done() + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "select * from test.t1" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + }() + } + time.Sleep(time.Millisecond * 100) + + { + api := rest.NewApi() + router, _ := rest.MakeRouter( + rest.Get("/v1/debug/txnz/:limit", TxnzHandler(log, proxy)), + ) + api.SetApp(router) + handler := api.MakeHandler() + + recorded := test.RunRequest(t, handler, 
test.MakeSimpleRequest("GET", "http://localhost/v1/debug/txnz/3", nil)) + recorded.CodeIs(200) + + got := recorded.Recorder.Body.String() + log.Debug(got) + assert.True(t, strings.Contains(got, "txnStateExecutingNormal")) + } + wg.Wait() +} diff --git a/src/ctl/v1/user.go b/src/ctl/v1/user.go new file mode 100644 index 00000000..e0e2de42 --- /dev/null +++ b/src/ctl/v1/user.go @@ -0,0 +1,101 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package v1 + +import ( + "fmt" + "net/http" + "proxy" + + "github.com/ant0ine/go-json-rest/rest" + "github.com/xelabs/go-mysqlstack/xlog" +) + +type userParams struct { + User string `json:"user"` + Password string `json:"password"` +} + +// CreateUserHandler impl. +func CreateUserHandler(log *xlog.Log, proxy *proxy.Proxy) rest.HandlerFunc { + f := func(w rest.ResponseWriter, r *rest.Request) { + createUserHandler(log, proxy, w, r) + } + return f +} + +func createUserHandler(log *xlog.Log, proxy *proxy.Proxy, w rest.ResponseWriter, r *rest.Request) { + spanner := proxy.Spanner() + p := userParams{} + err := r.DecodeJsonPayload(&p) + if err != nil { + log.Error("api.v1.create.user.error:%+v", err) + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } + log.Warning("api.v1.create.user[from:%v].[%v]", r.RemoteAddr, p) + + query := fmt.Sprintf("GRANT SELECT ON *.* TO '%s'@'localhost' IDENTIFIED BY '%s'", p.User, p.Password) + if _, err := spanner.ExecuteScatter(query); err != nil { + log.Error("api.v1.create.user[%+v].error:%+v", p, err) + rest.Error(w, err.Error(), http.StatusServiceUnavailable) + } +} + +// AlterUserHandler impl. 
+func AlterUserHandler(log *xlog.Log, proxy *proxy.Proxy) rest.HandlerFunc { + f := func(w rest.ResponseWriter, r *rest.Request) { + alterUserHandler(log, proxy, w, r) + } + return f +} + +func alterUserHandler(log *xlog.Log, proxy *proxy.Proxy, w rest.ResponseWriter, r *rest.Request) { + spanner := proxy.Spanner() + p := userParams{} + err := r.DecodeJsonPayload(&p) + if err != nil { + log.Error("api.v1.alter.user.error:%+v", err) + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } + log.Warning("api.v1.alter.user[from:%v].[%v]", r.RemoteAddr, p) + + query := fmt.Sprintf("ALTER USER '%s'@'localhost' IDENTIFIED BY '%s'", p.User, p.Password) + if _, err := spanner.ExecuteScatter(query); err != nil { + log.Error("api.v1.alter.user[%+v].error:%+v", p, err) + rest.Error(w, err.Error(), http.StatusServiceUnavailable) + } +} + +// DropUserHandler impl. +func DropUserHandler(log *xlog.Log, proxy *proxy.Proxy) rest.HandlerFunc { + f := func(w rest.ResponseWriter, r *rest.Request) { + dropUserHandler(log, proxy, w, r) + } + return f +} + +func dropUserHandler(log *xlog.Log, proxy *proxy.Proxy, w rest.ResponseWriter, r *rest.Request) { + spanner := proxy.Spanner() + p := userParams{} + err := r.DecodeJsonPayload(&p) + if err != nil { + log.Error("api.v1.drop.user.error:%+v", err) + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } + log.Warning("api.v1.drop.user[from:%v].[%v]", r.RemoteAddr, p) + + query := fmt.Sprintf("DROP USER '%s'@'localhost'", p.User) + if _, err := spanner.ExecuteScatter(query); err != nil { + log.Error("api.v1.drop.user[%+v].error:%+v", p.User, err) + rest.Error(w, err.Error(), http.StatusServiceUnavailable) + } +} diff --git a/src/ctl/v1/user_test.go b/src/ctl/v1/user_test.go new file mode 100644 index 00000000..0cb11059 --- /dev/null +++ b/src/ctl/v1/user_test.go @@ -0,0 +1,199 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. 
+ * + */ + +package v1 + +import ( + "errors" + "proxy" + "testing" + + "github.com/ant0ine/go-json-rest/rest" + "github.com/ant0ine/go-json-rest/rest/test" + "github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes" + "github.com/xelabs/go-mysqlstack/xlog" +) + +func TestCtlV1CreateUser(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedbs, proxy, cleanup := proxy.MockProxy(log) + defer cleanup() + + // fakedbs. + { + fakedbs.AddQuery("GRANT SELECT ON *.* TO 'mock'@'localhost' IDENTIFIED BY 'pwd'", &sqltypes.Result{}) + } + + // server + api := rest.NewApi() + router, _ := rest.MakeRouter( + rest.Post("/v1/user/add", CreateUserHandler(log, proxy)), + ) + api.SetApp(router) + handler := api.MakeHandler() + + { + p := &userParams{ + User: "mock", + Password: "pwd", + } + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("POST", "http://localhost/v1/user/add", p)) + recorded.CodeIs(200) + } +} + +func TestCtlV1CreateUserError(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedbs, proxy, cleanup := proxy.MockProxy(log) + defer cleanup() + + // fakedbs. + { + fakedbs.AddQueryError("GRANT SELECT ON *.* TO 'mock'@'localhost' IDENTIFIED BY 'pwd'", errors.New("mock.create.user.error")) + } + + // server + api := rest.NewApi() + router, _ := rest.MakeRouter( + rest.Post("/v1/user/add", CreateUserHandler(log, proxy)), + ) + api.SetApp(router) + handler := api.MakeHandler() + + { + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("POST", "http://localhost/v1/user/add", nil)) + recorded.CodeIs(500) + } + + { + p := &userParams{ + User: "mock", + Password: "pwd", + } + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("POST", "http://localhost/v1/user/add", p)) + recorded.CodeIs(503) + } +} + +func TestCtlV1AlterUser(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedbs, proxy, cleanup := proxy.MockProxy(log) + defer cleanup() + + // fakedbs. 
+ { + fakedbs.AddQuery("ALTER USER 'mock'@'localhost' IDENTIFIED BY 'pwd'", &sqltypes.Result{}) + } + + // server + api := rest.NewApi() + router, _ := rest.MakeRouter( + rest.Post("/v1/user/update", AlterUserHandler(log, proxy)), + ) + api.SetApp(router) + handler := api.MakeHandler() + + { + p := &userParams{ + User: "mock", + Password: "pwd", + } + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("POST", "http://localhost/v1/user/update", p)) + recorded.CodeIs(200) + } +} + +func TestCtlV1AlterUserError(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedbs, proxy, cleanup := proxy.MockProxy(log) + defer cleanup() + + // fakedbs. + { + fakedbs.AddQueryError("ALTER USER 'mock'@'localhost' IDENTIFIED BY 'pwd'", errors.New("mock.alter.user.error")) + } + + // server + api := rest.NewApi() + router, _ := rest.MakeRouter( + rest.Post("/v1/user/update", AlterUserHandler(log, proxy)), + ) + api.SetApp(router) + handler := api.MakeHandler() + + // 500. + { + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("POST", "http://localhost/v1/user/update", nil)) + recorded.CodeIs(500) + } + + // 503. + { + p := &userParams{ + User: "mock", + Password: "pwd", + } + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("POST", "http://localhost/v1/user/update", p)) + recorded.CodeIs(503) + } +} + +func TestCtlV1DropUser(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedbs, proxy, cleanup := proxy.MockProxy(log) + defer cleanup() + + // fakedbs. 
+ { + fakedbs.AddQueryPattern("DROP USER 'mock'@'localhost'", &sqltypes.Result{}) + } + + // server + api := rest.NewApi() + router, _ := rest.MakeRouter( + rest.Post("/v1/user/remove", DropUserHandler(log, proxy)), + ) + api.SetApp(router) + handler := api.MakeHandler() + + { + p := &userParams{ + User: "mock", + } + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("POST", "http://localhost/v1/user/remove", p)) + recorded.CodeIs(200) + } +} + +func TestCtlV1DropError(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedbs, proxy, cleanup := proxy.MockProxy(log) + defer cleanup() + + // fakedbs. + { + fakedbs.AddQueryErrorPattern("DROP .*", errors.New("mock.drop.user.error")) + } + + // server + api := rest.NewApi() + router, _ := rest.MakeRouter( + rest.Post("/v1/user/remove", DropUserHandler(log, proxy)), + ) + api.SetApp(router) + handler := api.MakeHandler() + + // 503. + { + p := &userParams{ + User: "mock", + } + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("POST", "http://localhost/v1/user/remove", p)) + recorded.CodeIs(503) + } +} diff --git a/src/executor/aggregate_executor.go b/src/executor/aggregate_executor.go new file mode 100644 index 00000000..7249be03 --- /dev/null +++ b/src/executor/aggregate_executor.go @@ -0,0 +1,139 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package executor + +import ( + "planner" + "xcontext" + + "github.com/xelabs/go-mysqlstack/sqlparser/depends/hack" + "github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes" + "github.com/xelabs/go-mysqlstack/xlog" +) + +var ( + _ Executor = &AggregateExecutor{} +) + +// AggregateExecutor represents aggregate executor. +// Including: COUNT/MAX/MIN/SUM/AVG/GROUPBY. +type AggregateExecutor struct { + log *xlog.Log + plan planner.Plan +} + +// NewAggregateExecutor creates new AggregateExecutor. 
+func NewAggregateExecutor(log *xlog.Log, plan planner.Plan) *AggregateExecutor { + return &AggregateExecutor{ + log: log, + plan: plan, + } +} + +// Execute used to execute the executor. +func (executor *AggregateExecutor) Execute(ctx *xcontext.ResultContext) error { + rs := ctx.Results + executor.aggregate(rs) + return nil +} + +// Aggregate used to do rows-aggregator(COUNT/SUM/MIN/MAX/AVG) and grouped them into group-by fields. +func (executor *AggregateExecutor) aggregate(result *sqltypes.Result) { + var deIdxs []int + plan := executor.plan.(*planner.AggregatePlan) + if plan.Empty() { + return + } + aggrs := plan.NormalAggregators() + aggrLen := len(aggrs) + groupAggrs := plan.GroupAggregators() + + groups := make(map[string][]sqltypes.Value) + for _, row1 := range result.Rows { + keySlice := []byte{0x01} + for _, v := range groupAggrs { + keySlice = append(keySlice, row1[v.Index].Raw()...) + keySlice = append(keySlice, 0x02) + } + key := hack.String(keySlice) + if row2, ok := groups[key]; !ok { + groups[key] = row1 + } else { + if aggrLen > 0 { + groups[key] = operator(aggrs, row1)(row2) + } + } + } + + // Handle the avg operator and rebuild the results. + i := 0 + result.Rows = make([][]sqltypes.Value, len(groups)) + for _, v := range groups { + for _, aggr := range aggrs { + switch aggr.Type { + case planner.AggrTypeAvg: + v1, v2 := v[aggr.Index+1], v[aggr.Index+2] + v[aggr.Index] = sqltypes.Operator(v1, v2, sqltypes.DivFn) + deIdxs = append(deIdxs, aggr.Index+1, aggr.Index+2) + } + } + result.Rows[i] = v + i++ + } + + // sort by the first field. + if len(groups) > 0 { + result.OrderedByAsc(result.Fields[0].Name) + result.Sort() + } + + // Remove avg decompose columns. + result.RemoveColumns(deIdxs...) +} + +// aggregate supported type: SUM/COUNT/MIN/MAX/AVG. 
+func operator(aggrs []planner.Aggregator, x []sqltypes.Value) func([]sqltypes.Value) []sqltypes.Value { + return func(y []sqltypes.Value) []sqltypes.Value { + ret := sqltypes.Row(x).Copy() + for _, aggr := range aggrs { + switch aggr.Type { + case planner.AggrTypeSum, planner.AggrTypeCount: + v1, v2 := x[aggr.Index], y[aggr.Index] + if v1.Type() == sqltypes.Null { + ret[aggr.Index] = v2 + } else if v2.Type() == sqltypes.Null { + ret[aggr.Index] = v1 + } else { + ret[aggr.Index] = sqltypes.Operator(v1, v2, sqltypes.SumFn) + } + case planner.AggrTypeMin: + v1, v2 := x[aggr.Index], y[aggr.Index] + if v1.Type() == sqltypes.Null { + ret[aggr.Index] = v2 + } else if v2.Type() == sqltypes.Null { + ret[aggr.Index] = v1 + } else { + ret[aggr.Index] = sqltypes.Operator(v1, v2, sqltypes.MinFn) + } + case planner.AggrTypeMax: + v1, v2 := x[aggr.Index], y[aggr.Index] + if v1.Type() == sqltypes.Null { + ret[aggr.Index] = v2 + } else if v2.Type() == sqltypes.Null { + ret[aggr.Index] = v1 + } else { + ret[aggr.Index] = sqltypes.Operator(v1, v2, sqltypes.MaxFn) + } + case planner.AggrTypeAvg: + // nop + } + } + return ret + } +} diff --git a/src/executor/aggregate_executor_test.go b/src/executor/aggregate_executor_test.go new file mode 100644 index 00000000..fb0a97aa --- /dev/null +++ b/src/executor/aggregate_executor_test.go @@ -0,0 +1,325 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. 
+ * + */ + +package executor + +import ( + "backend" + "fmt" + "planner" + "router" + "testing" + "xcontext" + + "github.com/stretchr/testify/assert" + "github.com/xelabs/go-mysqlstack/sqlparser" + "github.com/xelabs/go-mysqlstack/xlog" + + querypb "github.com/xelabs/go-mysqlstack/sqlparser/depends/query" + "github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes" +) + +func TestAggregateExecutor(t *testing.T) { + r1 := &sqltypes.Result{ + Fields: []*querypb.Field{ + { + Name: "id", + Type: querypb.Type_INT32, + }, + { + Name: "score", + Type: querypb.Type_INT32, + }, + }, + Rows: [][]sqltypes.Value{ + { + sqltypes.MakeTrusted(querypb.Type_INT32, []byte("3")), + sqltypes.MakeTrusted(querypb.Type_INT32, []byte("3")), + }, + }, + } + r2 := &sqltypes.Result{ + Fields: []*querypb.Field{ + { + Name: "id", + Type: querypb.Type_INT32, + }, + { + Name: "score", + Type: querypb.Type_INT32, + }, + }, + Rows: [][]sqltypes.Value{ + { + sqltypes.MakeTrusted(querypb.Type_INT32, []byte("3")), + sqltypes.MakeTrusted(querypb.Type_INT32, []byte("7")), + }, + }, + } + + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + database := "sbtest" + + route, cleanup := router.MockNewRouter(log) + defer cleanup() + + err := route.AddForTest(database, router.MockTableAConfig()) + assert.Nil(t, err) + + // Create scatter and query handler. + scatter, fakedbs, cleanup := backend.MockScatter(log, 10) + defer cleanup() + // sum. + fakedbs.AddQuery("select id, sum(score) as score from sbtest.A0 as A where id > 8", r1) + fakedbs.AddQuery("select id, sum(score) as score from sbtest.A2 as A where id > 8", r2) + fakedbs.AddQuery("select id, sum(score) as score from sbtest.A4 as A where id > 8", r1) + fakedbs.AddQuery("select id, sum(score) as score from sbtest.A8 as A where id > 8", r2) + + // count. 
+ fakedbs.AddQuery("select id, count(score) as score from sbtest.A0 as A where id > 8", r1) + fakedbs.AddQuery("select id, count(score) as score from sbtest.A2 as A where id > 8", r2) + fakedbs.AddQuery("select id, count(score) as score from sbtest.A4 as A where id > 8", r1) + fakedbs.AddQuery("select id, count(score) as score from sbtest.A8 as A where id > 8", r2) + + // min. + fakedbs.AddQuery("select id, min(score) as score from sbtest.A0 as A where id > 8", r1) + fakedbs.AddQuery("select id, min(score) as score from sbtest.A2 as A where id > 8", r2) + fakedbs.AddQuery("select id, min(score) as score from sbtest.A4 as A where id > 8", r1) + fakedbs.AddQuery("select id, min(score) as score from sbtest.A8 as A where id > 8", r2) + + // max. + fakedbs.AddQuery("select id, max(score) as score from sbtest.A0 as A where id > 8", r1) + fakedbs.AddQuery("select id, max(score) as score from sbtest.A2 as A where id > 8", r2) + fakedbs.AddQuery("select id, max(score) as score from sbtest.A4 as A where id > 8", r1) + fakedbs.AddQuery("select id, max(score) as score from sbtest.A8 as A where id > 8", r2) + + querys := []string{ + "select id, sum(score) as score from A where id>8", + "select id, count(score) as score from A where id>8", + "select id, min(score) as score from A where id>8", + "select id, max(score) as score from A where id>8", + } + results := []string{ + "[[3 20]]", + "[[3 20]]", + "[[3 3]]", + "[[3 7]]", + } + + for i, query := range querys { + node, err := sqlparser.Parse(query) + assert.Nil(t, err) + + plan := planner.NewSelectPlan(log, database, query, node.(*sqlparser.Select), route) + err = plan.Build() + assert.Nil(t, err) + log.Debug("plan:%+v", plan.JSON()) + + txn, err := scatter.CreateTransaction() + assert.Nil(t, err) + defer txn.Finish() + executor := NewSelectExecutor(log, plan, txn) + { + ctx := xcontext.NewResultContext() + err := executor.Execute(ctx) + assert.Nil(t, err) + want := results[i] + got := fmt.Sprintf("%v", ctx.Results.Rows) + 
assert.Equal(t, want, got) + log.Debug("%+v", ctx.Results) + } + } +} + +func TestAggregateAvgExecutor(t *testing.T) { + r1 := &sqltypes.Result{ + Fields: []*querypb.Field{ + { + Name: "score", + Type: querypb.Type_FLOAT32, + }, + { + Name: "sum(score)", + Type: querypb.Type_INT32, + }, + { + Name: "count(score)", + Type: querypb.Type_INT32, + }, + }, + Rows: [][]sqltypes.Value{ + { + sqltypes.MakeTrusted(querypb.Type_FLOAT32, []byte("0")), + sqltypes.MakeTrusted(querypb.Type_INT32, []byte("3")), + sqltypes.MakeTrusted(querypb.Type_INT32, []byte("1")), + }, + }, + } + + r2 := &sqltypes.Result{ + Fields: []*querypb.Field{ + { + Name: "score", + Type: querypb.Type_FLOAT32, + }, + { + Name: "sum(score)", + Type: querypb.Type_INT32, + }, + { + Name: "count(score)", + Type: querypb.Type_INT32, + }, + }, + Rows: [][]sqltypes.Value{ + { + sqltypes.MakeTrusted(querypb.Type_FLOAT32, []byte("0")), + sqltypes.MakeTrusted(querypb.Type_INT32, []byte("13")), + sqltypes.MakeTrusted(querypb.Type_INT32, []byte("3")), + }, + }, + } + + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + database := "sbtest" + + route, cleanup := router.MockNewRouter(log) + defer cleanup() + + err := route.AddForTest(database, router.MockTableAConfig()) + assert.Nil(t, err) + + // Create scatter and query handler. + scatter, fakedbs, cleanup := backend.MockScatter(log, 10) + defer cleanup() + // avg. 
+ fakedbs.AddQuery("select avg(score) as score, sum(score), count(score) from sbtest.A0 as A where id > 8", r1) + fakedbs.AddQuery("select avg(score) as score, sum(score), count(score) from sbtest.A2 as A where id > 8", r1) + fakedbs.AddQuery("select avg(score) as score, sum(score), count(score) from sbtest.A4 as A where id > 8", r1) + fakedbs.AddQuery("select avg(score) as score, sum(score), count(score) from sbtest.A8 as A where id > 8", r2) + + querys := []string{ + "select avg(score) as score from A where id>8", + } + results := []string{ + "[[3.6666666666666665]]", + } + + for i, query := range querys { + node, err := sqlparser.Parse(query) + assert.Nil(t, err) + + plan := planner.NewSelectPlan(log, database, query, node.(*sqlparser.Select), route) + err = plan.Build() + assert.Nil(t, err) + log.Debug("plan:%+v", plan.JSON()) + + txn, err := scatter.CreateTransaction() + assert.Nil(t, err) + defer txn.Finish() + executor := NewSelectExecutor(log, plan, txn) + { + ctx := xcontext.NewResultContext() + err := executor.Execute(ctx) + assert.Nil(t, err) + want := results[i] + got := fmt.Sprintf("%v", ctx.Results.Rows) + assert.Equal(t, want, got) + log.Debug("%+v", ctx.Results) + } + } +} + +func TestAggregateGroup(t *testing.T) { + r1 := &sqltypes.Result{ + Fields: []*querypb.Field{ + { + Name: "id", + Type: querypb.Type_INT32, + }, + { + Name: "sum(score)", + Type: querypb.Type_INT32, + }, + }, + Rows: [][]sqltypes.Value{ + { + sqltypes.MakeTrusted(querypb.Type_INT32, []byte("1")), + sqltypes.MakeTrusted(querypb.Type_INT32, []byte("11")), + }, + }, + } + + r2 := &sqltypes.Result{ + Fields: []*querypb.Field{ + { + Name: "id", + Type: querypb.Type_INT32, + }, + { + Name: "sum(score)", + Type: querypb.Type_INT32, + }, + }, + Rows: [][]sqltypes.Value{ + { + sqltypes.MakeTrusted(querypb.Type_INT32, []byte("2")), + sqltypes.MakeTrusted(querypb.Type_INT32, []byte("22")), + }, + }, + } + + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + database := "sbtest" + + route, 
cleanup := router.MockNewRouter(log) + defer cleanup() + + err := route.AddForTest(database, router.MockTableAConfig()) + assert.Nil(t, err) + + // Create scatter and query handler. + scatter, fakedbs, cleanup := backend.MockScatter(log, 10) + defer cleanup() + // avg. + fakedbs.AddQuery("select id, sum(score) from sbtest.A0 as A where id > 8 group by id", r1) + fakedbs.AddQuery("select id, sum(score) from sbtest.A2 as A where id > 8 group by id", r1) + fakedbs.AddQuery("select id, sum(score) from sbtest.A4 as A where id > 8 group by id", r2) + fakedbs.AddQuery("select id, sum(score) from sbtest.A8 as A where id > 8 group by id", r2) + + querys := []string{ + "select id, sum(score) from A where id>8 group by id", + } + results := []string{ + "[[1 22] [2 44]]", + } + + for i, query := range querys { + node, err := sqlparser.Parse(query) + assert.Nil(t, err) + + plan := planner.NewSelectPlan(log, database, query, node.(*sqlparser.Select), route) + err = plan.Build() + assert.Nil(t, err) + log.Debug("plan:%+v", plan.JSON()) + + txn, err := scatter.CreateTransaction() + assert.Nil(t, err) + defer txn.Finish() + executor := NewSelectExecutor(log, plan, txn) + { + ctx := xcontext.NewResultContext() + err := executor.Execute(ctx) + assert.Nil(t, err) + want := results[i] + got := fmt.Sprintf("%v", ctx.Results.Rows) + assert.Equal(t, want, got) + log.Debug("%+v", ctx.Results) + } + } +} diff --git a/src/executor/ddl_executor.go b/src/executor/ddl_executor.go new file mode 100644 index 00000000..7524a4d7 --- /dev/null +++ b/src/executor/ddl_executor.go @@ -0,0 +1,53 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. 
+ * + */ + +package executor + +import ( + "backend" + "planner" + "xcontext" + + "github.com/xelabs/go-mysqlstack/xlog" +) + +var ( + _ Executor = &DDLExecutor{} +) + +// DDLExecutor represents a CREATE, ALTER, DROP executor +type DDLExecutor struct { + log *xlog.Log + plan planner.Plan + txn *backend.Txn +} + +// NewDDLExecutor creates DDL executor. +func NewDDLExecutor(log *xlog.Log, plan planner.Plan, txn *backend.Txn) *DDLExecutor { + return &DDLExecutor{ + log: log, + plan: plan, + txn: txn, + } +} + +// Execute used to execute the executor. +func (executor *DDLExecutor) Execute(ctx *xcontext.ResultContext) error { + plan := executor.plan.(*planner.DDLPlan) + reqCtx := xcontext.NewRequestContext() + reqCtx.Mode = plan.ReqMode + reqCtx.Querys = plan.Querys + reqCtx.RawQuery = plan.RawQuery + + res, err := executor.txn.Execute(reqCtx) + if err != nil { + return err + } + ctx.Results = res + return nil +} diff --git a/src/executor/ddl_executor_test.go b/src/executor/ddl_executor_test.go new file mode 100644 index 00000000..1c84d147 --- /dev/null +++ b/src/executor/ddl_executor_test.go @@ -0,0 +1,81 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package executor + +import ( + "backend" + "fakedb" + "planner" + "router" + "testing" + "xcontext" + + "github.com/stretchr/testify/assert" + "github.com/xelabs/go-mysqlstack/sqlparser" + "github.com/xelabs/go-mysqlstack/xlog" +) + +func TestDDLExecutor(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + database := "sbtest" + + // Create scatter and query handler. 
+ scatter, fakedbs, cleanup := backend.MockScatter(log, 10) + defer cleanup() + fakedbs.AddQueryPattern("create table `sbtest`.`A.*", fakedb.Result3) + fakedbs.AddQueryPattern("create database.*", fakedb.Result3) + + route, cleanup := router.MockNewRouter(log) + defer cleanup() + + err := route.AddForTest(database, router.MockTableAConfig()) + assert.Nil(t, err) + + // create table + { + query := "create table A(a int)" + node, err := sqlparser.Parse(query) + assert.Nil(t, err) + + plan := planner.NewDDLPlan(log, database, query, node.(*sqlparser.DDL), route) + err = plan.Build() + assert.Nil(t, err) + + txn, err := scatter.CreateTransaction() + assert.Nil(t, err) + defer txn.Finish() + executor := NewDDLExecutor(log, plan, txn) + { + ctx := xcontext.NewResultContext() + err := executor.Execute(ctx) + assert.Nil(t, err) + } + } + + // create database + { + query := "create database sbtest" + node, err := sqlparser.Parse(query) + assert.Nil(t, err) + + plan := planner.NewDDLPlan(log, database, query, node.(*sqlparser.DDL), route) + err = plan.Build() + assert.Nil(t, err) + + txn, err := scatter.CreateTransaction() + assert.Nil(t, err) + defer txn.Finish() + executor := NewDDLExecutor(log, plan, txn) + { + ctx := xcontext.NewResultContext() + err := executor.Execute(ctx) + assert.Nil(t, err) + } + } +} diff --git a/src/executor/delete_executor.go b/src/executor/delete_executor.go new file mode 100644 index 00000000..700aeb59 --- /dev/null +++ b/src/executor/delete_executor.go @@ -0,0 +1,54 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package executor + +import ( + "backend" + "planner" + "xcontext" + + "github.com/xelabs/go-mysqlstack/xlog" +) + +var ( + _ Executor = &DeleteExecutor{} +) + +// DeleteExecutor represents delete executor +type DeleteExecutor struct { + log *xlog.Log + plan planner.Plan + txn *backend.Txn +} + +// NewDeleteExecutor creates new delete executor. 
+func NewDeleteExecutor(log *xlog.Log, plan planner.Plan, txn *backend.Txn) *DeleteExecutor { + return &DeleteExecutor{ + log: log, + plan: plan, + txn: txn, + } +} + +// Execute used to execute the executor. +func (executor *DeleteExecutor) Execute(ctx *xcontext.ResultContext) error { + plan := executor.plan.(*planner.DeletePlan) + reqCtx := xcontext.NewRequestContext() + reqCtx.Mode = plan.ReqMode + reqCtx.TxnMode = xcontext.TxnWrite + reqCtx.Querys = plan.Querys + reqCtx.RawQuery = plan.RawQuery + + rs, err := executor.txn.Execute(reqCtx) + if err != nil { + return err + } + ctx.Results = rs + return nil +} diff --git a/src/executor/delete_executor_test.go b/src/executor/delete_executor_test.go new file mode 100644 index 00000000..25980b5d --- /dev/null +++ b/src/executor/delete_executor_test.go @@ -0,0 +1,66 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package executor + +import ( + "backend" + "fakedb" + "planner" + "router" + "testing" + "xcontext" + + "github.com/stretchr/testify/assert" + "github.com/xelabs/go-mysqlstack/sqlparser" + "github.com/xelabs/go-mysqlstack/xlog" +) + +func TestDeleteExecutor(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + database := "sbtest" + + // Create scatter and query handler. + scatter, fakedbs, cleanup := backend.MockScatter(log, 10) + defer cleanup() + + route, cleanup := router.MockNewRouter(log) + defer cleanup() + + err := route.AddForTest(database, router.MockTableAConfig()) + assert.Nil(t, err) + + // delete. + querys := []string{ + "delete from sbtest.A where id=1", + "delete from sbtest.A where id=1 order by xx", + "delete from sbtest.A where name='xx'", + "delete from sbtest.A where id in (1, 2,3)", + } + // Add querys. 
+ fakedbs.AddQueryPattern("delete from sbtest..*", fakedb.Result3) + + for _, query := range querys { + node, err := sqlparser.Parse(query) + assert.Nil(t, err) + + plan := planner.NewDeletePlan(log, database, query, node.(*sqlparser.Delete), route) + err = plan.Build() + assert.Nil(t, err) + + txn, err := scatter.CreateTransaction() + assert.Nil(t, err) + defer txn.Finish() + executor := NewDeleteExecutor(log, plan, txn) + { + ctx := xcontext.NewResultContext() + err := executor.Execute(ctx) + assert.Nil(t, err) + } + } +} diff --git a/src/executor/executor.go b/src/executor/executor.go new file mode 100644 index 00000000..ab494bb9 --- /dev/null +++ b/src/executor/executor.go @@ -0,0 +1,94 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package executor + +import ( + "backend" + "planner" + "xcontext" + + "github.com/pkg/errors" + + "github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes" + "github.com/xelabs/go-mysqlstack/xlog" +) + +// Executor interface. +type Executor interface { + Execute(*xcontext.ResultContext) error +} + +// Tree is a container for all executors +type Tree struct { + log *xlog.Log + children []Executor + txn *backend.Txn + planTree *planner.PlanTree +} + +// NewTree creates the new execute tree. 
+func NewTree(log *xlog.Log, planTree *planner.PlanTree, txn *backend.Txn) *Tree { + return &Tree{ + log: log, + txn: txn, + planTree: planTree, + children: make([]Executor, 0, 16), + } +} + +// Add adds a executor to the tree +func (et *Tree) Add(executor Executor) error { + et.children = append(et.children, executor) + return nil +} + +// Execute executes all Executor.Execute +func (et *Tree) Execute() (*sqltypes.Result, error) { + // build tree + for _, plan := range et.planTree.Plans() { + switch plan.Type() { + case planner.PlanTypeDDL: + executor := NewDDLExecutor(et.log, plan, et.txn) + if err := et.Add(executor); err != nil { + return nil, err + } + case planner.PlanTypeInsert: + executor := NewInsertExecutor(et.log, plan, et.txn) + if err := et.Add(executor); err != nil { + return nil, err + } + case planner.PlanTypeDelete: + executor := NewDeleteExecutor(et.log, plan, et.txn) + if err := et.Add(executor); err != nil { + return nil, err + } + case planner.PlanTypeUpdate: + executor := NewUpdateExecutor(et.log, plan, et.txn) + if err := et.Add(executor); err != nil { + return nil, err + } + case planner.PlanTypeSelect: + executor := NewSelectExecutor(et.log, plan, et.txn) + if err := et.Add(executor); err != nil { + return nil, err + } + default: + return nil, errors.Errorf("unsupported.execute.type:%v", plan.Type()) + } + } + + // execute all + rsCtx := xcontext.NewResultContext() + for _, executor := range et.children { + if err := executor.Execute(rsCtx); err != nil { + return nil, err + } + } + return rsCtx.Results, nil +} diff --git a/src/executor/executor_test.go b/src/executor/executor_test.go new file mode 100644 index 00000000..4ecd7b47 --- /dev/null +++ b/src/executor/executor_test.go @@ -0,0 +1,104 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. 
+ * + */ + +package executor + +import ( + "backend" + "fakedb" + "planner" + "router" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/xelabs/go-mysqlstack/sqlparser" + "github.com/xelabs/go-mysqlstack/xlog" +) + +func TestExecutor1(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + + // Create scatter and query handler. + scatter, fakedbs, cleanup := backend.MockScatter(log, 10) + defer cleanup() + fakedbs.AddQueryPattern("create table sbtest.A.*", fakedb.Result3) + fakedbs.AddQueryPattern("create database.*", fakedb.Result3) + + database := "sbtest" + route, cleanup := router.MockNewRouter(log) + defer cleanup() + + err := route.AddForTest(database, router.MockTableAConfig()) + assert.Nil(t, err) + + planTree := planner.NewPlanTree() + + // DDL + { + query := "create table A(a int)" + node, err := sqlparser.Parse(query) + assert.Nil(t, err) + + plan := planner.NewDDLPlan(log, database, query, node.(*sqlparser.DDL), route) + err = planTree.Add(plan) + assert.Nil(t, err) + } + + // insert + { + query := "insert into A(a) values(1)" + node, err := sqlparser.Parse(query) + assert.Nil(t, err) + + plan := planner.NewInsertPlan(log, database, query, node.(*sqlparser.Insert), route) + err = planTree.Add(plan) + assert.Nil(t, err) + } + + // delete + { + query := "delete from A where a=2" + node, err := sqlparser.Parse(query) + assert.Nil(t, err) + + plan := planner.NewDeletePlan(log, database, query, node.(*sqlparser.Delete), route) + err = planTree.Add(plan) + assert.Nil(t, err) + } + + // update + { + query := "update A set a=3 where a=2" + node, err := sqlparser.Parse(query) + assert.Nil(t, err) + + plan := planner.NewUpdatePlan(log, database, query, node.(*sqlparser.Update), route) + err = planTree.Add(plan) + assert.Nil(t, err) + } + + // update + { + query := "select * from A where a=2" + node, err := sqlparser.Parse(query) + assert.Nil(t, err) + + plan := planner.NewSelectPlan(log, database, query, node.(*sqlparser.Select), route) 
+ err = planTree.Add(plan) + assert.Nil(t, err) + } + + // Execute. + txn, err := scatter.CreateTransaction() + assert.Nil(t, err) + defer txn.Finish() + executorTree := NewTree(log, planTree, txn) + qr, err := executorTree.Execute() + assert.Nil(t, err) + assert.Equal(t, fakedb.Result3, qr) +} diff --git a/src/executor/insert_executor.go b/src/executor/insert_executor.go new file mode 100644 index 00000000..ce829133 --- /dev/null +++ b/src/executor/insert_executor.go @@ -0,0 +1,54 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package executor + +import ( + "backend" + "planner" + "xcontext" + + "github.com/xelabs/go-mysqlstack/xlog" +) + +var ( + _ Executor = &InsertExecutor{} +) + +// InsertExecutor represents insert executor +type InsertExecutor struct { + log *xlog.Log + plan planner.Plan + txn *backend.Txn +} + +// NewInsertExecutor creates new insert executor. +func NewInsertExecutor(log *xlog.Log, plan planner.Plan, txn *backend.Txn) *InsertExecutor { + return &InsertExecutor{ + log: log, + plan: plan, + txn: txn, + } +} + +// Execute used to execute the executor. +func (executor *InsertExecutor) Execute(ctx *xcontext.ResultContext) error { + plan := executor.plan.(*planner.InsertPlan) + reqCtx := xcontext.NewRequestContext() + reqCtx.Mode = plan.ReqMode + reqCtx.TxnMode = xcontext.TxnWrite + reqCtx.Querys = plan.Querys + reqCtx.RawQuery = plan.RawQuery + + rs, err := executor.txn.Execute(reqCtx) + if err != nil { + return err + } + ctx.Results = rs + return nil +} diff --git a/src/executor/insert_executor_test.go b/src/executor/insert_executor_test.go new file mode 100644 index 00000000..8ceef6c9 --- /dev/null +++ b/src/executor/insert_executor_test.go @@ -0,0 +1,64 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. 
+ * + */ + +package executor + +import ( + "backend" + "fakedb" + "planner" + "router" + "testing" + "xcontext" + + "github.com/stretchr/testify/assert" + "github.com/xelabs/go-mysqlstack/sqlparser" + "github.com/xelabs/go-mysqlstack/xlog" +) + +func TestInsertExecutor(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + database := "sbtest" + + // Create scatter and query handler. + scatter, fakedbs, cleanup := backend.MockScatter(log, 10) + defer cleanup() + + route, cleanup := router.MockNewRouter(log) + defer cleanup() + + err := route.AddForTest(database, router.MockTableAConfig()) + assert.Nil(t, err) + + // delete. + querys := []string{ + "insert into A(id, b, c) values(1,2,3),(23,4,5), (117,3,4)", + "insert into sbtest.A(id, b, c) values(1,2,3),(23,4,5), (117,3,4)", + } + // Add querys. + fakedbs.AddQueryPattern("insert into sbtest.A.*", fakedb.Result3) + + for _, query := range querys { + node, err := sqlparser.Parse(query) + assert.Nil(t, err) + + plan := planner.NewInsertPlan(log, database, query, node.(*sqlparser.Insert), route) + err = plan.Build() + assert.Nil(t, err) + + txn, err := scatter.CreateTransaction() + assert.Nil(t, err) + defer txn.Finish() + executor := NewInsertExecutor(log, plan, txn) + { + ctx := xcontext.NewResultContext() + err := executor.Execute(ctx) + assert.Nil(t, err) + } + } +} diff --git a/src/executor/join_executor.go b/src/executor/join_executor.go new file mode 100644 index 00000000..bec6170c --- /dev/null +++ b/src/executor/join_executor.go @@ -0,0 +1,39 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package executor + +import ( + "planner" + "xcontext" + + "github.com/xelabs/go-mysqlstack/xlog" +) + +var ( + _ Executor = &JoinExecutor{} +) + +// JoinExecutor tuple. +type JoinExecutor struct { + log *xlog.Log + plan planner.Plan +} + +// NewJoinExecutor creates new join executor. 
+func NewJoinExecutor(log *xlog.Log, plan planner.Plan) *JoinExecutor { + return &JoinExecutor{ + log: log, + plan: plan, + } +} + +// Execute used to execute the executor. +func (executor *JoinExecutor) Execute(ctx *xcontext.ResultContext) error { + return nil +} diff --git a/src/executor/join_executor_test.go b/src/executor/join_executor_test.go new file mode 100644 index 00000000..e33cd96d --- /dev/null +++ b/src/executor/join_executor_test.go @@ -0,0 +1,16 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package executor + +import ( + "testing" +) + +func TestSelecdtExecutor(t *testing.T) { +} diff --git a/src/executor/limit_executor.go b/src/executor/limit_executor.go new file mode 100644 index 00000000..eba07307 --- /dev/null +++ b/src/executor/limit_executor.go @@ -0,0 +1,42 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package executor + +import ( + "planner" + "xcontext" + + "github.com/xelabs/go-mysqlstack/xlog" +) + +var ( + _ Executor = &LimitExecutor{} +) + +// LimitExecutor represents limit executor. +type LimitExecutor struct { + log *xlog.Log + plan planner.Plan +} + +// NewLimitExecutor creates the new limit executor. +func NewLimitExecutor(log *xlog.Log, plan planner.Plan) *LimitExecutor { + return &LimitExecutor{ + log: log, + plan: plan, + } +} + +// Execute used to execute the executor. +func (executor *LimitExecutor) Execute(ctx *xcontext.ResultContext) error { + rs := ctx.Results + plan := executor.plan.(*planner.LimitPlan) + rs.Limit(plan.Offset, plan.Limit) + return nil +} diff --git a/src/executor/limit_executor_test.go b/src/executor/limit_executor_test.go new file mode 100644 index 00000000..dfa9678e --- /dev/null +++ b/src/executor/limit_executor_test.go @@ -0,0 +1,126 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. 
+ * + */ + +package executor + +import ( + "backend" + "fmt" + "planner" + "router" + "testing" + "xcontext" + + "github.com/stretchr/testify/assert" + "github.com/xelabs/go-mysqlstack/sqlparser" + "github.com/xelabs/go-mysqlstack/xlog" + + querypb "github.com/xelabs/go-mysqlstack/sqlparser/depends/query" + "github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes" +) + +func TestLimitExecutor(t *testing.T) { + r1 := &sqltypes.Result{ + Fields: []*querypb.Field{ + { + Name: "id", + Type: querypb.Type_INT32, + }, + { + Name: "name", + Type: querypb.Type_VARCHAR, + }, + }, + Rows: [][]sqltypes.Value{ + { + sqltypes.MakeTrusted(querypb.Type_INT32, []byte("11")), + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte("nice name11")), + }, + { + sqltypes.MakeTrusted(querypb.Type_INT32, []byte("12")), + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte("nice name12")), + }, + { + sqltypes.MakeTrusted(querypb.Type_INT32, []byte("13")), + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte("nice name13")), + }, + }, + } + r2 := &sqltypes.Result{ + Fields: []*querypb.Field{ + { + Name: "id", + Type: querypb.Type_INT32, + }, + { + Name: "name", + Type: querypb.Type_VARCHAR, + }, + }, + Rows: [][]sqltypes.Value{ + { + sqltypes.MakeTrusted(querypb.Type_INT32, []byte("21")), + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte("nice name21")), + }, + { + sqltypes.MakeTrusted(querypb.Type_INT32, []byte("22")), + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte("nice name22")), + }, + }, + } + r3 := &sqltypes.Result{} + + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + database := "sbtest" + + route, cleanup := router.MockNewRouter(log) + defer cleanup() + + err := route.AddForTest(database, router.MockTableAConfig()) + assert.Nil(t, err) + + // Create scatter and query handler. + scatter, fakedbs, cleanup := backend.MockScatter(log, 10) + defer cleanup() + // Add querys. 
+ fakedbs.AddQuery("select id, name from sbtest.A0 as A where id > 8 order by id asc limit 1", r1) + fakedbs.AddQuery("select id, name from sbtest.A2 as A where id > 8 order by id asc limit 1", r2) + fakedbs.AddQuery("select id, name from sbtest.A4 as A where id > 8 order by id asc limit 1", r3) + fakedbs.AddQuery("select id, name from sbtest.A8 as A where id > 8 order by id asc limit 1", r3) + + querys := []string{ + "select id, name from A where id>8 order by id limit 1", + } + results := []string{ + "[[11 nice name11]]", + } + + for i, query := range querys { + node, err := sqlparser.Parse(query) + assert.Nil(t, err) + + plan := planner.NewSelectPlan(log, database, query, node.(*sqlparser.Select), route) + err = plan.Build() + assert.Nil(t, err) + log.Debug("plan:%+v", plan.JSON()) + + txn, err := scatter.CreateTransaction() + assert.Nil(t, err) + defer txn.Finish() + executor := NewSelectExecutor(log, plan, txn) + { + ctx := xcontext.NewResultContext() + err := executor.Execute(ctx) + assert.Nil(t, err) + want := results[i] + got := fmt.Sprintf("%v", ctx.Results.Rows) + assert.Equal(t, want, got) + log.Debug("%+v", ctx.Results) + } + } +} diff --git a/src/executor/orderby_executor.go b/src/executor/orderby_executor.go new file mode 100644 index 00000000..dc9ce7f7 --- /dev/null +++ b/src/executor/orderby_executor.go @@ -0,0 +1,57 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package executor + +import ( + "planner" + "xcontext" + + "github.com/pkg/errors" + + "github.com/xelabs/go-mysqlstack/xlog" +) + +var ( + _ Executor = &OrderByExecutor{} +) + +// OrderByExecutor represents order by executor. +type OrderByExecutor struct { + log *xlog.Log + plan planner.Plan +} + +// NewOrderByExecutor creates new orderby executor. +func NewOrderByExecutor(log *xlog.Log, plan planner.Plan) *OrderByExecutor { + return &OrderByExecutor{ + log: log, + plan: plan, + } +} + +// Execute used to execute the executor. 
+func (executor *OrderByExecutor) Execute(ctx *xcontext.ResultContext) error { + rs := ctx.Results + plan := executor.plan.(*planner.OrderByPlan) + + for _, orderby := range plan.OrderBys { + switch orderby.Direction { + case planner.ASC: + if err := rs.OrderedByAsc(orderby.Field); err != nil { + return errors.WithStack(err) + } + case planner.DESC: + if err := rs.OrderedByDesc(orderby.Field); err != nil { + return errors.WithStack(err) + } + } + } + rs.Sort() + return nil +} diff --git a/src/executor/orderby_executor_test.go b/src/executor/orderby_executor_test.go new file mode 100644 index 00000000..9ff9d7de --- /dev/null +++ b/src/executor/orderby_executor_test.go @@ -0,0 +1,126 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package executor + +import ( + "backend" + "fmt" + "planner" + "router" + "testing" + "xcontext" + + "github.com/stretchr/testify/assert" + "github.com/xelabs/go-mysqlstack/sqlparser" + "github.com/xelabs/go-mysqlstack/xlog" + + querypb "github.com/xelabs/go-mysqlstack/sqlparser/depends/query" + "github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes" +) + +func TestOrderByExecutor(t *testing.T) { + r1 := &sqltypes.Result{ + Fields: []*querypb.Field{ + { + Name: "id", + Type: querypb.Type_INT32, + }, + { + Name: "name", + Type: querypb.Type_VARCHAR, + }, + }, + Rows: [][]sqltypes.Value{ + { + sqltypes.MakeTrusted(querypb.Type_INT32, []byte("3")), + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte("z")), + }, + { + sqltypes.MakeTrusted(querypb.Type_INT32, []byte("1")), + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte("x")), + }, + { + sqltypes.MakeTrusted(querypb.Type_INT32, []byte("5")), + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte("g")), + }, + }, + } + r2 := &sqltypes.Result{ + Fields: []*querypb.Field{ + { + Name: "id", + Type: querypb.Type_INT32, + }, + { + Name: "name", + Type: querypb.Type_VARCHAR, + }, + }, + Rows: [][]sqltypes.Value{ + { + 
sqltypes.MakeTrusted(querypb.Type_INT32, []byte("3")), + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte("go")), + }, + { + sqltypes.MakeTrusted(querypb.Type_INT32, []byte("51")), + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte("lang")), + }, + }, + } + r3 := &sqltypes.Result{} + + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + database := "sbtest" + + route, cleanup := router.MockNewRouter(log) + defer cleanup() + + err := route.AddForTest(database, router.MockTableAConfig()) + assert.Nil(t, err) + + // Create scatter and query handler. + scatter, fakedbs, cleanup := backend.MockScatter(log, 10) + defer cleanup() + // desc + fakedbs.AddQuery("select id, name from sbtest.A0 as A where id > 8 order by id desc, name asc", r1) + fakedbs.AddQuery("select id, name from sbtest.A2 as A where id > 8 order by id desc, name asc", r2) + fakedbs.AddQuery("select id, name from sbtest.A4 as A where id > 8 order by id desc, name asc", r3) + fakedbs.AddQuery("select id, name from sbtest.A8 as A where id > 8 order by id desc, name asc", r3) + + querys := []string{ + "select id, name from A where id>8 order by id desc, name asc", + } + results := []string{ + "[[51 lang] [5 g] [3 go] [3 z] [1 x]]", + } + + for i, query := range querys { + node, err := sqlparser.Parse(query) + assert.Nil(t, err) + + plan := planner.NewSelectPlan(log, database, query, node.(*sqlparser.Select), route) + err = plan.Build() + assert.Nil(t, err) + log.Debug("plan:%+v", plan.JSON()) + + txn, err := scatter.CreateTransaction() + assert.Nil(t, err) + defer txn.Finish() + executor := NewSelectExecutor(log, plan, txn) + { + ctx := xcontext.NewResultContext() + err := executor.Execute(ctx) + assert.Nil(t, err) + want := results[i] + got := fmt.Sprintf("%v", ctx.Results.Rows) + assert.Equal(t, want, got) + log.Debug("%+v", ctx.Results) + } + } +} diff --git a/src/executor/select_executor.go b/src/executor/select_executor.go new file mode 100644 index 00000000..2584b11d --- /dev/null +++ 
b/src/executor/select_executor.go @@ -0,0 +1,89 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package executor + +import ( + "backend" + "planner" + "xcontext" + + "github.com/pkg/errors" + + "github.com/xelabs/go-mysqlstack/xlog" +) + +var ( + _ Executor = &SelectExecutor{} +) + +// SelectExecutor represents select executor +type SelectExecutor struct { + log *xlog.Log + plan planner.Plan + txn *backend.Txn +} + +// NewSelectExecutor creates the new select executor. +func NewSelectExecutor(log *xlog.Log, plan planner.Plan, txn *backend.Txn) *SelectExecutor { + return &SelectExecutor{ + log: log, + plan: plan, + txn: txn, + } +} + +// Execute used to execute the executor. +func (executor *SelectExecutor) Execute(ctx *xcontext.ResultContext) error { + var err error + log := executor.log + plan := executor.plan.(*planner.SelectPlan) + subPlanTree := plan.Children() + reqCtx := xcontext.NewRequestContext() + reqCtx.Mode = plan.ReqMode + reqCtx.TxnMode = xcontext.TxnRead + reqCtx.Querys = plan.Querys + reqCtx.RawQuery = plan.RawQuery + + // Execute the parent plan. + if ctx.Results, err = executor.txn.Execute(reqCtx); err != nil { + return err + } + + // Execute all the chilren plan. 
+ if subPlanTree != nil { + for _, subPlan := range subPlanTree.Plans() { + switch subPlan.Type() { + case planner.PlanTypeJoin: + joinExecutor := NewJoinExecutor(log, subPlan) + if err := joinExecutor.Execute(ctx); err != nil { + return err + } + case planner.PlanTypeAggregate: + aggrExecutor := NewAggregateExecutor(executor.log, subPlan) + if err := aggrExecutor.Execute(ctx); err != nil { + return err + } + case planner.PlanTypeOrderby: + orderByExecutor := NewOrderByExecutor(executor.log, subPlan) + if err := orderByExecutor.Execute(ctx); err != nil { + return err + } + case planner.PlanTypeLimit: + limitExecutor := NewLimitExecutor(executor.log, subPlan) + if err := limitExecutor.Execute(ctx); err != nil { + return err + } + case planner.PlanTypeDistinct: + default: + return errors.Errorf("unsupported.execute.type:%v", plan.Type()) + } + } + } + return nil +} diff --git a/src/executor/select_executor_test.go b/src/executor/select_executor_test.go new file mode 100644 index 00000000..d4e727b8 --- /dev/null +++ b/src/executor/select_executor_test.go @@ -0,0 +1,126 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. 
+ * + */ + +package executor + +import ( + "backend" + "fmt" + "planner" + "router" + "testing" + "xcontext" + + "github.com/stretchr/testify/assert" + "github.com/xelabs/go-mysqlstack/sqlparser" + "github.com/xelabs/go-mysqlstack/xlog" + + querypb "github.com/xelabs/go-mysqlstack/sqlparser/depends/query" + "github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes" +) + +func TestSelectExecutor(t *testing.T) { + r1 := &sqltypes.Result{ + Fields: []*querypb.Field{ + { + Name: "id", + Type: querypb.Type_INT32, + }, + { + Name: "name", + Type: querypb.Type_VARCHAR, + }, + }, + Rows: [][]sqltypes.Value{ + { + sqltypes.MakeTrusted(querypb.Type_INT32, []byte("3")), + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte("z")), + }, + { + sqltypes.MakeTrusted(querypb.Type_INT32, []byte("1")), + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte("x")), + }, + { + sqltypes.MakeTrusted(querypb.Type_INT32, []byte("5")), + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte("g")), + }, + }, + } + r2 := &sqltypes.Result{ + Fields: []*querypb.Field{ + { + Name: "id", + Type: querypb.Type_INT32, + }, + { + Name: "name", + Type: querypb.Type_VARCHAR, + }, + }, + Rows: [][]sqltypes.Value{ + { + sqltypes.MakeTrusted(querypb.Type_INT32, []byte("3")), + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte("go")), + }, + { + sqltypes.MakeTrusted(querypb.Type_INT32, []byte("51")), + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte("lang")), + }, + }, + } + r3 := &sqltypes.Result{} + + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + database := "sbtest" + + route, cleanup := router.MockNewRouter(log) + defer cleanup() + + err := route.AddForTest(database, router.MockTableAConfig()) + assert.Nil(t, err) + + // Create scatter and query handler. 
+ scatter, fakedbs, cleanup := backend.MockScatter(log, 10) + defer cleanup() + // desc + fakedbs.AddQuery("select id, name from sbtest.A0 as A where id > 8 order by id desc, name asc", r1) + fakedbs.AddQuery("select id, name from sbtest.A2 as A where id > 8 order by id desc, name asc", r2) + fakedbs.AddQuery("select id, name from sbtest.A4 as A where id > 8 order by id desc, name asc", r3) + fakedbs.AddQuery("select id, name from sbtest.A8 as A where id > 8 order by id desc, name asc", r3) + + querys := []string{ + "select id, name from A where id>8 order by id desc, name asc", + } + results := []string{ + "[[51 lang] [5 g] [3 go] [3 z] [1 x]]", + } + + for i, query := range querys { + node, err := sqlparser.Parse(query) + assert.Nil(t, err) + + plan := planner.NewSelectPlan(log, database, query, node.(*sqlparser.Select), route) + err = plan.Build() + assert.Nil(t, err) + log.Debug("plan:%+v", plan.JSON()) + + txn, err := scatter.CreateTransaction() + assert.Nil(t, err) + defer txn.Finish() + executor := NewSelectExecutor(log, plan, txn) + { + ctx := xcontext.NewResultContext() + err := executor.Execute(ctx) + assert.Nil(t, err) + want := results[i] + got := fmt.Sprintf("%v", ctx.Results.Rows) + assert.Equal(t, want, got) + log.Debug("%+v", ctx.Results) + } + } +} diff --git a/src/executor/update_executor.go b/src/executor/update_executor.go new file mode 100644 index 00000000..9317c5a5 --- /dev/null +++ b/src/executor/update_executor.go @@ -0,0 +1,54 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package executor + +import ( + "backend" + "planner" + "xcontext" + + "github.com/xelabs/go-mysqlstack/xlog" +) + +var ( + _ Executor = &UpdateExecutor{} +) + +// UpdateExecutor represents update executor +type UpdateExecutor struct { + log *xlog.Log + plan planner.Plan + txn *backend.Txn +} + +// NewUpdateExecutor creates the new update executor. 
+func NewUpdateExecutor(log *xlog.Log, plan planner.Plan, txn *backend.Txn) *UpdateExecutor { + return &UpdateExecutor{ + log: log, + plan: plan, + txn: txn, + } +} + +// Execute used to execute the executor. +func (executor *UpdateExecutor) Execute(ctx *xcontext.ResultContext) error { + plan := executor.plan.(*planner.UpdatePlan) + reqCtx := xcontext.NewRequestContext() + reqCtx.Mode = plan.ReqMode + reqCtx.TxnMode = xcontext.TxnWrite + reqCtx.Querys = plan.Querys + reqCtx.RawQuery = plan.RawQuery + + rs, err := executor.txn.Execute(reqCtx) + if err != nil { + return err + } + ctx.Results = rs + return nil +} diff --git a/src/executor/update_executor_test.go b/src/executor/update_executor_test.go new file mode 100644 index 00000000..08b326f4 --- /dev/null +++ b/src/executor/update_executor_test.go @@ -0,0 +1,65 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package executor + +import ( + "backend" + "fakedb" + "planner" + "router" + "testing" + "xcontext" + + "github.com/stretchr/testify/assert" + "github.com/xelabs/go-mysqlstack/sqlparser" + "github.com/xelabs/go-mysqlstack/xlog" +) + +func TestUpdateExecutor(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + database := "sbtest" + + // Create scatter and query handler. + scatter, fakedbs, cleanup := backend.MockScatter(log, 10) + defer cleanup() + + route, cleanup := router.MockNewRouter(log) + defer cleanup() + + err := route.AddForTest(database, router.MockTableAConfig()) + assert.Nil(t, err) + + // delete. + querys := []string{ + "update sbtest.A set val = 1 where id = 1", + "update sbtest.A set val = 1 where id = id2 and id = 1", + "update sbtest.A set val = 1 where id in (1, 2)", + } + // Add querys. 
+ fakedbs.AddQueryPattern("update sbtest..*", fakedb.Result3) + + for _, query := range querys { + node, err := sqlparser.Parse(query) + assert.Nil(t, err) + + plan := planner.NewUpdatePlan(log, database, query, node.(*sqlparser.Update), route) + err = plan.Build() + assert.Nil(t, err) + + txn, err := scatter.CreateTransaction() + assert.Nil(t, err) + defer txn.Finish() + executor := NewUpdateExecutor(log, plan, txn) + { + ctx := xcontext.NewResultContext() + err := executor.Execute(ctx) + assert.Nil(t, err) + } + } +} diff --git a/src/fakedb/fakedb.go b/src/fakedb/fakedb.go new file mode 100644 index 00000000..bc56377b --- /dev/null +++ b/src/fakedb/fakedb.go @@ -0,0 +1,220 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package fakedb + +import ( + "config" + "fmt" + "sync" + + "github.com/xelabs/go-mysqlstack/driver" + querypb "github.com/xelabs/go-mysqlstack/sqlparser/depends/query" + "github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes" + "github.com/xelabs/go-mysqlstack/xlog" +) + +var ( + // Result1 result. + Result1 = &sqltypes.Result{ + Fields: []*querypb.Field{ + { + Name: "id", + Type: querypb.Type_INT32, + }, + { + Name: "name", + Type: querypb.Type_VARCHAR, + }, + }, + Rows: [][]sqltypes.Value{ + { + sqltypes.MakeTrusted(querypb.Type_INT32, []byte("11")), + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte("1nice name")), + }, + { + sqltypes.MakeTrusted(querypb.Type_INT32, []byte("12")), + sqltypes.NULL, + }, + }, + } + + // Result2 result. + Result2 = &sqltypes.Result{ + Fields: []*querypb.Field{ + { + Name: "id", + Type: querypb.Type_INT32, + }, + { + Name: "name", + Type: querypb.Type_VARCHAR, + }, + }, + Rows: [][]sqltypes.Value{ + { + sqltypes.MakeTrusted(querypb.Type_INT32, []byte("21")), + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte("2nice name")), + }, + { + sqltypes.MakeTrusted(querypb.Type_INT32, []byte("22")), + sqltypes.NULL, + }, + }, + } + + // Result3 result. 
+ Result3 = &sqltypes.Result{} +) + +// DB is a fake database. +type DB struct { + log *xlog.Log + mu sync.RWMutex + handler *driver.TestHandler + listeners []*driver.Listener + backendconfs []*config.BackendConfig + addrs []string +} + +// New creates a new DB. +func New(log *xlog.Log, n int) *DB { + th := driver.NewTestHandler(log) + listeners := make([]*driver.Listener, 0, 8) + addrs := make([]string, 0, 8) + backendconfs := make([]*config.BackendConfig, 0, 8) + for i := 0; i < n; i++ { + l, err := driver.MockMysqlServer(log, th) + if err != nil { + panic(err) + } + conf := &config.BackendConfig{ + Name: fmt.Sprintf("backend%d", i), + Address: l.Addr(), + User: "mock", + Password: "pwd", + DBName: "sbtest", + Charset: "utf8", + MaxConnections: 1024, + } + backendconfs = append(backendconfs, conf) + addrs = append(addrs, l.Addr()) + listeners = append(listeners, l) + } + db := &DB{ + log: log, + handler: th, + addrs: addrs, + listeners: listeners, + backendconfs: backendconfs, + } + // Add mock/mock user to mysql.user table. + db.addMockUser() + return db +} + +// Addrs used to get all address of the server. +func (db *DB) Addrs() []string { + db.mu.RLock() + defer db.mu.RUnlock() + return db.addrs +} + +// BackendConfs used to get all backend configs. +func (db *DB) BackendConfs() []*config.BackendConfig { + db.mu.RLock() + defer db.mu.RUnlock() + return db.backendconfs +} + +// Close used to close all the listeners. +func (db *DB) Close() { + db.mu.Lock() + defer db.mu.Unlock() + for _, l := range db.listeners { + l.Close() + } +} + +// AddQuery used to add a query and the return result expected. +func (db *DB) AddQuery(query string, result *sqltypes.Result) { + db.handler.AddQuery(query, result) +} + +// AddQuerys used to add a query and the return results expected. +func (db *DB) AddQuerys(query string, result ...*sqltypes.Result) { + db.handler.AddQuerys(query, result...) +} + +// AddQueryStream used to add a query and the streamly return result expected. 
+func (db *DB) AddQueryStream(query string, result *sqltypes.Result) { + db.handler.AddQueryStream(query, result) +} + +// AddQueryDelay used to add query and return by delay. +func (db *DB) AddQueryDelay(query string, result *sqltypes.Result, delayMS int) { + db.handler.AddQueryDelay(query, result, delayMS) +} + +// AddQueryError use to add a query and return the error expected. +func (db *DB) AddQueryError(query string, err error) { + db.handler.AddQueryError(query, err) +} + +// AddQueryPanic used to add the query with panic. +func (db *DB) AddQueryPanic(query string) { + db.handler.AddQueryPanic(query) +} + +// AddQueryPattern used to add an expected result for a set of queries. +func (db *DB) AddQueryPattern(qp string, result *sqltypes.Result) { + db.handler.AddQueryPattern(qp, result) +} + +// AddQueryErrorPattern use to add a query and return the error expected. +func (db *DB) AddQueryErrorPattern(qp string, err error) { + db.handler.AddQueryErrorPattern(qp, err) +} + +// GetQueryCalledNum returns how many times db executes a certain query. +func (db *DB) GetQueryCalledNum(query string) int { + return db.handler.GetQueryCalledNum(query) +} + +// ResetAll will reset all, including: query and query patterns. +func (db *DB) ResetAll() { + db.handler.ResetAll() +} + +// ResetPatternErrors used to reset all the error pattern. +func (db *DB) ResetPatternErrors() { + db.handler.ResetPatternErrors() +} + +// ResetErrors used to reset all the errors. +func (db *DB) ResetErrors() { + db.handler.ResetErrors() +} + +// addMockUser adds mock/mock user to mysql.user table. 
+func (db *DB) addMockUser() { + r1 := &sqltypes.Result{ + Fields: []*querypb.Field{ + { + Name: "authentication_string ", + Type: querypb.Type_VARCHAR, + }, + }, + Rows: [][]sqltypes.Value{ + { + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte("*CC86C0D547DE7603129BC1D3B98DB2242E7F744F")), + }, + }, + } + db.AddQuery("select authentication_string from mysql.user where user='mock'", r1) +} diff --git a/src/optimizer/optimizer.go b/src/optimizer/optimizer.go new file mode 100644 index 00000000..3355d75f --- /dev/null +++ b/src/optimizer/optimizer.go @@ -0,0 +1,18 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package optimizer + +import ( + "planner" +) + +// Optimizer interface. +type Optimizer interface { + BuildPlanTree() (*planner.PlanTree, error) +} diff --git a/src/optimizer/simple_optimizer.go b/src/optimizer/simple_optimizer.go new file mode 100644 index 00000000..40a2905b --- /dev/null +++ b/src/optimizer/simple_optimizer.go @@ -0,0 +1,80 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package optimizer + +import ( + "planner" + "router" + + "github.com/pkg/errors" + + "github.com/xelabs/go-mysqlstack/sqlparser" + "github.com/xelabs/go-mysqlstack/xlog" +) + +var ( + _ Optimizer = &SimpleOptimizer{} +) + +// SimpleOptimizer is a simple optimizer who dispatches the plans +type SimpleOptimizer struct { + log *xlog.Log + database string + query string + node sqlparser.Statement + router *router.Router +} + +// NewSimpleOptimizer creates the new simple optimizer. +func NewSimpleOptimizer(log *xlog.Log, database string, query string, node sqlparser.Statement, router *router.Router) *SimpleOptimizer { + return &SimpleOptimizer{ + log: log, + database: database, + query: query, + node: node, + router: router, + } +} + +// BuildPlanTree used to build plan trees for the query. 
+func (so *SimpleOptimizer) BuildPlanTree() (*planner.PlanTree, error) { + log := so.log + database := so.database + query := so.query + node := so.node + router := so.router + + plans := planner.NewPlanTree() + switch node.(type) { + case *sqlparser.DDL: + node := planner.NewDDLPlan(log, database, query, node.(*sqlparser.DDL), router) + plans.Add(node) + case *sqlparser.Insert: + node := planner.NewInsertPlan(log, database, query, node.(*sqlparser.Insert), router) + plans.Add(node) + case *sqlparser.Delete: + node := planner.NewDeletePlan(log, database, query, node.(*sqlparser.Delete), router) + plans.Add(node) + case *sqlparser.Update: + node := planner.NewUpdatePlan(log, database, query, node.(*sqlparser.Update), router) + plans.Add(node) + case *sqlparser.Select: + nod := node.(*sqlparser.Select) + selectNode := planner.NewSelectPlan(log, database, query, nod, router) + plans.Add(selectNode) + default: + return nil, errors.Errorf("optimizer.unsupported.query.type[%+v]", node) + } + + // Build plantree. + if err := plans.Build(); err != nil { + return nil, err + } + return plans, nil +} diff --git a/src/planner/aggregate_plan.go b/src/planner/aggregate_plan.go new file mode 100644 index 00000000..382b6afd --- /dev/null +++ b/src/planner/aggregate_plan.go @@ -0,0 +1,235 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package planner + +import ( + "encoding/json" + "fmt" + + "github.com/pkg/errors" + + "github.com/xelabs/go-mysqlstack/sqlparser" + "github.com/xelabs/go-mysqlstack/xlog" +) + +var ( + _ Plan = &AggregatePlan{} +) + +// AggrType type. +type AggrType string + +const ( + // AggrTypeNull enum. + AggrTypeNull AggrType = "" + + // AggrTypeCount enum. + AggrTypeCount AggrType = "COUNT" + + // AggrTypeSum enum. + AggrTypeSum AggrType = "SUM" + + // AggrTypeMin enum. + AggrTypeMin AggrType = "MIN" + + // AggrTypeMax enum. + AggrTypeMax AggrType = "MAX" + + // AggrTypeAvg enum. 
+ AggrTypeAvg AggrType = "AVG" + + // AggrTypeGroupBy enum. + AggrTypeGroupBy AggrType = "GROUP BY" +) + +// Aggregator tuple. +type Aggregator struct { + Field string + Index int + Type AggrType +} + +// AggregatePlan represents order-by plan. +type AggregatePlan struct { + log *xlog.Log + node *sqlparser.Select + tuples []selectTuple + rewritten sqlparser.SelectExprs + + normalAggrs []Aggregator + groupAggrs []Aggregator + + // type + typ PlanType +} + +// NewAggregatePlan used to create AggregatePlan. +func NewAggregatePlan(log *xlog.Log, node *sqlparser.Select, tuples []selectTuple) *AggregatePlan { + return &AggregatePlan{ + log: log, + node: node, + tuples: tuples, + rewritten: node.SelectExprs, + typ: PlanTypeAggregate, + } +} + +// analyze used to check the aggregator is at the support level. +// Supports: +// SUM/COUNT/MIN/MAX/AVG/GROUPBY +// Notes: +// group by fields must be in the select list, for example: +// select count(a), a from t group by a --[OK] +// select count(a) from t group by a --[ER] +func (p *AggregatePlan) analyze() error { + var nullAggrs []Aggregator + node := p.node + tuples := p.tuples + + // Check the having has expr value. + exprInHaving := false + exprInHavingStr := "" + if node.Having != nil { + _ = sqlparser.Walk(func(n sqlparser.SQLNode) (kontinue bool, err error) { + switch n.(type) { + case *sqlparser.FuncExpr: + exprInHaving = true + buf := sqlparser.NewTrackedBuffer(nil) + n.Format(buf) + exprInHavingStr = buf.String() + return false, nil + } + return true, nil + }, node.Having) + } + + if exprInHaving { + return errors.Errorf("unsupported: expr[%s].in.having.clause", exprInHavingStr) + } + + // aggregators. 
+ k := 0 + for _, tuple := range tuples { + if tuple.distinct { + return errors.Errorf("unsupported: distinct.in.function:%+v", tuple.fn) + } + switch tuple.fn { + case "": + // non-func + nullAggrs = append(nullAggrs, Aggregator{Field: tuple.field, Index: k, Type: AggrTypeNull}) + case "sum": + p.normalAggrs = append(p.normalAggrs, Aggregator{Field: tuple.field, Index: k, Type: AggrTypeSum}) + case "count": + p.normalAggrs = append(p.normalAggrs, Aggregator{Field: tuple.field, Index: k, Type: AggrTypeCount}) + case "min": + p.normalAggrs = append(p.normalAggrs, Aggregator{Field: tuple.field, Index: k, Type: AggrTypeMin}) + case "max": + p.normalAggrs = append(p.normalAggrs, Aggregator{Field: tuple.field, Index: k, Type: AggrTypeMax}) + case "avg": + p.normalAggrs = append(p.normalAggrs, Aggregator{Field: tuple.field, Index: k, Type: AggrTypeAvg}) + p.normalAggrs = append(p.normalAggrs, Aggregator{Field: fmt.Sprintf("sum(%s)", tuple.column), Index: k + 1, Type: AggrTypeSum}) + p.normalAggrs = append(p.normalAggrs, Aggregator{Field: fmt.Sprintf("count(%s)", tuple.column), Index: k + 2, Type: AggrTypeCount}) + + avgs := decomposeAvg(&tuple) + p.rewritten = append(p.rewritten, &sqlparser.AliasedExpr{}, &sqlparser.AliasedExpr{}) + copy(p.rewritten[(k+1)+2:], p.rewritten[(k+1):]) + p.rewritten[(k + 1)] = avgs[0] + p.rewritten[(k+1)+1] = avgs[1] + k += 2 + default: + return errors.Errorf("unsupported: function:%+v", tuple.fn) + } + k++ + } + + // Groupbys. + groupbys := node.GroupBy + for _, by := range groupbys { + by1 := by.(*sqlparser.ColName) + // check: select ... 
from t groupby t.a + if !by1.Qualifier.IsEmpty() { + return errors.Errorf("unsupported: group.by.field[%s].have.table.name[%s].please.use.AS.keyword", by1.Name, by1.Qualifier.Name) + } + field := by1.Name.String() + // check: groupby field in select list + idx := -1 + for _, null := range nullAggrs { + if null.Field == field { + idx = null.Index + break + } + } + if idx == -1 { + return errors.Errorf("unsupported: group.by.field[%s].should.be.in.select.list", field) + } + p.groupAggrs = append(p.groupAggrs, Aggregator{Field: field, Index: idx, Type: AggrTypeGroupBy}) + } + return nil +} + +// Build used to build distributed querys. +func (p *AggregatePlan) Build() error { + return p.analyze() +} + +// Type returns the type of the plan. +func (p *AggregatePlan) Type() PlanType { + return p.typ +} + +// JSON returns the plan info. +func (p *AggregatePlan) JSON() string { + type aggrs struct { + Aggrs []Aggregator + ReWritten string + } + a := &aggrs{} + a.Aggrs = append(a.Aggrs, p.normalAggrs...) + a.Aggrs = append(a.Aggrs, p.groupAggrs...) + + buf := sqlparser.NewTrackedBuffer(nil) + buf.Myprintf("%v", p.rewritten) + a.ReWritten = buf.String() + + bout, err := json.MarshalIndent(a, "", "\t") + if err != nil { + return err.Error() + } + return string(bout) +} + +// Children returns the children of the plan. +func (p *AggregatePlan) Children() *PlanTree { + return nil +} + +// NormalAggregators returns the aggregators. +func (p *AggregatePlan) NormalAggregators() []Aggregator { + return p.normalAggrs +} + +// GroupAggregators returns the group aggregators. +func (p *AggregatePlan) GroupAggregators() []Aggregator { + return p.groupAggrs +} + +// ReWritten used to re-write the SelectExprs clause. +func (p *AggregatePlan) ReWritten() sqlparser.SelectExprs { + return p.rewritten +} + +// Empty retuns the aggregator number more than zero. 
+func (p *AggregatePlan) Empty() bool { + return (len(p.normalAggrs) == 0 && len(p.groupAggrs) == 0) +} + +// Size returns the memory size. +func (p *AggregatePlan) Size() int { + return 0 +} diff --git a/src/planner/aggregate_plan_test.go b/src/planner/aggregate_plan_test.go new file mode 100644 index 00000000..2995e962 --- /dev/null +++ b/src/planner/aggregate_plan_test.go @@ -0,0 +1,216 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package planner + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/xelabs/go-mysqlstack/sqlparser" + "github.com/xelabs/go-mysqlstack/xlog" +) + +func TestAggregatePlan(t *testing.T) { + querys := []string{ + "select 1, a, min(b), max(a), avg(a), sum(a), count(a), b as b1, avg(b), c, avg(c) from t group by a, b1, c", + } + results := []string{ + `{ + "Aggrs": [ + { + "Field": "min(b)", + "Index": 2, + "Type": "MIN" + }, + { + "Field": "max(a)", + "Index": 3, + "Type": "MAX" + }, + { + "Field": "avg(a)", + "Index": 4, + "Type": "AVG" + }, + { + "Field": "sum(a)", + "Index": 5, + "Type": "SUM" + }, + { + "Field": "count(a)", + "Index": 6, + "Type": "COUNT" + }, + { + "Field": "sum(a)", + "Index": 7, + "Type": "SUM" + }, + { + "Field": "count(a)", + "Index": 8, + "Type": "COUNT" + }, + { + "Field": "avg(b)", + "Index": 10, + "Type": "AVG" + }, + { + "Field": "sum(b)", + "Index": 11, + "Type": "SUM" + }, + { + "Field": "count(b)", + "Index": 12, + "Type": "COUNT" + }, + { + "Field": "avg(c)", + "Index": 14, + "Type": "AVG" + }, + { + "Field": "sum(c)", + "Index": 15, + "Type": "SUM" + }, + { + "Field": "count(c)", + "Index": 16, + "Type": "COUNT" + }, + { + "Field": "a", + "Index": 1, + "Type": "GROUP BY" + }, + { + "Field": "b1", + "Index": 9, + "Type": "GROUP BY" + }, + { + "Field": "c", + "Index": 13, + "Type": "GROUP BY" + } + ], + "ReWritten": "1, a, min(b), max(a), avg(a), sum(a), count(a), sum(a), count(a), b as b1, avg(b), sum(b), count(b), 
c, avg(c), sum(c), count(c)" +}`, + } + + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + for i, query := range querys { + tree, err := sqlparser.Parse(query) + assert.Nil(t, err) + node := tree.(*sqlparser.Select) + assert.Nil(t, err) + tuples, err := parserSelectExprs(node.SelectExprs) + assert.Nil(t, err) + plan := NewAggregatePlan(log, node, tuples) + // plan build + { + err := plan.Build() + assert.Nil(t, err) + want := results[i] + got := plan.JSON() + log.Debug(got) + assert.Equal(t, want, got) + assert.True(t, nil == plan.Children()) + assert.Equal(t, 13, len(plan.NormalAggregators())) + assert.Equal(t, 3, len(plan.GroupAggregators())) + assert.False(t, plan.Empty()) + } + } +} + +func TestAggregatePlanHaving(t *testing.T) { + querys := []string{ + "select age,count(*) from A group by age having a >=2", + } + results := []string{ + `{ + "Aggrs": [ + { + "Field": "count(*)", + "Index": 1, + "Type": "COUNT" + }, + { + "Field": "age", + "Index": 0, + "Type": "GROUP BY" + } + ], + "ReWritten": "age, count(*)" +}`, + } + + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + for i, query := range querys { + tree, err := sqlparser.Parse(query) + assert.Nil(t, err) + node := tree.(*sqlparser.Select) + assert.Nil(t, err) + tuples, err := parserSelectExprs(node.SelectExprs) + assert.Nil(t, err) + plan := NewAggregatePlan(log, node, tuples) + // plan build + { + err := plan.Build() + assert.Nil(t, err) + want := results[i] + got := plan.JSON() + log.Debug(got) + assert.Equal(t, want, got) + assert.True(t, nil == plan.Children()) + assert.Equal(t, 1, len(plan.NormalAggregators())) + assert.Equal(t, 1, len(plan.GroupAggregators())) + assert.False(t, plan.Empty()) + } + } +} + +func TestAggregatePlanUnsupported(t *testing.T) { + querys := []string{ + "select sum(a) from t group by d", + "select sum(a),d from t group by db.t.d", + "select rand(a),d from t group by a", + "select count(distinct b) from t", + "select age,count(*) from A group by age having count(*) >=2", + } + 
results := []string{ + "unsupported: group.by.field[d].should.be.in.select.list", + "unsupported: group.by.field[d].have.table.name[t].please.use.AS.keyword", + "unsupported: function:rand", + "unsupported: distinct.in.function:count", + "unsupported: expr[count(*)].in.having.clause", + } + + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + for i, query := range querys { + tree, err := sqlparser.Parse(query) + assert.Nil(t, err) + node := tree.(*sqlparser.Select) + tuples, err := parserSelectExprs(node.SelectExprs) + assert.Nil(t, err) + plan := NewAggregatePlan(log, node, tuples) + // plan build + { + err := plan.Build() + + want := results[i] + got := err.Error() + assert.Equal(t, want, got) + } + } +} diff --git a/src/planner/ddl_plan.go b/src/planner/ddl_plan.go new file mode 100644 index 00000000..d60e0f46 --- /dev/null +++ b/src/planner/ddl_plan.go @@ -0,0 +1,171 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package planner + +import ( + "encoding/json" + "errors" + "fmt" + "regexp" + "router" + "strings" + "xcontext" + + "github.com/xelabs/go-mysqlstack/sqlparser" + "github.com/xelabs/go-mysqlstack/sqlparser/depends/hack" + "github.com/xelabs/go-mysqlstack/xlog" +) + +var ( + _ Plan = &DDLPlan{} +) + +// DDLPlan represents a CREATE, ALTER, DROP or RENAME plan +type DDLPlan struct { + log *xlog.Log + + // router + router *router.Router + + // ddl ast + node *sqlparser.DDL + + // database + database string + + // raw query + RawQuery string + + // type + typ PlanType + + // mode + ReqMode xcontext.RequestMode + + // query and backend tuple + Querys []xcontext.QueryTuple +} + +// NewDDLPlan used to create DDLPlan +func NewDDLPlan(log *xlog.Log, database string, query string, node *sqlparser.DDL, router *router.Router) *DDLPlan { + return &DDLPlan{ + log: log, + node: node, + router: router, + database: database, + RawQuery: query, + typ: PlanTypeDDL, + Querys: make([]xcontext.QueryTuple, 0, 16), + } 
+} + +// Build used to build DDL distributed querys. +// sqlparser.DDL is a simple grammar ast, it just parses database and table name in the prefix. +func (p *DDLPlan) Build() error { + node := p.node + + // Unsupported rename operation. + switch node.Action { + case sqlparser.CreateDBStr: + p.ReqMode = xcontext.ReqScatter + return nil + default: + table := node.Table.Name.String() + database := p.database + if !node.Table.Qualifier.IsEmpty() { + database = node.Table.Qualifier.String() + } + + // Get the shard key. + shardKey, err := p.router.ShardKey(database, table) + if err != nil { + return err + } + switch node.Action { + case sqlparser.AlterDropColumnStr: + if shardKey == node.DropColumnName { + return errors.New("unsupported: cannot.drop.the.column.on.shard.key") + } + case sqlparser.AlterModifyColumnStr: + if shardKey == node.ModifyColumnDef.Name.String() { + return errors.New("unsupported: cannot.modify.the.column.on.shard.key") + } + } + + segments, err := p.router.Lookup(database, table, nil, nil) + if err != nil { + return err + } + for _, segment := range segments { + var query string + + segTable := segment.Table + if node.Table.Qualifier.IsEmpty() { + segTable = fmt.Sprintf("`%s`.`%s`", database, segTable) + rawQuery := strings.Replace(p.RawQuery, "`", "", 2) + re, _ := regexp.Compile(fmt.Sprintf(`\b(%s)\b`, table)) + query = re.ReplaceAllString(rawQuery, segTable) + } else { + segTable = fmt.Sprintf("`%s`.`%s`", database, segTable) + newTable := fmt.Sprintf("%s.%s", database, table) + rawQuery := strings.Replace(p.RawQuery, "`", "", 4) + re, _ := regexp.Compile(fmt.Sprintf(`\b(%s)\b`, newTable)) + query = re.ReplaceAllString(rawQuery, segTable) + } + + tuple := xcontext.QueryTuple{ + Query: query, + Backend: segment.Backend, + Range: segment.Range.String(), + } + p.Querys = append(p.Querys, tuple) + } + } + return nil +} + +// Type returns the type of the plan. 
+func (p *DDLPlan) Type() PlanType { + return p.typ +} + +// JSON returns the plan info. +func (p *DDLPlan) JSON() string { + type explain struct { + RawQuery string `json:",omitempty"` + Partitions []xcontext.QueryTuple `json:",omitempty"` + } + + // Partitions. + var parts []xcontext.QueryTuple + parts = append(parts, p.Querys...) + exp := &explain{ + RawQuery: p.RawQuery, + Partitions: parts, + } + bout, err := json.MarshalIndent(exp, "", "\t") + if err != nil { + return err.Error() + } + return hack.String(bout) +} + +// Children returns the children of the plan. +func (p *DDLPlan) Children() *PlanTree { + return nil +} + +// Size returns the memory size. +func (p *DDLPlan) Size() int { + size := len(p.RawQuery) + for _, q := range p.Querys { + size += len(q.Query) + } + return size +} diff --git a/src/planner/ddl_plan_test.go b/src/planner/ddl_plan_test.go new file mode 100644 index 00000000..92253b2a --- /dev/null +++ b/src/planner/ddl_plan_test.go @@ -0,0 +1,240 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. 
+ * + */ + +package planner + +import ( + "router" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/xelabs/go-mysqlstack/sqlparser" + "github.com/xelabs/go-mysqlstack/xlog" +) + +func TestDDLPlan1(t *testing.T) { + results := []string{ + "{\n\t\"RawQuery\": \"create table A(a int)\",\n\t\"Partitions\": [\n\t\t{\n\t\t\t\"Query\": \"create table `sbtest`.`A0`(a int)\",\n\t\t\t\"Backend\": \"backend0\",\n\t\t\t\"Range\": \"[0-2)\"\n\t\t},\n\t\t{\n\t\t\t\"Query\": \"create table `sbtest`.`A2`(a int)\",\n\t\t\t\"Backend\": \"backend2\",\n\t\t\t\"Range\": \"[2-4)\"\n\t\t},\n\t\t{\n\t\t\t\"Query\": \"create table `sbtest`.`A4`(a int)\",\n\t\t\t\"Backend\": \"backend4\",\n\t\t\t\"Range\": \"[4-8)\"\n\t\t},\n\t\t{\n\t\t\t\"Query\": \"create table `sbtest`.`A8`(a int)\",\n\t\t\t\"Backend\": \"backend8\",\n\t\t\t\"Range\": \"[8-4096)\"\n\t\t}\n\t]\n}", + "{\n\t\"RawQuery\": \"drop table sbtest.A\",\n\t\"Partitions\": [\n\t\t{\n\t\t\t\"Query\": \"drop table `sbtest`.`A0`\",\n\t\t\t\"Backend\": \"backend0\",\n\t\t\t\"Range\": \"[0-2)\"\n\t\t},\n\t\t{\n\t\t\t\"Query\": \"drop table `sbtest`.`A2`\",\n\t\t\t\"Backend\": \"backend2\",\n\t\t\t\"Range\": \"[2-4)\"\n\t\t},\n\t\t{\n\t\t\t\"Query\": \"drop table `sbtest`.`A4`\",\n\t\t\t\"Backend\": \"backend4\",\n\t\t\t\"Range\": \"[4-8)\"\n\t\t},\n\t\t{\n\t\t\t\"Query\": \"drop table `sbtest`.`A8`\",\n\t\t\t\"Backend\": \"backend8\",\n\t\t\t\"Range\": \"[8-4096)\"\n\t\t}\n\t]\n}", + "{\n\t\"RawQuery\": \"alter table A engine = tokudb\",\n\t\"Partitions\": [\n\t\t{\n\t\t\t\"Query\": \"alter table `sbtest`.`A0` engine = tokudb\",\n\t\t\t\"Backend\": \"backend0\",\n\t\t\t\"Range\": \"[0-2)\"\n\t\t},\n\t\t{\n\t\t\t\"Query\": \"alter table `sbtest`.`A2` engine = tokudb\",\n\t\t\t\"Backend\": \"backend2\",\n\t\t\t\"Range\": \"[2-4)\"\n\t\t},\n\t\t{\n\t\t\t\"Query\": \"alter table `sbtest`.`A4` engine = tokudb\",\n\t\t\t\"Backend\": \"backend4\",\n\t\t\t\"Range\": \"[4-8)\"\n\t\t},\n\t\t{\n\t\t\t\"Query\": \"alter table 
`sbtest`.`A8` engine = tokudb\",\n\t\t\t\"Backend\": \"backend8\",\n\t\t\t\"Range\": \"[8-4096)\"\n\t\t}\n\t]\n}", + "{\n\t\"RawQuery\": \"create index idx_a on A(a)\",\n\t\"Partitions\": [\n\t\t{\n\t\t\t\"Query\": \"create index idx_a on `sbtest`.`A0`(a)\",\n\t\t\t\"Backend\": \"backend0\",\n\t\t\t\"Range\": \"[0-2)\"\n\t\t},\n\t\t{\n\t\t\t\"Query\": \"create index idx_a on `sbtest`.`A2`(a)\",\n\t\t\t\"Backend\": \"backend2\",\n\t\t\t\"Range\": \"[2-4)\"\n\t\t},\n\t\t{\n\t\t\t\"Query\": \"create index idx_a on `sbtest`.`A4`(a)\",\n\t\t\t\"Backend\": \"backend4\",\n\t\t\t\"Range\": \"[4-8)\"\n\t\t},\n\t\t{\n\t\t\t\"Query\": \"create index idx_a on `sbtest`.`A8`(a)\",\n\t\t\t\"Backend\": \"backend8\",\n\t\t\t\"Range\": \"[8-4096)\"\n\t\t}\n\t]\n}", + "{\n\t\"RawQuery\": \"drop index idx_a on sbtest.A\",\n\t\"Partitions\": [\n\t\t{\n\t\t\t\"Query\": \"drop index idx_a on `sbtest`.`A0`\",\n\t\t\t\"Backend\": \"backend0\",\n\t\t\t\"Range\": \"[0-2)\"\n\t\t},\n\t\t{\n\t\t\t\"Query\": \"drop index idx_a on `sbtest`.`A2`\",\n\t\t\t\"Backend\": \"backend2\",\n\t\t\t\"Range\": \"[2-4)\"\n\t\t},\n\t\t{\n\t\t\t\"Query\": \"drop index idx_a on `sbtest`.`A4`\",\n\t\t\t\"Backend\": \"backend4\",\n\t\t\t\"Range\": \"[4-8)\"\n\t\t},\n\t\t{\n\t\t\t\"Query\": \"drop index idx_a on `sbtest`.`A8`\",\n\t\t\t\"Backend\": \"backend8\",\n\t\t\t\"Range\": \"[8-4096)\"\n\t\t}\n\t]\n}", + "{\n\t\"RawQuery\": \"alter table A add column(b int)\",\n\t\"Partitions\": [\n\t\t{\n\t\t\t\"Query\": \"alter table `sbtest`.`A0` add column(b int)\",\n\t\t\t\"Backend\": \"backend0\",\n\t\t\t\"Range\": \"[0-2)\"\n\t\t},\n\t\t{\n\t\t\t\"Query\": \"alter table `sbtest`.`A2` add column(b int)\",\n\t\t\t\"Backend\": \"backend2\",\n\t\t\t\"Range\": \"[2-4)\"\n\t\t},\n\t\t{\n\t\t\t\"Query\": \"alter table `sbtest`.`A4` add column(b int)\",\n\t\t\t\"Backend\": \"backend4\",\n\t\t\t\"Range\": \"[4-8)\"\n\t\t},\n\t\t{\n\t\t\t\"Query\": \"alter table `sbtest`.`A8` add column(b int)\",\n\t\t\t\"Backend\": 
\"backend8\",\n\t\t\t\"Range\": \"[8-4096)\"\n\t\t}\n\t]\n}", + "{\n\t\"RawQuery\": \"alter table sbtest.A add column(b int)\",\n\t\"Partitions\": [\n\t\t{\n\t\t\t\"Query\": \"alter table `sbtest`.`A0` add column(b int)\",\n\t\t\t\"Backend\": \"backend0\",\n\t\t\t\"Range\": \"[0-2)\"\n\t\t},\n\t\t{\n\t\t\t\"Query\": \"alter table `sbtest`.`A2` add column(b int)\",\n\t\t\t\"Backend\": \"backend2\",\n\t\t\t\"Range\": \"[2-4)\"\n\t\t},\n\t\t{\n\t\t\t\"Query\": \"alter table `sbtest`.`A4` add column(b int)\",\n\t\t\t\"Backend\": \"backend4\",\n\t\t\t\"Range\": \"[4-8)\"\n\t\t},\n\t\t{\n\t\t\t\"Query\": \"alter table `sbtest`.`A8` add column(b int)\",\n\t\t\t\"Backend\": \"backend8\",\n\t\t\t\"Range\": \"[8-4096)\"\n\t\t}\n\t]\n}", + "{\n\t\"RawQuery\": \"alter table sbtest.A add column(b int, c varchar(100))\",\n\t\"Partitions\": [\n\t\t{\n\t\t\t\"Query\": \"alter table `sbtest`.`A0` add column(b int, c varchar(100))\",\n\t\t\t\"Backend\": \"backend0\",\n\t\t\t\"Range\": \"[0-2)\"\n\t\t},\n\t\t{\n\t\t\t\"Query\": \"alter table `sbtest`.`A2` add column(b int, c varchar(100))\",\n\t\t\t\"Backend\": \"backend2\",\n\t\t\t\"Range\": \"[2-4)\"\n\t\t},\n\t\t{\n\t\t\t\"Query\": \"alter table `sbtest`.`A4` add column(b int, c varchar(100))\",\n\t\t\t\"Backend\": \"backend4\",\n\t\t\t\"Range\": \"[4-8)\"\n\t\t},\n\t\t{\n\t\t\t\"Query\": \"alter table `sbtest`.`A8` add column(b int, c varchar(100))\",\n\t\t\t\"Backend\": \"backend8\",\n\t\t\t\"Range\": \"[8-4096)\"\n\t\t}\n\t]\n}", + "{\n\t\"RawQuery\": \"alter table A modify column b int\",\n\t\"Partitions\": [\n\t\t{\n\t\t\t\"Query\": \"alter table `sbtest`.`A0` modify column b int\",\n\t\t\t\"Backend\": \"backend0\",\n\t\t\t\"Range\": \"[0-2)\"\n\t\t},\n\t\t{\n\t\t\t\"Query\": \"alter table `sbtest`.`A2` modify column b int\",\n\t\t\t\"Backend\": \"backend2\",\n\t\t\t\"Range\": \"[2-4)\"\n\t\t},\n\t\t{\n\t\t\t\"Query\": \"alter table `sbtest`.`A4` modify column b int\",\n\t\t\t\"Backend\": \"backend4\",\n\t\t\t\"Range\": 
\"[4-8)\"\n\t\t},\n\t\t{\n\t\t\t\"Query\": \"alter table `sbtest`.`A8` modify column b int\",\n\t\t\t\"Backend\": \"backend8\",\n\t\t\t\"Range\": \"[8-4096)\"\n\t\t}\n\t]\n}", + "{\n\t\"RawQuery\": \"alter table A drop column b\",\n\t\"Partitions\": [\n\t\t{\n\t\t\t\"Query\": \"alter table `sbtest`.`A0` drop column b\",\n\t\t\t\"Backend\": \"backend0\",\n\t\t\t\"Range\": \"[0-2)\"\n\t\t},\n\t\t{\n\t\t\t\"Query\": \"alter table `sbtest`.`A2` drop column b\",\n\t\t\t\"Backend\": \"backend2\",\n\t\t\t\"Range\": \"[2-4)\"\n\t\t},\n\t\t{\n\t\t\t\"Query\": \"alter table `sbtest`.`A4` drop column b\",\n\t\t\t\"Backend\": \"backend4\",\n\t\t\t\"Range\": \"[4-8)\"\n\t\t},\n\t\t{\n\t\t\t\"Query\": \"alter table `sbtest`.`A8` drop column b\",\n\t\t\t\"Backend\": \"backend8\",\n\t\t\t\"Range\": \"[8-4096)\"\n\t\t}\n\t]\n}", + "{\n\t\"RawQuery\": \"truncate table A\",\n\t\"Partitions\": [\n\t\t{\n\t\t\t\"Query\": \"truncate table `sbtest`.`A0`\",\n\t\t\t\"Backend\": \"backend0\",\n\t\t\t\"Range\": \"[0-2)\"\n\t\t},\n\t\t{\n\t\t\t\"Query\": \"truncate table `sbtest`.`A2`\",\n\t\t\t\"Backend\": \"backend2\",\n\t\t\t\"Range\": \"[2-4)\"\n\t\t},\n\t\t{\n\t\t\t\"Query\": \"truncate table `sbtest`.`A4`\",\n\t\t\t\"Backend\": \"backend4\",\n\t\t\t\"Range\": \"[4-8)\"\n\t\t},\n\t\t{\n\t\t\t\"Query\": \"truncate table `sbtest`.`A8`\",\n\t\t\t\"Backend\": \"backend8\",\n\t\t\t\"Range\": \"[8-4096)\"\n\t\t}\n\t]\n}", + } + + querys := []string{ + "create table A(a int)", + "drop table sbtest.A", + "alter table A engine = tokudb", + "create index idx_a on A(a)", + "drop index idx_a on sbtest.A", + "alter table A add column(b int)", + "alter table sbtest.A add column(b int)", + "alter table sbtest.A add column(b int, c varchar(100))", + "alter table A modify column b int", + "alter table A drop column b", + "truncate table A", + } + + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + database := "sbtest" + + route, cleanup := router.MockNewRouter(log) + defer cleanup() + + err := 
route.AddForTest(database, router.MockTableAConfig()) + assert.Nil(t, err) + for i, query := range querys { + log.Debug("%v", query) + node, err := sqlparser.Parse(query) + assert.Nil(t, err) + plan := NewDDLPlan(log, database, query, node.(*sqlparser.DDL), route) + + // plan build + { + err := plan.Build() + assert.Nil(t, err) + want := results[i] + got := plan.JSON() + log.Info(got) + assert.Equal(t, want, got) + assert.True(t, nil == plan.Children()) + } + + // type + { + want := PlanTypeDDL + got := plan.Type() + assert.Equal(t, want, got) + } + } +} + +func TestDDLAlterOnShardKey(t *testing.T) { + results := []string{ + "unsupported: cannot.modify.the.column.on.shard.key", + "unsupported: cannot.drop.the.column.on.shard.key", + } + + querys := []string{ + "alter table A modify column id int", + "alter table A drop column id", + } + + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + database := "sbtest" + + route, cleanup := router.MockNewRouter(log) + defer cleanup() + + err := route.AddForTest(database, router.MockTableAConfig()) + assert.Nil(t, err) + for i, query := range querys { + log.Debug("%v", query) + node, err := sqlparser.Parse(query) + assert.Nil(t, err) + plan := NewDDLPlan(log, database, query, node.(*sqlparser.DDL), route) + + // plan build + { + err := plan.Build() + want := results[i] + got := err.Error() + assert.Equal(t, want, got) + } + } +} + +func TestDDLPlanScatter(t *testing.T) { + results := []string{ + `{ + "RawQuery": "create database A" +}`, + } + querys := []string{ + "create database A", + } + + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + database := "sbtest" + + route, cleanup := router.MockNewRouter(log) + defer cleanup() + + err := route.AddForTest(database, router.MockTableAConfig()) + assert.Nil(t, err) + for i, query := range querys { + node, err := sqlparser.Parse(query) + assert.Nil(t, err) + plan := NewDDLPlan(log, database, query, node.(*sqlparser.DDL), route) + + // plan build + { + err := plan.Build() + 
assert.Nil(t, err) + + want := results[i] + got := plan.JSON() + log.Debug(got) + assert.Equal(t, want, got) + } + } +} + +func TestDDLPlanCreateIndexWithTableNameIssue10(t *testing.T) { + results := []string{ + "{\n\t\"RawQuery\": \"create index idx_A_id on A(a)\",\n\t\"Partitions\": [\n\t\t{\n\t\t\t\"Query\": \"create index idx_A_id on `sbtest`.`A0`(a)\",\n\t\t\t\"Backend\": \"backend0\",\n\t\t\t\"Range\": \"[0-2)\"\n\t\t},\n\t\t{\n\t\t\t\"Query\": \"create index idx_A_id on `sbtest`.`A2`(a)\",\n\t\t\t\"Backend\": \"backend2\",\n\t\t\t\"Range\": \"[2-4)\"\n\t\t},\n\t\t{\n\t\t\t\"Query\": \"create index idx_A_id on `sbtest`.`A4`(a)\",\n\t\t\t\"Backend\": \"backend4\",\n\t\t\t\"Range\": \"[4-8)\"\n\t\t},\n\t\t{\n\t\t\t\"Query\": \"create index idx_A_id on `sbtest`.`A8`(a)\",\n\t\t\t\"Backend\": \"backend8\",\n\t\t\t\"Range\": \"[8-4096)\"\n\t\t}\n\t]\n}", + } + + querys := []string{ + "create index idx_A_id on A(a)", + } + + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + database := "sbtest" + route, cleanup := router.MockNewRouter(log) + defer cleanup() + + err := route.AddForTest(database, router.MockTableAConfig()) + assert.Nil(t, err) + for i, query := range querys { + log.Debug("%v", query) + node, err := sqlparser.Parse(query) + assert.Nil(t, err) + plan := NewDDLPlan(log, database, query, node.(*sqlparser.DDL), route) + + // plan build + { + err := plan.Build() + assert.Nil(t, err) + want := results[i] + got := plan.JSON() + log.Info("--got:%+v", got) + assert.Equal(t, want, got) + } + + // type + { + want := PlanTypeDDL + got := plan.Type() + assert.Equal(t, want, got) + } + } +} + +func TestDDLPlanWithQuote(t *testing.T) { + results := []string{ + "{\n\t\"RawQuery\": \"create table `A`(a int)\",\n\t\"Partitions\": [\n\t\t{\n\t\t\t\"Query\": \"create table `sbtest`.`A0`(a int)\",\n\t\t\t\"Backend\": \"backend0\",\n\t\t\t\"Range\": \"[0-2)\"\n\t\t},\n\t\t{\n\t\t\t\"Query\": \"create table `sbtest`.`A2`(a int)\",\n\t\t\t\"Backend\": 
\"backend2\",\n\t\t\t\"Range\": \"[2-4)\"\n\t\t},\n\t\t{\n\t\t\t\"Query\": \"create table `sbtest`.`A4`(a int)\",\n\t\t\t\"Backend\": \"backend4\",\n\t\t\t\"Range\": \"[4-8)\"\n\t\t},\n\t\t{\n\t\t\t\"Query\": \"create table `sbtest`.`A8`(a int)\",\n\t\t\t\"Backend\": \"backend8\",\n\t\t\t\"Range\": \"[8-4096)\"\n\t\t}\n\t]\n}", + "{\n\t\"RawQuery\": \"create table A(`a` int)\",\n\t\"Partitions\": [\n\t\t{\n\t\t\t\"Query\": \"create table `sbtest`.`A0`(a int)\",\n\t\t\t\"Backend\": \"backend0\",\n\t\t\t\"Range\": \"[0-2)\"\n\t\t},\n\t\t{\n\t\t\t\"Query\": \"create table `sbtest`.`A2`(a int)\",\n\t\t\t\"Backend\": \"backend2\",\n\t\t\t\"Range\": \"[2-4)\"\n\t\t},\n\t\t{\n\t\t\t\"Query\": \"create table `sbtest`.`A4`(a int)\",\n\t\t\t\"Backend\": \"backend4\",\n\t\t\t\"Range\": \"[4-8)\"\n\t\t},\n\t\t{\n\t\t\t\"Query\": \"create table `sbtest`.`A8`(a int)\",\n\t\t\t\"Backend\": \"backend8\",\n\t\t\t\"Range\": \"[8-4096)\"\n\t\t}\n\t]\n}", + "{\n\t\"RawQuery\": \"create table A(a int)\",\n\t\"Partitions\": [\n\t\t{\n\t\t\t\"Query\": \"create table `sbtest`.`A0`(a int)\",\n\t\t\t\"Backend\": \"backend0\",\n\t\t\t\"Range\": \"[0-2)\"\n\t\t},\n\t\t{\n\t\t\t\"Query\": \"create table `sbtest`.`A2`(a int)\",\n\t\t\t\"Backend\": \"backend2\",\n\t\t\t\"Range\": \"[2-4)\"\n\t\t},\n\t\t{\n\t\t\t\"Query\": \"create table `sbtest`.`A4`(a int)\",\n\t\t\t\"Backend\": \"backend4\",\n\t\t\t\"Range\": \"[4-8)\"\n\t\t},\n\t\t{\n\t\t\t\"Query\": \"create table `sbtest`.`A8`(a int)\",\n\t\t\t\"Backend\": \"backend8\",\n\t\t\t\"Range\": \"[8-4096)\"\n\t\t}\n\t]\n}", + "{\n\t\"RawQuery\": \"create table sbtest.A(a int)\",\n\t\"Partitions\": [\n\t\t{\n\t\t\t\"Query\": \"create table `sbtest`.`A0`(a int)\",\n\t\t\t\"Backend\": \"backend0\",\n\t\t\t\"Range\": \"[0-2)\"\n\t\t},\n\t\t{\n\t\t\t\"Query\": \"create table `sbtest`.`A2`(a int)\",\n\t\t\t\"Backend\": \"backend2\",\n\t\t\t\"Range\": \"[2-4)\"\n\t\t},\n\t\t{\n\t\t\t\"Query\": \"create table `sbtest`.`A4`(a int)\",\n\t\t\t\"Backend\": 
\"backend4\",\n\t\t\t\"Range\": \"[4-8)\"\n\t\t},\n\t\t{\n\t\t\t\"Query\": \"create table `sbtest`.`A8`(a int)\",\n\t\t\t\"Backend\": \"backend8\",\n\t\t\t\"Range\": \"[8-4096)\"\n\t\t}\n\t]\n}", + "{\n\t\"RawQuery\": \"create table sbtest.`A`(a int)\",\n\t\"Partitions\": [\n\t\t{\n\t\t\t\"Query\": \"create table `sbtest`.`A0`(a int)\",\n\t\t\t\"Backend\": \"backend0\",\n\t\t\t\"Range\": \"[0-2)\"\n\t\t},\n\t\t{\n\t\t\t\"Query\": \"create table `sbtest`.`A2`(a int)\",\n\t\t\t\"Backend\": \"backend2\",\n\t\t\t\"Range\": \"[2-4)\"\n\t\t},\n\t\t{\n\t\t\t\"Query\": \"create table `sbtest`.`A4`(a int)\",\n\t\t\t\"Backend\": \"backend4\",\n\t\t\t\"Range\": \"[4-8)\"\n\t\t},\n\t\t{\n\t\t\t\"Query\": \"create table `sbtest`.`A8`(a int)\",\n\t\t\t\"Backend\": \"backend8\",\n\t\t\t\"Range\": \"[8-4096)\"\n\t\t}\n\t]\n}", + "{\n\t\"RawQuery\": \"create table `sbtest`.A(a int)\",\n\t\"Partitions\": [\n\t\t{\n\t\t\t\"Query\": \"create table `sbtest`.`A0`(a int)\",\n\t\t\t\"Backend\": \"backend0\",\n\t\t\t\"Range\": \"[0-2)\"\n\t\t},\n\t\t{\n\t\t\t\"Query\": \"create table `sbtest`.`A2`(a int)\",\n\t\t\t\"Backend\": \"backend2\",\n\t\t\t\"Range\": \"[2-4)\"\n\t\t},\n\t\t{\n\t\t\t\"Query\": \"create table `sbtest`.`A4`(a int)\",\n\t\t\t\"Backend\": \"backend4\",\n\t\t\t\"Range\": \"[4-8)\"\n\t\t},\n\t\t{\n\t\t\t\"Query\": \"create table `sbtest`.`A8`(a int)\",\n\t\t\t\"Backend\": \"backend8\",\n\t\t\t\"Range\": \"[8-4096)\"\n\t\t}\n\t]\n}", + "{\n\t\"RawQuery\": \"create table `sbtest`.`A`(a int)\",\n\t\"Partitions\": [\n\t\t{\n\t\t\t\"Query\": \"create table `sbtest`.`A0`(a int)\",\n\t\t\t\"Backend\": \"backend0\",\n\t\t\t\"Range\": \"[0-2)\"\n\t\t},\n\t\t{\n\t\t\t\"Query\": \"create table `sbtest`.`A2`(a int)\",\n\t\t\t\"Backend\": \"backend2\",\n\t\t\t\"Range\": \"[2-4)\"\n\t\t},\n\t\t{\n\t\t\t\"Query\": \"create table `sbtest`.`A4`(a int)\",\n\t\t\t\"Backend\": \"backend4\",\n\t\t\t\"Range\": \"[4-8)\"\n\t\t},\n\t\t{\n\t\t\t\"Query\": \"create table `sbtest`.`A8`(a 
int)\",\n\t\t\t\"Backend\": \"backend8\",\n\t\t\t\"Range\": \"[8-4096)\"\n\t\t}\n\t]\n}", + } + + querys := []string{ + "create table `A`(a int)", + "create table A(`a` int)", + "create table A(a int)", + "create table sbtest.A(a int)", + "create table sbtest.`A`(a int)", + "create table `sbtest`.A(a int)", + "create table `sbtest`.`A`(a int)", + } + + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + database := "sbtest" + + route, cleanup := router.MockNewRouter(log) + defer cleanup() + + err := route.AddForTest(database, router.MockTableAConfig()) + assert.Nil(t, err) + for i, query := range querys { + log.Debug("%v", query) + node, err := sqlparser.Parse(query) + assert.Nil(t, err) + plan := NewDDLPlan(log, database, query, node.(*sqlparser.DDL), route) + + // plan build + { + err := plan.Build() + assert.Nil(t, err) + want := results[i] + got := plan.JSON() + assert.Equal(t, want, got) + assert.True(t, nil == plan.Children()) + } + } +} diff --git a/src/planner/delete_plan.go b/src/planner/delete_plan.go new file mode 100644 index 00000000..52ffd0b6 --- /dev/null +++ b/src/planner/delete_plan.go @@ -0,0 +1,157 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. 
package planner

import (
	"encoding/json"
	"router"
	"xcontext"

	"github.com/pkg/errors"

	"github.com/xelabs/go-mysqlstack/sqlparser"
	"github.com/xelabs/go-mysqlstack/sqlparser/depends/hack"
	"github.com/xelabs/go-mysqlstack/xlog"
)

var (
	// Compile-time check: DeletePlan must satisfy the Plan interface.
	_ Plan = &DeletePlan{}
)

// DeletePlan represents delete plan.
// It rewrites one logical DELETE into per-partition DELETEs routed by shard key.
type DeletePlan struct {
	log *xlog.Log

	// router
	router *router.Router

	// delete ast
	node *sqlparser.Delete

	// database
	database string

	// raw query
	RawQuery string

	// type
	typ PlanType

	// mode
	ReqMode xcontext.RequestMode

	// query and backend tuple
	Querys []xcontext.QueryTuple
}

// NewDeletePlan used to create DeletePlan.
func NewDeletePlan(log *xlog.Log, database string, query string, node *sqlparser.Delete, router *router.Router) *DeletePlan {
	return &DeletePlan{
		log:      log,
		node:     node,
		router:   router,
		database: database,
		RawQuery: query,
		typ:      PlanTypeDelete,
		Querys:   make([]xcontext.QueryTuple, 0, 16),
	}
}

// analyze used to analyze the 'delete' is at the support level.
// Rejected: subqueries anywhere in the statement, and DELETE without a WHERE clause.
func (p *DeletePlan) analyze() error {
	node := p.node
	// analyze subquery.
	if hasSubquery(node) {
		return errors.New("unsupported: subqueries.in.delete")
	}
	if node.Where == nil {
		return errors.New("unsupported: missing.where.clause.in.DML")
	}
	return nil
}

// Build used to build distributed querys.
// It resolves the shard key, looks up the routing segments from the WHERE
// clause, and produces one rewritten DELETE per matched segment.
func (p *DeletePlan) Build() error {
	if err := p.analyze(); err != nil {
		return err
	}

	node := p.node
	// Database: an explicit qualifier ("db.t") overrides the session database.
	database := p.database
	if !node.Table.Qualifier.IsEmpty() {
		database = node.Table.Qualifier.String()
	}
	table := node.Table.Name.String()

	// Sharding key.
	shardkey, err := p.router.ShardKey(database, table)
	if err != nil {
		return err
	}

	// Get the routing segments info.
	segments, err := getDMLRouting(database, table, shardkey, node.Where, p.router)
	if err != nil {
		return err
	}

	// Rewritten the query, one tuple per segment.
	// NOTE(review): "%vfrom" has no space — presumably Comments formats with a
	// trailing space when non-empty; confirm against sqlparser's own Delete format.
	for _, segment := range segments {
		buf := sqlparser.NewTrackedBuffer(nil)
		buf.Myprintf("delete %vfrom %s.%s%v%v%v", node.Comments, database, segment.Table, node.Where, node.OrderBy, node.Limit)
		tuple := xcontext.QueryTuple{
			Query:   buf.String(),
			Backend: segment.Backend,
			Range:   segment.Range.String(),
		}
		p.Querys = append(p.Querys, tuple)
	}
	return nil
}

// Type returns the type of the plan.
func (p *DeletePlan) Type() PlanType {
	return p.typ
}

// JSON returns the plan info rendered as tab-indented JSON (or the
// marshalling error text on failure).
func (p *DeletePlan) JSON() string {
	type explain struct {
		RawQuery   string                `json:",omitempty"`
		Partitions []xcontext.QueryTuple `json:",omitempty"`
	}

	// Partitions.
	var parts []xcontext.QueryTuple
	parts = append(parts, p.Querys...)
	exp := &explain{
		RawQuery:   p.RawQuery,
		Partitions: parts,
	}
	bout, err := json.MarshalIndent(exp, "", "\t")
	if err != nil {
		return err.Error()
	}
	return hack.String(bout)
}

// Children returns the children of the plan; a delete plan has none.
func (p *DeletePlan) Children() *PlanTree {
	return nil
}

// Size returns the memory size: raw query plus all rewritten query strings.
func (p *DeletePlan) Size() int {
	size := len(p.RawQuery)
	for _, q := range p.Querys {
		size += len(q.Query)
	}
	return size
}
package planner

import (
	"router"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/xelabs/go-mysqlstack/sqlparser"
	"github.com/xelabs/go-mysqlstack/xlog"
)

// TestDeletePlan checks that supported DELETEs are rewritten into the
// expected per-partition queries (golden JSON from DeletePlan.JSON).
func TestDeletePlan(t *testing.T) {
	results := []string{
		`{
	"RawQuery": "delete from sbtest.A where id=1",
	"Partitions": [
		{
			"Query": "delete from sbtest.A6 where id = 1",
			"Backend": "backend6",
			"Range": "[512-4096)"
		}
	]
}`,
		`{
	"RawQuery": "delete from sbtest.A where id=1 order by xx",
	"Partitions": [
		{
			"Query": "delete from sbtest.A6 where id = 1 order by xx asc",
			"Backend": "backend6",
			"Range": "[512-4096)"
		}
	]
}`,
		`{
	"RawQuery": "delete from sbtest.A where name='xx'",
	"Partitions": [
		{
			"Query": "delete from sbtest.A1 where name = 'xx'",
			"Backend": "backend1",
			"Range": "[0-32)"
		},
		{
			"Query": "delete from sbtest.A2 where name = 'xx'",
			"Backend": "backend2",
			"Range": "[32-64)"
		},
		{
			"Query": "delete from sbtest.A3 where name = 'xx'",
			"Backend": "backend3",
			"Range": "[64-96)"
		},
		{
			"Query": "delete from sbtest.A4 where name = 'xx'",
			"Backend": "backend4",
			"Range": "[96-256)"
		},
		{
			"Query": "delete from sbtest.A5 where name = 'xx'",
			"Backend": "backend5",
			"Range": "[256-512)"
		},
		{
			"Query": "delete from sbtest.A6 where name = 'xx'",
			"Backend": "backend6",
			"Range": "[512-4096)"
		}
	]
}`,
		`{
	"RawQuery": "delete from sbtest.A where id in (1, 2,3)",
	"Partitions": [
		{
			"Query": "delete from sbtest.A1 where id in (1, 2, 3)",
			"Backend": "backend1",
			"Range": "[0-32)"
		},
		{
			"Query": "delete from sbtest.A2 where id in (1, 2, 3)",
			"Backend": "backend2",
			"Range": "[32-64)"
		},
		{
			"Query": "delete from sbtest.A3 where id in (1, 2, 3)",
			"Backend": "backend3",
			"Range": "[64-96)"
		},
		{
			"Query": "delete from sbtest.A4 where id in (1, 2, 3)",
			"Backend": "backend4",
			"Range": "[96-256)"
		},
		{
			"Query": "delete from sbtest.A5 where id in (1, 2, 3)",
			"Backend": "backend5",
			"Range": "[256-512)"
		},
		{
			"Query": "delete from sbtest.A6 where id in (1, 2, 3)",
			"Backend": "backend6",
			"Range": "[512-4096)"
		}
	]
}`,
	}
	querys := []string{
		"delete from sbtest.A where id=1",
		"delete from sbtest.A where id=1 order by xx",
		"delete from sbtest.A where name='xx'",
		"delete from sbtest.A where id in (1, 2,3)",
	}

	log := xlog.NewStdLog(xlog.Level(xlog.PANIC))
	database := "sbtest"

	route, cleanup := router.MockNewRouter(log)
	defer cleanup()

	err := route.AddForTest(database, router.MockTableMConfig())
	assert.Nil(t, err)
	for i, query := range querys {
		node, err := sqlparser.Parse(query)
		assert.Nil(t, err)
		plan := NewDeletePlan(log, database, query, node.(*sqlparser.Delete), route)

		// plan build
		{
			err := plan.Build()
			assert.Nil(t, err)
			got := plan.JSON()
			log.Debug(got)
			want := results[i]
			assert.Equal(t, want, got)
			assert.Equal(t, PlanTypeDelete, plan.Type())
			assert.Nil(t, plan.Children())
		}
	}
}

// TestDeleteUnsupportedPlan checks that unsupported DELETE forms
// (missing WHERE, subqueries) fail Build with the expected errors.
func TestDeleteUnsupportedPlan(t *testing.T) {
	querys := []string{
		"delete from sbtest.A",
		"delete from sbtest.A where id in (select id from t1)",
	}

	results := []string{
		"unsupported: missing.where.clause.in.DML",
		"unsupported: subqueries.in.delete",
	}

	log := xlog.NewStdLog(xlog.Level(xlog.PANIC))
	database := "sbtest"

	route, cleanup := router.MockNewRouter(log)
	defer cleanup()

	err := route.AddForTest(database, router.MockTableMConfig())
	assert.Nil(t, err)
	for i, query := range querys {
		node, err := sqlparser.Parse(query)
		assert.Nil(t, err)
		plan := NewDeletePlan(log, database, query, node.(*sqlparser.Delete), route)

		// plan build
		{
			err := plan.Build()
			want := results[i]
			got := err.Error()
			assert.Equal(t, want, got)
		}
	}
}
@@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package planner + +import ( + "github.com/pkg/errors" + + "github.com/xelabs/go-mysqlstack/sqlparser" + "github.com/xelabs/go-mysqlstack/xlog" +) + +var ( + _ Plan = &DistinctPlan{} +) + +// DistinctPlan represents distinct plan. +type DistinctPlan struct { + log *xlog.Log + + node *sqlparser.Select + + // type + Typ PlanType +} + +// NewDistinctPlan used to create DistinctPlan. +func NewDistinctPlan(log *xlog.Log, node *sqlparser.Select) *DistinctPlan { + return &DistinctPlan{ + log: log, + node: node, + Typ: PlanTypeDistinct, + } +} + +// analyze used to check the distinct is at the support level. +// Unsupported: +// 1. all distinct clause. +func (p *DistinctPlan) analyze() error { + node := p.node + if node.Distinct != "" { + return errors.New("unsupported: distinct") + } + return nil +} + +// Build used to build distributed querys. +func (p *DistinctPlan) Build() error { + return p.analyze() +} + +// Type returns the type of the plan. +func (p *DistinctPlan) Type() PlanType { + return p.Typ +} + +// JSON returns the plan info. +func (p *DistinctPlan) JSON() string { + return "" +} + +// Children returns the children of the plan. +func (p *DistinctPlan) Children() *PlanTree { + return nil +} + +// Size returns the memory size. +func (p *DistinctPlan) Size() int { + return 0 +} diff --git a/src/planner/distinct_plan_test.go b/src/planner/distinct_plan_test.go new file mode 100644 index 00000000..6a6e30a9 --- /dev/null +++ b/src/planner/distinct_plan_test.go @@ -0,0 +1,44 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. 
package planner

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/xelabs/go-mysqlstack/sqlparser"
	"github.com/xelabs/go-mysqlstack/xlog"
)

// TestDistinctPlan checks that a DISTINCT select is rejected by Build
// and that JSON/Children stay empty.
func TestDistinctPlan(t *testing.T) {
	querys := []string{
		"select distinct(a), b from t",
	}
	results := []string{
		"unsupported: distinct",
	}

	log := xlog.NewStdLog(xlog.Level(xlog.PANIC))
	for i, query := range querys {
		tree, err := sqlparser.Parse(query)
		assert.Nil(t, err)
		node := tree.(*sqlparser.Select)
		assert.Nil(t, err)
		plan := NewDistinctPlan(log, node)
		{
			err := plan.Build()
			want := results[i]
			got := err.Error()
			assert.Equal(t, want, got)

			assert.Nil(t, plan.Children())
			assert.Equal(t, "", plan.JSON())
		}
	}
}

// ---- src/planner/expr.go ----

/*
 * Radon
 *
 * Copyright 2018 The Radon Authors.
 * Code is licensed under the GPLv3.
 *
 */

package planner

import (
	"fmt"
	"router"

	"github.com/pkg/errors"

	"github.com/xelabs/go-mysqlstack/sqlparser"
)

// getDMLRouting used to get the routing from the where clause.
// If the WHERE clause contains an equality on the shard key with a literal
// value, routing is narrowed to that value; otherwise all segments are
// returned (full scatter).
func getDMLRouting(database, table, shardkey string, where *sqlparser.Where, router *router.Router) ([]router.Segment, error) {
	if where != nil {
		filters := splitAndExpression(nil, where.Expr)
		for _, filter := range filters {
			comparison, ok := filter.(*sqlparser.ComparisonExpr)
			if !ok {
				continue
			}

			// Only deal with Equal statement.
			switch comparison.Operator {
			case sqlparser.EqualStr:
				if nameMatch(comparison.Left, shardkey) {
					sqlval, ok := comparison.Right.(*sqlparser.SQLVal)
					if ok {
						return router.Lookup(database, table, sqlval, sqlval)
					}
				}
			}
		}
	}
	return router.Lookup(database, table, nil, nil)
}

// hasSubquery reports whether any subquery node appears anywhere in the AST.
// The dummy error is only used to abort Walk early; its value is discarded.
func hasSubquery(node sqlparser.SQLNode) bool {
	has := false
	_ = sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) {
		if _, ok := node.(*sqlparser.Subquery); ok {
			has = true
			return false, errors.New("dummy")
		}
		return true, nil
	}, node)
	return has
}

// nameMatch reports whether node is a column reference whose name equals shardkey.
func nameMatch(node sqlparser.Expr, shardkey string) bool {
	colname, ok := node.(*sqlparser.ColName)
	return ok && (colname.Name.String() == shardkey)
}

// isShardKeyChanging returns true if any of the update
// expressions modify a shardkey column.
func isShardKeyChanging(exprs sqlparser.UpdateExprs, shardkey string) bool {
	for _, assignment := range exprs {
		if shardkey == assignment.Name.Name.String() {
			return true
		}
	}
	return false
}

// splitAndExpression breaks up the Expr into AND-separated conditions
// and appends them to filters, which can be shuffled and recombined
// as needed.
func splitAndExpression(filters []sqlparser.Expr, node sqlparser.Expr) []sqlparser.Expr {
	if node == nil {
		return filters
	}
	if node, ok := node.(*sqlparser.AndExpr); ok {
		filters = splitAndExpression(filters, node.Left)
		return splitAndExpression(filters, node.Right)
	}
	return append(filters, node)
}

// checkComparison checks the WHERE or JOIN-ON clause contains non-sqlval comparison(t1.id=t2.id).
func checkComparison(expr sqlparser.Expr) error {
	filters := splitAndExpression(nil, expr)
	for _, filter := range filters {
		comparison, ok := filter.(*sqlparser.ComparisonExpr)
		if !ok {
			continue
		}
		if _, ok := comparison.Right.(*sqlparser.SQLVal); !ok {
			buf := sqlparser.NewTrackedBuffer(nil)
			comparison.Format(buf)
			return errors.Errorf("unsupported: [%s].must.be.value.compare", buf.String())
		}
	}
	return nil
}

// selectTuple describes one projected expression of a SELECT list.
type selectTuple struct {
	// field is the result column name MySQL would return.
	field string
	// column is the underlying column name (possibly qualifier-prefixed).
	column string
	// fn is the aggregate/function name, empty for plain columns.
	fn string
	// distinct is true for func(distinct col).
	distinct bool
}

// parserSelectExpr parses the AliasedExpr to {as, column, func} tuple.
// field: the field name of mysql returns
// column: column name
// func: function name
// For example: select count(*), count(*) as cstar, max(a), max(b) as mb, a as a1, x.b from t,x group by a1,b
// {field:count(*) column:* fn:count}
// {field:cstar column:* fn:count}
// {field:max(a) column:a fn:max}
// {field:mb column:b fn:max}
// {field:a1 column:a fn:}
// {field:b column:x.b fn:}
func parserSelectExpr(expr *sqlparser.AliasedExpr) (*selectTuple, error) {
	field := ""
	colName := ""
	colName1 := ""
	funcName := ""
	distinct := false
	field = expr.As.String()
	switch expr.Expr.(type) {
	case *sqlparser.ColName:
		col := expr.Expr.(*sqlparser.ColName)
		colName = col.Name.String()
		colName1 = colName
		if !col.Qualifier.IsEmpty() {
			colName = col.Qualifier.Name.String() + "." + colName
		}
	case *sqlparser.FuncExpr:
		ex := expr.Expr.(*sqlparser.FuncExpr)
		distinct = ex.Distinct
		funcName = ex.Name.String()
		// NOTE(review): assumes ex.Exprs is non-empty — a zero-argument
		// function would panic here; confirm the parser never produces one.
		switch ex.Exprs[0].(type) {
		case *sqlparser.AliasedExpr:
			exx := ex.Exprs[0].(*sqlparser.AliasedExpr)
			tuple, err := parserSelectExpr(exx)
			if err != nil {
				return nil, err
			}
			colName = tuple.column
		case *sqlparser.StarExpr:
			colName = "*"
		}
	}
	// No alias: synthesize the field name the way MySQL would.
	if field == "" {
		if funcName != "" {
			field = fmt.Sprintf("%s(%s)", funcName, colName)
		} else {
			field = colName1
		}
	}
	return &selectTuple{field, colName, funcName, distinct}, nil
}

// parserSelectExprs maps parserSelectExpr over a SELECT list;
// a bare '*' becomes the {field:"*", column:"*"} tuple.
func parserSelectExprs(exprs sqlparser.SelectExprs) ([]selectTuple, error) {
	var tuples []selectTuple
	for _, expr := range exprs {
		switch expr.(type) {
		case *sqlparser.AliasedExpr:
			exp := expr.(*sqlparser.AliasedExpr)
			tuple, err := parserSelectExpr(exp)
			if err != nil {
				return nil, err
			}
			tuples = append(tuples, *tuple)
		case *sqlparser.StarExpr:
			tuple := selectTuple{field: "*", column: "*"}
			tuples = append(tuples, tuple)
		}
	}
	return tuples, nil
}

// checkInTuple reports whether name is covered by the select list
// (either an exact field match or a '*' projection).
func checkInTuple(name string, tuples []selectTuple) bool {
	for _, tuple := range tuples {
		if (tuple.field == "*") || (tuple.field == name) {
			return true
		}
	}
	return false
}

// decomposeAvg decomposes avg(a) to sum(a) and count(a).
func decomposeAvg(tuple *selectTuple) []*sqlparser.AliasedExpr {
	var ret []*sqlparser.AliasedExpr
	sum := &sqlparser.AliasedExpr{Expr: &sqlparser.FuncExpr{
		Name:  sqlparser.NewColIdent("sum"),
		Exprs: []sqlparser.SelectExpr{&sqlparser.AliasedExpr{Expr: sqlparser.NewValArg([]byte(tuple.column))}},
	}}
	count := &sqlparser.AliasedExpr{Expr: &sqlparser.FuncExpr{
		Name:  sqlparser.NewColIdent("count"),
		Exprs: []sqlparser.SelectExpr{&sqlparser.AliasedExpr{Expr: sqlparser.NewValArg([]byte(tuple.column))}},
	}}
	ret = append(ret, sum, count)
	return ret
}

// ---- src/planner/insert_plan.go ----

/*
 * Radon
 *
 * Copyright 2018 The Radon Authors.
 * Code is licensed under the GPLv3.
 *
 */

package planner

import (
	"encoding/json"
	"router"
	"sort"
	"xcontext"

	"github.com/pkg/errors"

	"github.com/xelabs/go-mysqlstack/sqlparser"
	"github.com/xelabs/go-mysqlstack/sqlparser/depends/hack"
	"github.com/xelabs/go-mysqlstack/xlog"
)

var (
	// Compile-time check: InsertPlan must satisfy the Plan interface.
	_ Plan = &InsertPlan{}
)

// InsertPlan represents insertion plan.
// It groups INSERT/REPLACE rows by shard-key routing and rewrites one
// statement per target partition.
type InsertPlan struct {
	log *xlog.Log

	// router
	router *router.Router

	// insert ast
	node *sqlparser.Insert

	// database
	database string

	// raw query
	RawQuery string

	// type
	Typ PlanType

	// mode
	ReqMode xcontext.RequestMode

	// query and backend tuple
	Querys []xcontext.QueryTuple
}

// NewInsertPlan used to create InsertPlan.
func NewInsertPlan(log *xlog.Log, database string, query string, node *sqlparser.Insert, router *router.Router) *InsertPlan {
	return &InsertPlan{
		log:      log,
		node:     node,
		router:   router,
		database: database,
		RawQuery: query,
		Typ:      PlanTypeInsert,
		Querys:   make([]xcontext.QueryTuple, 0, 16),
	}
}
// Build used to build distributed querys.
// Steps: resolve database/table, find the shard-key column in the column
// list, route every VALUES row by its shard-key literal, group rows per
// target partition, then rewrite one INSERT/REPLACE per partition.
func (p *InsertPlan) Build() error {
	node := p.node

	database := p.database
	// Qualifier is database in the insert query, such as "db.t1".
	if !node.Table.Qualifier.IsEmpty() {
		database = node.Table.Qualifier.String()
	}
	table := node.Table.Name.String()

	// Get the shard key.
	shardKey, err := p.router.ShardKey(database, table)
	if err != nil {
		return err
	}

	// Check the OnDup.
	if len(node.OnDup) > 0 {
		// analyze shardkey changing: ON DUPLICATE KEY UPDATE must not touch it.
		if isShardKeyChanging(sqlparser.UpdateExprs(node.OnDup), shardKey) {
			return errors.New("unsupported: cannot.update.shard.key")
		}
	}

	// Find the shard key index in the explicit column list; it is required.
	idx := -1
	for i, column := range node.Columns {
		if column.String() == shardKey {
			idx = i
			break
		}
	}
	if idx == -1 {
		return errors.Errorf("unsupported: shardkey.column[%v].missing", shardKey)
	}

	// Rebuild distributed querys.
	// valTuple accumulates the rows destined for one rewritten partition table.
	type valTuple struct {
		backend string
		table   string
		rangi   string
		vals    sqlparser.Values
	}
	vals := make(map[string]*valTuple)
	rows, ok := node.Rows.(sqlparser.Values)
	if !ok {
		// INSERT ... SELECT and similar are not supported.
		return errors.Errorf("unsupported: rows.can.not.be.subquery[%T]", node.Rows)
	}

	for _, row := range rows {
		if idx >= len(row) {
			return errors.Errorf("unsupported: shardkey[%v].out.of.index:[%v]", shardKey, idx)
		}
		// The shard-key value must be a plain literal (no expressions/functions).
		shardVal, ok := row[idx].(*sqlparser.SQLVal)
		if !ok {
			return errors.Errorf("unsupported: shardkey[%v].type.canot.be[%T]", shardKey, row[idx])
		}

		// Point lookup: a single value routes to exactly one segment.
		segments, err := p.router.Lookup(database, table, shardVal, shardVal)
		if err != nil {
			return err
		}
		rewrittenTable := segments[0].Table
		backend := segments[0].Backend
		rangi := segments[0].Range.String()
		val, ok := vals[rewrittenTable]
		if !ok {
			val = &valTuple{
				backend: backend,
				table:   rewrittenTable,
				rangi:   rangi,
				vals:    make(sqlparser.Values, 0, 16),
			}
			vals[rewrittenTable] = val
		}
		val.vals = append(val.vals, row)
	}

	// Rebuild querys with router info.
	// NOTE(review): map iteration order is random, so p.Querys order is
	// nondeterministic here; JSON() sorts before printing.
	for rewritten, v := range vals {
		buf := sqlparser.NewTrackedBuffer(nil)
		buf.Myprintf("%s %v%sinto %s.%s%v %v%v", node.Action, node.Comments, node.Ignore, database, rewritten, node.Columns, v.vals, node.OnDup)
		tuple := xcontext.QueryTuple{
			Query:   buf.String(),
			Backend: v.backend,
			Range:   v.rangi,
		}
		p.Querys = append(p.Querys, tuple)
	}
	return nil
}

// Type returns the type of the plan.
func (p *InsertPlan) Type() PlanType {
	return p.Typ
}

// JSON returns the plan info as tab-indented JSON, with partitions
// sorted for deterministic output.
func (p *InsertPlan) JSON() string {
	type explain struct {
		RawQuery   string                `json:",omitempty"`
		Partitions []xcontext.QueryTuple `json:",omitempty"`
	}

	var parts []xcontext.QueryTuple
	// Sort.
	sort.Sort(xcontext.QueryTuples(p.Querys))
	parts = append(parts, p.Querys...)
	exp := &explain{
		RawQuery:   p.RawQuery,
		Partitions: parts,
	}
	bout, err := json.MarshalIndent(exp, "", "\t")
	if err != nil {
		return err.Error()
	}
	return hack.String(bout)
}

// Children returns the children of the plan; an insert plan has none.
func (p *InsertPlan) Children() *PlanTree {
	return nil
}

// Size returns the memory size: raw query plus all rewritten query strings.
func (p *InsertPlan) Size() int {
	size := len(p.RawQuery)
	for _, q := range p.Querys {
		size += len(q.Query)
	}
	return size
}
package planner

import (
	"fmt"
	"router"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/xelabs/go-mysqlstack/sqlparser"
	"github.com/xelabs/go-mysqlstack/xlog"
)

// TestInsertPlan checks row grouping/rewriting for supported INSERTs
// against golden JSON from InsertPlan.JSON.
func TestInsertPlan(t *testing.T) {
	results := []string{
		`{
	"RawQuery": "insert into A(id, b, c) values(1,2,3) on duplicate key update c=11",
	"Partitions": [
		{
			"Query": "insert into sbtest.A6(id, b, c) values (1, 2, 3) on duplicate key update c = 11",
			"Backend": "backend6",
			"Range": "[512-4096)"
		}
	]
}`,
		`{
	"RawQuery": "insert into A(id, b, c) values(1,2,3),(23,4,5), (65536,3,4)",
	"Partitions": [
		{
			"Query": "insert into sbtest.A5(id, b, c) values (65536, 3, 4)",
			"Backend": "backend5",
			"Range": "[256-512)"
		},
		{
			"Query": "insert into sbtest.A6(id, b, c) values (1, 2, 3), (23, 4, 5)",
			"Backend": "backend6",
			"Range": "[512-4096)"
		}
	]
}`,
		`{
	"RawQuery": "insert into sbtest.A(id, b, c) values(1,2,3),(23,4,5), (65536,3,4)",
	"Partitions": [
		{
			"Query": "insert into sbtest.A5(id, b, c) values (65536, 3, 4)",
			"Backend": "backend5",
			"Range": "[256-512)"
		},
		{
			"Query": "insert into sbtest.A6(id, b, c) values (1, 2, 3), (23, 4, 5)",
			"Backend": "backend6",
			"Range": "[512-4096)"
		}
	]
}`,
	}
	querys := []string{
		"insert into A(id, b, c) values(1,2,3) on duplicate key update c=11",
		"insert into A(id, b, c) values(1,2,3),(23,4,5), (65536,3,4)",
		"insert into sbtest.A(id, b, c) values(1,2,3),(23,4,5), (65536,3,4)",
	}

	log := xlog.NewStdLog(xlog.Level(xlog.PANIC))
	database := "sbtest"

	route, cleanup := router.MockNewRouter(log)
	defer cleanup()

	err := route.AddForTest(database, router.MockTableMConfig())
	assert.Nil(t, err)
	for i, query := range querys {
		node, err := sqlparser.Parse(query)
		assert.Nil(t, err)
		plan := NewInsertPlan(log, database, query, node.(*sqlparser.Insert), route)

		// plan build
		{
			err := plan.Build()
			assert.Nil(t, err)
			got := plan.JSON()
			log.Info(got)
			want := results[i]
			assert.Equal(t, want, got)
			plan.Type()
			plan.Size()
		}
	}
}

// TestInsertUnsupportedPlan checks every rejected INSERT form and its
// exact error string.
func TestInsertUnsupportedPlan(t *testing.T) {
	querys := []string{
		"insert into sbtest.A(b, c, id) values(1,2)",
		"insert into sbtest.A(b, c, d) values(1,2, 3)",
		"insert into sbtest.A select * from sbtest.B",
		"insert into sbtest.A(b, c, id) values(1,2,3) on duplicate key update id=1",
		"insert into sbtest.A(b, c, id) values(1, floor(3), floor(3))",
		"insert into sbtest.A(b,c,id) select id,b,c from sbtest.A",
	}

	results := []string{
		"unsupported: shardkey[id].out.of.index:[2]",
		"unsupported: shardkey.column[id].missing",
		"unsupported: shardkey.column[id].missing",
		"unsupported: cannot.update.shard.key",
		"unsupported: shardkey[id].type.canot.be[*sqlparser.FuncExpr]",
		"unsupported: rows.can.not.be.subquery[*sqlparser.Select]",
	}

	log := xlog.NewStdLog(xlog.Level(xlog.PANIC))
	database := "sbtest"

	route, cleanup := router.MockNewRouter(log)
	defer cleanup()

	err := route.AddForTest(database, router.MockTableMConfig())
	assert.Nil(t, err)
	for i, query := range querys {
		node, err := sqlparser.Parse(query)
		assert.Nil(t, err)
		plan := NewInsertPlan(log, database, query, node.(*sqlparser.Insert), route)

		// plan build
		{
			err := plan.Build()
			want := results[i]
			got := err.Error()
			assert.Equal(t, want, got)
		}
	}
}

// TestInsertPlanBench is a rough throughput measurement of plan building
// (parse once, build N times); it prints the rate rather than asserting it.
func TestInsertPlanBench(t *testing.T) {
	log := xlog.NewStdLog(xlog.Level(xlog.PANIC))

	query := "insert into sbtest.A(id, b, c) values(1,2,3),(23,4,5), (117,3,4),(1,2,3),(23,4,5), (117,3,4)"
	database := "sbtest"

	route, cleanup := router.MockNewRouter(log)
	defer cleanup()

	err := route.AddForTest(database, router.MockTableMConfig())
	assert.Nil(t, err)

	{
		N := 100000
		now := time.Now()
		node, err := sqlparser.Parse(query)
		assert.Nil(t, err)
		for i := 0; i < N; i++ {
			plan := NewInsertPlan(log, database, query, node.(*sqlparser.Insert), route)
			err := plan.Build()
			assert.Nil(t, err)
		}

		took := time.Since(now)
		fmt.Printf(" LOOP\t%v COST %v, avg:%v/s\n", N, took, (int64(N)/(took.Nanoseconds()/1e6))*1000)
	}
}

// TestReplacePlan mirrors TestInsertPlan for REPLACE statements.
func TestReplacePlan(t *testing.T) {
	results := []string{`{
	"RawQuery": "replace into A(id, b, c) values(1,2,3),(23,4,5), (65536,3,4)",
	"Partitions": [
		{
			"Query": "replace into sbtest.A5(id, b, c) values (65536, 3, 4)",
			"Backend": "backend5",
			"Range": "[256-512)"
		},
		{
			"Query": "replace into sbtest.A6(id, b, c) values (1, 2, 3), (23, 4, 5)",
			"Backend": "backend6",
			"Range": "[512-4096)"
		}
	]
}`,
		`{
	"RawQuery": "replace into sbtest.A(id, b, c) values(1,2,3),(23,4,5), (65536,3,4)",
	"Partitions": [
		{
			"Query": "replace into sbtest.A5(id, b, c) values (65536, 3, 4)",
			"Backend": "backend5",
			"Range": "[256-512)"
		},
		{
			"Query": "replace into sbtest.A6(id, b, c) values (1, 2, 3), (23, 4, 5)",
			"Backend": "backend6",
			"Range": "[512-4096)"
		}
	]
}`,
	}
	querys := []string{
		"replace into A(id, b, c) values(1,2,3),(23,4,5), (65536,3,4)",
		"replace into sbtest.A(id, b, c) values(1,2,3),(23,4,5), (65536,3,4)",
	}

	log := xlog.NewStdLog(xlog.Level(xlog.PANIC))
	database := "sbtest"

	route, cleanup := router.MockNewRouter(log)
	defer cleanup()

	err := route.AddForTest(database, router.MockTableMConfig())
	assert.Nil(t, err)
	for i, query := range querys {
		node, err := sqlparser.Parse(query)
		assert.Nil(t, err)
		plan := NewInsertPlan(log, database, query, node.(*sqlparser.Insert), route)

		// plan build
		{
			err := plan.Build()
			assert.Nil(t, err)
			got := plan.JSON()
			log.Info(got)
			want := results[i]
			assert.Equal(t, want, got)
			plan.Type()
		}
	}
}

// TestReplaceUnsupportedPlan mirrors TestInsertUnsupportedPlan for REPLACE.
func TestReplaceUnsupportedPlan(t *testing.T) {
	querys := []string{
		"replace into sbtest.A(b, c, id) values(1,2)",
		"replace into sbtest.A(b, c, d) values(1,2, 3)",
		"replace into sbtest.A select * from sbtest.B",
	}

	results := []string{
		"unsupported: shardkey[id].out.of.index:[2]",
		"unsupported: shardkey.column[id].missing",
		"unsupported: shardkey.column[id].missing",
	}

	log := xlog.NewStdLog(xlog.Level(xlog.PANIC))
	database := "sbtest"

	route, cleanup := router.MockNewRouter(log)
	defer cleanup()

	err := route.AddForTest(database, router.MockTableMConfig())
	assert.Nil(t, err)
	for i, query := range querys {
		node, err := sqlparser.Parse(query)
		assert.Nil(t, err)
		plan := NewInsertPlan(log, database, query, node.(*sqlparser.Insert), route)

		// plan build
		{
			err := plan.Build()
			want := results[i]
			got := err.Error()
			assert.Equal(t, want, got)
		}
	}
}

// TestReplacePlanBench mirrors TestInsertPlanBench for REPLACE.
func TestReplacePlanBench(t *testing.T) {
	log := xlog.NewStdLog(xlog.Level(xlog.PANIC))

	query := "replace into sbtest.A(id, b, c) values(1,2,3),(23,4,5), (117,3,4),(1,2,3),(23,4,5), (117,3,4)"
	database := "sbtest"

	route, cleanup := router.MockNewRouter(log)
	defer cleanup()

	err := route.AddForTest(database, router.MockTableMConfig())
	assert.Nil(t, err)

	{
		N := 100000
		now := time.Now()
		node, err := sqlparser.Parse(query)
		assert.Nil(t, err)
		for i := 0; i < N; i++ {
			plan := NewInsertPlan(log, database, query, node.(*sqlparser.Insert), route)
			err := plan.Build()
			assert.Nil(t, err)
		}

		took := time.Since(now)
		fmt.Printf(" LOOP\t%v COST %v, avg:%v/s\n", N, took, (int64(N)/(took.Nanoseconds()/1e6))*1000)
	}
}

// ---- src/planner/join_plan.go ----

/*
 * Radon
 *
 * Copyright 2018 The Radon Authors.
 * Code is licensed under the GPLv3.
 *
 */

package planner

import (
	"errors"

	"github.com/xelabs/go-mysqlstack/sqlparser"
	"github.com/xelabs/go-mysqlstack/xlog"
)

var (
	// Compile-time check: JoinPlan must satisfy the Plan interface.
	_ Plan = &JoinPlan{}
)
+type JoinPlan struct { + log *xlog.Log + + node *sqlparser.Select + + // type + typ PlanType +} + +// NewJoinPlan used to create JoinPlan. +func NewJoinPlan(log *xlog.Log, node *sqlparser.Select) *JoinPlan { + return &JoinPlan{ + log: log, + node: node, + typ: PlanTypeJoin, + } +} + +// analyze used to check the join is at the support level. +// unsupported join. +func (p *JoinPlan) analyze() error { + node := p.node + for _, tab := range node.From { + switch tab.(type) { + case *sqlparser.AliasedTableExpr: + // select * from a,b where a.id=b.id + if len(node.From) > 1 && node.Where != nil { + return errors.New("unsupported: JOIN.expression") + } + case *sqlparser.JoinTableExpr: + return errors.New("unsupported: JOIN.expression") + } + } + return nil +} + +// Build used to build distributed querys. +func (p *JoinPlan) Build() error { + return p.analyze() +} + +// Type returns the type of the plan. +func (p *JoinPlan) Type() PlanType { + return p.typ +} + +// JSON returns the plan info. +func (p *JoinPlan) JSON() string { + return "" +} + +// Children returns the children of the plan. +func (p *JoinPlan) Children() *PlanTree { + return nil +} + +// Size returns the memory size. +func (p *JoinPlan) Size() int { + return 0 +} diff --git a/src/planner/join_plan_test.go b/src/planner/join_plan_test.go new file mode 100644 index 00000000..dc198e79 --- /dev/null +++ b/src/planner/join_plan_test.go @@ -0,0 +1,64 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. 
package planner

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/xelabs/go-mysqlstack/sqlparser"
	"github.com/xelabs/go-mysqlstack/xlog"
)

// TestJoinPlan checks that a single-table self-comparison is accepted
// (not treated as a join).
func TestJoinPlan(t *testing.T) {
	querys := []string{
		"select * from t where t.a=t.b",
	}

	log := xlog.NewStdLog(xlog.Level(xlog.PANIC))
	for _, query := range querys {
		tree, err := sqlparser.Parse(query)
		assert.Nil(t, err)
		node := tree.(*sqlparser.Select)
		assert.Nil(t, err)
		plan := NewJoinPlan(log, node)
		{
			err := plan.Build()
			assert.Nil(t, err)
			assert.Nil(t, plan.Children())
			assert.Equal(t, "", plan.JSON())
		}
	}
}

// TestJoinUnsupportedPlan checks that comma joins and explicit JOINs
// are rejected with the expected error.
func TestJoinUnsupportedPlan(t *testing.T) {
	querys := []string{
		"select x.id, y.id from x,y where x.id=y.id",
		"select x.id, y.id from x join y on x.id=y.id where x.id=1",
	}
	results := []string{
		"unsupported: JOIN.expression",
		"unsupported: JOIN.expression",
	}

	log := xlog.NewStdLog(xlog.Level(xlog.PANIC))
	for i, query := range querys {
		tree, err := sqlparser.Parse(query)
		assert.Nil(t, err)
		node := tree.(*sqlparser.Select)
		assert.Nil(t, err)
		plan := NewJoinPlan(log, node)
		{
			err := plan.Build()
			got := err.Error()
			want := results[i]
			assert.Equal(t, want, got)
		}
	}
}

// ---- src/planner/limit_plan.go ----

/*
 * Radon
 *
 * Copyright 2018 The Radon Authors.
 * Code is licensed under the GPLv3.
 *
 */

package planner

import (
	"encoding/json"
	"fmt"
	"strconv"

	"github.com/pkg/errors"

	"github.com/xelabs/go-mysqlstack/sqlparser"
	"github.com/xelabs/go-mysqlstack/sqlparser/depends/hack"
	"github.com/xelabs/go-mysqlstack/xlog"
)

var (
	// Compile-time check: LimitPlan must satisfy the Plan interface.
	_ Plan = &LimitPlan{}
)

// LimitPlan represents limit plan.
+type LimitPlan struct { + log *xlog.Log + + node *sqlparser.Select + rewritten *sqlparser.Limit + Offset int + Limit int + + // type + typ PlanType +} + +// NewLimitPlan used to create LimitPlan. +func NewLimitPlan(log *xlog.Log, node *sqlparser.Select) *LimitPlan { + return &LimitPlan{ + log: log, + node: node, + typ: PlanTypeLimit, + } +} + +// analyze used to analyze the 'order by' is at the support level. +func (p *LimitPlan) analyze() error { + node := p.node.Limit + if node == nil { + return nil + } + + ok := true + sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + switch node.(type) { + // Limit clause must be SQLVal type. + case *sqlparser.Limit: + return true, nil + case *sqlparser.SQLVal: + val := node.(*sqlparser.SQLVal) + if val.Type != sqlparser.IntVal { + ok = false + return false, nil + } + return true, nil + default: + ok = false + return false, nil + } + }, node) + + if !ok { + return errors.New("unsupported: limit.offset.or.counts.must.be.IntVal") + } + return nil +} + +// Build used to build distributed querys. +func (p *LimitPlan) Build() error { + if err := p.analyze(); err != nil { + return err + } + + node := p.node.Limit + if node == nil { + return nil + } + + if node.Offset != nil { + val := node.Offset.(*sqlparser.SQLVal) + out, err := strconv.ParseInt(hack.String(val.Val), 10, 64) + if err != nil { + return err + } + p.Offset = int(out) + } + + if node.Rowcount != nil { + val := node.Rowcount.(*sqlparser.SQLVal) + out, err := strconv.ParseInt(hack.String(val.Val), 10, 64) + if err != nil { + return err + } + p.Limit = int(out) + } + p.rewritten = &sqlparser.Limit{Rowcount: sqlparser.NewIntVal([]byte(fmt.Sprintf("%d", p.Offset+p.Limit)))} + return nil +} + +// Type returns the type of the plan. +func (p *LimitPlan) Type() PlanType { + return p.typ +} + +// JSON returns the plan info. 
+func (p *LimitPlan) JSON() string { + bout, err := json.MarshalIndent(p, "", "\t") + if err != nil { + return err.Error() + } + return string(bout) +} + +// Children returns the children of the plan. +func (p *LimitPlan) Children() *PlanTree { + return nil +} + +// ReWritten used to re-write the limit clause. +func (p *LimitPlan) ReWritten() *sqlparser.Limit { + return p.rewritten +} + +// Size returns the memory size. +func (p *LimitPlan) Size() int { + return 0 +} diff --git a/src/planner/limit_plan_test.go b/src/planner/limit_plan_test.go new file mode 100644 index 00000000..c76e0de1 --- /dev/null +++ b/src/planner/limit_plan_test.go @@ -0,0 +1,122 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package planner + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/xelabs/go-mysqlstack/sqlparser" + "github.com/xelabs/go-mysqlstack/xlog" +) + +func TestLimitPlan(t *testing.T) { + querys := []string{ + "select a,b from t order by a limit 10,9", + "select a,b from t order by a limit 10", + "select a,b from t", + } + results := []string{ + `{ + "Offset": 10, + "Limit": 9 +}`, + `{ + "Offset": 0, + "Limit": 10 +}`, + `{ + "Offset": 0, + "Limit": 0 +}`, + } + + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + for i, query := range querys { + tree, err := sqlparser.Parse(query) + assert.Nil(t, err) + node := tree.(*sqlparser.Select) + assert.Nil(t, err) + plan := NewLimitPlan(log, node) + // plan build + { + err := plan.Build() + assert.Nil(t, err) + want := results[i] + got := plan.JSON() + assert.Equal(t, want, got) + assert.True(t, nil == plan.Children()) + assert.Equal(t, PlanTypeLimit, plan.Type()) + } + } +} + +func TestLimitPlanReWritten(t *testing.T) { + querys := []string{ + "select a,b from t order by a limit 10,9", + "select a,b from t order by a limit 10", + "select a,b from t", + } + results := []string{ + " limit 19", + " limit 10", + "", + } + + log := 
xlog.NewStdLog(xlog.Level(xlog.PANIC)) + for i, query := range querys { + tree, err := sqlparser.Parse(query) + assert.Nil(t, err) + node := tree.(*sqlparser.Select) + assert.Nil(t, err) + plan := NewLimitPlan(log, node) + // plan build + { + err := plan.Build() + assert.Nil(t, err) + want := results[i] + + buf := sqlparser.NewTrackedBuffer(nil) + buf.Myprintf("%v", plan.ReWritten()) + got := buf.String() + assert.Equal(t, want, got) + } + } +} + +func TestLimitPlanError(t *testing.T) { + querys := []string{ + "select a,b from t order by a limit 10,x", + "select a,b from t order by a limit x,1", + "select a,b from t order by a limit x", + "select a,b from t order by a limit 3.1415", + } + results := []string{ + "unsupported: limit.offset.or.counts.must.be.IntVal", + "unsupported: limit.offset.or.counts.must.be.IntVal", + "unsupported: limit.offset.or.counts.must.be.IntVal", + "unsupported: limit.offset.or.counts.must.be.IntVal", + } + + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + for i, query := range querys { + tree, err := sqlparser.Parse(query) + assert.Nil(t, err) + node := tree.(*sqlparser.Select) + assert.Nil(t, err) + plan := NewLimitPlan(log, node) + // plan build + { + err := plan.Build() + want := results[i] + got := err.Error() + assert.Equal(t, want, got) + } + } +} diff --git a/src/planner/orderby_plan.go b/src/planner/orderby_plan.go new file mode 100644 index 00000000..6f6d7b3d --- /dev/null +++ b/src/planner/orderby_plan.go @@ -0,0 +1,118 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package planner + +import ( + "encoding/json" + + "github.com/pkg/errors" + + "github.com/xelabs/go-mysqlstack/sqlparser" + "github.com/xelabs/go-mysqlstack/xlog" +) + +var ( + _ Plan = &OrderByPlan{} +) + +// Direction type. +type Direction string + +const ( + // ASC enum. + ASC Direction = "ASC" + + // DESC enum. + DESC Direction = "DESC" +) + +// OrderBy tuple. 
+type OrderBy struct { + Field string + Direction Direction +} + +// OrderByPlan represents order-by plan. +type OrderByPlan struct { + log *xlog.Log + node *sqlparser.Select + tuples []selectTuple + OrderBys []OrderBy `json:"OrderBy(s)"` + typ PlanType +} + +// NewOrderByPlan used to create OrderByPlan. +func NewOrderByPlan(log *xlog.Log, node *sqlparser.Select, tuples []selectTuple) *OrderByPlan { + return &OrderByPlan{ + log: log, + node: node, + tuples: tuples, + typ: PlanTypeOrderby, + } +} + +// check used to check the 'order by' is at the support level. +// Supports: +// 1. sqlparser.ColName: 'select a from t order by a' +// +// Unsupported(orderby field must be in select list): +// 1. 'select a from t order by b' +func (p *OrderByPlan) analyze() error { + order := p.node.OrderBy + for _, o := range order { + switch o.Expr.(type) { + case *sqlparser.ColName: + order := OrderBy{} + switch o.Direction { + case "desc": + order.Direction = DESC + case "asc": + order.Direction = ASC + } + e := o.Expr.(*sqlparser.ColName) + order.Field = e.Name.String() + if !checkInTuple(order.Field, p.tuples) { + return errors.Errorf("unsupported: orderby[%+v].should.in.select.list", order.Field) + } + p.OrderBys = append(p.OrderBys, order) + default: + return errors.Errorf("unsupported: orderby:%+v", o.Expr) + } + } + return nil +} + +// Build used to build distributed querys. +func (p *OrderByPlan) Build() error { + return p.analyze() +} + +// Type returns the type of the plan. +func (p *OrderByPlan) Type() PlanType { + return p.typ +} + +// JSON returns the plan info. +func (p *OrderByPlan) JSON() string { + bout, err := json.MarshalIndent(p, "", "\t") + if err != nil { + return err.Error() + } + return string(bout) +} + +// Children returns the children of the plan. +func (p *OrderByPlan) Children() *PlanTree { + return nil +} + +// Size returns the memory size. 
+func (p *OrderByPlan) Size() int { + return 0 +} diff --git a/src/planner/orderby_plan_test.go b/src/planner/orderby_plan_test.go new file mode 100644 index 00000000..e62cbc64 --- /dev/null +++ b/src/planner/orderby_plan_test.go @@ -0,0 +1,71 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package planner + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/xelabs/go-mysqlstack/sqlparser" + "github.com/xelabs/go-mysqlstack/xlog" +) + +func TestOrderByPlan(t *testing.T) { + querys := []string{ + "select a,b from t order by a", + "select * from t order by a", + "select a,*,c,d from t order by a asc", + "select a as b,c,d from t order by b desc", + } + + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + for _, query := range querys { + tree, err := sqlparser.Parse(query) + assert.Nil(t, err) + node := tree.(*sqlparser.Select) + tuples, err := parserSelectExprs(node.SelectExprs) + assert.Nil(t, err) + plan := NewOrderByPlan(log, node, tuples) + // plan build + { + err := plan.Build() + assert.Nil(t, err) + log.Debug("%v,%v,%s", plan.Type(), plan.Children(), plan.JSON()) + } + log.Debug("\n") + } +} + +func TestOrderByPlanError(t *testing.T) { + querys := []string{ + "select a,b from t order by c", + "select a,b from t order by rand()", + } + results := []string{ + "unsupported: orderby[c].should.in.select.list", + "unsupported: orderby:&{Qualifier: Name:rand Distinct:false Exprs:[]}", + } + + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + for i, query := range querys { + tree, err := sqlparser.Parse(query) + assert.Nil(t, err) + node := tree.(*sqlparser.Select) + tuples, err := parserSelectExprs(node.SelectExprs) + assert.Nil(t, err) + plan := NewOrderByPlan(log, node, tuples) + // plan build + { + err := plan.Build() + want := results[i] + got := err.Error() + assert.Equal(t, want, got) + } + } +} diff --git a/src/planner/plan_types.go b/src/planner/plan_types.go new file mode 
100644 index 00000000..04e0415c --- /dev/null +++ b/src/planner/plan_types.go @@ -0,0 +1,44 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package planner + +// PlanType type. +type PlanType string + +const ( + // PlanTypeDDL enum. + PlanTypeDDL PlanType = "PlanTypeDDL" + + // PlanTypeInsert enum. + PlanTypeInsert PlanType = "PlanTypeInsert" + + // PlanTypeDelete enum. + PlanTypeDelete PlanType = "PlanTypeDelete" + + // PlanTypeUpdate enum. + PlanTypeUpdate PlanType = "PlanTypeUpdate" + + // PlanTypeSelect enum. + PlanTypeSelect PlanType = "PlanTypeSelect" + + // PlanTypeOrderby enum. + PlanTypeOrderby PlanType = "PlanTypeOrderby" + + // PlanTypeLimit enum. + PlanTypeLimit PlanType = "PlanTypeLimit" + + // PlanTypeAggregate enum. + PlanTypeAggregate PlanType = "PlanTypeAggregate" + + // PlanTypeJoin enum. + PlanTypeJoin PlanType = "PlanTypeJoin" + + // PlanTypeDistinct enum. + PlanTypeDistinct PlanType = "PlanTypeDistinct" +) diff --git a/src/planner/planner.go b/src/planner/planner.go new file mode 100644 index 00000000..f5eb52f8 --- /dev/null +++ b/src/planner/planner.go @@ -0,0 +1,60 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package planner + +import () + +// Plan interface. +type Plan interface { + Build() error + Type() PlanType + JSON() string + Size() int + Children() *PlanTree +} + +// PlanTree is a container for all plans +type PlanTree struct { + size int + children []Plan +} + +// NewPlanTree creates the new plan tree. +func NewPlanTree() *PlanTree { + return &PlanTree{ + children: make([]Plan, 0, 8), + } +} + +// Add used to add new plan to the tree. +func (pt *PlanTree) Add(plan Plan) error { + pt.children = append(pt.children, plan) + pt.size += plan.Size() + return nil +} + +// Build used to build plans(we won't build sub-plans in this plan). 
+func (pt *PlanTree) Build() error { + for _, plan := range pt.children { + if err := plan.Build(); err != nil { + return err + } + } + return nil +} + +// Plans returns all the plans of the tree. +func (pt *PlanTree) Plans() []Plan { + return pt.children +} + +// Size used to measure the memory useage for this plantree. +func (pt *PlanTree) Size() int { + return pt.size +} diff --git a/src/planner/planner_test.go b/src/planner/planner_test.go new file mode 100644 index 00000000..2a25a6d8 --- /dev/null +++ b/src/planner/planner_test.go @@ -0,0 +1,45 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package planner + +import ( + "router" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/xelabs/go-mysqlstack/sqlparser" + "github.com/xelabs/go-mysqlstack/xlog" +) + +func TestPlanner(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + + database := "xx" + query := "create table A(a int)" + + route, cleanup := router.MockNewRouter(log) + defer cleanup() + + err := route.AddForTest(database, router.MockTableAConfig()) + assert.Nil(t, err) + + node, err := sqlparser.Parse(query) + assert.Nil(t, err) + DDL := NewDDLPlan(log, database, query, node.(*sqlparser.DDL), route) + + { + planTree := NewPlanTree() + for i := 0; i < 64; i++ { + err := planTree.Add(DDL) + assert.Nil(t, err) + } + err := planTree.Build() + assert.Nil(t, err) + } +} diff --git a/src/planner/select_plan.go b/src/planner/select_plan.go new file mode 100644 index 00000000..1b10c386 --- /dev/null +++ b/src/planner/select_plan.go @@ -0,0 +1,288 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. 
 *
 */

package planner

import (
	"encoding/json"
	"fmt"
	"router"
	"xcontext"

	"github.com/pkg/errors"

	"github.com/xelabs/go-mysqlstack/sqlparser"
	"github.com/xelabs/go-mysqlstack/sqlparser/depends/hack"
	"github.com/xelabs/go-mysqlstack/xlog"
)

var (
	// Compile-time check that SelectPlan implements the Plan interface.
	_ Plan = &SelectPlan{}
)

// SelectPlan represents select plan
type SelectPlan struct {
	log *xlog.Log

	// router
	router *router.Router

	// select ast (the original comment said "insert ast"; this is a SELECT)
	node *sqlparser.Select

	// database
	database string

	// raw query
	RawQuery string

	// type
	typ PlanType

	// mode
	ReqMode xcontext.RequestMode

	// query and backend tuple
	Querys []xcontext.QueryTuple

	// children plans in select(such as: orderby, limit or join).
	children *PlanTree
}

// NewSelectPlan used to create SelectPlan
func NewSelectPlan(log *xlog.Log, database string, query string, node *sqlparser.Select, router *router.Router) *SelectPlan {
	return &SelectPlan{
		log:      log,
		node:     node,
		router:   router,
		database: database,
		RawQuery: query,
		typ:      PlanTypeSelect,
		Querys:   make([]xcontext.QueryTuple, 0, 16),
		children: NewPlanTree(),
	}
}

// analyze used to check the 'select' is at the support level, and returns
// the (database, table) the select targets; database is "" when the table
// is unqualified.
// Unsupports:
// 1. subquery
// NOTE(review): a multi-table FROM (len(node.From) > 1) is also reported
// as "subqueries.in.select" — the message is misleading, but the tests
// depend on it; confirm before changing.
func (p *SelectPlan) analyze() (string, string, error) {
	var shardDatabase string
	var shardTable string
	var tableExpr *sqlparser.AliasedTableExpr
	node := p.node

	// Check subquery.
	if hasSubquery(node) || len(node.From) > 1 {
		return shardDatabase, shardTable, errors.New("unsupported: subqueries.in.select")
	}

	// Find the first table in the node.From.
	// Currently only support AliasedTableExpr, JoinTableExpr select.
	switch expr := (node.From[0]).(type) {
	case *sqlparser.AliasedTableExpr:
		tableExpr = expr
	case *sqlparser.JoinTableExpr:
		// For a join, only the left-most aliased table is inspected here;
		// the JoinPlan child rejects joins later during Build.
		if v, ok := (expr.LeftExpr).(*sqlparser.AliasedTableExpr); ok {
			tableExpr = v
		}
	}

	if tableExpr != nil {
		switch expr := tableExpr.Expr.(type) {
		case sqlparser.TableName:
			if !expr.Qualifier.IsEmpty() {
				shardDatabase = expr.Qualifier.String()
			}
			shardTable = expr.Name.String()
		}
	}
	return shardDatabase, shardTable, nil
}

// Build used to build distributed querys.
// For now, we don't support subquery in select.
// Steps: analyze the target table, route via the shard key, attach the
// distinct/join/aggregate/orderby/limit sub-plans (which may rewrite the
// AST in place), then render one rewritten query per routing segment.
func (p *SelectPlan) Build() error {
	var err error
	var shardTable string
	var shardDatabase string

	log := p.log
	node := p.node
	if shardDatabase, shardTable, err = p.analyze(); err != nil {
		return err
	}
	// Fall back to the session database when the table is unqualified.
	if shardDatabase == "" {
		shardDatabase = p.database
	}

	// Get the routing segments info.
	shardkey, err := p.router.ShardKey(shardDatabase, shardTable)
	if err != nil {
		return err
	}
	segments, err := getDMLRouting(shardDatabase, shardTable, shardkey, node.Where, p.router)
	if err != nil {
		return err
	}

	// Add sub-plans. Only needed when the query fans out to more than
	// one segment and the results must be merged on the proxy.
	children := p.children
	if len(segments) > 1 {
		tuples, err := parserSelectExprs(node.SelectExprs)
		if err != nil {
			return err
		}
		// Distinct SubPlan.
		distinctPlan := NewDistinctPlan(log, node)
		if err := distinctPlan.Build(); err != nil {
			return err
		}
		children.Add(distinctPlan)

		// Join SubPlan.
		joinPlan := NewJoinPlan(log, node)
		if err := joinPlan.Build(); err != nil {
			return err
		}
		children.Add(joinPlan)

		// Aggregate SubPlan. Note: ReWritten() mutates the select list
		// (e.g. avg(a) expands to sum(a), count(a)).
		aggrPlan := NewAggregatePlan(log, node, tuples)
		if err := aggrPlan.Build(); err != nil {
			return err
		}
		children.Add(aggrPlan)
		node.SelectExprs = aggrPlan.ReWritten()

		// Orderby SubPlan.
		orderPlan := NewOrderByPlan(log, node, tuples)
		if err := orderPlan.Build(); err != nil {
			return err
		}
		children.Add(orderPlan)

		// Limit SubPlan.
		if node.Limit != nil {
			limitPlan := NewLimitPlan(log, node)
			if err := limitPlan.Build(); err != nil {
				return err
			}
			children.Add(limitPlan)
			// Rewrite the limit clause to 'limit offset+count'.
			node.Limit = limitPlan.ReWritten()
		}
	}

	// Rewritten the query: one per segment, with the partition table
	// substituted and aliased back to the logical table name.
	for _, segment := range segments {
		tn := sqlparser.TableName{
			Name:      sqlparser.NewTableIdent(segment.Table),
			Qualifier: sqlparser.NewTableIdent(shardDatabase),
		}
		as := fmt.Sprintf(" as %s", shardTable)
		buf := sqlparser.NewTrackedBuffer(nil)
		buf.Myprintf("select %v%s%v from %v%s%v%v%v%v%v",
			node.Comments, node.Hints, node.SelectExprs,
			tn, as,
			node.Where,
			node.GroupBy, node.Having, node.OrderBy,
			node.Limit)
		rewritten := buf.String()

		tuple := xcontext.QueryTuple{
			Query:   rewritten,
			Backend: segment.Backend,
			Range:   segment.Range.String(),
		}
		p.Querys = append(p.Querys, tuple)
	}
	return nil
}

// Type returns the type of the plan.
func (p *SelectPlan) Type() PlanType {
	return p.typ
}

// JSON returns the plan info, rendered as an EXPLAIN-style document
// summarizing the projection, per-backend partitions and merge steps.
func (p *SelectPlan) JSON() string {
	type limit struct {
		Offset int
		Limit  int
	}

	type explain struct {
		RawQuery    string                `json:",omitempty"`
		Project     string                `json:",omitempty"`
		Partitions  []xcontext.QueryTuple `json:",omitempty"`
		Aggregate   []string              `json:",omitempty"`
		GatherMerge []string              `json:",omitempty"`
		HashGroupBy []string              `json:",omitempty"`
		Limit       *limit                `json:",omitempty"`
	}

	// Project.
	buf := sqlparser.NewTrackedBuffer(nil)
	buf.Myprintf("%v", p.node.SelectExprs)
	project := buf.String()

	// Aggregate: collect merge metadata from the built sub-plans.
	var aggregate []string
	var hashGroup []string
	var gatherMerge []string
	var lim *limit
	for _, sub := range p.children.Plans() {
		switch sub.Type() {
		case PlanTypeAggregate:
			plan := sub.(*AggregatePlan)
			for _, aggr := range plan.normalAggrs {
				aggregate = append(aggregate, aggr.Field)
			}
			for _, aggr := range plan.groupAggrs {
				hashGroup = append(hashGroup, aggr.Field)
			}
		case PlanTypeOrderby:
			plan := sub.(*OrderByPlan)
			for _, order := range plan.OrderBys {
				gatherMerge = append(gatherMerge, order.Field)
			}
		case PlanTypeLimit:
			plan := sub.(*LimitPlan)
			lim = &limit{Offset: plan.Offset, Limit: plan.Limit}
		}
	}

	exp := &explain{Project: project,
		RawQuery:    p.RawQuery,
		Partitions:  p.Querys,
		Aggregate:   aggregate,
		GatherMerge: gatherMerge,
		HashGroupBy: hashGroup,
		Limit:       lim,
	}
	bout, err := json.MarshalIndent(exp, "", "\t")
	if err != nil {
		return err.Error()
	}
	return hack.String(bout)
}

// Children returns the children of the plan.
func (p *SelectPlan) Children() *PlanTree {
	return p.children
}

// Size returns the memory size.
func (p *SelectPlan) Size() int {
	size := len(p.RawQuery)
	for _, q := range p.Querys {
		size += len(q.Query)
	}
	return size
}
diff --git a/src/planner/select_plan_test.go b/src/planner/select_plan_test.go
new file mode 100644
index 00000000..4abf361a
--- /dev/null
+++ b/src/planner/select_plan_test.go
@@ -0,0 +1,404 @@
/*
 * Radon
 *
 * Copyright 2018 The Radon Authors.
 * Code is licensed under the GPLv3.
+ * + */ + +package planner + +import ( + "router" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/xelabs/go-mysqlstack/sqlparser" + "github.com/xelabs/go-mysqlstack/xlog" +) + +func TestSelectPlan(t *testing.T) { + results := []string{ + `{ + "RawQuery": "select 1, sum(a),avg(a),a,b from sbtest.A where id\u003e1 group by a,b order by a desc limit 10 offset 100", + "Project": "1, sum(a), avg(a), sum(a), count(a), a, b", + "Partitions": [ + { + "Query": "select 1, sum(a), avg(a), sum(a), count(a), a, b from sbtest.A1 as A where id \u003e 1 group by a, b order by a desc limit 110", + "Backend": "backend1", + "Range": "[0-32)" + }, + { + "Query": "select 1, sum(a), avg(a), sum(a), count(a), a, b from sbtest.A2 as A where id \u003e 1 group by a, b order by a desc limit 110", + "Backend": "backend2", + "Range": "[32-64)" + }, + { + "Query": "select 1, sum(a), avg(a), sum(a), count(a), a, b from sbtest.A3 as A where id \u003e 1 group by a, b order by a desc limit 110", + "Backend": "backend3", + "Range": "[64-96)" + }, + { + "Query": "select 1, sum(a), avg(a), sum(a), count(a), a, b from sbtest.A4 as A where id \u003e 1 group by a, b order by a desc limit 110", + "Backend": "backend4", + "Range": "[96-256)" + }, + { + "Query": "select 1, sum(a), avg(a), sum(a), count(a), a, b from sbtest.A5 as A where id \u003e 1 group by a, b order by a desc limit 110", + "Backend": "backend5", + "Range": "[256-512)" + }, + { + "Query": "select 1, sum(a), avg(a), sum(a), count(a), a, b from sbtest.A6 as A where id \u003e 1 group by a, b order by a desc limit 110", + "Backend": "backend6", + "Range": "[512-4096)" + } + ], + "Aggregate": [ + "sum(a)", + "avg(a)", + "sum(a)", + "count(a)" + ], + "GatherMerge": [ + "a" + ], + "HashGroupBy": [ + "a", + "b" + ], + "Limit": { + "Offset": 100, + "Limit": 10 + } +}`, + `{ + "RawQuery": "select id, sum(a) as A from A group by id having A\u003e1000", + "Project": "id, sum(a) as A", + "Partitions": [ + { + "Query": "select id, 
sum(a) as A from sbtest.A1 as A group by id having A \u003e 1000", + "Backend": "backend1", + "Range": "[0-32)" + }, + { + "Query": "select id, sum(a) as A from sbtest.A2 as A group by id having A \u003e 1000", + "Backend": "backend2", + "Range": "[32-64)" + }, + { + "Query": "select id, sum(a) as A from sbtest.A3 as A group by id having A \u003e 1000", + "Backend": "backend3", + "Range": "[64-96)" + }, + { + "Query": "select id, sum(a) as A from sbtest.A4 as A group by id having A \u003e 1000", + "Backend": "backend4", + "Range": "[96-256)" + }, + { + "Query": "select id, sum(a) as A from sbtest.A5 as A group by id having A \u003e 1000", + "Backend": "backend5", + "Range": "[256-512)" + }, + { + "Query": "select id, sum(a) as A from sbtest.A6 as A group by id having A \u003e 1000", + "Backend": "backend6", + "Range": "[512-4096)" + } + ], + "Aggregate": [ + "A" + ], + "HashGroupBy": [ + "id" + ] +}`, + } + querys := []string{ + "select 1, sum(a),avg(a),a,b from sbtest.A where id>1 group by a,b order by a desc limit 10 offset 100", + "select id, sum(a) as A from A group by id having A>1000", + } + + // Database not null. 
+ { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + database := "sbtest" + + route, cleanup := router.MockNewRouter(log) + defer cleanup() + + err := route.AddForTest(database, router.MockTableMConfig(), router.MockTableBConfig()) + assert.Nil(t, err) + for i, query := range querys { + node, err := sqlparser.Parse(query) + assert.Nil(t, err) + plan := NewSelectPlan(log, database, query, node.(*sqlparser.Select), route) + + // plan build + { + log.Info("--select.query:%+v", query) + err := plan.Build() + assert.Nil(t, err) + got := plan.JSON() + want := results[i] + assert.Equal(t, want, got) + assert.Equal(t, PlanTypeSelect, plan.Type()) + assert.NotNil(t, plan.Children()) + } + } + } +} + +func TestSelectPlanDatabaseIsNull(t *testing.T) { + results := []string{ + `{ + "RawQuery": "select 1, sum(a),avg(a),a,b from sbtest.A where id\u003e1 group by a,b order by a desc limit 10 offset 100", + "Project": "1, sum(a), avg(a), sum(a), count(a), a, b", + "Partitions": [ + { + "Query": "select 1, sum(a), avg(a), sum(a), count(a), a, b from sbtest.A1 as A where id \u003e 1 group by a, b order by a desc limit 110", + "Backend": "backend1", + "Range": "[0-32)" + }, + { + "Query": "select 1, sum(a), avg(a), sum(a), count(a), a, b from sbtest.A2 as A where id \u003e 1 group by a, b order by a desc limit 110", + "Backend": "backend2", + "Range": "[32-64)" + }, + { + "Query": "select 1, sum(a), avg(a), sum(a), count(a), a, b from sbtest.A3 as A where id \u003e 1 group by a, b order by a desc limit 110", + "Backend": "backend3", + "Range": "[64-96)" + }, + { + "Query": "select 1, sum(a), avg(a), sum(a), count(a), a, b from sbtest.A4 as A where id \u003e 1 group by a, b order by a desc limit 110", + "Backend": "backend4", + "Range": "[96-256)" + }, + { + "Query": "select 1, sum(a), avg(a), sum(a), count(a), a, b from sbtest.A5 as A where id \u003e 1 group by a, b order by a desc limit 110", + "Backend": "backend5", + "Range": "[256-512)" + }, + { + "Query": "select 1, sum(a), 
avg(a), sum(a), count(a), a, b from sbtest.A6 as A where id \u003e 1 group by a, b order by a desc limit 110", + "Backend": "backend6", + "Range": "[512-4096)" + } + ], + "Aggregate": [ + "sum(a)", + "avg(a)", + "sum(a)", + "count(a)" + ], + "GatherMerge": [ + "a" + ], + "HashGroupBy": [ + "a", + "b" + ], + "Limit": { + "Offset": 100, + "Limit": 10 + } +}`, + `{ + "RawQuery": "select id, sum(a) as A from sbtest.A group by id having A\u003e1000", + "Project": "id, sum(a) as A", + "Partitions": [ + { + "Query": "select id, sum(a) as A from sbtest.A1 as A group by id having A \u003e 1000", + "Backend": "backend1", + "Range": "[0-32)" + }, + { + "Query": "select id, sum(a) as A from sbtest.A2 as A group by id having A \u003e 1000", + "Backend": "backend2", + "Range": "[32-64)" + }, + { + "Query": "select id, sum(a) as A from sbtest.A3 as A group by id having A \u003e 1000", + "Backend": "backend3", + "Range": "[64-96)" + }, + { + "Query": "select id, sum(a) as A from sbtest.A4 as A group by id having A \u003e 1000", + "Backend": "backend4", + "Range": "[96-256)" + }, + { + "Query": "select id, sum(a) as A from sbtest.A5 as A group by id having A \u003e 1000", + "Backend": "backend5", + "Range": "[256-512)" + }, + { + "Query": "select id, sum(a) as A from sbtest.A6 as A group by id having A \u003e 1000", + "Backend": "backend6", + "Range": "[512-4096)" + } + ], + "Aggregate": [ + "A" + ], + "HashGroupBy": [ + "id" + ] +}`, + } + querys := []string{ + "select 1, sum(a),avg(a),a,b from sbtest.A where id>1 group by a,b order by a desc limit 10 offset 100", + "select id, sum(a) as A from sbtest.A group by id having A>1000", + } + + // Database is null. 
+ { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + + route, cleanup := router.MockNewRouter(log) + defer cleanup() + + err := route.AddForTest("sbtest", router.MockTableMConfig(), router.MockTableBConfig()) + assert.Nil(t, err) + for i, query := range querys { + node, err := sqlparser.Parse(query) + assert.Nil(t, err) + plan := NewSelectPlan(log, "", query, node.(*sqlparser.Select), route) + + // plan build + { + log.Info("--select.query:%+v", query) + err := plan.Build() + assert.Nil(t, err) + got := plan.JSON() + want := results[i] + assert.Equal(t, want, got) + assert.Equal(t, PlanTypeSelect, plan.Type()) + assert.NotNil(t, plan.Children()) + } + } + } +} + +func TestSelectUnsupportedPlan(t *testing.T) { + querys := []string{ + "select * from A as A where id in (select id from B)", + "select distinct(b) from A", + "select A.id from A join B on B.id=A.id", + "select id, rand(id) from A", + "select id from A order by b", + "select id from A limit x", + "select 1", + "select age,count(*) from A group by age having count(*) >=2", + "select id from A,b limit x", + } + results := []string{ + "unsupported: subqueries.in.select", + "unsupported: distinct", + "unsupported: JOIN.expression", + "unsupported: function:rand", + "unsupported: orderby[b].should.in.select.list", + "unsupported: limit.offset.or.counts.must.be.IntVal", + "Table 'dual' doesn't exist (errno 1146) (sqlstate 42S02)", + "unsupported: expr[count(*)].in.having.clause", + "unsupported: subqueries.in.select", + } + + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + database := "sbtest" + + route, cleanup := router.MockNewRouter(log) + defer cleanup() + + err := route.AddForTest(database, router.MockTableMConfig(), router.MockTableBConfig()) + assert.Nil(t, err) + for i, query := range querys { + node, err := sqlparser.Parse(query) + assert.Nil(t, err) + plan := NewSelectPlan(log, database, query, node.(*sqlparser.Select), route) + + // plan build + { + err := plan.Build() + want := results[i] + got := 
err.Error() + assert.Equal(t, want, got) + } + } +} + +func TestSelectPlanAs(t *testing.T) { + results := []string{ + `{ + "RawQuery": "select A.id from A as a1 where A.id\u003e1000", + "Project": "A.id", + "Partitions": [ + { + "Query": "select A.id from sbtest.A1 as A where A.id \u003e 1000", + "Backend": "backend1", + "Range": "[0-32)" + }, + { + "Query": "select A.id from sbtest.A2 as A where A.id \u003e 1000", + "Backend": "backend2", + "Range": "[32-64)" + }, + { + "Query": "select A.id from sbtest.A3 as A where A.id \u003e 1000", + "Backend": "backend3", + "Range": "[64-96)" + }, + { + "Query": "select A.id from sbtest.A4 as A where A.id \u003e 1000", + "Backend": "backend4", + "Range": "[96-256)" + }, + { + "Query": "select A.id from sbtest.A5 as A where A.id \u003e 1000", + "Backend": "backend5", + "Range": "[256-512)" + }, + { + "Query": "select A.id from sbtest.A6 as A where A.id \u003e 1000", + "Backend": "backend6", + "Range": "[512-4096)" + } + ] +}`, + } + querys := []string{ + "select A.id from A as a1 where A.id>1000", + } + + // Database not null. 
+ { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + database := "sbtest" + + route, cleanup := router.MockNewRouter(log) + defer cleanup() + + err := route.AddForTest(database, router.MockTableMConfig(), router.MockTableBConfig()) + assert.Nil(t, err) + for i, query := range querys { + node, err := sqlparser.Parse(query) + assert.Nil(t, err) + plan := NewSelectPlan(log, database, query, node.(*sqlparser.Select), route) + + // plan build + { + log.Info("--select.query:%+v", query) + err := plan.Build() + assert.Nil(t, err) + got := plan.JSON() + log.Debug("---%+v", got) + want := results[i] + assert.Equal(t, want, got) + assert.Equal(t, PlanTypeSelect, plan.Type()) + assert.NotNil(t, plan.Children()) + } + } + } +} diff --git a/src/planner/sqlparser_test.go b/src/planner/sqlparser_test.go new file mode 100644 index 00000000..a7b426ae --- /dev/null +++ b/src/planner/sqlparser_test.go @@ -0,0 +1,704 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. 
+ * + */ + +package planner + +import ( + "fmt" + "reflect" + "strconv" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/xelabs/go-mysqlstack/sqlparser" + "github.com/xelabs/go-mysqlstack/xlog" +) + +func TestSqlJoinParser(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + + querys := []string{ + "select u1.id, u2.id from user u1 join user u2 on u2.id = u1.col where u1.id = 1", + } + + for _, query := range querys { + log.Debug("query:%s", query) + sel, err := sqlparser.Parse(query) + assert.Nil(t, err) + tree := sel.(*sqlparser.Select) + processTableExprs(log, tree.From) + filters := splitAndExpression(nil, tree.Where.Expr) + log.Debug("where:%+v", filters[0]) + log.Debug("\n") + } + +} + +func processTableExprs(log *xlog.Log, tableExprs sqlparser.TableExprs) { + log.Debug("tables.count:%d", len(tableExprs)) + for _, tableExpr := range tableExprs { + processTableExpr(log, tableExpr) + } +} + +func processTableExpr(log *xlog.Log, tableExpr sqlparser.TableExpr) { + switch tableExpr := tableExpr.(type) { + case *sqlparser.AliasedTableExpr: + log.Debug("AliasedTableExpr %+v, %+v", tableExpr.Expr, tableExpr.As) + case *sqlparser.ParenTableExpr: + log.Debug("ParenTableExpr %+v", tableExpr) + case *sqlparser.JoinTableExpr: + processJoin(log, tableExpr) + } +} + +func debugNode(log *xlog.Log, tableExpr sqlparser.TableExpr) { + sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + switch node := node.(type) { + default: + //log.Debug("type:%v", node) + _ = node + return true, nil + } + }, tableExpr) +} + +func processOnExpr(log *xlog.Log, on sqlparser.Expr) { + switch on.(type) { + case *sqlparser.ComparisonExpr: + on := on.(*sqlparser.ComparisonExpr) + log.Debug("on.compareison... 
%+v,%+v,%+v", on.Left, on.Right) + left := on.Left + switch left.(type) { + case *sqlparser.ColName: + cname := left.(*sqlparser.ColName) + log.Debug("left:....%+v", cname) + } + } +} + +func processJoin(log *xlog.Log, ajoin *sqlparser.JoinTableExpr) { + debugNode(log, ajoin) + log.Debug("jointype:%v", ajoin.Join) + switch ajoin.Join { + case sqlparser.JoinStr, sqlparser.StraightJoinStr, sqlparser.LeftJoinStr: + log.Debug("leftjoin") + case sqlparser.RightJoinStr: + log.Debug("rightjoin") + } + processTableExpr(log, ajoin.LeftExpr) + processTableExpr(log, ajoin.RightExpr) + processOnExpr(log, ajoin.On) +} + +func TestSQLInsert(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + + querys := []string{ + "insert into t(a,b,c,d) values(1,2,3)", + "insert into a.t(a,b,c,d)values(3.1415,4,'5',6),(4,4,'4',4)", + } + + for _, query := range querys { + log.Debug("query:%s", query) + ast, err := sqlparser.Parse(query) + assert.Nil(t, err) + tree := ast.(*sqlparser.Insert) + + // table + qua := sqlparser.NewTableIdent("xx") + tree.Table.Qualifier = qua + log.Debug("table:%+v", tree.Table) + log.Debug("ondup:%+v, %d", tree.OnDup, len(tree.OnDup)) + + // columns + log.Debug("columns:%+v", tree.Columns) + + // rows + for _, rows := range tree.Rows.(sqlparser.Values) { + log.Debug("row:%+v", rows) + for _, row := range rows { + log.Debug("\titem:%+v, type:%+v", row, reflect.TypeOf(row)) + } + } + + // end + log.Debug("\n") + } +} + +func valConvert(node sqlparser.Expr) (interface{}, error) { + switch node := node.(type) { + case *sqlparser.SQLVal: + switch node.Type { + case sqlparser.ValArg: + return string(node.Val), nil + case sqlparser.StrVal: + return []byte(node.Val), nil + case sqlparser.HexVal: + return node.HexDecode() + case sqlparser.IntVal: + val := string(node.Val) + signed, err := strconv.ParseInt(val, 0, 64) + if err == nil { + return signed, nil + } + unsigned, err := strconv.ParseUint(val, 0, 64) + if err == nil { + return unsigned, nil + } + return 
nil, err + } + case *sqlparser.NullVal: + return nil, nil + } + return nil, fmt.Errorf("%v is not a value", sqlparser.String(node)) +} + +func TestSQLDelete(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + + querys := []string{ + "delete from sbtest.t where t.id >=3 and id=4 and id='x'", + } + + for _, query := range querys { + log.Debug("query:%s", query) + ast, err := sqlparser.Parse(query) + assert.Nil(t, err) + tree := ast.(*sqlparser.Delete) + + // table + log.Debug("table:%+v", tree.Table) + + // where + filters := splitAndExpression(nil, tree.Where.Expr) + log.Debug("where:%+v", filters) + for _, filter := range filters { + comparison, ok := filter.(*sqlparser.ComparisonExpr) + if !ok { + continue + } + val, err := valConvert(comparison.Right) + if err != nil { + continue + } + log.Debug("%+v%+v%+v.type:%+v", comparison.Left, comparison.Operator, val, comparison.Right.(*sqlparser.SQLVal).Type) + } + + // end + log.Debug("\n") + } +} + +func TestSqlParserSelect(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + + querys := []string{ + "select tb1.id from test.tb1", + } + + for _, query := range querys { + log.Debug("query:%s", query) + sel, err := sqlparser.Parse(query) + assert.Nil(t, err) + selectTree := sel.(*sqlparser.Select) + for _, tableExpr := range selectTree.From { + switch tableExpr := tableExpr.(type) { + case *sqlparser.AliasedTableExpr: + log.Debug("AliasedTableExpr") + processAliasedTable(log, tableExpr) + case *sqlparser.ParenTableExpr: + log.Debug("ParenTableExpr") + case *sqlparser.JoinTableExpr: + log.Debug("JoinTableExpr") + processJoin(log, tableExpr) + } + } + log.Debug("\n") + } +} + +func processAliasedTable(log *xlog.Log, tableExpr *sqlparser.AliasedTableExpr) { + switch expr := tableExpr.Expr.(type) { + case *sqlparser.TableName: + log.Debug("table:%+v", expr) + case *sqlparser.Subquery: + } +} + +func TestSQLXA(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + querys := []string{ + "xa 
start 'xatest'", + " XA END 'xatest'", + "XA PREPARE 'xatest'", + "XA COMMIT 'xatest'", + } + + for _, query := range querys { + node, err := sqlparser.Parse(query) + assert.Nil(t, err) + log.Debug("%+v", node) + } +} + +func TestSQLShardKey(t *testing.T) { + querys := []string{ + "CREATE TABLE t1 (col1 INT, col2 CHAR(5)) PARTITION BY HASH(col1)", + } + + for _, query := range querys { + node, err := sqlparser.Parse(query) + assert.Nil(t, err) + + ddl := node.(*sqlparser.DDL) + want := "col1" + got := ddl.PartitionName + assert.Equal(t, want, got) + } +} + +func TestSQLUseDB(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + querys := []string{ + "use xx", + } + + for _, query := range querys { + node, err := sqlparser.Parse(query) + assert.Nil(t, err) + log.Debug("%+v", node) + + sef := node.(*sqlparser.Use) + assert.Equal(t, "xx", sef.DBName.String()) + } +} + +func TestSQLDDL(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + querys := []string{ + "create database xx", + "create table `你/ 好`(a int, b int, c varchar)", + "create database if not exists xx", + "create table db.foo(a int, b int, c varchar)", + "create table foo(a int, b int, c varchar) partition by hash(a)", + "create table foo(a int, b int, c varchar) partition by hash(a) PARTITIONS 6", + "create index a on b(x,c)", + "create index a on db.b(x,c)", + } + + for _, query := range querys { + log.Debug("%+v", query) + node, err := sqlparser.Parse(query) + assert.Nil(t, err) + sef := node.(*sqlparser.DDL) + log.Debug("%+v", sef.Table) + } +} + +func TestSQLDDLWithDatabase(t *testing.T) { + querys := []string{ + "create table db.foo(a int, b int, c varchar)", + "create table db.foo(a int, b int, c varchar) partition by hash(a)", + "create table db.foo(a int, b int, c varchar) partition by hash(a) PARTITIONS 6", + "create index a on db.b(x,c)", + "create index a on db.b(x,c)", + } + + for _, query := range querys { + node, err := sqlparser.Parse(query) + assert.Nil(t, err) + 
sef := node.(*sqlparser.DDL) + assert.Equal(t, "db", sef.Table.Qualifier.String()) + } +} + +func TestSQLDDLWithUnique(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + querys := []string{ + "CREATE TABLE t1(a int primary key,a1 char(12), b int unique) PARTITION BY HASH(a);", + } + + for _, query := range querys { + node, err := sqlparser.Parse(query) + assert.Nil(t, err) + sef := node.(*sqlparser.DDL) + columns := sef.TableSpec.Columns + for _, col := range columns { + log.Debug("--ddl.columns:%+v", col) + } + } +} + +func TestSQLSelect(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + querys := []string{ + "select a.id from t.a,b where a.id=b.id and a.id >(select count(*) from d)", + } + + for _, query := range querys { + log.Debug("query:%s", query) + sel, err := sqlparser.Parse(query) + assert.Nil(t, err) + node := sel.(*sqlparser.Select) + + var table string + switch v := (node.From[0]).(type) { + case *sqlparser.AliasedTableExpr: + table = sqlparser.String(v.Expr) + case *sqlparser.JoinTableExpr: + if ate, ok := (v.LeftExpr).(*sqlparser.AliasedTableExpr); ok { + table = sqlparser.String(ate.Expr) + } else { + table = sqlparser.String(v) + } + default: + table = sqlparser.String(v) + } + log.Debug("+++++table:%s", table) + + _ = sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + switch node := node.(type) { + case *sqlparser.TableName: + log.Debug("find.table.name:%s", node) + if node.Qualifier.IsEmpty() { + node.Qualifier = sqlparser.NewTableIdent("sbtest") + } + } + return true, nil + }, node.From) + + _ = sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + log.Debug("--node:[type:%v, values:%+v]", reflect.TypeOf(node), node) + return true, nil + }, node.Where) + + buf := sqlparser.NewTrackedBuffer(nil) + node.Format(buf) + log.Debug("--newquery:%s", buf.String()) + } +} + +func TestSQLSelectShardKey(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + querys := 
[]string{ + "select a.id from t.a,b where a.id=b.id and a.id >(select count(*) from d)", + "select 1 from sbtest.t1 right outer join t2 on a = b", + "select u1.id, u2.id from user u1 join user u2 on u2.id = u1.col where u1.id = 1", + "select u1.id, u2.id from user u1 left join user u2 on u2.id = u1.col where u1.id = 1", + "select 1 from t1 right outer join t2 on a = b", + "select a.id from a,b where a.id=b.id", + "select a.id from a", + "select id from db.a", + } + + for _, query := range querys { + log.Debug("query:%s", query) + sel, err := sqlparser.Parse(query) + assert.Nil(t, err) + node := sel.(*sqlparser.Select) + + var table, database string + var tableExpr *sqlparser.AliasedTableExpr + switch expr := (node.From[0]).(type) { + case *sqlparser.AliasedTableExpr: + tableExpr = expr + case *sqlparser.JoinTableExpr: + if ate, ok := (expr.LeftExpr).(*sqlparser.AliasedTableExpr); ok { + tableExpr = ate + } + case *sqlparser.ParenTableExpr: + log.Panic("don't support ParenTableExpr, %+v", expr) + } + + switch expr := tableExpr.Expr.(type) { + case *sqlparser.TableName: + if !expr.Qualifier.IsEmpty() { + database = expr.Qualifier.String() + } + table = expr.Name.String() + } + + log.Debug("db:%s, table:%s\n", database, table) + } +} + +func TestSQLSelectOrderBy(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + querys := []string{ + "select a,b from t order by a desc", + "select a,b from t order by rand()", + "select a,b from t order by abs(a)", + "select a as a1 from t order by a1", + } + + for _, query := range querys { + log.Debug("query:%s", query) + sel, err := sqlparser.Parse(query) + assert.Nil(t, err) + node := sel.(*sqlparser.Select) + orderby := node.OrderBy + for _, o := range orderby { + log.Debug("orderby:type:%T, expr:%+v, %+v", o.Expr, o.Expr, o) + } + log.Debug("\n") + } +} + +func TestSQLSelectExprs(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + querys := []string{ + "select a as a1, b as b1 from t order by a1", + } 
+ + for _, query := range querys { + log.Debug("query:%s", query) + sel, err := sqlparser.Parse(query) + assert.Nil(t, err) + node := sel.(*sqlparser.Select) + exprs := node.SelectExprs + for _, e := range exprs { + switch e.(type) { + case *sqlparser.AliasedExpr: + e1 := e.(*sqlparser.AliasedExpr) + log.Debug("expr:type:%T, expr:%+v, %+v", e, e, e1.Expr) + } + } + log.Debug("\n") + } +} + +func TestSQLSelectLimit(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + querys := []string{ + "select a from t", + "select a from t limit 5,9", + "select a from t limit 9 offset 5", + "select a from t limit b,-9", + "select a from t limit 1", + } + + for _, query := range querys { + log.Debug("query:%s", query) + sel, err := sqlparser.Parse(query) + assert.Nil(t, err) + node := sel.(*sqlparser.Select) + limit := node.Limit + if limit != nil { + log.Debug("limit:%+v(%T), %+v(%T)", limit.Offset, limit.Offset, limit.Rowcount, limit.Rowcount) + } + + sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + switch node := node.(type) { + default: + log.Debug("type:%T, value:%+v", node, node) + return true, nil + } + }, limit) + + log.Debug("\n") + } +} + +func TestSQLSelectAggregator(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + querys := []string{ + "select 1, count(*), count(*) as cstar, avg(a), sum(a), count(a), max(a), max(b) as mb, a as a1, x.b from t,x group by a1,b, d.name", + } + + for _, query := range querys { + log.Debug("query:%s", query) + sel, err := sqlparser.Parse(query) + assert.Nil(t, err) + node := sel.(*sqlparser.Select) + selexprs := node.SelectExprs + var tuples []*selectTuple + if selexprs != nil { + for _, exp := range selexprs { + switch exp.(type) { + case *sqlparser.AliasedExpr: + expr := exp.(*sqlparser.AliasedExpr) + tuple, _ := parserSelectExpr(expr) + tuples = append(tuples, tuple) + } + } + for _, tuple := range tuples { + log.Debug("--%+v", tuple) + } + } + + groupbys := node.GroupBy + if 
groupbys != nil { + for _, by := range groupbys { + log.Debug("group:%+v(%T)", by, by) + } + } + log.Debug("\n") + } +} + +func TestSQLSelectRewritten(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + querys := []string{ + "select avg(a), sum(a), count(a) from t", + } + + for _, query := range querys { + log.Debug("query:%s", query) + sel, err := sqlparser.Parse(query) + assert.Nil(t, err) + node := sel.(*sqlparser.Select) + selexprs := node.SelectExprs + rewritten := node.SelectExprs + var tuples []*selectTuple + for _, exp := range selexprs { + switch exp.(type) { + case *sqlparser.AliasedExpr: + expr := exp.(*sqlparser.AliasedExpr) + tuple, _ := parserSelectExpr(expr) + tuples = append(tuples, tuple) + } + } + + k := 0 + for _, tuple := range tuples { + switch tuple.fn { + case "avg": + avgs := decomposeAvg(tuple) + rewritten = append(rewritten, &sqlparser.AliasedExpr{}, &sqlparser.AliasedExpr{}) + copy(rewritten[(k+1)+2:], rewritten[(k+1):]) + rewritten[(k + 1)] = avgs[0] + rewritten[(k+1)+1] = avgs[1] + } + log.Debug("--%+v", tuple) + k++ + } + + buf := sqlparser.NewTrackedBuffer(nil) + rewritten.Format(buf) + log.Debug("--newquery:%s", buf.String()) + } +} + +func TestSQLSelectJoin(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + + querys := []string{ + "select a,b,c from t", + "select a,b,c from t where b=c", + "select a.id from a,b where a.id=b.id", + "select u1.id, u2.id from user u1 left join user u2 on u2.id = u1.col where u1.id = 1", + "select 1 from t1 right outer join t2 on a = b", + } + + for _, query := range querys { + log.Debug("query:%s", query) + sel, err := sqlparser.Parse(query) + assert.Nil(t, err) + node := sel.(*sqlparser.Select) + for _, tab := range node.From { + switch tab.(type) { + case *sqlparser.AliasedTableExpr: + if len(node.From) > 1 && node.Where != nil { + if err := checkComparison(node.Where.Expr); err != nil { + log.Error("error:%+v", err) + } + } + case *sqlparser.ParenTableExpr: + 
log.Debug("ParenTableExpr %+v", tab) + case *sqlparser.JoinTableExpr: + ajoin := tab.(*sqlparser.JoinTableExpr) + if err := checkComparison(ajoin.On); err != nil { + log.Error("error:%+v", err) + } + } + } + log.Debug("\n") + } +} + +// TestSqlParserSelectOr used to check the or clause type. +func TestSqlParserSelectOr(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + + querys := []string{ + "select * from tb1 where id=1 or id=3", + } + + for _, query := range querys { + log.Debug("query:%s", query) + sel, err := sqlparser.Parse(query) + assert.Nil(t, err) + selectTree := sel.(*sqlparser.Select) + filters := splitAndExpression(nil, selectTree.Where.Expr) + for _, filter := range filters { + switch filter.(type) { + case *sqlparser.ComparisonExpr: + log.Debug("comparison.expr....") + case *sqlparser.OrExpr: + log.Debug("or.expr....") + } + + buf := sqlparser.NewTrackedBuffer(nil) + filter.Format(buf) + log.Debug(buf.String()) + log.Debug("\n") + } + } +} + +func TestSqlParserHaving(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + + querys := []string{ + "select age,count(*) from t1 group by age having count(*) >=2", + "select age,count(*) from t1 having a>2", + } + + for _, query := range querys { + log.Debug("query:%s", query) + sel, err := sqlparser.Parse(query) + assert.Nil(t, err) + node := sel.(*sqlparser.Select) + if node.Having != nil { + _ = sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + switch node := node.(type) { + case *sqlparser.FuncExpr: + buf := sqlparser.NewTrackedBuffer(nil) + node.Format(buf) + log.Debug(buf.String()) + log.Debug("found.expr.in.having:%#v....", node) + return false, nil + } + return true, nil + }, node.Having) + } + } +} + +func TestSqlParserTableAs(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + + querys := []string{ + "select t1.age from test.t1 as t1", + "select 1", + } + + for _, query := range querys { + log.Debug("query:%s", query) + sel, err := 
sqlparser.Parse(query) + assert.Nil(t, err) + node := sel.(*sqlparser.Select) + log.Debug("from:%T, %+v", node.From[0], node.From[0]) + + buf := sqlparser.NewTrackedBuffer(nil) + node.Format(buf) + log.Debug(buf.String()) + } +} diff --git a/src/planner/update_plan.go b/src/planner/update_plan.go new file mode 100644 index 00000000..f668118f --- /dev/null +++ b/src/planner/update_plan.go @@ -0,0 +1,162 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package planner + +import ( + "encoding/json" + "router" + "xcontext" + + "github.com/pkg/errors" + + "github.com/xelabs/go-mysqlstack/sqlparser" + "github.com/xelabs/go-mysqlstack/sqlparser/depends/hack" + "github.com/xelabs/go-mysqlstack/xlog" +) + +var ( + _ Plan = &UpdatePlan{} +) + +// UpdatePlan represents delete plan +type UpdatePlan struct { + log *xlog.Log + + // router + router *router.Router + + // insert ast + node *sqlparser.Update + + // database + database string + + // raw query + RawQuery string + + // type + typ PlanType + + // mode + ReqMode xcontext.RequestMode + + // query and backend tuple + Querys []xcontext.QueryTuple +} + +// NewUpdatePlan used to create UpdatePlan +func NewUpdatePlan(log *xlog.Log, database string, query string, node *sqlparser.Update, router *router.Router) *UpdatePlan { + return &UpdatePlan{ + log: log, + node: node, + router: router, + database: database, + RawQuery: query, + typ: PlanTypeUpdate, + Querys: make([]xcontext.QueryTuple, 0, 16), + } +} + +// analyze used to analyze the 'update' is at the support level. +func (p *UpdatePlan) analyze() error { + node := p.node + // analyze subquery. + if hasSubquery(p.node) { + return errors.New("unsupported: subqueries.in.update") + } + if node.Where == nil { + return errors.New("unsupported: missing.where.clause.in.DML") + } + return nil +} + +// Build used to build distributed querys. 
+func (p *UpdatePlan) Build() error { + if err := p.analyze(); err != nil { + return err + } + + node := p.node + // Database. + database := p.database + if !node.Table.Qualifier.IsEmpty() { + database = node.Table.Qualifier.String() + } + table := node.Table.Name.String() + + // Sharding key. + shardkey, err := p.router.ShardKey(database, table) + if err != nil { + return err + } + + // analyze shardkey changing. + if isShardKeyChanging(node.Exprs, shardkey) { + return errors.New("unsupported: cannot.update.shard.key") + } + + // Get the routing segments info. + segments, err := getDMLRouting(database, table, shardkey, node.Where, p.router) + if err != nil { + return err + } + + // Rewrite the query. + for _, segment := range segments { + buf := sqlparser.NewTrackedBuffer(nil) + buf.Myprintf("update %v%s.%s set %v%v%v%v", node.Comments, database, segment.Table, node.Exprs, node.Where, node.OrderBy, node.Limit) + tuple := xcontext.QueryTuple{ + Query: buf.String(), + Backend: segment.Backend, + Range: segment.Range.String(), + } + p.Querys = append(p.Querys, tuple) + } + return nil +} + +// Type returns the type of the plan. +func (p *UpdatePlan) Type() PlanType { + return p.typ +} + +// JSON returns the plan info. +func (p *UpdatePlan) JSON() string { + type explain struct { + RawQuery string `json:",omitempty"` + Partitions []xcontext.QueryTuple `json:",omitempty"` + } + + // Partitions. + var parts []xcontext.QueryTuple + parts = append(parts, p.Querys...) + exp := &explain{ + RawQuery: p.RawQuery, + Partitions: parts, + } + bout, err := json.MarshalIndent(exp, "", "\t") + if err != nil { + return err.Error() + } + return hack.String(bout) +} + +// Children returns the children of the plan. +func (p *UpdatePlan) Children() *PlanTree { + return nil +} + +// Size returns the memory size. 
+func (p *UpdatePlan) Size() int { + size := len(p.RawQuery) + for _, q := range p.Querys { + size += len(q.Query) + } + return size +} diff --git a/src/planner/update_plan_test.go b/src/planner/update_plan_test.go new file mode 100644 index 00000000..61ea099c --- /dev/null +++ b/src/planner/update_plan_test.go @@ -0,0 +1,143 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package planner + +import ( + "router" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/xelabs/go-mysqlstack/sqlparser" + "github.com/xelabs/go-mysqlstack/xlog" +) + +func TestUpdatePlan(t *testing.T) { + results := []string{ + `{ + "RawQuery": "update sbtest.A set val = 1 where id = 1", + "Partitions": [ + { + "Query": "update sbtest.A6 set val = 1 where id = 1", + "Backend": "backend6", + "Range": "[512-4096)" + } + ] +}`, + `{ + "RawQuery": "update sbtest.A set val = 1 where id = id2 and id = 1", + "Partitions": [ + { + "Query": "update sbtest.A6 set val = 1 where id = id2 and id = 1", + "Backend": "backend6", + "Range": "[512-4096)" + } + ] +}`, + `{ + "RawQuery": "update sbtest.A set val = 1 where id in (1, 2)", + "Partitions": [ + { + "Query": "update sbtest.A1 set val = 1 where id in (1, 2)", + "Backend": "backend1", + "Range": "[0-32)" + }, + { + "Query": "update sbtest.A2 set val = 1 where id in (1, 2)", + "Backend": "backend2", + "Range": "[32-64)" + }, + { + "Query": "update sbtest.A3 set val = 1 where id in (1, 2)", + "Backend": "backend3", + "Range": "[64-96)" + }, + { + "Query": "update sbtest.A4 set val = 1 where id in (1, 2)", + "Backend": "backend4", + "Range": "[96-256)" + }, + { + "Query": "update sbtest.A5 set val = 1 where id in (1, 2)", + "Backend": "backend5", + "Range": "[256-512)" + }, + { + "Query": "update sbtest.A6 set val = 1 where id in (1, 2)", + "Backend": "backend6", + "Range": "[512-4096)" + } + ] +}`} + querys := []string{ + "update sbtest.A set val = 1 where id = 1", + "update 
sbtest.A set val = 1 where id = id2 and id = 1", + "update sbtest.A set val = 1 where id in (1, 2)", + } + + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + database := "sbtest" + + route, cleanup := router.MockNewRouter(log) + defer cleanup() + + err := route.AddForTest(database, router.MockTableMConfig()) + assert.Nil(t, err) + for i, query := range querys { + node, err := sqlparser.Parse(query) + assert.Nil(t, err) + plan := NewUpdatePlan(log, database, query, node.(*sqlparser.Update), route) + + // plan build + { + err := plan.Build() + assert.Nil(t, err) + got := plan.JSON() + want := results[i] + assert.Equal(t, want, got) + assert.Equal(t, PlanTypeUpdate, plan.Type()) + assert.Nil(t, plan.Children()) + } + } +} + +func TestUpdateUnsupportedPlan(t *testing.T) { + querys := []string{ + "update sbtest.A set a=3", + "update sbtest.A set id=3 where id=1", + "update sbtest.A set b=3 where id in (select id from t1)", + } + + results := []string{ + "unsupported: missing.where.clause.in.DML", + "unsupported: cannot.update.shard.key", + "unsupported: subqueries.in.update", + } + + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + database := "sbtest" + + route, cleanup := router.MockNewRouter(log) + defer cleanup() + + err := route.AddForTest(database, router.MockTableMConfig()) + assert.Nil(t, err) + for i, query := range querys { + node, err := sqlparser.Parse(query) + assert.Nil(t, err) + plan := NewUpdatePlan(log, database, query, node.(*sqlparser.Update), route) + + // plan build + { + err := plan.Build() + want := results[i] + got := err.Error() + assert.Equal(t, want, got) + } + } +} diff --git a/src/proxy/audit.go b/src/proxy/audit.go new file mode 100644 index 00000000..043162dc --- /dev/null +++ b/src/proxy/audit.go @@ -0,0 +1,44 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. 
+ * + */ + +package proxy + +import ( + "time" + + "github.com/xelabs/go-mysqlstack/driver" + "github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes" +) + +type mode int + +const ( + // R enum. + R mode = iota + // W enum. + W +) + +func (spanner *Spanner) auditLog(session *driver.Session, m mode, typ string, query string, qr *sqltypes.Result) error { + adit := spanner.audit + user := session.User() + host := session.Addr() + connID := session.ID() + affected := uint64(0) + if qr != nil { + affected = uint64(len(qr.Rows)) + } + now := time.Now().UTC() + switch m { + case R: + adit.LogReadEvent(typ, user, host, connID, query, affected, now) + case W: + adit.LogWriteEvent(typ, user, host, connID, query, affected, now) + } + return nil +} diff --git a/src/proxy/auth.go b/src/proxy/auth.go new file mode 100644 index 00000000..9cf8de36 --- /dev/null +++ b/src/proxy/auth.go @@ -0,0 +1,143 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package proxy + +import ( + "bytes" + "crypto/sha1" + "encoding/hex" + "fmt" + "net" + "strings" + + "github.com/xelabs/go-mysqlstack/driver" + "github.com/xelabs/go-mysqlstack/sqldb" +) + +func localHostLogin(s *driver.Session) bool { + host, _, err := net.SplitHostPort(s.Addr()) + if err != nil { + return false + } + + if host == "127.0.0.1" { + return true + } + return false +} + +func localUserLogin(s *driver.Session) bool { + host, _, err := net.SplitHostPort(s.Addr()) + if err != nil { + return false + } + + if host == "127.0.0.1" && s.User() == "root" { + return true + } + return false +} + +// SessionCheck used to check authentication. +func (spanner *Spanner) SessionCheck(s *driver.Session) error { + // Max connection check. + max := spanner.conf.Proxy.MaxConnections + if spanner.sessions.Reaches(max) { + return sqldb.NewSQLError(sqldb.ER_CON_COUNT_ERROR, "Too many connections(max: %v)", max) + } + + // Local login bypass. 
+ if localHostLogin(s) { + return nil + } + + log := spanner.log + host, _, err := net.SplitHostPort(s.Addr()) + if err != nil { + log.Error("proxy.spanner.split.address.error:%+v", s.Addr()) + return sqldb.NewSQLError(sqldb.ER_ACCESS_DENIED_ERROR, "Access denied for user from host '%v'", s.Addr()) + } + + // Ip check. + if !spanner.iptable.Check(host) { + log.Warning("proxy.spanner.host[%s].denied", host) + return sqldb.NewSQLError(sqldb.ER_ACCESS_DENIED_ERROR, "Access denied for user from host '%v'", host) + } + return nil +} + +// AuthCheck impl. +func (spanner *Spanner) AuthCheck(s *driver.Session) error { + // Local login bypass. + if localUserLogin(s) { + return nil + } + + log := spanner.log + user := s.User() + + // Server salt. + salt := s.Salt() + // Client response. + resp := s.Scramble() + + query := fmt.Sprintf("select authentication_string from mysql.user where user='%s'", user) + qr, err := spanner.ExecuteSingle(query) + + // Query error. + if err != nil { + log.Error("proxy: auth.error:%+v", err) + return sqldb.NewSQLError(sqldb.ER_ACCESS_DENIED_ERROR, "Access denied for user '%v'", user) + } + + // User not exists. 
+ if len(qr.Rows) == 0 { + log.Error("proxy: auth.can't.find.the.user:%s", user) + return sqldb.NewSQLError(sqldb.ER_ACCESS_DENIED_ERROR, "Access denied for user '%v'", user) + } + + // mysql.user.authentication_string is ['*' + HEX(SHA1(SHA1(password)))] + authStr := strings.TrimPrefix(qr.Rows[0][0].String(), "*") + wantStage2, err := hex.DecodeString(authStr) + if err != nil { + log.Error("proxy: auth.user[%s].decode[%s].error:%+v", user, authStr, err) + return sqldb.NewSQLError(sqldb.ER_ACCESS_DENIED_ERROR, "Access denied for user '%v'", user) + } + + // last= SHA1(salt SHA1(SHA1(password))) + crypt := sha1.New() + crypt.Write(salt) + crypt.Write(wantStage2) + want := crypt.Sum(nil) + + // gotStage1 = SHA1(password) + gotStage1 := make([]byte, 20) + for i := range resp { + // SHA1(password) = (resp XOR SHA1(SHA1(password))) + gotStage1[i] = (resp[i] ^ want[i]) + } + + // gotStage2 = SHA1(SHA1(password)) + crypt.Reset() + crypt.Write(gotStage1) + gotStage2 := crypt.Sum(nil) + + // last= SHA1(salt SHA1(SHA1(password))) + crypt.Reset() + crypt.Write(salt) + crypt.Write(gotStage2) + got := crypt.Sum(nil) + + if !bytes.Equal(want, got) { + log.Warning("spanner.auth\nwant:\n\tstage2:%+v\n\tlast:%+v\ngot\n\tstage2:%+v\n\tlast:%+v\n\n\tsalt:%+v", wantStage2, want, gotStage2, got, salt) + log.Error("proxy: auth.user[%s].failed(password.invalid):want[%+v]!=got[%+v]", user, want, got) + return sqldb.NewSQLError(sqldb.ER_ACCESS_DENIED_ERROR, "Access denied for user '%v'", user) + } + return nil +} diff --git a/src/proxy/auth_test.go b/src/proxy/auth_test.go new file mode 100644 index 00000000..01484c7b --- /dev/null +++ b/src/proxy/auth_test.go @@ -0,0 +1,100 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. 
+ * + */ + +package proxy + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/xelabs/go-mysqlstack/driver" + "github.com/xelabs/go-mysqlstack/xlog" + + "github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes" +) + +func TestProxyAuthSessionCheck(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + _, proxy, cleanup := MockProxy1(log, MockConfigMax16()) + defer cleanup() + address := proxy.Address() + iptable := proxy.IPTable() + + // IPTables. + { + iptable.Add("192.168.0.255") + } + + // max connection. + { + + var clients []driver.Conn + for i := 0; i < 16; i++ { + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + clients = append(clients, client) + } + { + _, err := driver.NewConn("mock", "mock", address, "", "utf8") + want := "Too many connections(max: 16) (errno 1040) (sqlstate 08004)" + got := err.Error() + assert.Equal(t, want, got) + } + _ = clients + } +} + +func TestProxyAuth(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedbs, proxy, cleanup := MockProxy(log) + defer cleanup() + address := proxy.Address() + + // Select mysql.user error. + { + _, err := driver.NewConn("mockx", "mockx", address, "", "utf8") + want := "Access denied for user 'mockx' (errno 1045) (sqlstate 28000)" + got := err.Error() + assert.Equal(t, want, got) + } + + // User not exists. + { + fakedbs.AddQuery("select authentication_string from mysql.user where user='mocknull'", &sqltypes.Result{}) + _, err := driver.NewConn("mocknull", "mockx", address, "", "utf8") + want := "Access denied for user 'mocknull' (errno 1045) (sqlstate 28000)" + got := err.Error() + assert.Equal(t, want, got) + } + + // Auth password error. + { + _, err := driver.NewConn("mock", "mockx", address, "", "utf8") + want := "Access denied for user 'mock' (errno 1045) (sqlstate 28000)" + got := err.Error() + assert.Equal(t, want, got) + } + + // Auth OK. 
+ { + _, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + } +} + +func TestProxyAuthLocalPassby(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + _, proxy, cleanup := MockProxy(log) + defer cleanup() + address := proxy.Address() + + { + _, err := driver.NewConn("root", "", address, "", "utf8") + assert.Nil(t, err) + } +} diff --git a/src/proxy/backup.go b/src/proxy/backup.go new file mode 100644 index 00000000..42199da0 --- /dev/null +++ b/src/proxy/backup.go @@ -0,0 +1,113 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package proxy + +import ( + "errors" + + "github.com/xelabs/go-mysqlstack/driver" + "github.com/xelabs/go-mysqlstack/sqlparser" + "github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes" +) + +// handleBackupQuery used to execute read query to the backup node. +func (spanner *Spanner) handleBackupQuery(session *driver.Session, query string, node sqlparser.Statement) (*sqltypes.Result, error) { + timeout := spanner.conf.Proxy.QueryTimeout + return spanner.queryBackupWithTimeout(session, query, node, timeout) +} + +func (spanner *Spanner) queryBackupWithTimeout(session *driver.Session, query string, node sqlparser.Statement, timeout int) (*sqltypes.Result, error) { + var qr *sqltypes.Result + + log := spanner.log + conf := spanner.conf + scatter := spanner.scatter + sessions := spanner.sessions + + // Make sure we have the backup node. + if scatter.HasBackup() { + txn, err := scatter.CreateBackupTransaction() + if err != nil { + log.Error("spanner.backup.read[%s].txn.create.error:[%v]", query, err) + return nil, err + } + defer txn.Finish() + + // txn limits. + txn.SetTimeout(timeout) + txn.SetMaxResult(conf.Proxy.MaxResultSize) + + // binding. 
+ sessions.TxnBinding(session, txn, node, query) + defer sessions.TxnUnBinding(session) + if qr, err = txn.Execute(session.Schema(), query); err != nil { + log.Error("spanner.backup.read[%s].error:[%v]", query, err) + } + return qr, err + } + return nil, errors.New("we.do.not.have.the.backup.node") +} + +func (spanner *Spanner) handleBackupWrite(db string, query string) (*sqltypes.Result, error) { + timeout := spanner.conf.Proxy.QueryTimeout + return spanner.writeBackupWithTimeout(db, query, timeout) +} + +func (spanner *Spanner) changeBackupEngine(ddl *sqlparser.DDL) { + if ddl.TableSpec == nil { + ddl.TableSpec = &sqlparser.TableSpec{} + } + + defaultEngine := spanner.conf.Proxy.BackupDefaultEngine + if defaultEngine != "" { + ddl.TableSpec.Options.Engine = defaultEngine + } +} + +func (spanner *Spanner) handleBackupDDL(db string, query string) (*sqltypes.Result, error) { + log := spanner.log + node, err := sqlparser.Parse(query) + if err != nil { + log.Error("spaner.backup.parser.ddl[%v].error:%v", query, err) + return nil, err + } + // We only rewrite the 'CREATE TABLE' query. + ddl := node.(*sqlparser.DDL) + if ddl.Action == sqlparser.CreateTableStr { + spanner.changeBackupEngine(ddl) + query = sqlparser.String(ddl) + } + spanner.log.Warning("spanner.handle.backup.ddl.rewrite.query:%s", query) + timeout := spanner.conf.Proxy.DDLTimeout + return spanner.writeBackupWithTimeout(db, query, timeout) +} + +func (spanner *Spanner) writeBackupWithTimeout(db string, query string, timeout int) (*sqltypes.Result, error) { + var qr *sqltypes.Result + log := spanner.log + scatter := spanner.scatter + + // Make sure we have the backup node. + if scatter.HasBackup() { + txn, err := scatter.CreateBackupTransaction() + if err != nil { + log.Error("spanner.backup.write[%s].txn.create.error:[%v]", query, err) + return nil, err + } + defer txn.Finish() + + // txn limits. 
+ txn.SetTimeout(timeout) + if qr, err = txn.Execute(db, query); err != nil { + log.Error("spanner.backup.wirte[%s].execute.error:[%v]", query, err) + } + return qr, err + } + return nil, nil +} diff --git a/src/proxy/binlog.go b/src/proxy/binlog.go new file mode 100644 index 00000000..67209b3a --- /dev/null +++ b/src/proxy/binlog.go @@ -0,0 +1,101 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package proxy + +import ( + "fmt" + "strconv" + + "github.com/xelabs/go-mysqlstack/driver" + "github.com/xelabs/go-mysqlstack/sqlparser" + "github.com/xelabs/go-mysqlstack/sqlparser/depends/hack" + querypb "github.com/xelabs/go-mysqlstack/sqlparser/depends/query" + "github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes" +) + +func (spanner *Spanner) logEvent(session *driver.Session, typ string, query string) error { + if spanner.conf.Binlog.EnableBinlog { + spanner.binlog.LogEvent(typ, session.Schema(), query) + } + return nil +} + +func (spanner *Spanner) handleShowBinlogEvents(session *driver.Session, query string, node sqlparser.Statement) (*sqltypes.Result, error) { + var ts int64 + limit := 100 + binloger := spanner.binlog + + log := spanner.log + ast := node.(*sqlparser.Show) + if ast.From != "" { + gtid, err := strconv.ParseInt(ast.From, 0, 64) + if err != nil { + log.Error("spanner.send.binlog.parser.gtid[%v].error:%v", ast.From, err) + return nil, err + } + ts = gtid + } + if ast.Limit != nil { + rowcount := ast.Limit.Rowcount + if rowcount != nil { + val := rowcount.(*sqlparser.SQLVal) + out, err := strconv.ParseInt(hack.String(val.Val), 10, 64) + if err != nil { + return nil, err + } + limit = int(out) + } + } + + sqlworker, err := binloger.NewSQLWorker(ts) + if err != nil { + log.Error("spanner.send.binlog.new.sqlworker[from:%v, ts:%v].error:%v", ast.From, ts, err) + return nil, err + } + defer binloger.CloseSQLWorker(sqlworker) + + qr := &sqltypes.Result{Fields: []*querypb.Field{ + {Name: 
"Log_name", Type: querypb.Type_VARCHAR}, + {Name: "Pos", Type: querypb.Type_INT64}, + {Name: "GTID", Type: querypb.Type_VARCHAR}, + {Name: "Event_type", Type: querypb.Type_VARCHAR}, + {Name: "Schema", Type: querypb.Type_VARCHAR}, + {Name: "End_log_pos", Type: querypb.Type_INT64}, + {Name: "Info", Type: querypb.Type_VARCHAR}, + }, + } + + counts := 0 + for { + event, err := sqlworker.NextEvent() + if err != nil { + return nil, err + } + if event == nil { + break + } + if counts >= limit { + break + } + if event != nil { + row := []sqltypes.Value{ + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte(event.LogName)), + sqltypes.MakeTrusted(querypb.Type_INT64, []byte(fmt.Sprintf("%v", event.Pos))), + sqltypes.MakeTrusted(querypb.Type_INT64, []byte(fmt.Sprintf("%v", event.Timestamp))), + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte(event.Type)), + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte(event.Schema)), + sqltypes.MakeTrusted(querypb.Type_INT64, []byte(fmt.Sprintf("%v", event.EndLogPos))), + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte(event.Query)), + } + qr.Rows = append(qr.Rows, row) + counts++ + } + } + return qr, nil +} diff --git a/src/proxy/binlog_test.go b/src/proxy/binlog_test.go new file mode 100644 index 00000000..31fb8a7f --- /dev/null +++ b/src/proxy/binlog_test.go @@ -0,0 +1,119 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package proxy + +import ( + "fakedb" + "os" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/xelabs/go-mysqlstack/driver" + "github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes" + "github.com/xelabs/go-mysqlstack/xlog" +) + +func TestProxyBinlog(t *testing.T) { + conf := MockDefaultConfig() + conf.Binlog.EnableBinlog = true + os.RemoveAll(conf.Binlog.LogDir) + + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedbs, proxy, cleanup := MockProxy1(log, conf) + defer cleanup() + address := proxy.Address() + + // DDL. 
+ { + fakedbs.AddQueryPattern("create table .*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("insert .*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("update.*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("replace.*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("select .*", &sqltypes.Result{}) + } + + // create test table. + { + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "create table test.t1(id int, b int) partition by hash(id)" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } + + // Insert. + { + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "insert into test.t1 (id, b) values(1,2),(3,4)" + fakedbs.AddQuery(query, fakedb.Result3) + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } + + // Update. + { + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "update test.t1 set b=3 where id=1" + fakedbs.AddQuery(query, fakedb.Result3) + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } + + // Replace. + { + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "replace into test.t1 (id, b) values(1,2)" + fakedbs.AddQuery(query, fakedb.Result3) + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } + + // Select. + { + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "select * from test.t1" + fakedbs.AddQuery(query, fakedb.Result3) + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } + + // Show binlog events. + { + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "show binlog events" + qr, err := client.FetchAll(query, -1) + assert.Nil(t, err) + log.Debug("--qr:%+v", qr) + } + + // Show binlog events limits 1. 
+ { + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "show binlog events limit 1" + qr, err := client.FetchAll(query, -1) + assert.Nil(t, err) + log.Debug("--qr:%+v", qr) + } + + // Show binlog events from gtid '1514254947594569594' limits 1. + { + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "show binlog events from gtid '1514254947594569594' limit 1" + qr, err := client.FetchAll(query, -1) + assert.Nil(t, err) + log.Debug("--qr:%+v", qr) + } +} diff --git a/src/proxy/ddl.go b/src/proxy/ddl.go new file mode 100644 index 00000000..327ad638 --- /dev/null +++ b/src/proxy/ddl.go @@ -0,0 +1,194 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package proxy + +import ( + "fmt" + "router" + "strings" + + "github.com/xelabs/go-mysqlstack/driver" + + "github.com/xelabs/go-mysqlstack/sqldb" + "github.com/xelabs/go-mysqlstack/sqlparser" + "github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes" +) + +var ( + supportEngines = []string{ + "innodb", + "tokudb", + } +) + +// CheckCreateTable used to check the CRERATE TABLE statement. +func CheckCreateTable(ddl *sqlparser.DDL) error { + shardKey := ddl.PartitionName + // Check the sharding key. + if shardKey == "" { + return fmt.Errorf("create table must end with 'PARTITION BY HASH(shard-key)'") + } + + // UNIQUE/PRIMARY constraint check. 
+ shardKeyOK := false + for _, col := range ddl.TableSpec.Columns { + colName := col.Name.String() + if colName == shardKey { + shardKeyOK = true + } else { + switch col.Type.KeyOpt { + case sqlparser.ColKeyUnique, sqlparser.ColKeyUniqueKey, sqlparser.ColKeyPrimary: + return fmt.Errorf("The unique/primary constraint only be defined on the sharding key column[%s] not [%s]", shardKey, colName) + } + } + } + if !shardKeyOK { + return fmt.Errorf("Sharding Key column '%s' doesn't exist in table", shardKey) + } + + check := false + engine := ddl.TableSpec.Options.Engine + for _, eng := range supportEngines { + if eng == strings.ToLower(engine) { + check = true + break + } + } + + // Change the storage engine to InnoDB. + if !check { + ddl.TableSpec.Options.Engine = "InnoDB" + } + return nil +} + +func checkDatabaseAndTable(database string, table string, router *router.Router) error { + tblList := router.Tables() + tables, ok := tblList[database] + if !ok { + return sqldb.NewSQLError(sqldb.ER_BAD_DB_ERROR, "", database) + } + found := false + for _, t := range tables { + if t == table { + found = true + break + } + } + if !found { + return sqldb.NewSQLError(sqldb.ER_NO_SUCH_TABLE, "", table) + } + return nil +} + +// handleDDL used to handle the DDL command. +// Here we need to deal with database.table grammar. +// Supports: +// 1. CREATE/DROP DATABASE +// 2. CREATE/DROP TABLE ... PARTITION BY HASH(shardkey) +// 3. CREATE/DROP INDEX ON TABLE(columns...) +// 4. ALTER TABLE .. ENGINE=xx +// 5. ALTER TABLE .. ADD COLUMN (column definition) +// 6. ALTER TABLE .. MODIFY COLUMN column definition +// 7. ALTER TABLE .. DROP COLUMN column +func (spanner *Spanner) handleDDL(session *driver.Session, query string, node sqlparser.Statement) (*sqltypes.Result, error) { + log := spanner.log + router := spanner.router + scatter := spanner.scatter + + ddl := node.(*sqlparser.DDL) + database := session.Schema() + // Database operation. 
+ if !ddl.Database.IsEmpty() { + database = ddl.Database.String() + } + + // Table operation. + if !ddl.Table.Qualifier.IsEmpty() { + database = ddl.Table.Qualifier.String() + } + + // Check the database ACL. + if err := router.DatabaseACL(database); err != nil { + return nil, err + } + switch ddl.Action { + case sqlparser.CreateDBStr: + return spanner.ExecuteScatter(query) + case sqlparser.DropDBStr: + // Execute the ddl. + qr, err := spanner.ExecuteScatter(query) + if err != nil { + return nil, err + } + // Drop database from router. + if err := router.DropDatabase(database); err != nil { + return nil, err + } + return qr, nil + case sqlparser.CreateTableStr: + table := ddl.Table.Name.String() + backends := scatter.Backends() + shardKey := ddl.PartitionName + + // Check the table and change the engine. + if err := CheckCreateTable(ddl); err != nil { + log.Error("spanner.ddl.check.crete.table[%s].error:%+v", table, err) + return nil, err + } + + // Create table. + if err := router.CreateTable(database, table, shardKey, backends); err != nil { + return nil, err + } + r, err := spanner.ExecuteDDL(session, database, sqlparser.String(ddl), node) + if err != nil { + // Try to drop table. + router.DropTable(database, table) + return nil, err + } + return r, nil + case sqlparser.DropTableStr: + // Check the database and table is exists. + table := ddl.Table.Name.String() + if err := checkDatabaseAndTable(database, table, router); err != nil { + return nil, err + } + + // Execute. 
+ r, err := spanner.ExecuteDDL(session, database, query, node) + if err != nil { + log.Error("spanner.ddl.execute[%v].error[%+v]", query, err) + } + if err := router.DropTable(database, table); err != nil { + log.Error("spanner.ddl.router.drop.table[%s].error[%+v]", table, err) + } + return r, err + case sqlparser.CreateIndexStr, sqlparser.DropIndexStr, + sqlparser.AlterEngineStr, sqlparser.AlterCharsetStr, + sqlparser.AlterAddColumnStr, sqlparser.AlterDropColumnStr, sqlparser.AlterModifyColumnStr, + sqlparser.TruncateTableStr: + + // Check the database and table is exists. + table := ddl.Table.Name.String() + if err := checkDatabaseAndTable(database, table, router); err != nil { + return nil, err + } + + // Execute. + r, err := spanner.ExecuteDDL(session, database, query, node) + if err != nil { + log.Error("spanner.ddl[%v].error[%+v]", query, err) + } + return r, err + default: + log.Error("spanner.ddl[%v, %+v].access.denied", query, node) + return nil, sqldb.NewSQLError(sqldb.ER_SPECIFIC_ACCESS_DENIED_ERROR, "Access denied; you don't have the privilege for %v operation", ddl.Action) + } +} diff --git a/src/proxy/ddl_test.go b/src/proxy/ddl_test.go new file mode 100644 index 00000000..8b1768bb --- /dev/null +++ b/src/proxy/ddl_test.go @@ -0,0 +1,603 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package proxy + +import ( + "errors" + "fakedb" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/xelabs/go-mysqlstack/driver" + "github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes" + "github.com/xelabs/go-mysqlstack/xlog" +) + +func TestProxyDDLDB(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedbs, proxy, cleanup := MockProxyWithBackup(log) + defer cleanup() + address := proxy.Address() + + // fakedbs. + { + fakedbs.AddQueryPattern(".* database .*", &sqltypes.Result{}) + } + + // create database. 
+ { + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "create database test" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } + + // drop database. + { + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "drop database if exists test" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } + + // ACL database. + { + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "create database mysql" + _, err = client.FetchAll(query, -1) + want := "Access denied; lacking privileges for database mysql (errno 1227) (sqlstate 42000)" + got := err.Error() + assert.Equal(t, want, got) + } +} + +func TestProxyDDLTable(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedbs, proxy, cleanup := MockProxy(log) + defer cleanup() + address := proxy.Address() + + // fakedbs. + { + fakedbs.AddQueryPattern("use .*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("show tables from .*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("create .*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("alter table .*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("drop table .*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("truncate table .*", &sqltypes.Result{}) + } + + // create table error. + { + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "create table t1(a int, b int)" + _, err = client.FetchAll(query, -1) + want := "create table must end with 'PARTITION BY HASH(shard-key)' (errno 1105) (sqlstate HY000)" + got := err.Error() + assert.Equal(t, want, got) + } + + // create table(ACL). 
+ { + client, err := driver.NewConn("mock", "mock", address, "test", "utf8") + assert.Nil(t, err) + query := "create table mysql.t2(id int, b int) partition by hash(id)" + _, err = client.FetchAll(query, -1) + want := "Access denied; lacking privileges for database mysql (errno 1227) (sqlstate 42000)" + got := err.Error() + assert.Equal(t, want, got) + } + + // create test table. + { + client, err := driver.NewConn("mock", "mock", address, "test", "utf8") + assert.Nil(t, err) + query := "create table t1(id int, b int) partition by hash(id)" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } + + // create sbtest table. + { + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "create table sbtest.sbt1(id int, b int) partition by hash(id)" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } + + // alter test table engine. + { + client, err := driver.NewConn("mock", "mock", address, "test", "utf8") + assert.Nil(t, err) + query := "alter table t1 engine=tokudb" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } + + // truncate table. + { + client, err := driver.NewConn("mock", "mock", address, "test", "utf8") + assert.Nil(t, err) + query := "truncate table t1" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } + + // create sbtest table mysql internal error. + { + fakedbs.AddQueryErrorPattern("create table .*", errors.New("mock.mysql.create.table.error")) + + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "create table sbtest.sberror2(id int, b int) partition by hash(id)" + _, err = client.FetchAll(query, -1) + want := "mock.mysql.create.table.error (errno 1105) (sqlstate HY000)" + got := err.Error() + assert.Equal(t, want, got) + } + + // check sbtest.tables. 
+ { + client, err := driver.NewConn("mock", "mock", address, "sbtest", "utf8") + assert.Nil(t, err) + query := "show tables" + qr, err := client.FetchAll(query, -1) + assert.Nil(t, err) + want := "[[sbt1]]" + got := fmt.Sprintf("%+v", qr.Rows) + assert.Equal(t, want, got) + } + + // drop sbtest table error. + { + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "drop table sbtest.t1" + _, err = client.FetchAll(query, -1) + want := "Table 't1' doesn't exist (errno 1146) (sqlstate 42S02)" + got := err.Error() + assert.Equal(t, want, got) + } + + // drop sbtest1 table error. + { + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "drop table sbtest1.t1" + _, err = client.FetchAll(query, -1) + want := "Unknown database 'sbtest1' (errno 1049) (sqlstate 42000)" + got := err.Error() + assert.Equal(t, want, got) + } + + // drop sbtest table. + { + client, err := driver.NewConn("mock", "mock", address, "sbtest", "utf8") + assert.Nil(t, err) + query := "drop table sbt1" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } + + // check sbtest.tables. + { + client, err := driver.NewConn("mock", "mock", address, "sbtest", "utf8") + assert.Nil(t, err) + query := "show tables" + qr, err := client.FetchAll(query, -1) + assert.Nil(t, err) + want := "[]" + got := fmt.Sprintf("%+v", qr.Rows) + assert.Equal(t, want, got) + } + + // create sbtest table. + { + fakedbs.ResetPatternErrors() + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "create table sbtest.sbt1(id int, b int) partition by hash(id)" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } + + // drop sbtest table internal error. 
+ { + fakedbs.AddQueryErrorPattern("drop table .*", errors.New("mock.mysql.drop.table.error")) + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "drop table sbtest.sbt1" + _, err = client.FetchAll(query, -1) + want := "mock.mysql.drop.table.error (errno 1105) (sqlstate HY000)" + got := err.Error() + assert.Equal(t, want, got) + } +} + +func TestProxyDDLIndex(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedbs, proxy, cleanup := MockProxy(log) + defer cleanup() + address := proxy.Address() + + // fakedbs. + { + fakedbs.AddQueryPattern("use .*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("show tables from .*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("create table .*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("show create table .*", fakedb.Result1) + fakedbs.AddQueryPattern("drop table .*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("create index.*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("drop index.*", &sqltypes.Result{}) + } + + // create test table. + { + client, err := driver.NewConn("mock", "mock", address, "test", "utf8") + assert.Nil(t, err) + query := "create table t1(id int, b int) partition by hash(id)" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } + + // show create test table. + { + client, err := driver.NewConn("mock", "mock", address, "test", "utf8") + assert.Nil(t, err) + query := "show create table t1" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } + + // create index. + { + client, err := driver.NewConn("mock", "mock", address, "test", "utf8") + assert.Nil(t, err) + query := "create index index1 on t1(a,b)" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } + + // create index error. 
+ { + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "create index index1 on xx.t1(a,b)" + _, err = client.FetchAll(query, -1) + want := "Unknown database 'xx' (errno 1049) (sqlstate 42000)" + got := err.Error() + assert.Equal(t, want, got) + } + + // create index. + { + client, err := driver.NewConn("mock", "mock", address, "test", "utf8") + assert.Nil(t, err) + query := "create index index1 on t1(a,b)" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } + + // create index error. + { + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "create index index1 on xx.t1(a,b)" + _, err = client.FetchAll(query, -1) + want := "Unknown database 'xx' (errno 1049) (sqlstate 42000)" + got := err.Error() + assert.Equal(t, want, got) + } + + // drop index. + { + client, err := driver.NewConn("mock", "mock", address, "test", "utf8") + assert.Nil(t, err) + query := "drop index index1 on t1" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } +} + +func TestProxyDDLColumn(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedbs, proxy, cleanup := MockProxy(log) + defer cleanup() + address := proxy.Address() + + // fakedbs. + { + fakedbs.AddQueryPattern("use .*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("create table .*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("alter table .*", &sqltypes.Result{}) + } + + // create test table. + { + client, err := driver.NewConn("mock", "mock", address, "test", "utf8") + assert.Nil(t, err) + query := "create table t1(id int, b int) partition by hash(id)" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } + + // add column. + { + client, err := driver.NewConn("mock", "mock", address, "test", "utf8") + assert.Nil(t, err) + query := "alter table t1 add column(c1 int, c2 varchar(100))" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } + + // drop column. 
+ { + client, err := driver.NewConn("mock", "mock", address, "test", "utf8") + assert.Nil(t, err) + query := "alter table t1 drop column c2" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } + + // drop column error(drop the shardkey). + { + client, err := driver.NewConn("mock", "mock", address, "test", "utf8") + assert.Nil(t, err) + query := "alter table t1 drop column id" + _, err = client.FetchAll(query, -1) + want := "unsupported: cannot.drop.the.column.on.shard.key (errno 1105) (sqlstate HY000)" + got := err.Error() + assert.Equal(t, want, got) + } + + // modify column. + { + client, err := driver.NewConn("mock", "mock", address, "test", "utf8") + assert.Nil(t, err) + query := "alter table t1 modify column c2 varchar(1)" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } + + // modify column error(drop the shardkey). + { + client, err := driver.NewConn("mock", "mock", address, "test", "utf8") + assert.Nil(t, err) + query := "alter table t1 modify column id bigint" + _, err = client.FetchAll(query, -1) + want := "unsupported: cannot.modify.the.column.on.shard.key (errno 1105) (sqlstate HY000)" + got := err.Error() + assert.Equal(t, want, got) + } +} + +func TestProxyDDLUnsupported(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedbs, proxy, cleanup := MockProxy(log) + defer cleanup() + address := proxy.Address() + + // fakedbs. + { + fakedbs.AddQueryPattern("use .*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("rename .*", &sqltypes.Result{}) + } + + // rename test table. 
+ { + client, err := driver.NewConn("mock", "mock", address, "test", "utf8") + assert.Nil(t, err) + query := "rename table t1 to t2" + _, err = client.FetchAll(query, -1) + want := "You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use, syntax error at position 7 near 'rename' (errno 1149) (sqlstate 42000)" + got := err.Error() + assert.Equal(t, want, got) + } +} + +func TestProxyDDLCreateTable(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedbs, proxy, cleanup := MockProxy(log) + defer cleanup() + address := proxy.Address() + + // fakedbs. + { + fakedbs.AddQueryPattern("use .*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("create table .*", &sqltypes.Result{}) + } + + querys := []string{ + "create table t1(a int, b int) partition by hash(a)", + "create table t2(a int, b int) PARTITION BY hash(a)", + "create table t3(a int, b int) PARTITION BY hash(a) ", + "create table t4(a int, b int)engine=tokudb PARTITION BY hash(a) ", + "create table t5(a int, b int) default charset=utf8 PARTITION BY hash(a) ", + "create table t6(a int, b int)engine=tokudb default charset=utf8 PARTITION BY hash(a) ", + } + + for _, query := range querys { + client, err := driver.NewConn("mock", "mock", address, "test", "utf8") + assert.Nil(t, err) + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } +} + +func TestProxyDDLCreateTableError(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedbs, proxy, cleanup := MockProxy(log) + defer cleanup() + address := proxy.Address() + + // fakedbs. 
+ { + fakedbs.AddQueryPattern("use .*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("create table .*", &sqltypes.Result{}) + } + + querys := []string{ + "create table t1(a int, b int)", + "create table t2(a int, partition int) PARTiITION BY hash(a)", + } + results := []string{ + "create table must end with 'PARTITION BY HASH(shard-key)' (errno 1105) (sqlstate HY000)", + "You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use, syntax error at position 33 near 'partition' (errno 1149) (sqlstate 42000)", + } + + for i, query := range querys { + client, err := driver.NewConn("mock", "mock", address, "test", "utf8") + assert.Nil(t, err) + _, err = client.FetchAll(query, -1) + want := results[i] + got := err.Error() + assert.Equal(t, want, got) + } +} + +func TestProxyMyLoaderImport(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedbs, proxy, cleanup := MockProxy(log) + defer cleanup() + address := proxy.Address() + + // fakedbs. + { + fakedbs.AddQueryPattern("use .*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("create table .*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("show create database .*", &sqltypes.Result{}) + fakedbs.AddQuery("/*show create database sbtest*/", &sqltypes.Result{}) + } + + querys := []string{ + "create table t1(a int, b int) partition by hash(a)", + "show create database sbtest", + "/*show create database sbtest*/", + "SET autocommit=0", + "SET SESSION wait_timeout = 2147483", + } + + for _, query := range querys { + client, err := driver.NewConn("mock", "mock", address, "test", "utf8") + assert.Nil(t, err) + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } +} + +func TestProxyDDLConstraint(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedbs, proxy, cleanup := MockProxy(log) + defer cleanup() + address := proxy.Address() + + // fakedbs. 
+ { + fakedbs.AddQueryPattern("use .*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("create table .*", &sqltypes.Result{}) + } + + querys := []string{ + "CREATE TABLE t1(a int primary key,b int ) PARTITION BY HASH(a);", + "CREATE TABLE t2(a int unique,b int ) PARTITION BY HASH(a);", + "CREATE TABLE t2(a int ,b int primary key) PARTITION BY HASH(a);", + "CREATE TABLE t3(a int primary key,b int unique) PARTITION BY HASH(a);", + } + + results := []string{ + "", + "", + "The unique/primary constraint only be defined on the sharding key column[a] not [b] (errno 1105) (sqlstate HY000)", + "The unique/primary constraint only be defined on the sharding key column[a] not [b] (errno 1105) (sqlstate HY000)", + } + + for i, query := range querys { + client, err := driver.NewConn("mock", "mock", address, "test", "utf8") + assert.Nil(t, err) + _, err = client.FetchAll(query, -1) + if err != nil { + want := results[i] + got := err.Error() + assert.Equal(t, want, got) + } + } +} + +func TestProxyDDLShardKeyCheck(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedbs, proxy, cleanup := MockProxy(log) + defer cleanup() + address := proxy.Address() + + // fakedbs. 
+ { + fakedbs.AddQueryPattern("use .*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("create table .*", &sqltypes.Result{}) + } + + querys := []string{ + "CREATE TABLE t1(a int primary key,b int ) PARTITION BY HASH(`a`);", + "CREATE TABLE t1(a int,b int ) PARTITION BY HASH(c);", + } + + results := []string{ + "", + "Sharding Key column 'c' doesn't exist in table (errno 1105) (sqlstate HY000)", + } + + for i, query := range querys { + client, err := driver.NewConn("mock", "mock", address, "test", "utf8") + assert.Nil(t, err) + _, err = client.FetchAll(query, -1) + if err != nil { + want := results[i] + got := err.Error() + assert.Equal(t, want, got) + } + } +} + +func TestProxyDDLAlterCharset(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedbs, proxy, cleanup := MockProxy(log) + defer cleanup() + address := proxy.Address() + + // fakedbs. + { + fakedbs.AddQueryPattern("use .*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("show tables from .*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("create .*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("alter table .*", &sqltypes.Result{}) + } + + // create test table. + { + client, err := driver.NewConn("mock", "mock", address, "test", "utf8") + assert.Nil(t, err) + query := "create table t1(id int, b int) partition by hash(id)" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } + + // alter test table charset. + { + client, err := driver.NewConn("mock", "mock", address, "test", "utf8") + assert.Nil(t, err) + query := "alter table t1 convert to character set utf8mb" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } +} diff --git a/src/proxy/delete.go b/src/proxy/delete.go new file mode 100644 index 00000000..6adaba3f --- /dev/null +++ b/src/proxy/delete.go @@ -0,0 +1,22 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. 
+ * + */ + +package proxy + +import ( + "github.com/xelabs/go-mysqlstack/driver" + + "github.com/xelabs/go-mysqlstack/sqlparser" + "github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes" +) + +// handleDelete used to handle the delete command. +func (spanner *Spanner) handleDelete(session *driver.Session, query string, node sqlparser.Statement) (*sqltypes.Result, error) { + database := session.Schema() + return spanner.Execute(session, database, query, node) +} diff --git a/src/proxy/delete_test.go b/src/proxy/delete_test.go new file mode 100644 index 00000000..66e3651f --- /dev/null +++ b/src/proxy/delete_test.go @@ -0,0 +1,51 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package proxy + +import ( + "fakedb" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/xelabs/go-mysqlstack/driver" + "github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes" + "github.com/xelabs/go-mysqlstack/xlog" +) + +func TestProxyDelete(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedbs, proxy, cleanup := MockProxy(log) + defer cleanup() + address := proxy.Address() + + // fakedbs. + { + fakedbs.AddQueryPattern("create table .*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("delete from .*", &sqltypes.Result{}) + } + + // create test table. + { + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "create table test.t1(id int, b int) partition by hash(id)" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } + + // Delete. 
+	{
+		client, err := driver.NewConn("mock", "mock", address, "", "utf8")
+		assert.Nil(t, err)
+		query := "delete from test.t1 where id > 5"
+		fakedbs.AddQuery(query, fakedb.Result3)
+		_, err = client.FetchAll(query, -1)
+		assert.Nil(t, err)
+	}
+}
diff --git a/src/proxy/disk.go b/src/proxy/disk.go
new file mode 100644
index 00000000..c88663a0
--- /dev/null
+++ b/src/proxy/disk.go
@@ -0,0 +1,95 @@
+/*
+ * Radon
+ *
+ * Copyright 2018 The Radon Authors.
+ * Code is licensed under the GPLv3.
+ *
+ */
+
+package proxy
+
+import (
+	"sync"
+	"time"
+	"xbase"
+	"xbase/sync2"
+
+	"github.com/xelabs/go-mysqlstack/xlog"
+)
+
+// DiskCheck tuple.
+// It periodically samples the disk usage of a directory in a background
+// goroutine and records a high-water flag when free space runs low.
+type DiskCheck struct {
+	log       *xlog.Log
+	dir       string           // Directory whose filesystem usage is sampled.
+	done      chan bool        // Closed by Close to stop the check loop.
+	ticker    *time.Ticker     // Fires every 5 seconds to trigger a check.
+	wg        sync.WaitGroup   // Waits for the check goroutine on Close.
+	highwater sync2.AtomicBool // Set when usage crosses the high-water mark.
+}
+
+// NewDiskCheck creates the DiskCheck tuple.
+// The checker is idle until Init is called.
+func NewDiskCheck(log *xlog.Log, dir string) *DiskCheck {
+	return &DiskCheck{
+		log:    log,
+		dir:    dir,
+		done:   make(chan bool),
+		ticker: time.NewTicker(time.Duration(time.Second * 5)), // 5 seconds.
+	}
+}
+
+// HighWater returns the highwater mark.
+// If true there is no space left on device.
+func (dc *DiskCheck) HighWater() bool {
+	return dc.highwater.Get()
+}
+
+// Init used to init disk check goroutine.
+// The goroutine runs until Close is called.
+func (dc *DiskCheck) Init() error {
+	log := dc.log
+
+	dc.wg.Add(1)
+	go func(dc *DiskCheck) {
+		defer dc.wg.Done()
+		dc.check()
+	}(dc)
+	log.Info("disk.check.init.done")
+	return nil
+}
+
+// Close used to close the disk check goroutine.
+func (dc *DiskCheck) Close() {
+	close(dc.done)
+	dc.wg.Wait()
+}
+
+// check is the background loop: it runs doCheck on every ticker fire
+// and returns once the done channel is closed by Close.
+func (dc *DiskCheck) check() {
+	defer dc.ticker.Stop()
+	for {
+		select {
+		case <-dc.ticker.C:
+			dc.doCheck()
+		case <-dc.done:
+			return
+		}
+	}
+}
+
+// doCheck samples the disk usage of dc.dir and updates the highwater flag:
+// usage >= 90% sets it, anything below clears it. Usage >= 80% only logs
+// an early warning without raising the flag.
+func (dc *DiskCheck) doCheck() {
+	log := dc.log
+	ds, err := xbase.DiskUsage(dc.dir)
+	if err != nil {
+		log.Error("disk.check[%v].error:%v", dc.dir, err)
+		return
+	}
+	used := float64(ds.Used) / float64(ds.All)
+	switch {
+	case used >= 0.90:
+		// Keep the message consistent with the 0.90 threshold above
+		// (it previously claimed 95percent).
+		log.Warning("disk.check.got.high.water:%+v, used.perc[%.2f].more.than.90percent!!!", ds, used)
+		dc.highwater.Set(true)
+	case used >= 0.80:
+		log.Warning("disk.check.got.water.mark:%+v, used.perc[%.2f].more.than.80percent", ds, used)
+		dc.highwater.Set(false)
+	default:
+		dc.highwater.Set(false)
+	}
+}
diff --git a/src/proxy/disk_test.go b/src/proxy/disk_test.go
new file mode 100644
index 00000000..02c173f2
--- /dev/null
+++ b/src/proxy/disk_test.go
@@ -0,0 +1,29 @@
+/*
+ * Radon
+ *
+ * Copyright 2018 The Radon Authors.
+ * Code is licensed under the GPLv3.
+ *
+ */
+
+package proxy
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/xelabs/go-mysqlstack/xlog"
+)
+
+func TestDiskCheck(t *testing.T) {
+	log := xlog.NewStdLog(xlog.Level(xlog.PANIC))
+
+	dc := NewDiskCheck(log, "/tmp/")
+	err := dc.Init()
+	assert.Nil(t, err)
+	defer dc.Close()
+
+	dc.doCheck()
+	high := dc.HighWater()
+	assert.False(t, high)
+}
diff --git a/src/proxy/dumper_test.go b/src/proxy/dumper_test.go
new file mode 100644
index 00000000..e63f6877
--- /dev/null
+++ b/src/proxy/dumper_test.go
@@ -0,0 +1,161 @@
+/*
+ * Radon
+ *
+ * Copyright 2018 The Radon Authors.
+ * Code is licensed under the GPLv3.
+ * + */ + +package proxy + +import ( + "os" + "path" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/xelabs/go-mydumper/src/common" + "github.com/xelabs/go-mysqlstack/driver" + querypb "github.com/xelabs/go-mysqlstack/sqlparser/depends/query" + "github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes" + "github.com/xelabs/go-mysqlstack/xlog" +) + +func TestDumperWithProxy(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedbs, server, cleanup := MockProxy(log) + defer cleanup() + address := server.Address() + + selectResult := &sqltypes.Result{ + Fields: []*querypb.Field{ + { + Name: "id", + Type: querypb.Type_INT32, + }, + { + Name: "name", + Type: querypb.Type_VARCHAR, + }, + }, + Rows: make([][]sqltypes.Value, 0, 256)} + + for i := 0; i < 2017; i++ { + row := []sqltypes.Value{ + sqltypes.MakeTrusted(querypb.Type_INT32, []byte("11")), + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte("1nice name")), + } + selectResult.Rows = append(selectResult.Rows, row) + } + + schemaResult := &sqltypes.Result{ + Fields: []*querypb.Field{ + { + Name: "Table", + Type: querypb.Type_VARCHAR, + }, + { + Name: "Create Table", + Type: querypb.Type_VARCHAR, + }, + }, + Rows: [][]sqltypes.Value{ + { + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte("t1")), + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte("CREATE TABLE `t1` (`a` int(11) DEFAULT NULL,`b` varchar(100) DEFAULT NULL) ENGINE=InnoDB")), + }, + }} + + tablesResult := &sqltypes.Result{ + Fields: []*querypb.Field{ + { + Name: "Tables_in_test", + Type: querypb.Type_VARCHAR, + }, + }, + Rows: [][]sqltypes.Value{ + { + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte("t1")), + }, + { + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte("t2")), + }, + }} + + // fakedbs. 
+ { + fakedbs.AddQueryPattern("create table .*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("use .*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("show create table .*", schemaResult) + fakedbs.AddQuery("show tables from test", tablesResult) + fakedbs.AddQueryPattern("select .*", selectResult) + } + + // create test table. + { + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "create table test.t1(id int, b varchar(100)) partition by hash(id)" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + + query = "create table test.t2(id int, b varchar(100)) partition by hash(id)" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } + + args := &common.Args{ + Database: "test", + Outdir: "/tmp/dumperradontest", + User: "mock", + Password: "mock", + Address: address, + ChunksizeInMB: 1, + Threads: 16, + StmtSize: 10000, + IntervalMs: 1000, + } + + os.RemoveAll(args.Outdir) + x := os.MkdirAll(args.Outdir, 0777) + common.AssertNil(x) + common.Dumper(log, args) +} + +func TestLoaderWithProxy(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedbs, server, cleanup := MockProxy(log) + defer cleanup() + address := server.Address() + + // fakedbs. + { + fakedbs.AddQueryPattern("create .*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("insert .*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("use .*", &sqltypes.Result{}) + } + + args := &common.Args{ + Database: "test", + Outdir: "/tmp/dumperradontest", + User: "mock", + Password: "mock", + Address: address, + ChunksizeInMB: 1, + Threads: 16, + StmtSize: 10000, + IntervalMs: 1000, + } + + // Rewrite schema. 
+ { + schema1 := "CREATE TABLE `t1` (`id` int(11) DEFAULT NULL,`b` varchar(100) DEFAULT NULL) ENGINE=InnoDB PARTITION BY HASH(id);" + common.WriteFile(path.Join(args.Outdir, "test.t1-schema.sql"), schema1) + + schema2 := "CREATE TABLE `t2` (`id` int(11) DEFAULT NULL,`b` varchar(100) DEFAULT NULL) ENGINE=InnoDB PARTITION BY HASH(id);" + common.WriteFile(path.Join(args.Outdir, "test.t2-schema.sql"), schema2) + } + + // Loader. + common.Loader(log, args) +} diff --git a/src/proxy/execute.go b/src/proxy/execute.go new file mode 100644 index 00000000..edc7d4a5 --- /dev/null +++ b/src/proxy/execute.go @@ -0,0 +1,224 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package proxy + +import ( + "errors" + "executor" + "optimizer" + "planner" + "xcontext" + + "github.com/xelabs/go-mysqlstack/driver" + "github.com/xelabs/go-mysqlstack/sqlparser" + "github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes" +) + +// ExecuteTwoPC allows multi-shards transactions with 2pc commit. +func (spanner *Spanner) ExecuteTwoPC(session *driver.Session, database string, query string, node sqlparser.Statement) (*sqltypes.Result, error) { + log := spanner.log + conf := spanner.conf + router := spanner.router + scatter := spanner.scatter + sessions := spanner.sessions + + // transaction. + txn, err := scatter.CreateTransaction() + if err != nil { + log.Error("spanner.txn.create.error:[%v]", err) + return nil, err + } + defer txn.Finish() + + // txn limits. + txn.SetTimeout(conf.Proxy.QueryTimeout) + txn.SetMaxResult(conf.Proxy.MaxResultSize) + + // binding. + sessions.TxnBinding(session, txn, node, query) + defer sessions.TxnUnBinding(session) + + // Transaction begin. + if err := txn.Begin(); err != nil { + log.Error("spanner.execute.2pc.txn.begin.error:[%v]", err) + return nil, err + } + + // Transaction execute. 
+	plans, err := optimizer.NewSimpleOptimizer(log, database, query, node, router).BuildPlanTree()
+	if err != nil {
+		return nil, err
+	}
+
+	executors := executor.NewTree(log, plans, txn)
+	qr, err := executors.Execute()
+	if err != nil {
+		// Best-effort rollback; the execute error (not the rollback error)
+		// is what gets returned to the caller.
+		if x := txn.Rollback(); x != nil {
+			log.Error("spanner.execute.2pc.error.to.rollback.still.error:[%v]", x)
+		}
+		return nil, err
+	}
+	if err := txn.Commit(); err != nil {
+		log.Error("spanner.execute.2pc.txn.commit.error:[%v]", err)
+		return nil, err
+	}
+	return qr, nil
+}
+
+// ExecuteNormal used to execute non-2pc querys to shards with QueryTimeout limits.
+func (spanner *Spanner) ExecuteNormal(session *driver.Session, database string, query string, node sqlparser.Statement) (*sqltypes.Result, error) {
+	timeout := spanner.conf.Proxy.QueryTimeout
+	return spanner.executeWithTimeout(session, database, query, node, timeout)
+}
+
+// ExecuteDDL used to execute ddl querys to the shards with DDLTimeout limits, used for create/drop index long time operation.
+func (spanner *Spanner) ExecuteDDL(session *driver.Session, database string, query string, node sqlparser.Statement) (*sqltypes.Result, error) {
+	spanner.log.Warning("spanner.execute.ddl.query:%s", query)
+	timeout := spanner.conf.Proxy.DDLTimeout
+	return spanner.executeWithTimeout(session, database, query, node, timeout)
+}
+
+// executeWithTimeout used to execute non-2pc querys to shards with timeout limits.
+// timeout:
+// 0x01. if timeout <= 0, no limits.
+// 0x02. if timeout > 0, the query will be interrupted if the timeout(in millisecond) is exceeded.
+func (spanner *Spanner) executeWithTimeout(session *driver.Session, database string, query string, node sqlparser.Statement, timeout int) (*sqltypes.Result, error) {
+	log := spanner.log
+	conf := spanner.conf
+	router := spanner.router
+	scatter := spanner.scatter
+	sessions := spanner.sessions
+
+	// transaction. 
+ txn, err := scatter.CreateTransaction() + if err != nil { + log.Error("spanner.txn.create.error:[%v]", err) + return nil, err + } + defer txn.Finish() + + // txn limits. + txn.SetTimeout(timeout) + txn.SetMaxResult(conf.Proxy.MaxResultSize) + + // binding. + sessions.TxnBinding(session, txn, node, query) + defer sessions.TxnUnBinding(session) + + plans, err := optimizer.NewSimpleOptimizer(log, database, query, node, router).BuildPlanTree() + if err != nil { + return nil, err + } + executors := executor.NewTree(log, plans, txn) + qr, err := executors.Execute() + if err != nil { + return nil, err + } + return qr, nil +} + +// ExecuteStreamFetch used to execute a stream fetch query. +func (spanner *Spanner) ExecuteStreamFetch(session *driver.Session, database string, query string, node sqlparser.Statement, callback func(qr *sqltypes.Result) error, streamBufferSize int) error { + log := spanner.log + router := spanner.router + scatter := spanner.scatter + sessions := spanner.sessions + + // transaction. + txn, err := scatter.CreateTransaction() + if err != nil { + log.Error("spanner.txn.create.error:[%v]", err) + return err + } + defer txn.Finish() + + // binding. + sessions.TxnBinding(session, txn, node, query) + defer sessions.TxnUnBinding(session) + + selectNode, ok := node.(*sqlparser.Select) + if !ok { + return errors.New("ExecuteStreamFetch.only.support.select") + } + + plan := planner.NewSelectPlan(log, database, query, selectNode, router) + if err := plan.Build(); err != nil { + return err + } + reqCtx := xcontext.NewRequestContext() + reqCtx.Mode = plan.ReqMode + reqCtx.Querys = plan.Querys + reqCtx.RawQuery = plan.RawQuery + return txn.ExecuteStreamFetch(reqCtx, callback, streamBufferSize) +} + +// Execute used to execute querys to shards. +func (spanner *Spanner) Execute(session *driver.Session, database string, query string, node sqlparser.Statement) (*sqltypes.Result, error) { + // Execute. 
+	// In 2pc mode only DML goes through the two-phase path; everything
+	// else falls back to a normal (single-phase) execution.
+	if spanner.isTwoPC() {
+		if spanner.IsDML(node) {
+			return spanner.ExecuteTwoPC(session, database, query, node)
+		}
+		return spanner.ExecuteNormal(session, database, query, node)
+	}
+	return spanner.ExecuteNormal(session, database, query, node)
+}
+
+// ExecuteSingle used to execute query on one shard without planner.
+// The query must contain the database, such as db.table.
+func (spanner *Spanner) ExecuteSingle(query string) (*sqltypes.Result, error) {
+	log := spanner.log
+	scatter := spanner.scatter
+	txn, err := scatter.CreateTransaction()
+	if err != nil {
+		log.Error("spanner.execute.single.txn.create.error:[%v]", err)
+		return nil, err
+	}
+	defer txn.Finish()
+	return txn.ExecuteSingle(query)
+}
+
+// ExecuteScatter used to execute query on all shards without planner.
+func (spanner *Spanner) ExecuteScatter(query string) (*sqltypes.Result, error) {
+	log := spanner.log
+	scatter := spanner.scatter
+	txn, err := scatter.CreateTransaction()
+	if err != nil {
+		log.Error("spanner.execute.scatter.txn.create.error:[%v]", err)
+		return nil, err
+	}
+	defer txn.Finish()
+	return txn.ExecuteScatter(query)
+}
+
+// ExecuteOnThisBackend used to execute query on the backend without planner.
+func (spanner *Spanner) ExecuteOnThisBackend(backend string, query string) (*sqltypes.Result, error) {
+	log := spanner.log
+	scatter := spanner.scatter
+	txn, err := scatter.CreateTransaction()
+	if err != nil {
+		log.Error("spanner.execute.on.this.backend..txn.create.error:[%v]", err)
+		return nil, err
+	}
+	defer txn.Finish()
+	return txn.ExecuteOnThisBackend(backend, query)
+}
+
+// ExecuteOnBackup used to execute query on the backup. 
+func (spanner *Spanner) ExecuteOnBackup(database string, query string) (*sqltypes.Result, error) { + log := spanner.log + scatter := spanner.scatter + txn, err := scatter.CreateBackupTransaction() + if err != nil { + log.Error("spanner.execute.on.backup..txn.create.error:[%v]", err) + return nil, err + } + defer txn.Finish() + return txn.Execute(database, query) +} diff --git a/src/proxy/execute_test.go b/src/proxy/execute_test.go new file mode 100644 index 00000000..b92fcda9 --- /dev/null +++ b/src/proxy/execute_test.go @@ -0,0 +1,353 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package proxy + +import ( + "errors" + "fakedb" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/xelabs/go-mysqlstack/driver" + "github.com/xelabs/go-mysqlstack/sqldb" + "github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes" + "github.com/xelabs/go-mysqlstack/xlog" +) + +func TestProxyExecute(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedbs, proxy, cleanup := MockProxy(log) + defer cleanup() + address := proxy.Address() + + // fakedbs. + { + fakedbs.AddQueryPattern("create table .*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("insert .*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("select .*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("xa .*", &sqltypes.Result{}) + } + + // create test table. + { + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "create table test.t1(id int, b int) partition by hash(id)" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } + + // Insert. + { + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "insert into test.t1 (id, b) values(1,2),(3,4)" + fakedbs.AddQuery(query, fakedb.Result3) + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } + + // Insert with 2PC. 
+ { + proxy.conf.Proxy.TwopcEnable = true + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "insert into test.t1 (id, b) values(1,2),(3,4)" + fakedbs.AddQuery(query, fakedb.Result3) + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } + + // select with 2PC. + { + proxy.conf.Proxy.TwopcEnable = true + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "select * from test.t1" + fakedbs.AddQuery(query, fakedb.Result3) + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } +} + +func TestProxyExecute2PCError(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedbs, proxy, cleanup := MockProxy(log) + defer cleanup() + address := proxy.Address() + + // fakedbs. + { + fakedbs.AddQueryPattern("create table .*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("xa .*", &sqltypes.Result{}) + fakedbs.AddQueryError("insert into test.t1_0008(id, b) values (1, 2)", errors.New("xx")) + } + + // create test table. + { + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "create table test.t1(id int, b int) partition by hash(id)" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } + + // Insert with 2PC but execute error. + { + proxy.conf.Proxy.TwopcEnable = true + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "insert into test.t1 (id, b) values(1,2),(3,4)" + fakedbs.AddQuery(query, fakedb.Result3) + _, err = client.FetchAll(query, -1) + assert.NotNil(t, err) + } +} + +func TestProxyExecute2PCCommitError(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedbs, proxy, cleanup := MockProxy(log) + defer cleanup() + address := proxy.Address() + + // fakedbs. 
+ { + fakedbs.AddQueryPattern("create table .*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("insert .*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("xa start .*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("xa end .*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("xa rollback .*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("xa commit .*", &sqltypes.Result{}) + fakedbs.AddQueryErrorPattern("xa prepare.*", errors.New("mock.xa.prepare.error")) + } + + // create test table. + { + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "create table test.t1(id int, b int) partition by hash(id)" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } + + // Insert with 2PC but prepare error in the commit phase. + { + proxy.conf.Proxy.TwopcEnable = true + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "insert into test.t1 (id, b) values(1,2),(3,4)" + _, err = client.FetchAll(query, -1) + want := "mock.xa.prepare.error (errno 1105) (sqlstate HY000)" + got := err.Error() + assert.Equal(t, want, got) + } + + // Insert with 2PC but rollback error in the commit phase. + { + fakedbs.ResetPatternErrors() + fakedbs.AddQueryErrorPattern("XA ROLLBACK .*", sqldb.NewSQLError1(1397, "XAE04", "XAER_NOTA: Unknown XID")) + fakedbs.AddQueryPattern("xa prepare .*", &sqltypes.Result{}) + + proxy.conf.Proxy.TwopcEnable = true + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "insert into test.t1 (id, b) values(1,2),(3,4)" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } +} + +func TestProxyExecuteBackup(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedbs, proxy, cleanup := MockProxyWithBackup(log) + defer cleanup() + address := proxy.Address() + + // fakedbs. 
+ { + fakedbs.AddQueryPattern("create table .*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("insert .*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("update .*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("delete .*", &sqltypes.Result{}) + } + + // create test table. + { + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "create table test.t1(id int, b int) partition by hash(id)" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } + + // Insert. + { + querys := []string{ + "insert into test.t1 (id, b) values(1,2),(3,4)", + "update test.t1 set b=1 where id=3", + "delete from test.t1 where 1=1", + } + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + for _, query := range querys { + fakedbs.AddQuery(query, fakedb.Result3) + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } + } + + // select. + { + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "select * from test.t1 join test.t2" + fakedbs.AddQuery(query, fakedb.Result3) + qr, err := client.FetchAll(query, -1) + assert.Nil(t, err) + assert.Equal(t, fakedb.Result3, qr) + } + + // select error. + { + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "select * from test.t11 join test.t22" + fakedbs.AddQueryError(query, errors.New("backup.error")) + _, err = client.FetchAll(query, -1) + want := "backup.error (errno 1105) (sqlstate HY000)" + got := err.Error() + assert.Equal(t, want, got) + } +} + +func TestProxyExecuteSelectError(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedbs, proxy, cleanup := MockProxy(log) + defer cleanup() + address := proxy.Address() + + // fakedbs. + { + fakedbs.AddQueryPattern("create table .*", &sqltypes.Result{}) + } + + // create test table. 
+ { + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "create table test.t1(id int, b int) partition by hash(id)" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } + + // select. + { + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "select * from test.t1 join test.t2" + fakedbs.AddQuery(query, fakedb.Result3) + _, err = client.FetchAll(query, -1) + + want := "unsupported: JOIN.expression (errno 1105) (sqlstate HY000)" + got := err.Error() + assert.Equal(t, want, got) + } +} + +func TestProxyExecuteReadonly(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedbs, proxy, cleanup := MockProxyWithBackup(log) + defer cleanup() + address := proxy.Address() + + // fakedbs. + { + fakedbs.AddQueryPattern("create .*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("insert .*", &sqltypes.Result{}) + } + + // create test table. + { + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "create table test.t1(id int, b int) partition by hash(id)" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } + + // Insert. + { + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "insert into test.t1 (id, b) values(1,2),(3,4)" + fakedbs.AddQuery(query, fakedb.Result3) + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } + + // Set readonly. + { + proxy.SetReadOnly(true) + } + + // select. + { + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "select * from test.t1 join test.t2" + fakedbs.AddQuery(query, fakedb.Result3) + qr, err := client.FetchAll(query, -1) + assert.Nil(t, err) + assert.Equal(t, fakedb.Result3, qr) + } + + // Insert. 
+ { + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "insert into test.t1 (id, b) values(1,2),(3,4)" + fakedbs.AddQuery(query, fakedb.Result3) + _, err = client.FetchAll(query, -1) + want := "The MySQL server is running with the --read-only option so it cannot execute this statement (errno 1290) (sqlstate 42000)" + got := err.Error() + assert.Equal(t, want, got) + } +} + +func TestProxyExecuteStreamFetch(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedbs, proxy, cleanup := MockProxy(log) + defer cleanup() + address := proxy.Address() + + // fakedbs. + { + fakedbs.AddQueryPattern("create table .*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("select .*", &sqltypes.Result{}) + } + + // create test table. + { + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "create table test.t1(id int, b int) partition by hash(id)" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } + + // select with stream. + { + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "select * from test.t1" + fakedbs.AddQuery(query, fakedb.Result3) + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } +} diff --git a/src/proxy/explain.go b/src/proxy/explain.go new file mode 100644 index 00000000..4c9b3843 --- /dev/null +++ b/src/proxy/explain.go @@ -0,0 +1,84 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package proxy + +import ( + "fmt" + "optimizer" + "regexp" + + "github.com/pkg/errors" + "github.com/xelabs/go-mysqlstack/driver" + + "github.com/xelabs/go-mysqlstack/sqldb" + "github.com/xelabs/go-mysqlstack/sqlparser" + querypb "github.com/xelabs/go-mysqlstack/sqlparser/depends/query" + "github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes" +) + +// handleExplain used to handle the EXPLAIN command. 
+func (spanner *Spanner) handleExplain(session *driver.Session, query string, node sqlparser.Statement) (*sqltypes.Result, error) { + log := spanner.log + database := session.Schema() + router := spanner.router + qr := &sqltypes.Result{} + qr.Fields = []*querypb.Field{ + {Name: "EXPLAIN", Type: querypb.Type_VARCHAR}, + } + + pat := `(?i)explain` + reg := regexp.MustCompile(pat) + idx := reg.FindStringIndex(query) + if len(idx) != 2 { + return nil, errors.Errorf("explain.query[%s].syntax.error", query) + } + cutQuery := query[idx[1]:] + subNode, err := sqlparser.Parse(cutQuery) + if err != nil { + msg := fmt.Sprintf("query[%s].parser.error: %v", cutQuery, err) + row := []sqltypes.Value{ + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte(msg)), + } + qr.Rows = append(qr.Rows, row) + return qr, nil + } + + // Explain only supports DML. + // see https://dev.mysql.com/doc/refman/5.7/en/explain.html + switch subNode.(type) { + case *sqlparser.Select: + case *sqlparser.Delete: + case *sqlparser.Insert: + case *sqlparser.Update: + default: + return nil, sqldb.NewSQLError(sqldb.ER_SYNTAX_ERROR, "", "explain only supports SELECT/DELETE/INSERT/UPDATE") + } + + simOptimizer := optimizer.NewSimpleOptimizer(log, database, cutQuery, subNode, router) + planTree, err := simOptimizer.BuildPlanTree() + if err != nil { + log.Error("proxy.explain.error:%+v", err) + msg := fmt.Sprintf("unsupported: cannot.explain.the.query:%s", cutQuery) + row := []sqltypes.Value{ + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte(msg)), + } + qr.Rows = append(qr.Rows, row) + return qr, nil + } + + if len(planTree.Plans()) > 0 { + msg := planTree.Plans()[0].JSON() + row := []sqltypes.Value{ + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte(msg)), + } + qr.Rows = append(qr.Rows, row) + return qr, nil + } + return qr, nil +} diff --git a/src/proxy/explain_test.go b/src/proxy/explain_test.go new file mode 100644 index 00000000..0774edf9 --- /dev/null +++ b/src/proxy/explain_test.go @@ -0,0 +1,287 @@ 
+/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package proxy + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/xelabs/go-mysqlstack/driver" + "github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes" + "github.com/xelabs/go-mysqlstack/xlog" +) + +func TestProxyExplain(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedbs, proxy, cleanup := MockProxy(log) + defer cleanup() + address := proxy.Address() + + // fakedbs. + { + fakedbs.AddQueryPattern("use .*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("create table .*", &sqltypes.Result{}) + } + + // create test table. + { + client, err := driver.NewConn("mock", "mock", address, "test", "utf8") + assert.Nil(t, err) + query := "create table t1(id int, b int) partition by hash(id)" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + client.Quit() + } + + // explain. + { + client, err := driver.NewConn("mock", "mock", address, "test", "utf8") + assert.Nil(t, err) + query := "explain select 1, sum(a),avg(a),a,b from test.t1 as t1 where id>1 group by a,b order by a desc limit 10 offset 100" + qr, err := client.FetchAll(query, -1) + assert.Nil(t, err) + want := `{ + "RawQuery": " select 1, sum(a),avg(a),a,b from test.t1 as t1 where id\u003e1 group by a,b order by a desc limit 10 offset 100", + "Project": "1, sum(a), avg(a), sum(a), count(a), a, b", + "Partitions": [ + { + "Query": "select 1, sum(a), avg(a), sum(a), count(a), a, b from test.t1_0000 as t1 where id \u003e 1 group by a, b order by a desc limit 110", + "Backend": "backend0", + "Range": "[0-128)" + }, + { + "Query": "select 1, sum(a), avg(a), sum(a), count(a), a, b from test.t1_0001 as t1 where id \u003e 1 group by a, b order by a desc limit 110", + "Backend": "backend0", + "Range": "[128-256)" + }, + { + "Query": "select 1, sum(a), avg(a), sum(a), count(a), a, b from test.t1_0002 as t1 where id \u003e 1 group by a, b order by a desc limit 
110", + "Backend": "backend0", + "Range": "[256-384)" + }, + { + "Query": "select 1, sum(a), avg(a), sum(a), count(a), a, b from test.t1_0003 as t1 where id \u003e 1 group by a, b order by a desc limit 110", + "Backend": "backend0", + "Range": "[384-512)" + }, + { + "Query": "select 1, sum(a), avg(a), sum(a), count(a), a, b from test.t1_0004 as t1 where id \u003e 1 group by a, b order by a desc limit 110", + "Backend": "backend0", + "Range": "[512-640)" + }, + { + "Query": "select 1, sum(a), avg(a), sum(a), count(a), a, b from test.t1_0005 as t1 where id \u003e 1 group by a, b order by a desc limit 110", + "Backend": "backend0", + "Range": "[640-819)" + }, + { + "Query": "select 1, sum(a), avg(a), sum(a), count(a), a, b from test.t1_0006 as t1 where id \u003e 1 group by a, b order by a desc limit 110", + "Backend": "backend1", + "Range": "[819-947)" + }, + { + "Query": "select 1, sum(a), avg(a), sum(a), count(a), a, b from test.t1_0007 as t1 where id \u003e 1 group by a, b order by a desc limit 110", + "Backend": "backend1", + "Range": "[947-1075)" + }, + { + "Query": "select 1, sum(a), avg(a), sum(a), count(a), a, b from test.t1_0008 as t1 where id \u003e 1 group by a, b order by a desc limit 110", + "Backend": "backend1", + "Range": "[1075-1203)" + }, + { + "Query": "select 1, sum(a), avg(a), sum(a), count(a), a, b from test.t1_0009 as t1 where id \u003e 1 group by a, b order by a desc limit 110", + "Backend": "backend1", + "Range": "[1203-1331)" + }, + { + "Query": "select 1, sum(a), avg(a), sum(a), count(a), a, b from test.t1_0010 as t1 where id \u003e 1 group by a, b order by a desc limit 110", + "Backend": "backend1", + "Range": "[1331-1459)" + }, + { + "Query": "select 1, sum(a), avg(a), sum(a), count(a), a, b from test.t1_0011 as t1 where id \u003e 1 group by a, b order by a desc limit 110", + "Backend": "backend1", + "Range": "[1459-1638)" + }, + { + "Query": "select 1, sum(a), avg(a), sum(a), count(a), a, b from test.t1_0012 as t1 where id \u003e 1 group 
by a, b order by a desc limit 110", + "Backend": "backend2", + "Range": "[1638-1766)" + }, + { + "Query": "select 1, sum(a), avg(a), sum(a), count(a), a, b from test.t1_0013 as t1 where id \u003e 1 group by a, b order by a desc limit 110", + "Backend": "backend2", + "Range": "[1766-1894)" + }, + { + "Query": "select 1, sum(a), avg(a), sum(a), count(a), a, b from test.t1_0014 as t1 where id \u003e 1 group by a, b order by a desc limit 110", + "Backend": "backend2", + "Range": "[1894-2022)" + }, + { + "Query": "select 1, sum(a), avg(a), sum(a), count(a), a, b from test.t1_0015 as t1 where id \u003e 1 group by a, b order by a desc limit 110", + "Backend": "backend2", + "Range": "[2022-2150)" + }, + { + "Query": "select 1, sum(a), avg(a), sum(a), count(a), a, b from test.t1_0016 as t1 where id \u003e 1 group by a, b order by a desc limit 110", + "Backend": "backend2", + "Range": "[2150-2278)" + }, + { + "Query": "select 1, sum(a), avg(a), sum(a), count(a), a, b from test.t1_0017 as t1 where id \u003e 1 group by a, b order by a desc limit 110", + "Backend": "backend2", + "Range": "[2278-2457)" + }, + { + "Query": "select 1, sum(a), avg(a), sum(a), count(a), a, b from test.t1_0018 as t1 where id \u003e 1 group by a, b order by a desc limit 110", + "Backend": "backend3", + "Range": "[2457-2585)" + }, + { + "Query": "select 1, sum(a), avg(a), sum(a), count(a), a, b from test.t1_0019 as t1 where id \u003e 1 group by a, b order by a desc limit 110", + "Backend": "backend3", + "Range": "[2585-2713)" + }, + { + "Query": "select 1, sum(a), avg(a), sum(a), count(a), a, b from test.t1_0020 as t1 where id \u003e 1 group by a, b order by a desc limit 110", + "Backend": "backend3", + "Range": "[2713-2841)" + }, + { + "Query": "select 1, sum(a), avg(a), sum(a), count(a), a, b from test.t1_0021 as t1 where id \u003e 1 group by a, b order by a desc limit 110", + "Backend": "backend3", + "Range": "[2841-2969)" + }, + { + "Query": "select 1, sum(a), avg(a), sum(a), count(a), a, b from 
test.t1_0022 as t1 where id \u003e 1 group by a, b order by a desc limit 110", + "Backend": "backend3", + "Range": "[2969-3097)" + }, + { + "Query": "select 1, sum(a), avg(a), sum(a), count(a), a, b from test.t1_0023 as t1 where id \u003e 1 group by a, b order by a desc limit 110", + "Backend": "backend3", + "Range": "[3097-3276)" + }, + { + "Query": "select 1, sum(a), avg(a), sum(a), count(a), a, b from test.t1_0024 as t1 where id \u003e 1 group by a, b order by a desc limit 110", + "Backend": "backend4", + "Range": "[3276-3404)" + }, + { + "Query": "select 1, sum(a), avg(a), sum(a), count(a), a, b from test.t1_0025 as t1 where id \u003e 1 group by a, b order by a desc limit 110", + "Backend": "backend4", + "Range": "[3404-3532)" + }, + { + "Query": "select 1, sum(a), avg(a), sum(a), count(a), a, b from test.t1_0026 as t1 where id \u003e 1 group by a, b order by a desc limit 110", + "Backend": "backend4", + "Range": "[3532-3660)" + }, + { + "Query": "select 1, sum(a), avg(a), sum(a), count(a), a, b from test.t1_0027 as t1 where id \u003e 1 group by a, b order by a desc limit 110", + "Backend": "backend4", + "Range": "[3660-3788)" + }, + { + "Query": "select 1, sum(a), avg(a), sum(a), count(a), a, b from test.t1_0028 as t1 where id \u003e 1 group by a, b order by a desc limit 110", + "Backend": "backend4", + "Range": "[3788-3916)" + }, + { + "Query": "select 1, sum(a), avg(a), sum(a), count(a), a, b from test.t1_0029 as t1 where id \u003e 1 group by a, b order by a desc limit 110", + "Backend": "backend4", + "Range": "[3916-4096)" + } + ], + "Aggregate": [ + "sum(a)", + "avg(a)", + "sum(a)", + "count(a)" + ], + "GatherMerge": [ + "a" + ], + "HashGroupBy": [ + "a", + "b" + ], + "Limit": { + "Offset": 100, + "Limit": 10 + } +}` + got := string(qr.Rows[0][0].Raw()) + log.Info(got) + assert.Equal(t, want, got) + } +} + +func TestProxyExplainError(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedbs, proxy, cleanup := MockProxy(log) + defer 
cleanup() + address := proxy.Address() + + // fakedbs. + { + fakedbs.AddQueryPattern("use .*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("create table .*", &sqltypes.Result{}) + } + + // build plan error. + { + client, err := driver.NewConn("mock", "mock", address, "test", "utf8") + assert.Nil(t, err) + query := "explain select xx sdf" + qr, err := client.FetchAll(query, -1) + assert.Nil(t, err) + want := "unsupported: cannot.explain.the.query: select xx sdf" + got := string(qr.Rows[0][0].Raw()) + assert.Equal(t, want, got) + } + + // parse query error. + { + client, err := driver.NewConn("mock", "mock", address, "test", "utf8") + assert.Nil(t, err) + query := "explain xx sdf" + qr, err := client.FetchAll(query, -1) + assert.Nil(t, err) + want := "query[ xx sdf].parser.error: syntax error at position 4 near 'xx'" + got := string(qr.Rows[0][0].Raw()) + assert.Equal(t, want, got) + } +} + +func TestProxyExplainUnsupported(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedbs, proxy, cleanup := MockProxy(log) + defer cleanup() + address := proxy.Address() + + // fakedbs. + { + fakedbs.AddQueryPattern("use .*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("create table .*", &sqltypes.Result{}) + } + + // parse query error. + { + client, err := driver.NewConn("mock", "mock", address, "test", "utf8") + assert.Nil(t, err) + query := "explain create table t1(a int)" + _, err = client.FetchAll(query, -1) + want := "You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use, explain only supports SELECT/DELETE/INSERT/UPDATE (errno 1149) (sqlstate 42000)" + got := err.Error() + assert.Equal(t, want, got) + } +} diff --git a/src/proxy/initdb.go b/src/proxy/initdb.go new file mode 100644 index 00000000..05bea5e4 --- /dev/null +++ b/src/proxy/initdb.go @@ -0,0 +1,32 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. 
+ * + */ + +package proxy + +import ( + "fmt" + + "github.com/xelabs/go-mysqlstack/driver" +) + +// ComInitDB impl. +// Here, we will send a fake query 'SELECT 1' to the backend and check the 'USE DB'. +func (spanner *Spanner) ComInitDB(session *driver.Session, database string) error { + router := spanner.router + + // Check the database ACL. + if err := router.DatabaseACL(database); err != nil { + return err + } + query := fmt.Sprintf("use %s", database) + if _, err := spanner.ExecuteSingle(query); err != nil { + return err + } + session.SetSchema(database) + return nil +} diff --git a/src/proxy/insert.go b/src/proxy/insert.go new file mode 100644 index 00000000..8e5c5d8b --- /dev/null +++ b/src/proxy/insert.go @@ -0,0 +1,22 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package proxy + +import ( + "github.com/xelabs/go-mysqlstack/driver" + + "github.com/xelabs/go-mysqlstack/sqlparser" + "github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes" +) + +// handleInsert used to handle the insert command. +func (spanner *Spanner) handleInsert(session *driver.Session, query string, node sqlparser.Statement) (*sqltypes.Result, error) { + database := session.Schema() + return spanner.Execute(session, database, query, node) +} diff --git a/src/proxy/insert_test.go b/src/proxy/insert_test.go new file mode 100644 index 00000000..67331ebe --- /dev/null +++ b/src/proxy/insert_test.go @@ -0,0 +1,94 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. 
+ * + */ + +package proxy + +import ( + "fakedb" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/xelabs/go-mysqlstack/driver" + "github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes" + "github.com/xelabs/go-mysqlstack/xlog" +) + +func TestProxyInsert(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedbs, proxy, cleanup := MockProxy(log) + defer cleanup() + address := proxy.Address() + + // fakedbs. + { + fakedbs.AddQueryPattern("create table .*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("insert .*", &sqltypes.Result{}) + } + + // create test table. + { + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "create table test.t1(id int, b int) partition by hash(id)" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } + + // Delete. + { + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "insert into test.t1 (id, b) values(1,2),(3,4)" + fakedbs.AddQuery(query, fakedb.Result3) + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } +} + +func TestProxyInsertQuerys(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedbs, proxy, cleanup := MockProxy(log) + defer cleanup() + address := proxy.Address() + + // fakedbs. 
+ { + fakedbs.AddQueryPattern("create table .*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("insert .*", &sqltypes.Result{}) + } + + tables := []string{ + "create table test.t1(id int, b int) partition by hash(id)", + "create table test.t2(id datetime, b int) partition by hash(id)", + "create table test.t3(id varchar(200), b int) partition by hash(id)", + "create table test.t4(id decimal, b int) partition by hash(id)", + "create table test.t5(id float, b int) partition by hash(id)", + } + + querys := []string{ + "insert into test.t1(id, b) values(1, 1)", + "insert into test.t2(id, b) values(20111218131717, 1)", + "insert into test.t3(id, b) values('xx', 1)", + "insert into test.t4(id, b) values(1.11, 1)", + "insert into test.t5(id, b) values(0.3333, 1)", + } + + for _, table := range tables { + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + _, err = client.FetchAll(table, -1) + assert.Nil(t, err) + } + + for _, query := range querys { + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } +} diff --git a/src/proxy/iptable.go b/src/proxy/iptable.go new file mode 100644 index 00000000..124426f7 --- /dev/null +++ b/src/proxy/iptable.go @@ -0,0 +1,92 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package proxy + +import ( + "config" + "sync" + + "github.com/xelabs/go-mysqlstack/xlog" +) + +// IP tuple. +type IP struct { + ip string +} + +// IPTable tuple. +type IPTable struct { + mu sync.RWMutex + log *xlog.Log + conf *config.ProxyConfig + iptable map[string]*IP +} + +// NewIPTable creates a new IPTable. 
+func NewIPTable(log *xlog.Log, conf *config.ProxyConfig) *IPTable { + ipt := &IPTable{ + log: log, + conf: conf, + iptable: make(map[string]*IP), + } + + if conf.IPS != nil { + for _, ip := range conf.IPS { + IP := &IP{ip: ip} + ipt.iptable[ip] = IP + } + } + return ipt +} + +// Add used to add a ip to table. +func (ipt *IPTable) Add(ip string) { + ipt.log.Warning("proxy.iptable.add:%s", ip) + ipt.mu.Lock() + defer ipt.mu.Unlock() + IP := &IP{ip: ip} + ipt.iptable[ip] = IP +} + +// Remove used to remove a ip from table. +func (ipt *IPTable) Remove(ip string) { + ipt.log.Warning("proxy.iptable.remove:%s", ip) + ipt.mu.Lock() + defer ipt.mu.Unlock() + delete(ipt.iptable, ip) +} + +// Refresh used to refresh the table. +func (ipt *IPTable) Refresh() { + ipt.log.Warning("proxy.iptable.refresh:%+v", ipt.conf.IPS) + ipt.mu.Lock() + defer ipt.mu.Unlock() + + ipt.iptable = make(map[string]*IP) + if ipt.conf.IPS != nil { + for _, ip := range ipt.conf.IPS { + IP := &IP{ip: ip} + ipt.iptable[ip] = IP + } + } +} + +// Check used to check a whether the ip is in ip table or not. +func (ipt *IPTable) Check(address string) bool { + ipt.mu.Lock() + defer ipt.mu.Unlock() + + if len(ipt.iptable) == 0 { + return true + } + if _, ok := ipt.iptable[address]; !ok { + return false + } + return true +} diff --git a/src/proxy/iptable_test.go b/src/proxy/iptable_test.go new file mode 100644 index 00000000..340cd674 --- /dev/null +++ b/src/proxy/iptable_test.go @@ -0,0 +1,66 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. 
+ * + */ + +package proxy + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/xelabs/go-mysqlstack/driver" + "github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes" + "github.com/xelabs/go-mysqlstack/xlog" +) + +func TestProxyIptables(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedbs, proxy, cleanup := MockProxy(log) + defer cleanup() + address := proxy.Address() + iptable := proxy.IPTable() + + // fakedbs. + { + fakedbs.AddQueryPattern("use .*", &sqltypes.Result{}) + } + + // OK. + { + _, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + } + + // Add. + { + iptable.Add("127.0.0.1") + } + + // OK. + { + _, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + } + + // Remove. + { + iptable.Remove("127.0.0.1") + iptable.Add("127.0.0.2") + } + + // Check. + { + got := iptable.Check("128.0.0.2") + assert.False(t, got) + } + + // Refresh. + { + proxy.SetAllowIP([]string{"x", "y"}) + iptable.Refresh() + } +} diff --git a/src/proxy/kill.go b/src/proxy/kill.go new file mode 100644 index 00000000..946f3d0b --- /dev/null +++ b/src/proxy/kill.go @@ -0,0 +1,27 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package proxy + +import ( + "github.com/xelabs/go-mysqlstack/driver" + + "github.com/xelabs/go-mysqlstack/sqlparser" + "github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes" +) + +// handleKill used to handle the KILL command. 
+func (spanner *Spanner) handleKill(session *driver.Session, query string, node sqlparser.Statement) (*sqltypes.Result, error) { + log := spanner.log + kill := node.(*sqlparser.Kill) + id := uint32(kill.QueryID.AsUint64()) + log.Warning("proxy.handleKill[%d].from.session[%v]", id, session.ID()) + sessions := spanner.sessions + sessions.Kill(id, "kill.query.from.client") + return &sqltypes.Result{}, nil +} diff --git a/src/proxy/kill_test.go b/src/proxy/kill_test.go new file mode 100644 index 00000000..eb451f63 --- /dev/null +++ b/src/proxy/kill_test.go @@ -0,0 +1,89 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package proxy + +import ( + "fmt" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/xelabs/go-mysqlstack/driver" + "github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes" + "github.com/xelabs/go-mysqlstack/xlog" +) + +func TestProxyKill(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedbs, proxy, cleanup := MockProxy(log) + defer cleanup() + address := proxy.Address() + iptable := proxy.IPTable() + + // fakedbs. + { + fakedbs.AddQueryPattern("use .*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("create table .*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("select * .*", &sqltypes.Result{}) + fakedbs.AddQueryDelay("select * from test.t1_0002", &sqltypes.Result{}, 100000000) + fakedbs.AddQueryDelay("select * from test.t1_0004", &sqltypes.Result{}, 100000000) + } + + // IPTables. + { + iptable.Add("127.0.0.1") + } + + // create test table. + { + client, err := driver.NewConn("mock", "mock", address, "test", "utf8") + assert.Nil(t, err) + query := "create table t1(id int, b int) partition by hash(id)" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + client.Quit() + } + + var wg sync.WaitGroup + var clients []driver.Conn + nums := 1 + // long query. 
+ { + for i := 0; i < nums; i++ { + client, err := driver.NewConn("mock", "mock", address, "test", "utf8") + assert.Nil(t, err) + wg.Add(1) + go func(c driver.Conn) { + defer wg.Done() + query := "select * from t1" + _, err = client.FetchAll(query, -1) + log.Debug("%+v", err) + }(client) + clients = append(clients, client) + } + } + + // kill. + { + time.Sleep(time.Second * 1) + for i := 0; i < nums; i++ { + kill, err := driver.NewConn("mock", "mock", address, "test", "utf8") + assert.Nil(t, err) + wg.Add(1) + go func(c driver.Conn, id uint32) { + defer wg.Done() + query := fmt.Sprintf("kill %d", id) + _, err = kill.FetchAll(query, -1) + assert.Nil(t, err) + }(kill, clients[i].ConnectionID()) + } + } + wg.Wait() +} diff --git a/src/proxy/mock.go b/src/proxy/mock.go new file mode 100644 index 00000000..7eb3c4e2 --- /dev/null +++ b/src/proxy/mock.go @@ -0,0 +1,145 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package proxy + +import ( + "config" + "fakedb" + "fmt" + "math/rand" + "os" + "path" + "time" + + "github.com/xelabs/go-mysqlstack/xlog" +) + +func randomPort(min int, max int) int { + rand := rand.New(rand.NewSource(time.Now().UnixNano())) + d, delta := min, (max - min) + if delta > 0 { + d += rand.Intn(int(delta)) + } + return d +} + +// MockDefaultConfig mocks the default config. +func MockDefaultConfig() *config.Config { + conf := &config.Config{ + Proxy: config.DefaultProxyConfig(), + Audit: config.DefaultAuditConfig(), + Router: config.DefaultRouterConfig(), + Binlog: config.DefaultBinlogConfig(), + Log: config.DefaultLogConfig(), + } + return conf +} + +// MockConfigMax16 mocks the config with MaxConnections=16. 
+func MockConfigMax16() *config.Config { + conf := MockDefaultConfig() + conf.Proxy.IPS = []string{"127.0.0.2"} + conf.Proxy.MetaDir = "/tmp/test_radonmeta" + conf.Proxy.TwopcEnable = false + conf.Proxy.Endpoint = "127.0.0.1:3306" + conf.Proxy.MaxConnections = 16 + conf.Proxy.MaxResultSize = 1024 * 1024 * 1024 // 1GB + conf.Proxy.DDLTimeout = 10 * 3600 * 1000 // 10 hours + conf.Proxy.QueryTimeout = 5 * 60 * 1000 // 5 minutes + conf.Log = &config.LogConfig{ + Level: "ERROR", + } + return conf +} + +// MockProxy mocks a proxy. +func MockProxy(log *xlog.Log) (*fakedb.DB, *Proxy, func()) { + return MockProxy1(log, MockDefaultConfig()) +} + +// MockProxyWithBackup mocks the proxy with backup. +func MockProxyWithBackup(log *xlog.Log) (*fakedb.DB, *Proxy, func()) { + conf := MockDefaultConfig() + conf.Binlog.RelayWaitMs = 100 + conf.Binlog.EnableBinlog = true + conf.Binlog.EnableRelay = true + os.RemoveAll(conf.Binlog.LogDir) + return MockProxy2(log, conf) +} + +// MockProxy1 mocks the proxy with config. +func MockProxy1(log *xlog.Log, conf *config.Config) (*fakedb.DB, *Proxy, func()) { + // Fake backends. + fakedbs := fakedb.New(log, 5) + + port := randomPort(15000, 20000) + addr := fmt.Sprintf(":%d", port) + + conf.Proxy.Endpoint = addr + + fileFormat := "20060102150405.000" + t := time.Now().UTC() + timestamp := t.Format(fileFormat) + metaDir := "/tmp/test_radonmeta_" + timestamp + conf.Proxy.MetaDir = metaDir + + if x := os.MkdirAll(metaDir, 0777); x != nil { + log.Panic("%+v", x) + } + + backendsConf := &config.BackendsConfig{Backends: fakedbs.BackendConfs()} + if err := config.WriteConfig(path.Join(conf.Proxy.MetaDir, "backend.json"), backendsConf); err != nil { + log.Panic("mock.proxy.write.backends.config.error:%+v", err) + } + + // Proxy. + proxy := NewProxy(log, "/tmp/radon_mock.json", conf) + proxy.Start() + return fakedbs, proxy, func() { + proxy.Stop() + fakedbs.Close() + os.RemoveAll(metaDir) + } +} + +// MockProxy2 mocks the proxy with the conf. 
+func MockProxy2(log *xlog.Log, conf *config.Config) (*fakedb.DB, *Proxy, func()) { + // Fake backends. + fakedbs := fakedb.New(log, 5) + + port := randomPort(15000, 20000) + addr := fmt.Sprintf(":%d", port) + + conf.Proxy.Endpoint = addr + metaDir := "/tmp/test_radonmeta" + conf.Proxy.MetaDir = metaDir + + os.RemoveAll(metaDir) + if x := os.MkdirAll(metaDir, 0777); x != nil { + log.Panic("%+v", x) + } + + backends := fakedbs.BackendConfs() + backendLen := len(backends) + backendsConf := &config.BackendsConfig{ + Backends: backends[0 : backendLen-1], + Backup: backends[backendLen-1], + } + if err := config.WriteConfig(path.Join(conf.Proxy.MetaDir, "backend.json"), backendsConf); err != nil { + log.Panic("mock.proxy.write.backends.config.error:%+v", err) + } + + // Proxy. + proxy := NewProxy(log, "/tmp/radon_mock.json", conf) + proxy.Start() + return fakedbs, proxy, func() { + proxy.Stop() + fakedbs.Close() + } +} diff --git a/src/proxy/proxy.go b/src/proxy/proxy.go new file mode 100644 index 00000000..5c8f30f2 --- /dev/null +++ b/src/proxy/proxy.go @@ -0,0 +1,259 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package proxy + +import ( + "audit" + "backend" + "binlog" + "config" + "router" + "sync" + "syncer" + "xbase" + + "github.com/xelabs/go-mysqlstack/driver" + "github.com/xelabs/go-mysqlstack/xlog" +) + +// Proxy tuple. +type Proxy struct { + mu sync.RWMutex + log *xlog.Log + conf *config.Config + confPath string + audit *audit.Audit + router *router.Router + scatter *backend.Scatter + syncer *syncer.Syncer + binlog *binlog.Binlog + iptable *IPTable + spanner *Spanner + sessions *Sessions + listener *driver.Listener + throttle *xbase.Throttle +} + +// NewProxy creates new proxy. 
+func NewProxy(log *xlog.Log, path string, conf *config.Config) *Proxy { + audit := audit.NewAudit(log, conf.Audit) + router := router.NewRouter(log, conf.Proxy.MetaDir, conf.Router) + scatter := backend.NewScatter(log, conf.Proxy.MetaDir) + syncer := syncer.NewSyncer(log, conf.Proxy.MetaDir, conf.Proxy.PeerAddress, router, scatter) + binlog := binlog.NewBinlog(log, conf.Binlog) + return &Proxy{ + log: log, + conf: conf, + confPath: path, + audit: audit, + router: router, + scatter: scatter, + syncer: syncer, + binlog: binlog, + sessions: NewSessions(log), + iptable: NewIPTable(log, conf.Proxy), + throttle: xbase.NewThrottle(0), + } +} + +// Start used to start the proxy. +func (p *Proxy) Start() { + log := p.log + conf := p.conf + audit := p.audit + iptable := p.iptable + syncer := p.syncer + router := p.router + scatter := p.scatter + binlog := p.binlog + sessions := p.sessions + endpoint := conf.Proxy.Endpoint + throttle := p.throttle + + log.Info("proxy.config[%+v]...", conf.Proxy) + log.Info("log.config[%+v]...", conf.Log) + + if err := audit.Init(); err != nil { + log.Panic("proxy.audit.init.panic:%+v", err) + } + if err := syncer.Init(); err != nil { + log.Panic("proxy.syncer.init.panic:%+v", err) + } + if err := binlog.Init(); err != nil { + log.Panic("proxy.binlog.init.panic:%+v", err) + } + if err := router.LoadConfig(); err != nil { + log.Panic("proxy.router.load.panic:%+v", err) + } + if err := scatter.LoadConfig(); err != nil { + log.Panic("proxy.scatter.load.config.panic:%+v", err) + } + + spanner := NewSpanner(log, conf, iptable, router, scatter, binlog, sessions, audit, throttle) + if err := spanner.Init(); err != nil { + log.Panic("proxy.spanner.init.panic:%+v", err) + } + svr, err := driver.NewListener(log, endpoint, spanner) + if err != nil { + log.Panic("proxy.start.error[%+v]", err) + } + p.spanner = spanner + p.listener = svr + log.Info("proxy.start[%v]...", endpoint) + go svr.Accept() +} + +// Stop used to stop the proxy. 
+func (p *Proxy) Stop() { + log := p.log + + log.Info("proxy.starting.shutdown...") + p.sessions.Close() + p.spanner.Close() + p.listener.Close() + p.scatter.Close() + p.audit.Close() + p.syncer.Close() + p.binlog.Close() + log.Info("proxy.shutdown.complete...") +} + +// Config returns the config. +func (p *Proxy) Config() *config.Config { + return p.conf +} + +// Address returns the proxy endpoint. +func (p *Proxy) Address() string { + return p.conf.Proxy.Endpoint +} + +// IPTable returns the ip table. +func (p *Proxy) IPTable() *IPTable { + return p.iptable +} + +// Scatter returns the scatter. +func (p *Proxy) Scatter() *backend.Scatter { + return p.scatter +} + +// Router returns the router. +func (p *Proxy) Router() *router.Router { + return p.router +} + +// Syncer returns the syncer. +func (p *Proxy) Syncer() *syncer.Syncer { + return p.syncer +} + +// Sessions returns the sessions. +func (p *Proxy) Sessions() *Sessions { + return p.sessions +} + +// Spanner returns the spanner. +func (p *Proxy) Spanner() *Spanner { + return p.spanner +} + +// Binlog returns the binlog. +func (p *Proxy) Binlog() *binlog.Binlog { + return p.binlog +} + +// SetMaxConnections used to set the max connections. +func (p *Proxy) SetMaxConnections(connections int) { + p.mu.Lock() + defer p.mu.Unlock() + p.log.Info("proxy.SetMaxResultSize:[%d->%d]", p.conf.Proxy.MaxConnections, connections) + p.conf.Proxy.MaxConnections = connections +} + +// SetMaxResultSize used to set the max result size. +func (p *Proxy) SetMaxResultSize(size int) { + p.mu.Lock() + defer p.mu.Unlock() + p.log.Info("proxy.SetMaxResultSize:[%d->%d]", p.conf.Proxy.MaxResultSize, size) + p.conf.Proxy.MaxResultSize = size +} + +// SetDDLTimeout used to set the ddl timeout. +func (p *Proxy) SetDDLTimeout(timeout int) { + p.mu.Lock() + defer p.mu.Unlock() + p.log.Info("proxy.SetDDLTimeout:[%d->%d]", p.conf.Proxy.DDLTimeout, timeout) + p.conf.Proxy.DDLTimeout = timeout +} + +// SetQueryTimeout used to set query timeout. 
+func (p *Proxy) SetQueryTimeout(timeout int) { + p.mu.Lock() + defer p.mu.Unlock() + p.log.Info("proxy.SetQueryTimeout:[%d->%d]", p.conf.Proxy.QueryTimeout, timeout) + p.conf.Proxy.QueryTimeout = timeout +} + +// SetTwoPC used to set twopc to enable or disable. +func (p *Proxy) SetTwoPC(enable bool) { + p.mu.Lock() + defer p.mu.Unlock() + p.log.Info("proxy.SetTwoPC:[%v->%v]", p.conf.Proxy.TwopcEnable, enable) + p.conf.Proxy.TwopcEnable = enable +} + +// SetAllowIP used to set allow ips. +func (p *Proxy) SetAllowIP(ips []string) { + p.mu.Lock() + defer p.mu.Unlock() + p.log.Info("proxy.SetAllowIP:[%v->%v]", p.conf.Proxy.IPS, ips) + p.conf.Proxy.IPS = ips +} + +// SetAuditMode used to set the mode of audit. +func (p *Proxy) SetAuditMode(mode string) { + p.mu.Lock() + defer p.mu.Unlock() + p.log.Info("proxy.SetAuditMode:[%s->%s]", p.conf.Audit.Mode, mode) + p.conf.Audit.Mode = mode +} + +// SetReadOnly used to enable/disable readonly. +func (p *Proxy) SetReadOnly(val bool) { + p.mu.Lock() + defer p.mu.Unlock() + p.log.Info("proxy.SetReadOnly:[%v->%v]", p.spanner.ReadOnly(), val) + p.spanner.SetReadOnly(val) +} + +// PeerAddress returns the peer address. +func (p *Proxy) PeerAddress() string { + return p.conf.Proxy.PeerAddress +} + +// FlushConfig used to flush the config to disk. +func (p *Proxy) FlushConfig() error { + p.mu.Lock() + defer p.mu.Unlock() + p.log.Info("proxy.flush.config.to.file:%v, config:%+v", p.confPath, p.conf.Proxy) + if err := config.WriteConfig(p.confPath, p.conf); err != nil { + p.log.Error("proxy.flush.config.to.file[%v].error:%v", p.confPath, err) + return err + } + return nil +} + +// SetThrottle used to set the throttle. 
+func (p *Proxy) SetThrottle(val int) { + p.mu.Lock() + defer p.mu.Unlock() + p.log.Info("proxy.SetThrottle:[%v->%v]", p.throttle.Limits(), val) + p.throttle.Set(val) +} diff --git a/src/proxy/proxy_test.go b/src/proxy/proxy_test.go new file mode 100644 index 00000000..3d6fba0a --- /dev/null +++ b/src/proxy/proxy_test.go @@ -0,0 +1,111 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package proxy + +import ( + "testing" + + "github.com/fortytw2/leaktest" + "github.com/stretchr/testify/assert" + "github.com/xelabs/go-mysqlstack/xlog" +) + +func TestProxy1(t *testing.T) { + defer leaktest.Check(t)() + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + _, proxy, cleanup := MockProxy(log) + defer cleanup() + + assert.NotNil(t, proxy.IPTable()) + assert.NotNil(t, proxy.Scatter()) + assert.NotNil(t, proxy.Router()) + assert.NotNil(t, proxy.Sessions()) + + // SetMaxConnections + { + proxy.SetMaxConnections(6666) + assert.Equal(t, 6666, proxy.conf.Proxy.MaxConnections) + } + + // SetMaxResultSize + { + proxy.SetMaxResultSize(6666) + assert.Equal(t, 6666, proxy.conf.Proxy.MaxResultSize) + } + + // SetDDLTimeout + { + proxy.SetDDLTimeout(6666) + assert.Equal(t, 6666, proxy.conf.Proxy.DDLTimeout) + } + + // SetQueryTimeout + { + proxy.SetQueryTimeout(6666) + assert.Equal(t, 6666, proxy.conf.Proxy.QueryTimeout) + } + + // SetTwoPC + { + proxy.SetTwoPC(true) + assert.Equal(t, true, proxy.conf.Proxy.TwopcEnable) + } + + // SetAllowIP + { + ips := []string{"x", "y"} + proxy.SetAllowIP(ips) + assert.Equal(t, ips, proxy.conf.Proxy.IPS) + } + + // SetAuditMode + { + proxy.SetAuditMode("A") + assert.Equal(t, "A", proxy.conf.Audit.Mode) + } + + // SetThrottle + { + proxy.SetThrottle(100) + assert.Equal(t, 100, proxy.throttle.Limits()) + } + + // SetReadOnly + { + assert.Equal(t, false, proxy.spanner.ReadOnly()) + proxy.SetReadOnly(true) + assert.Equal(t, true, proxy.spanner.ReadOnly()) + proxy.SetReadOnly(false) + 
assert.Equal(t, false, proxy.spanner.ReadOnly()) + } + + // FlushConfig. + { + err := proxy.FlushConfig() + assert.Nil(t, err) + } + + // Config + { + config := proxy.Config() + assert.NotNil(t, config) + } + + // Syncer + { + syncer := proxy.Syncer() + assert.NotNil(t, syncer) + } + + // PeerAddress + { + addr := proxy.PeerAddress() + assert.NotNil(t, addr) + } +} diff --git a/src/proxy/query.go b/src/proxy/query.go new file mode 100644 index 00000000..799c6101 --- /dev/null +++ b/src/proxy/query.go @@ -0,0 +1,276 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package proxy + +import ( + "strings" + "xbase" + + "github.com/xelabs/go-mysqlstack/common" + "github.com/xelabs/go-mysqlstack/driver" + + "github.com/xelabs/go-mysqlstack/sqldb" + "github.com/xelabs/go-mysqlstack/sqlparser" + "github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes" +) + +func returnQuery(qr *sqltypes.Result, callback func(qr *sqltypes.Result) error, err error) error { + if err != nil { + return err + } + callback(qr) + return nil +} + +// ComQuery impl. +// Supports statements are: +// 1. DDL +// 2. DML +// 3. USE DB +func (spanner *Spanner) ComQuery(session *driver.Session, query string, callback func(qr *sqltypes.Result) error) error { + var qr *sqltypes.Result + log := spanner.log + throttle := spanner.throttle + diskChecker := spanner.diskChecker + hasBackup := spanner.scatter.HasBackup() + + // Throttle. + throttle.Acquire() + defer throttle.Release() + + // Disk usage check. + if diskChecker.HighWater() { + return sqldb.NewSQLError(sqldb.ER_UNKNOWN_ERROR, "%s", "no space left on device") + } + + // Support for JDBC driver. 
+ if strings.HasPrefix(query, "/*") { + qr, err := spanner.handleJDBCShows(session, query, nil) + qr.Warnings = 1 + return returnQuery(qr, callback, err) + } + query = strings.TrimSpace(query) + query = strings.TrimSuffix(query, ";") + + node, err := sqlparser.Parse(query) + if err != nil { + log.Error("query[%v].parser.error: %v", query, err) + return sqldb.NewSQLError(sqldb.ER_SYNTAX_ERROR, "", err.Error()) + } + + // Readonly check. + if spanner.ReadOnly() { + // DML Write denied. + if spanner.IsDMLWrite(node) { + return sqldb.NewSQLError(sqldb.ER_OPTION_PREVENTS_STATEMENT, "", "--read-only") + } + // DDL denied. + if spanner.IsDDL(node) { + return sqldb.NewSQLError(sqldb.ER_OPTION_PREVENTS_STATEMENT, "", "--read-only") + } + } + + switch node.(type) { + case *sqlparser.Use: + if qr, err = spanner.handleUseDB(session, query, node); err != nil { + log.Error("proxy.usedb[%s].from.session[%v].error:%+v", query, session.ID(), err) + } + spanner.auditLog(session, R, xbase.USEDB, query, qr) + return returnQuery(qr, callback, err) + case *sqlparser.DDL: + if qr, err = spanner.handleDDL(session, query, node); err != nil { + log.Error("proxy.DDL[%s].from.session[%v].error:%+v", query, session.ID(), err) + } else { + // Binlog. 
+ spanner.logEvent(session, xbase.DDL, query) + } + spanner.auditLog(session, W, xbase.DDL, query, qr) + return returnQuery(qr, callback, err) + case *sqlparser.Show: + show := node.(*sqlparser.Show) + switch show.Type { + case sqlparser.ShowDatabasesStr: + if qr, err = spanner.handleShowDatabases(session, query, node); err != nil { + log.Error("proxy.show.databases[%s].from.session[%v].error:%+v", query, session.ID(), err) + } + case sqlparser.ShowStatusStr: + if qr, err = spanner.handleShowStatus(session, query, node); err != nil { + log.Error("proxy.show.status[%s].from.session[%v].error:%+v", query, session.ID(), err) + } + case sqlparser.ShowVersionsStr: + if qr, err = spanner.handleShowVersions(session, query, node); err != nil { + log.Error("proxy.show.verions[%s].from.session[%v].error:%+v", query, session.ID(), err) + } + case sqlparser.ShowEnginesStr: + if qr, err = spanner.handleShowEngines(session, query, node); err != nil { + log.Error("proxy.show.engines[%s].from.session[%v].error:%+v", query, session.ID(), err) + } + case sqlparser.ShowTablesStr: + if qr, err = spanner.handleShowTables(session, query, node); err != nil { + log.Error("proxy.show.tables[%s].from.session[%v].error:%+v", query, session.ID(), err) + } + case sqlparser.ShowCreateTableStr: + if qr, err = spanner.handleShowCreateTable(session, query, node); err != nil { + log.Error("proxy.show.create.table[%s].from.session[%v].error:%+v", query, session.ID(), err) + } + case sqlparser.ShowProcesslistStr: + if qr, err = spanner.handleShowProcesslist(session, query, node); err != nil { + log.Error("proxy.show.processlist[%s].from.session[%v].error:%+v", query, session.ID(), err) + } + case sqlparser.ShowQueryzStr: + if qr, err = spanner.handleShowQueryz(session, query, node); err != nil { + log.Error("proxy.show.queryz[%s].from.session[%v].error:%+v", query, session.ID(), err) + } + case sqlparser.ShowTxnzStr: + if qr, err = spanner.handleShowTxnz(session, query, node); err != nil { + 
log.Error("proxy.show.txnz[%s].from.session[%v].error:%+v", query, session.ID(), err) + } + case sqlparser.ShowCreateDatabaseStr: + // Support for myloader. + if qr, err = spanner.handleShowCreateDatabase(session, query, node); err != nil { + log.Error("proxy.show.create.database[%s].from.session[%v].error:%+v", query, session.ID(), err) + } + case sqlparser.ShowWarningsStr, sqlparser.ShowVariablesStr: + // Support for JDBC. + if qr, err = spanner.handleJDBCShows(session, query, node); err != nil { + log.Error("proxy.JDBC.shows[%s].from.session[%v].error:%+v", query, session.ID(), err) + } + case sqlparser.ShowBinlogEventsStr: + if qr, err = spanner.handleShowBinlogEvents(session, query, node); err != nil { + log.Error("proxy.show.binlogevents[%s].error:%+v", query, err) + } + default: + log.Error("proxy.show.unsupported[%s].from.session[%v]", query, session.ID()) + err = sqldb.NewSQLError(sqldb.ER_UNKNOWN_ERROR, "unsupported.query:%v", query) + } + spanner.auditLog(session, R, xbase.SHOW, query, qr) + return returnQuery(qr, callback, err) + case *sqlparser.Insert: + if qr, err = spanner.handleInsert(session, query, node); err != nil { + log.Error("proxy.insert[%s].from.session[%v].error:%+v", xbase.TruncateQuery(query, 256), session.ID(), err) + } else { + // Binlog. + spanner.logEvent(session, xbase.INSERT, query) + } + inode := node.(*sqlparser.Insert) + switch inode.Action { + case sqlparser.InsertStr: + spanner.auditLog(session, W, xbase.INSERT, query, qr) + case sqlparser.ReplaceStr: + spanner.auditLog(session, W, xbase.REPLACE, query, qr) + } + return returnQuery(qr, callback, err) + case *sqlparser.Delete: + if qr, err = spanner.handleDelete(session, query, node); err != nil { + log.Error("proxy.delete[%s].from.session[%v].error:%+v", query, session.ID(), err) + } else { + // Binlog. 
+ spanner.logEvent(session, xbase.DELETE, query) + } + spanner.auditLog(session, W, xbase.DELETE, query, qr) + return returnQuery(qr, callback, err) + case *sqlparser.Update: + if qr, err = spanner.handleUpdate(session, query, node); err != nil { + log.Error("proxy.update[%s].from.session[%v].error:%+v", xbase.TruncateQuery(query, 256), session.ID(), err) + } else { + // Binlog. + spanner.logEvent(session, xbase.UPDATE, query) + } + spanner.auditLog(session, W, xbase.UPDATE, query, qr) + return returnQuery(qr, callback, err) + case *sqlparser.Select: + typ := "" + backupType := "/*backup*/" + + snode := node.(*sqlparser.Select) + if len(snode.Comments) > 0 { + if common.BytesToString(snode.Comments[0]) == backupType { + typ = backupType + } + } + + switch typ { + case backupType: + log.Warning("proxy.select.for.backup:[%s].prepare", query) + if err = spanner.handleSelectStream(session, query, node, callback); err != nil { + log.Error("proxy.select.for.backup:[%s].error:%+v", xbase.TruncateQuery(query, 256), err) + return err + } + log.Warning("proxy.select.for.backup:[%s].done", query) + return nil + default: + if qr, err = spanner.handleSelect(session, query, node); err != nil { + log.Error("proxy.select[%s].from.session[%v].error:%+v", query, session.ID(), err) + // Send to AP node if we have. 
+ if hasBackup { + if qr, err = spanner.handleBackupQuery(session, query, node); err != nil { + log.Error("proxy.backup.select[%s].error:%+v", xbase.TruncateQuery(query, 256), err) + } + } + } + spanner.auditLog(session, R, xbase.SELECT, query, qr) + return returnQuery(qr, callback, err) + } + case *sqlparser.Kill: + if qr, err = spanner.handleKill(session, query, node); err != nil { + log.Error("proxy.kill[%s].from.session[%v].error:%+v", query, session.ID(), err) + } + spanner.auditLog(session, R, xbase.KILL, query, qr) + return returnQuery(qr, callback, err) + case *sqlparser.Explain: + if qr, err = spanner.handleExplain(session, query, node); err != nil { + log.Error("proxy.explain[%s].from.session[%v].error:%+v", query, session.ID(), err) + } + spanner.auditLog(session, R, xbase.EXPLAIN, query, qr) + return returnQuery(qr, callback, err) + case *sqlparser.Transaction: + // Support for myloader. + log.Warning("proxy.query.transaction.query:%s", query) + spanner.auditLog(session, R, xbase.TRANSACTION, query, qr) + qr = &sqltypes.Result{Warnings: 1} + return returnQuery(qr, callback, nil) + case *sqlparser.Set: + // Support for JDBC/myloader. + log.Warning("proxy.query.set.query:%s", query) + qr = &sqltypes.Result{Warnings: 1} + spanner.auditLog(session, R, xbase.SET, query, qr) + return returnQuery(qr, callback, nil) + default: + log.Error("proxy.unsupported[%s].from.session[%v]", query, session.ID()) + spanner.auditLog(session, R, xbase.UNSUPPORT, query, qr) + return sqldb.NewSQLError(sqldb.ER_UNKNOWN_ERROR, "unsupported.query:%v", query) + } +} + +// IsDML returns the DML query or not. +func (spanner *Spanner) IsDML(node sqlparser.Statement) bool { + switch node.(type) { + case *sqlparser.Select, *sqlparser.Insert, *sqlparser.Delete, *sqlparser.Update: + return true + } + return false +} + +// IsDMLWrite returns the DML write or not. 
+func (spanner *Spanner) IsDMLWrite(node sqlparser.Statement) bool { + switch node.(type) { + case *sqlparser.Insert, *sqlparser.Delete, *sqlparser.Update: + return true + } + return false +} + +// IsDDL returns the DDL query or not. +func (spanner *Spanner) IsDDL(node sqlparser.Statement) bool { + switch node.(type) { + case *sqlparser.DDL: + return true + } + return false +} diff --git a/src/proxy/query_test.go b/src/proxy/query_test.go new file mode 100644 index 00000000..b22de822 --- /dev/null +++ b/src/proxy/query_test.go @@ -0,0 +1,230 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package proxy + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/xelabs/go-mysqlstack/driver" + querypb "github.com/xelabs/go-mysqlstack/sqlparser/depends/query" + "github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes" + "github.com/xelabs/go-mysqlstack/xlog" +) + +func TestProxyQueryTxn(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedbs, proxy, cleanup := MockProxy(log) + defer cleanup() + address := proxy.Address() + querys := []string{ + "start transaction", + "commit", + "SET autocommit=0", + } + + // fakedbs. + { + fakedbs.AddQueryPattern("use .*", &sqltypes.Result{}) + for _, query := range querys { + fakedbs.AddQueryPattern(query, &sqltypes.Result{}) + } + } + + { + client, err := driver.NewConn("mock", "mock", address, "test", "utf8") + assert.Nil(t, err) + defer client.Close() + + for _, query := range querys { + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } + } +} + +func TestProxyQuerySet(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedbs, proxy, cleanup := MockProxy(log) + defer cleanup() + address := proxy.Address() + querys := []string{ + "SET autocommit=0", + "SET SESSION wait_timeout = 2147483", + "SET NAMES utf8", + } + + // fakedbs. 
+ { + fakedbs.AddQueryPattern("use .*", &sqltypes.Result{}) + for _, query := range querys { + fakedbs.AddQueryPattern(query, &sqltypes.Result{}) + } + } + + { + client, err := driver.NewConn("mock", "mock", address, "test", "utf8") + assert.Nil(t, err) + defer client.Close() + + // Support. + for _, query := range querys { + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } + } +} + +func TestProxyQueryComments(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedbs, proxy, cleanup := MockProxy(log) + defer cleanup() + address := proxy.Address() + querys := []string{ + "/*!40014 SET FOREIGN_KEY_CHECKS=0*/", + "select a /*xx*/ from t1", + } + + // fakedbs. + { + fakedbs.AddQueryPattern("use .*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("select .*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("create table .*", &sqltypes.Result{}) + for _, query := range querys { + fakedbs.AddQuery(query, &sqltypes.Result{}) + } + } + + // create test table. + { + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "create table test.t1(id int, b int) partition by hash(id)" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } + + { + client, err := driver.NewConn("mock", "mock", address, "test", "utf8") + assert.Nil(t, err) + defer client.Close() + + // Support. 
+ for _, query := range querys { + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } + } +} + +func TestProxyQueryStream(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedbs, proxy, cleanup := MockProxy(log) + defer cleanup() + address := proxy.Address() + + result11 := &sqltypes.Result{ + Fields: []*querypb.Field{ + { + Name: "id", + Type: querypb.Type_INT32, + }, + { + Name: "name", + Type: querypb.Type_VARCHAR, + }, + }, + Rows: make([][]sqltypes.Value, 0, 256)} + + for i := 0; i < 2017; i++ { + row := []sqltypes.Value{ + sqltypes.MakeTrusted(querypb.Type_INT32, []byte("11")), + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte("1nice name")), + } + result11.Rows = append(result11.Rows, row) + } + + // fakedbs. + { + fakedbs.AddQueryPattern("create table .*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("select .*", result11) + } + + // create test table. + { + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "create table test.t1(id int, b int) partition by hash(id)" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } + + // select. + { + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "select /*backup*/ * from test.t1" + qr, err := client.FetchAll(query, -1) + assert.Nil(t, err) + want := 60510 + got := int(qr.RowsAffected) + assert.Equal(t, want, got) + } +} + +/* +func TestProxyQueryBench(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.ERROR)) + fakedbs, proxy, cleanup := MockProxy(log) + defer cleanup() + address := proxy.Address() + + // fakedbs. + { + fakedbs.AddQueryPattern("create table .*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("insert .*", &sqltypes.Result{}) + } + + // create test table. 
+ { + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "create table test.t1(id int, b int) partition by hash(id)" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } + + // insert. + { + var wg sync.WaitGroup + + l := 1000 + threads := 64 + now := time.Now() + for k := 0; k < threads; k++ { + wg.Add(1) + go func() { + defer wg.Done() + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + for i := 0; i < l; i++ { + query := "insert into test.t1(id, b) values(1,1)" + _, err := client.FetchAll(query, -1) + assert.Nil(t, err) + } + }() + + } + wg.Wait() + n := l * threads + took := time.Since(now) + fmt.Printf(" LOOP\t%v COST %v, avg:%v/s\n", n, took, (int64(n)/(took.Nanoseconds()/1e6))*1000) + } +} +*/ diff --git a/src/proxy/relay.go b/src/proxy/relay.go new file mode 100644 index 00000000..b7d34904 --- /dev/null +++ b/src/proxy/relay.go @@ -0,0 +1,407 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package proxy + +import ( + "binlog" + "config" + "fmt" + "strings" + "sync" + "time" + "xbase" + "xbase/stats" + "xbase/sync2" + + "github.com/xelabs/go-mysqlstack/xlog" +) + +var ( + relayInfoFile = "relay-log.info" + parallelSame = 1 + parallelAll = 2 +) + +// BackupRelay tuple. +type BackupRelay struct { + log *xlog.Log + conf *config.BinlogConfig + spanner *Spanner + relayRates *stats.Rates + relayTimings *stats.Timings + relayInfo *binlog.Info + stateWg sync.WaitGroup + backupWorkerWg sync.WaitGroup + relayWorkerWg sync.WaitGroup + sqlworker *binlog.SQLWorker + eventQueue chan *binlog.Event + stop sync2.AtomicBool + stopRelay sync2.AtomicBool + relayBinlog sync2.AtomicString + relayGTID sync2.AtomicInt64 + initGTID sync2.AtomicInt64 // Init GTID. + resetGTID sync2.AtomicInt64 // Reset GTID. 
+ state sync2.AtomicString + limits sync2.AtomicInt32 + paralles sync2.AtomicInt32 + counts sync2.AtomicInt64 + parallelType sync2.AtomicInt32 +} + +// NewBackupRelay creates new BackupRelay tuple. +func NewBackupRelay(log *xlog.Log, conf *config.BinlogConfig, spanner *Spanner) *BackupRelay { + return &BackupRelay{ + log: log, + conf: conf, + spanner: spanner, + } +} + +// Init used to init all the workers. +func (br *BackupRelay) Init() error { + log := br.log + conf := br.conf + br.parallelType.Set(int32(conf.ParallelType)) + br.limits.Set(int32(conf.RelayWorkers)) + br.relayTimings = stats.NewTimings("Relay") + br.relayRates = stats.NewRates("RelayRates", br.relayTimings, 1, time.Second) + + // relay info. + br.relayInfo = binlog.NewInfo(log, conf, relayInfoFile) + if err := br.relayInfo.Init(); err != nil { + return err + } + + // Init the SQLWorker. + br.initSQLWorker() + // Init the backup workers. + br.initBackupWorkers() + + if conf.EnableRelay { + // Start the backup relay worker. + br.relayWorkerWg.Add(1) + go func(br *BackupRelay) { + defer br.relayWorkerWg.Done() + br.relayToEventQueue() + }(br) + } + return nil +} + +func (br *BackupRelay) initSQLWorker() { + log := br.log + spanner := br.spanner + binlog := spanner.binlog + relayInfo := br.relayInfo + + ts, err := relayInfo.ReadTs() + if err != nil { + log.Panic("backup.relay.read.ts.error:%v", err) + } + br.initGTID.Set(ts) + br.resetGTID.Set(ts) + + // Get the ts from the relay.info file. 
+ sqlworker, err := binlog.NewSQLWorker(ts) + if err != nil { + log.Panic("backup.relay.to.backup.new.sqlworker.error:%v", err) + } + br.sqlworker = sqlworker + log.Info("backup.relay[binlog:%v, pos:%v].sqlworker.init.from[%v]", sqlworker.RelayName(), sqlworker.RelayPosition(), ts) +} + +func (br *BackupRelay) closeSQLWorker() { + if br.sqlworker != nil { + binlog := br.spanner.binlog + defer binlog.CloseSQLWorker(br.sqlworker) + } +} + +func (br *BackupRelay) initBackupWorkers() { + log := br.log + workers := br.conf.RelayWorkers + br.eventQueue = make(chan *binlog.Event, workers*2) + br.backupWorkerWg.Add(1) + go func(br *BackupRelay) { + defer br.backupWorkerWg.Done() + br.backupWorker(0) + }(br) + log.Info("backup.relay.workers[nums:%d].start...", workers) +} + +func (br *BackupRelay) waitForBackupWorkerDone() { + log := br.log + + log.Info("backup.relay.wait.relay.workers.done...") + i := 0 + for len(br.eventQueue) > 0 { + log.Info("backup.relay.wait.for.relay.worker.done.live.events:[%d].wait.seconds:%d", len(br.eventQueue), i) + time.Sleep(time.Second) + i++ + } + close(br.eventQueue) + br.backupWorkerWg.Wait() + log.Info("backup.relay.workers.all.done...") +} + +// RelayBinlog returns the current relay binlog name. +func (br *BackupRelay) RelayBinlog() string { + return br.relayBinlog.Get() +} + +// RelayGTID returns the current relay GTID. +func (br *BackupRelay) RelayGTID() int64 { + return br.relayGTID.Get() +} + +// RelayRates returns the relay rates. +func (br *BackupRelay) RelayRates() string { + return br.relayRates.String() +} + +// RelayStatus returns the stop status. +func (br *BackupRelay) RelayStatus() bool { + return !br.stopRelay.Get() +} + +// RelayCounts returns the counts have relayed. +func (br *BackupRelay) RelayCounts() int64 { + return br.counts.Get() +} + +// MaxWorkers returns the max parallel worker numbers. +func (br *BackupRelay) MaxWorkers() int32 { + return br.limits.Get() +} + +// SetMaxWorkers used to set the limits number. 
+func (br *BackupRelay) SetMaxWorkers(n int32) { + if n > 0 { + br.limits.Set(n) + } +} + +// ParallelWorkers returns the number of the parallel workers. +func (br *BackupRelay) ParallelWorkers() int32 { + return br.paralles.Get() +} + +// SetParallelType used to set the parallel type. +func (br *BackupRelay) SetParallelType(n int32) { + br.parallelType.Set(n) +} + +// ParallelType returns the type of parallel. +func (br *BackupRelay) ParallelType() int32 { + return br.parallelType.Get() +} + +// StopRelayWorker used to stop the relay worker. +func (br *BackupRelay) StopRelayWorker() { + br.stopRelay.Set(true) +} + +// StartRelayWorker used to restart the relay worker. +func (br *BackupRelay) StartRelayWorker() { + br.stopRelay.Set(false) +} + +// RestartGTID returns the restart GTID of next relay. +func (br *BackupRelay) RestartGTID() int64 { + return br.resetGTID.Get() +} + +// ResetRelayWorker used to reset the relay gtid. +// Then the relay worker should relay from the new gtid point. 
+func (br *BackupRelay) ResetRelayWorker(gtid int64) { + br.log.Info("backup.relay.reset.relay.worker.from[%v].to[%v]", br.resetGTID.Get(), gtid) + br.resetGTID.Set(gtid) +} + +func (br *BackupRelay) checkRelayWorkerRestart() { + if br.initGTID.Get() < br.resetGTID.Get() { + log := br.log + spanner := br.spanner + binlog := spanner.binlog + + oldGTID := br.initGTID.Get() + newGTID := br.resetGTID.Get() + old := br.sqlworker + new, err := binlog.NewSQLWorker(newGTID) + if err != nil { + log.Panic("backup.relay.restart.new.sqlworker.error:%v", err) + } + + log.Info("backup.relay.restart.sqlworker.old[bin:%v, pos:%v].new[bin:%v, pos:%v].GTID.from[%v].to[%v]", old.RelayName(), old.RelayPosition(), new.RelayName(), new.RelayPosition(), oldGTID, newGTID) + binlog.CloseSQLWorker(old) + br.sqlworker = new + br.initGTID.Set(newGTID) + } +} + +func (br *BackupRelay) relayToEventQueue() { + log := br.log + relayInfo := br.relayInfo + spanner := br.spanner + scatter := spanner.scatter + + for !br.stop.Get() { + // Check relay stop. + if br.stopRelay.Get() { + log.Warning("backup.relay.is.stopped,please.check...") + time.Sleep(time.Millisecond * time.Duration(br.conf.RelayWaitMs)) + continue + } + + // Check backup ready. + if !scatter.HasBackup() { + log.Warning("backup.relay.but.we.don't.have.backup...") + time.Sleep(time.Millisecond * time.Duration(br.conf.RelayWaitMs)) + continue + } + + // Check to see the sqlworker need restarting after the GTID reset. + br.checkRelayWorkerRestart() + + sqlworker := br.sqlworker + event, err := sqlworker.NextEvent() + if err != nil { + log.Error("backup.relay.read.next.event[binlog:%v, pos:%v].error.degrade.to.readonly.err:%v", sqlworker.RelayName(), sqlworker.RelayPosition(), err) + spanner.SetReadOnly(true) + br.StopRelayWorker() + time.Sleep(time.Second) + continue + } + + // We have dry run all the events. + if event == nil { + time.Sleep(time.Millisecond * 500) + continue + } + + // Sync the relay info to file. 
+ if err := relayInfo.Sync(event.LogName, int64(event.Timestamp)); err != nil { + log.Panic("backup.sync.relay.info[%+v].error:%v", event, err) + } + + // Write to queue. + br.eventQueue <- event + br.relayGTID.Set(int64(event.Timestamp)) + br.relayBinlog.Set(event.LogName) + } + log.Warning("backup.relay[binlog:%v, pos:%v].normal.exit", br.sqlworker.RelayName(), br.sqlworker.RelayPosition()) +} + +func (br *BackupRelay) backupExecuteDDL(event *binlog.Event) { + log := br.log + spanner := br.spanner + + br.counts.Add(1) + if _, err := spanner.handleBackupDDL(event.Schema, event.Query); err != nil { + log.Error("backup.relay.worker.execute.the.event[%+v].error.degrade.to.readonly.err:%v", event, err) + spanner.SetReadOnly(true) + br.StopRelayWorker() + } +} + +func (br *BackupRelay) backupExecuteDML(event *binlog.Event) { + log := br.log + spanner := br.spanner + + br.counts.Add(1) + t0 := time.Now() + if _, err := spanner.handleBackupWrite(event.Schema, event.Query); err != nil { + log.Error("backup.relay.worker.execute.the.event[%+v].error.degrade.to.readonly.err:%v", event, err) + spanner.SetReadOnly(true) + br.StopRelayWorker() + } + br.relayTimings.Add(fmt.Sprintf("relay.%s.rates", strings.ToLower(event.Type)), time.Since(t0)) +} + +func (br *BackupRelay) backupWorker(n int) { + log := br.log + + ddlWorker := func(br *BackupRelay, event *binlog.Event) { + defer br.stateWg.Done() + defer br.paralles.Add(-1) + br.backupExecuteDDL(event) + br.state.Set(event.Type) + } + + dmlWorker := func(br *BackupRelay, event *binlog.Event) { + defer br.stateWg.Done() + defer br.paralles.Add(-1) + br.backupExecuteDML(event) + br.state.Set(event.Type) + } + + for event := range br.eventQueue { + switch event.Type { + case xbase.DDL: + br.stateWg.Wait() + br.paralles.Add(1) + br.stateWg.Add(1) + ddlWorker(br, event) + case xbase.INSERT, xbase.DELETE, xbase.UPDATE, xbase.REPLACE: + switch br.parallelType.Get() { + case int32(parallelSame): + if br.state.Get() != event.Type { + // 
Wait the prev State done.
+ br.stateWg.Wait()
+ }
+
+ for {
+ // Check the parallel worker number.
+ if br.paralles.Get() < br.limits.Get() {
+ br.paralles.Add(1)
+ br.stateWg.Add(1)
+ go dmlWorker(br, event)
+ break
+ }
+ time.Sleep(50 * time.Nanosecond)
+ }
+ case int32(parallelAll):
+ for {
+ // Check the parallel worker number.
+ if br.paralles.Get() < br.limits.Get() {
+ br.paralles.Add(1)
+ br.stateWg.Add(1)
+ go dmlWorker(br, event)
+ break
+ }
+ time.Sleep(50 * time.Nanosecond)
+ }
+ default:
+ br.stateWg.Wait()
+ br.paralles.Add(1)
+ br.stateWg.Add(1)
+ dmlWorker(br, event)
+ }
+ default:
+ log.Error("backup.worker.relay.unsupport.event[%+v]", event)
+ br.spanner.SetReadOnly(true)
+ br.StopRelayWorker()
+ }
+ }
+ br.stateWg.Wait()
+}
+
+// Close used to close all the background workers.
+func (br *BackupRelay) Close() {
+ // Wait for relay worker done.
+ br.stopRelay.Set(true)
+ br.stop.Set(true)
+ br.relayWorkerWg.Wait()
+ br.closeSQLWorker()
+ br.relayInfo.Close()
+
+ // Wait for backup workers done.
+ br.waitForBackupWorkerDone()
+ br.relayRates.Close()
+}
diff --git a/src/proxy/relay_test.go b/src/proxy/relay_test.go
new file mode 100644
index 00000000..9b6bb989
--- /dev/null
+++ b/src/proxy/relay_test.go
@@ -0,0 +1,425 @@
+/*
+ * Radon
+ *
+ * Copyright 2018 The Radon Authors.
+ * Code is licensed under the GPLv3.
+ *
+ */
+
+package proxy
+
+import (
+ "errors"
+ "sync"
+ "testing"
+ "time"
+ "xbase"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/xelabs/go-mysqlstack/driver"
+ "github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes"
+ "github.com/xelabs/go-mysqlstack/xlog"
+)
+
+func TestRelay1(t *testing.T) {
+ log := xlog.NewStdLog(xlog.Level(xlog.PANIC))
+ fakedbs, proxy, cleanup := MockProxyWithBackup(log)
+ defer cleanup()
+ address := proxy.Address()
+ backupRelay := proxy.spanner.BackupRelay()
+
+ // fakedbs. 
+ { + fakedbs.AddQueryPattern("create table `test`.`t1_0.*", &sqltypes.Result{}) + fakedbs.AddQuery("create table test.t1 (\n\t`id` int,\n\t`b` int\n) engine=tokudb", &sqltypes.Result{}) + } + + // create test table. + { + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "create table test.t1(id int, b int) partition by hash(id)" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } + backupRelay.StopRelayWorker() + backupRelay.StartRelayWorker() + backupRelay.RelayRates() + backupRelay.RelayStatus() + backupRelay.RelayGTID() + backupRelay.RelayCounts() + backupRelay.RelayBinlog() + backupRelay.RestartGTID() +} + +func TestRelayDDLEngine(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + conf := MockDefaultConfig() + conf.Proxy.BackupDefaultEngine = "innodb" + fakedbs, proxy, cleanup := MockProxy2(log, conf) + defer cleanup() + address := proxy.Address() + + // fakedbs. + { + fakedbs.AddQueryPattern("create table `test`.`t1_0.*", &sqltypes.Result{}) + fakedbs.AddQuery("create table test.t1 (\n\t`id` int,\n\t`b` int\n) engine=innodb", &sqltypes.Result{}) + } + + // create test table. + { + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "create table test.t1(id int, b int) partition by hash(id)" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } + time.Sleep(time.Second) +} + +func TestRelayDDLAlter(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedbs, proxy, cleanup := MockProxyWithBackup(log) + address := proxy.Address() + defer cleanup() + + // fakedbs. 
+ { + fakedbs.AddQueryPattern("create table `test`.`t1_0.*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("alter table `test`.`t1_0.*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("alter table test.t1 engine=tokudb", &sqltypes.Result{}) + fakedbs.AddQuery("create table test.t1 (\n\t`id` int,\n\t`b` int\n) engine=tokudb", &sqltypes.Result{}) + } + + // create test table. + { + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "create table test.t1(id int, b int) partition by hash(id)" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } + + // alter test table. + { + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "alter table test.t1 engine=tokudb" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } + time.Sleep(time.Second) +} + +func TestRelayDML1(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.ERROR)) + fakedbs, proxy, cleanup := MockProxyWithBackup(log) + defer cleanup() + address := proxy.Address() + backupRelay := proxy.Spanner().BackupRelay() + + // fakedbs. + { + fakedbs.AddQueryPattern("create table `test`.`t.*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("create table test.t.*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("insert into.*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("replace into.*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("delete from.*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("update.*", &sqltypes.Result{}) + } + + // create test table. + { + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "create table test.t1(id int, b int) partition by hash(id)" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } + + // Set the max worker nums. 
+ backupRelay.SetMaxWorkers(3) + n := 456 + dmls := []string{ + "insert into test.t1 (id, b) values(1,1)", + "insert into test.t1 (id, b) values(1,1)", + "insert into test.t1 (id, b) values(1,1)", + "insert into test.t1 (id, b) values(1,1)", + "insert into test.t1 (id, b) values(1,1)", + "insert into test.t1 (id, b) values(1,1)", + "insert into test.t1 (id, b) values(1,1)", + "delete from test.t1 where id=1", + "delete from test.t1 where id=1", + "insert into test.t1 (id, b) values(1,1)", + "delete from test.t1 where id=1", + "delete from test.t1 where id=1", + "update test.t1 set b=1 where id=1", + "insert into test.t1 (id, b) values(1,1)", + "update test.t1 set b=1 where id=1", + "update test.t1 set b=1 where id=1", + "update test.t1 set b=1 where id=1", + } + + ddls := []string{ + "create table test.t2(id int, b int) partition by hash(id)", + "create table test.t3(id int, b int) partition by hash(id)", + "create table test.t4(id int, b int) partition by hash(id)", + "create table test.t5(id int, b int) partition by hash(id)", + "create table test.t6(id int, b int) partition by hash(id)", + } + want := (len(dmls)*n + (len(ddls) + 1)) + + var wg sync.WaitGroup + + // dml routine. + { + wg.Add(1) + go func() { + defer wg.Done() + + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + for i := 0; i < n; i++ { + for _, dml := range dmls { + if _, err = client.FetchAll(dml, -1); err != nil { + log.Panic("---%v", err) + } + } + } + }() + } + + // set parallel type. + { + wg.Add(1) + go func() { + defer wg.Done() + for i := 0; i < 50; i++ { + backupRelay.SetParallelType(int32(i % 5)) + time.Sleep(time.Millisecond * 33) + } + }() + } + + // ddl routine. 
+ { + wg.Add(1) + go func() { + defer wg.Done() + + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + for _, ddl := range ddls { + _, err = client.FetchAll(ddl, -1) + assert.Nil(t, err) + time.Sleep(time.Millisecond * 300) + } + }() + } + + // check routine. + { + wg.Add(1) + go func() { + defer wg.Done() + for { + counts := int(backupRelay.RelayCounts()) + if counts == want { + break + } + time.Sleep(time.Millisecond * 100) + } + }() + } + + backupRelay.ParallelWorkers() + assert.Equal(t, 3, int(backupRelay.MaxWorkers())) + + wg.Wait() + // 49 %5 = 4 + assert.Equal(t, 4, int(backupRelay.ParallelType())) +} + +func TestRelayWaitForBackupWorkerDone(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedbs, proxy, cleanup := MockProxyWithBackup(log) + defer cleanup() + address := proxy.Address() + + // fakedbs. + { + fakedbs.AddQueryPattern("create table `test`.`t1_0.*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("insert into test.t1_0.*", &sqltypes.Result{}) + fakedbs.AddQueryDelay("insert into test.t1 (id, b) values(1,1)", &sqltypes.Result{}, 1000) + fakedbs.AddQuery("create table test.t1 (\n\t`id` int,\n\t`b` int\n) engine=tokudb", &sqltypes.Result{}) + } + + // create test table. + { + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "create table test.t1(id int, b int) partition by hash(id)" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } + + // dml. 
+ { + querys := []string{ + "insert into test.t1 (id, b) values(1,1)", + } + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + for i := 0; i < 100; i++ { + for _, query := range querys { + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } + } + } + time.Sleep(time.Second) +} + +func TestRelayDMLErrorAndRelayAgain(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedbs, proxy, cleanup := MockProxyWithBackup(log) + defer cleanup() + address := proxy.Address() + + // fakedbs. + { + fakedbs.AddQueryPattern("create table `test`.`t1_0.*", &sqltypes.Result{}) + fakedbs.AddQuery("create table test.t1 (\n\t`id` int,\n\t`b` int\n) engine=tokudb", &sqltypes.Result{}) + fakedbs.AddQueryPattern("insert into test.t1_0.*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("delete from.*", &sqltypes.Result{}) + // Relay will be error. + fakedbs.AddQueryErrorPattern("insert into test.t1 \\(id.*", errors.New("mock.relay.insert.error")) + } + + // create test table. + { + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "create table test.t1(id int, b int) partition by hash(id)" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } + + // dml. + { + querys := []string{ + "insert into test.t1 (id, b) values(1,1)", + "delete from test.t1 where id=1", + } + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + for _, query := range querys { + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } + // Unsupport Event. + proxy.spanner.binlog.LogEvent(xbase.SELECT, "test", "unsupport") + } + time.Sleep(time.Second) + + // Restart replay again. 
+ { + proxy.spanner.backupRelay.StartRelayWorker() + } + time.Sleep(time.Second) +} + +func TestRelayDDLError(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedbs, proxy, cleanup := MockProxyWithBackup(log) + defer cleanup() + address := proxy.Address() + + // fakedbs. + { + fakedbs.AddQueryPattern("create table `test`.`t1_0.*", &sqltypes.Result{}) + // Relay will be error. + fakedbs.AddQueryError("create table test.t1 (\n\t`id` int,\n\t`b` int\n) engine=tokudb", errors.New("mock.relay.create.table.error")) + } + + // create test table. + { + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "create table test.t1(id int, b int) partition by hash(id)" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } + time.Sleep(time.Second) +} + +func TestRelayWithNoBackup(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + conf := MockDefaultConfig() + conf.Binlog.EnableBinlog = true + conf.Binlog.EnableRelay = true + fakedbs, proxy, cleanup := MockProxy1(log, conf) + defer cleanup() + address := proxy.Address() + + // fakedbs. + { + fakedbs.AddQueryPattern("create table `test`.`t1_0.*", &sqltypes.Result{}) + fakedbs.AddQuery("create table test.t1 (\n\t`id` int,\n\t`b` int\n) engine=innodb", &sqltypes.Result{}) + } + + // create test table. + { + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "create table test.t1(id int, b int) partition by hash(id)" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } + time.Sleep(time.Second) +} + +func TestRelayRestartSQLWorker(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedbs, proxy, cleanup := MockProxyWithBackup(log) + defer cleanup() + address := proxy.Address() + + // fakedbs. 
+ { + fakedbs.AddQueryPattern("create table `test`.`t1_0.*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("insert into test.t1_0.*", &sqltypes.Result{}) + fakedbs.AddQueryDelay("insert into test.t1 (id, b) values(1,1)", &sqltypes.Result{}, 1000) + fakedbs.AddQuery("create table test.t1 (\n\t`id` int,\n\t`b` int\n) engine=tokudb", &sqltypes.Result{}) + } + + // create test table. + { + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "create table test.t1(id int, b int) partition by hash(id)" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } + + // dml. + { + querys := []string{ + "insert into test.t1 (id, b) values(1,1)", + } + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + for i := 0; i < 100; i++ { + for _, query := range querys { + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } + } + } + time.Sleep(time.Second) + + proxy.spanner.backupRelay.StopRelayWorker() + proxy.spanner.backupRelay.ResetRelayWorker(time.Now().UTC().UnixNano()) + proxy.spanner.backupRelay.StartRelayWorker() + time.Sleep(time.Second) +} diff --git a/src/proxy/replace_test.go b/src/proxy/replace_test.go new file mode 100644 index 00000000..8c9c7515 --- /dev/null +++ b/src/proxy/replace_test.go @@ -0,0 +1,51 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package proxy + +import ( + "fakedb" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/xelabs/go-mysqlstack/driver" + "github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes" + "github.com/xelabs/go-mysqlstack/xlog" +) + +func TestProxyreplace(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedbs, proxy, cleanup := MockProxy(log) + defer cleanup() + address := proxy.Address() + + // fakedbs. 
+	{
+		fakedbs.AddQueryPattern("create table .*", &sqltypes.Result{})
+		fakedbs.AddQueryPattern("replace .*", &sqltypes.Result{})
+	}
+
+	// create test table.
+	{
+		client, err := driver.NewConn("mock", "mock", address, "", "utf8")
+		assert.Nil(t, err)
+		query := "create table test.t1(id int, b int) partition by hash(id)"
+		_, err = client.FetchAll(query, -1)
+		assert.Nil(t, err)
+	}
+
+	// Replace.
+	{
+		client, err := driver.NewConn("mock", "mock", address, "", "utf8")
+		assert.Nil(t, err)
+		query := "replace into test.t1 (id, b) values(1,2),(3,4)"
+		fakedbs.AddQuery(query, fakedb.Result3)
+		_, err = client.FetchAll(query, -1)
+		assert.Nil(t, err)
+	}
+}
diff --git a/src/proxy/select.go b/src/proxy/select.go
new file mode 100644
index 00000000..2c964e15
--- /dev/null
+++ b/src/proxy/select.go
@@ -0,0 +1,28 @@
+/*
+ * Radon
+ *
+ * Copyright 2018 The Radon Authors.
+ * Code is licensed under the GPLv3.
+ *
+ */
+
+package proxy
+
+import (
+	"github.com/xelabs/go-mysqlstack/driver"
+
+	"github.com/xelabs/go-mysqlstack/sqlparser"
+	"github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes"
+)
+
+// handleSelect used to handle the select command.
+func (spanner *Spanner) handleSelect(session *driver.Session, query string, node sqlparser.Statement) (*sqltypes.Result, error) {
+	database := session.Schema()
+	return spanner.Execute(session, database, query, node)
+}
+
+// handleSelectStream used to handle the select command in streaming mode,
+// passing result batches to callback with a fixed stream buffer size.
+func (spanner *Spanner) handleSelectStream(session *driver.Session, query string, node sqlparser.Statement, callback func(qr *sqltypes.Result) error) error {
+	streamBufferSize := 1024 * 1024 * 16 // 16MB.
+	database := session.Schema()
+	return spanner.ExecuteStreamFetch(session, database, query, node, callback, streamBufferSize)
+}
diff --git a/src/proxy/sessions.go b/src/proxy/sessions.go
new file mode 100644
index 00000000..fa82c442
--- /dev/null
+++ b/src/proxy/sessions.go
@@ -0,0 +1,258 @@
+/*
+ * Radon
+ *
+ * Copyright 2018 The Radon Authors.
+ * Code is licensed under the GPLv3.
+ * + */ + +package proxy + +import ( + "backend" + "sort" + "sync" + "time" + + "github.com/xelabs/go-mysqlstack/driver" + "github.com/xelabs/go-mysqlstack/sqlparser" + "github.com/xelabs/go-mysqlstack/xlog" +) + +type session struct { + mu sync.Mutex + node sqlparser.Statement + query string + session *driver.Session + timestamp int64 + transaction backend.Transaction +} + +// Sessions tuple. +type Sessions struct { + log *xlog.Log + mu sync.RWMutex + // Key is session ID. + sessions map[uint32]*session +} + +// NewSessions creates new session. +func NewSessions(log *xlog.Log) *Sessions { + return &Sessions{ + log: log, + sessions: make(map[uint32]*session), + } +} + +// Add used to add the session to map when session created. +func (ss *Sessions) Add(s *driver.Session) { + ss.mu.Lock() + defer ss.mu.Unlock() + ss.sessions[s.ID()] = &session{ + session: s, + timestamp: time.Now().Unix()} +} + +func (ss *Sessions) txnAbort(txn backend.Transaction, node sqlparser.Statement) { + log := ss.log + // If transaction is not nil, means we can abort it when the session exit. + // Here there is some races: + // 1. if txn has finished, abort do nothing. + // 2. if txn has aborted, finished do nothing. + // + // Txn abortable case: + // 1. select query. + // 2. DDL query. + // If the client closed, txn will be abort by backend. + if txn != nil && node != nil { + switch node.(type) { + case *sqlparser.Select, *sqlparser.DDL: + if err := txn.Abort(); err != nil { + log.Error("proxy.session.txn.abort.error:%+v", err) + return + } + } + } +} + +// Remove used to remove the session from the map when session exit. +func (ss *Sessions) Remove(s *driver.Session) { + ss.mu.Lock() + session, ok := ss.sessions[s.ID()] + if !ok { + ss.mu.Unlock() + return + } + session.mu.Lock() + txn := session.transaction + node := session.node + session.mu.Unlock() + delete(ss.sessions, s.ID()) + ss.mu.Unlock() + + // txn abort. + ss.txnAbort(txn, node) +} + +// Kill used to kill a live session. 
+// 1. remove from sessions list. +// 2. close the session from the server side. +// 3. abort the session's txn. +func (ss *Sessions) Kill(id uint32, reason string) { + log := ss.log + ss.mu.Lock() + session, ok := ss.sessions[id] + if !ok { + ss.mu.Unlock() + return + } + log.Warning("session.id[%v].killed.reason:%s", id, reason) + session.mu.Lock() + txn := session.transaction + node := session.node + sess := session.session + session.mu.Unlock() + + delete(ss.sessions, id) + ss.mu.Unlock() + + // 1.close the session connection from the server side. + sess.Close() + + // 2. abort the txn. + ss.txnAbort(txn, node) +} + +// Reaches used to check whether the sessions count reaches(>=) the quota. +func (ss *Sessions) Reaches(quota int) bool { + ss.mu.RLock() + defer ss.mu.RUnlock() + return (len(ss.sessions) >= quota) +} + +// TxnBinding used to bind txn to the session. +func (ss *Sessions) TxnBinding(s *driver.Session, txn backend.Transaction, node sqlparser.Statement, query string) { + ss.mu.RLock() + session, ok := ss.sessions[s.ID()] + if !ok { + ss.mu.RUnlock() + return + } + ss.mu.RUnlock() + + session.mu.Lock() + defer session.mu.Unlock() + q := query + if len(query) > 128 { + q = query[:128] + } + session.query = q + session.node = node + session.transaction = txn + session.timestamp = time.Now().Unix() +} + +// TxnUnBinding used to set transaction and node to nil. +func (ss *Sessions) TxnUnBinding(s *driver.Session) { + ss.mu.RLock() + session, ok := ss.sessions[s.ID()] + if !ok { + ss.mu.RUnlock() + return + } + ss.mu.RUnlock() + + session.mu.Lock() + defer session.mu.Unlock() + session.node = nil + session.query = "" + session.transaction = nil + session.timestamp = time.Now().Unix() +} + +// Close used to close all sessions. 
+func (ss *Sessions) Close() { + i := 0 + for { + ss.mu.Lock() + for k, v := range ss.sessions { + v.mu.Lock() + txn := v.transaction + sess := v.session + node := v.node + v.mu.Unlock() + if txn == nil { + delete(ss.sessions, k) + sess.Close() + } else { + // Try to abort READ-ONLY or DDL statement. + ss.txnAbort(txn, node) + } + } + c := len(ss.sessions) + ss.mu.Unlock() + + if c > 0 { + ss.log.Warning("session.wait.for.shutdown.live.txn:[%d].wait.seconds:%d", c, i) + time.Sleep(time.Second) + i++ + } else { + break + } + } +} + +// SessionInfo tuple. +type SessionInfo struct { + ID uint32 + User string + Host string + DB string + Command string + Time uint32 + State string + Info string + RowsSent uint64 + RowsExamined uint64 +} + +// Sort by id. +type sessionInfos []SessionInfo + +// Len impl. +func (q sessionInfos) Len() int { return len(q) } + +// Swap impl. +func (q sessionInfos) Swap(i, j int) { q[i], q[j] = q[j], q[i] } + +// Less impl. +func (q sessionInfos) Less(i, j int) bool { return q[i].ID < q[j].ID } + +// Snapshot returns all session info. +func (ss *Sessions) Snapshot() []SessionInfo { + var infos sessionInfos + + now := time.Now().Unix() + ss.mu.Lock() + for _, v := range ss.sessions { + v.mu.Lock() + defer v.mu.Unlock() + info := SessionInfo{ + ID: v.session.ID(), + User: v.session.User(), + Host: v.session.Addr(), + DB: v.session.Schema(), + Command: "Sleep", + Time: uint32(now - v.timestamp), + } + + if v.node != nil { + info.Command = "Query" + info.Info = v.query + } + infos = append(infos, info) + } + ss.mu.Unlock() + sort.Sort(infos) + return infos +} diff --git a/src/proxy/sessions_test.go b/src/proxy/sessions_test.go new file mode 100644 index 00000000..4b58d0c4 --- /dev/null +++ b/src/proxy/sessions_test.go @@ -0,0 +1,59 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. 
+ * + */ + +package proxy + +import ( + "sync" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/xelabs/go-mysqlstack/driver" + "github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes" + "github.com/xelabs/go-mysqlstack/xlog" +) + +func TestProxySessionWaitForShutdown(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedbs, proxy, cleanup := MockProxy(log) + address := proxy.Address() + + // fakedbs. + { + fakedbs.AddQueryPattern("use .*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("create table .*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("select * .*", &sqltypes.Result{}) + fakedbs.AddQueryDelay("select * from test.t1_0002", &sqltypes.Result{}, 30000) + } + + // create test table. + { + client, err := driver.NewConn("mock", "mock", address, "test", "utf8") + assert.Nil(t, err) + query := "create table t1(id int, b int) partition by hash(id)" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + client.Quit() + } + + var wg sync.WaitGroup + { + wg.Add(1) + client, err := driver.NewConn("mock", "mock", address, "test", "utf8") + assert.Nil(t, err) + go func(c driver.Conn) { + defer wg.Done() + query := "select * from t1" + _, err = client.FetchAll(query, -1) + }(client) + } + time.Sleep(time.Second) + cleanup() + wg.Wait() +} diff --git a/src/proxy/show.go b/src/proxy/show.go new file mode 100644 index 00000000..ae8ab517 --- /dev/null +++ b/src/proxy/show.go @@ -0,0 +1,403 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. 
+ * + */ + +package proxy + +import ( + "build" + "bytes" + "encoding/json" + "fmt" + "sort" + "strings" + "time" + + "github.com/xelabs/go-mysqlstack/driver" + "github.com/xelabs/go-mysqlstack/sqldb" + "github.com/xelabs/go-mysqlstack/sqlparser" + querypb "github.com/xelabs/go-mysqlstack/sqlparser/depends/query" + "github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes" +) + +// handleShowDatabases used to handle the 'SHOW DATABASES' command. +func (spanner *Spanner) handleShowDatabases(session *driver.Session, query string, node sqlparser.Statement) (*sqltypes.Result, error) { + return spanner.ExecuteSingle(query) +} + +// handleShowEngines used to handle the 'SHOW ENGINES' command. +func (spanner *Spanner) handleShowEngines(session *driver.Session, query string, node sqlparser.Statement) (*sqltypes.Result, error) { + return spanner.ExecuteSingle(query) +} + +// handleShowCreateDatabase used to handle the 'SHOW CREATE DATABASE' command. +func (spanner *Spanner) handleShowCreateDatabase(session *driver.Session, query string, node sqlparser.Statement) (*sqltypes.Result, error) { + return spanner.ExecuteSingle(query) +} + +// handleShowTables used to handle the 'SHOW TABLES' command. +func (spanner *Spanner) handleShowTables(session *driver.Session, query string, node sqlparser.Statement) (*sqltypes.Result, error) { + router := spanner.router + ast := node.(*sqlparser.Show) + + database := session.Schema() + if !ast.Database.IsEmpty() { + database = ast.Database.Name.String() + } + if database == "" { + return nil, sqldb.NewSQLError(sqldb.ER_NO_DB_ERROR, "") + } + // Check the database ACL. + if err := router.DatabaseACL(database); err != nil { + return nil, err + } + + // For validating the query works, we send it to the backend and check the error. 
+ rewritten := fmt.Sprintf("SHOW TABLES FROM %s", database) + _, err := spanner.ExecuteScatter(rewritten) + if err != nil { + return nil, err + } + + qr := &sqltypes.Result{} + tblList := router.Tables() + tables, ok := tblList[database] + if ok { + qr.Fields = []*querypb.Field{ + {Name: fmt.Sprintf("Tables_in_%s", database), Type: querypb.Type_VARCHAR}, + } + for _, table := range tables { + row := []sqltypes.Value{sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte(table))} + qr.Rows = append(qr.Rows, row) + } + } + return qr, nil +} + +func (spanner *Spanner) handleShowCreateTable(session *driver.Session, query string, node sqlparser.Statement) (*sqltypes.Result, error) { + router := spanner.router + ast := node.(*sqlparser.Show) + + table := ast.Table.Name.String() + database := session.Schema() + if !ast.Table.Qualifier.IsEmpty() { + database = ast.Table.Qualifier.String() + } + if database == "" { + return nil, sqldb.NewSQLError(sqldb.ER_NO_DB_ERROR, "") + } + // Check the database ACL. + if err := router.DatabaseACL(database); err != nil { + return nil, err + } + + // Get one table from the router. + parts, err := router.Lookup(database, table, nil, nil) + if err != nil { + return nil, err + } + partTable := parts[0].Table + backend := parts[0].Backend + rewritten := fmt.Sprintf("SHOW CREATE TABLE %s.%s", database, partTable) + qr, err := spanner.ExecuteOnThisBackend(backend, rewritten) + if err != nil { + return nil, err + } + + // 'show create table' has two columns. + c1 := qr.Rows[0][0] + c2 := qr.Rows[0][1] + // Replace the partition table to raw table. + c1Val := strings.Replace(string(c1.Raw()), partTable, table, 1) + c2Val := strings.Replace(string(c2.Raw()), partTable, table, 1) + qr.Rows[0][0] = sqltypes.MakeTrusted(c1.Type(), []byte(c1Val)) + qr.Rows[0][1] = sqltypes.MakeTrusted(c2.Type(), []byte(c2Val)) + return qr, nil +} + +// handleShowProcesslist used to handle the query "SHOW PROCESSLIST". 
+func (spanner *Spanner) handleShowProcesslist(session *driver.Session, query string, node sqlparser.Statement) (*sqltypes.Result, error) {
+	sessions := spanner.sessions
+	qr := &sqltypes.Result{}
+	// Column layout mirrors MySQL's SHOW PROCESSLIST output.
+	qr.Fields = []*querypb.Field{
+		{Name: "Id", Type: querypb.Type_INT64},
+		{Name: "User", Type: querypb.Type_VARCHAR},
+		{Name: "Host", Type: querypb.Type_VARCHAR},
+		{Name: "db", Type: querypb.Type_VARCHAR},
+		{Name: "Command", Type: querypb.Type_VARCHAR},
+		{Name: "Time", Type: querypb.Type_INT32},
+		{Name: "State", Type: querypb.Type_VARCHAR},
+		{Name: "Info", Type: querypb.Type_VARCHAR},
+		{Name: "Rows_sent", Type: querypb.Type_INT64},
+		{Name: "Rows_examined", Type: querypb.Type_INT64},
+	}
+	// One row per live proxy session, taken from the sessions snapshot.
+	sessionInfos := sessions.Snapshot()
+	for _, info := range sessionInfos {
+		row := []sqltypes.Value{
+			sqltypes.MakeTrusted(querypb.Type_INT64, []byte(fmt.Sprintf("%v", info.ID))),
+			sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte(info.User)),
+			sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte(info.Host)),
+			sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte(info.DB)),
+			sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte(info.Command)),
+			sqltypes.MakeTrusted(querypb.Type_INT32, []byte(fmt.Sprintf("%v", info.Time))),
+			sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte(info.State)),
+			sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte(info.Info)),
+			// Rows_sent/Rows_examined are not tracked by the proxy; always 0.
+			sqltypes.MakeTrusted(querypb.Type_INT64, []byte(fmt.Sprintf("%v", 0))),
+			sqltypes.MakeTrusted(querypb.Type_INT64, []byte(fmt.Sprintf("%v", 0))),
+		}
+		qr.Rows = append(qr.Rows, row)
+	}
+	return qr, nil
+}
+
+// handleShowStatus used to handle the query "SHOW STATUS".
+func (spanner *Spanner) handleShowStatus(session *driver.Session, query string, node sqlparser.Statement) (*sqltypes.Result, error) { + var varname string + log := spanner.log + scatter := spanner.scatter + + qr := &sqltypes.Result{} + qr.Fields = []*querypb.Field{ + {Name: "Variable_name", Type: querypb.Type_VARCHAR}, + {Name: "Value", Type: querypb.Type_VARCHAR}, + } + + // 1. radon_rate row. + varname = "radon_rate" + rate := scatter.QueryRates() + qr.Rows = append(qr.Rows, []sqltypes.Value{ + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte(varname)), + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte(rate.String())), + }) + + // 2. radon_config row. + var confJSON []byte + varname = "radon_config" + type confShow struct { + MaxConnections int `json:"max-connections"` + MaxResultSize int `json:"max-result-size"` + DDLTimeout int `json:"ddl-timeout"` + QueryTimeout int `json:"query-timeout"` + TwopcEnable bool `json:"twopc-enable"` + AllowIP []string `json:"allow-ip"` + AuditMode string `json:"audit-log-mode"` + ReadOnly bool `json:"readonly"` + Throttle int `json:"throttle"` + } + conf := confShow{ + MaxConnections: spanner.conf.Proxy.MaxConnections, + MaxResultSize: spanner.conf.Proxy.MaxResultSize, + DDLTimeout: spanner.conf.Proxy.DDLTimeout, + QueryTimeout: spanner.conf.Proxy.QueryTimeout, + TwopcEnable: spanner.conf.Proxy.TwopcEnable, + AllowIP: spanner.conf.Proxy.IPS, + AuditMode: spanner.conf.Audit.Mode, + ReadOnly: spanner.readonly.Get(), + Throttle: spanner.throttle.Limits(), + } + if b, err := json.Marshal(conf); err != nil { + confJSON = []byte(err.Error()) + } else { + confJSON = b + } + qr.Rows = append(qr.Rows, []sqltypes.Value{ + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte(varname)), + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte(confJSON)), + }) + + // 3. radon_counter row. 
+ varname = "radon_transaction" + txnCounter := scatter.TxnCounters() + qr.Rows = append(qr.Rows, []sqltypes.Value{ + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte(varname)), + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte(txnCounter.String())), + }) + + // 4. relay info row. + bin := spanner.binlog + backupRelay := spanner.backupRelay + type relayStatus struct { + Status bool `json:"status"` + MaxWorkers int32 `json:"max-workers"` + ParallelWorkers int32 `json:"parallel-workers"` + SecondBehinds int64 `json:"second-behinds"` + RelayBinlog string `json:"relay-binlog"` + RelayGTID int64 `json:"relay-gtid"` + Rates string `json:"rates"` + } + relay := &relayStatus{ + Status: backupRelay.RelayStatus(), + MaxWorkers: backupRelay.MaxWorkers(), + ParallelWorkers: backupRelay.ParallelWorkers(), + SecondBehinds: (bin.LastGTID() - backupRelay.RelayGTID()) / int64(time.Second), + RelayBinlog: backupRelay.RelayBinlog(), + RelayGTID: backupRelay.RelayGTID(), + Rates: backupRelay.RelayRates(), + } + varname = "radon_relay" + var relayStatusJSON []byte + if b, err := json.Marshal(relay); err != nil { + relayStatusJSON = []byte(err.Error()) + } else { + relayStatusJSON = b + } + qr.Rows = append(qr.Rows, []sqltypes.Value{ + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte(varname)), + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte(relayStatusJSON)), + }) + + // 5. radon_backend_pool row. + var poolJSON []byte + varname = "radon_backendpool" + type poolShow struct { + Pools []string + } + be := poolShow{} + poolz := scatter.PoolClone() + for _, v := range poolz { + be.Pools = append(be.Pools, v.JSON()) + } + + // backup node. 
+ if scatter.HasBackup() { + be.Pools = append(be.Pools, scatter.BackupPool().JSON()) + } + sort.Strings(be.Pools) + if b, err := json.MarshalIndent(be, "", "\t\t\t"); err != nil { + poolJSON = []byte(err.Error()) + } else { + poolJSON = b + } + qr.Rows = append(qr.Rows, []sqltypes.Value{ + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte(varname)), + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte(poolJSON)), + }) + + // 6. backends row. + var backendsJSON []byte + varname = "radon_backend" + type backendShow struct { + Backends []string + } + bs := backendShow{} + + backShowFunc := func(backend string, qr *sqltypes.Result) { + tables := "0" + datasize := "0MB" + + if len(qr.Rows) > 0 { + tables = string(qr.Rows[0][0].Raw()) + if string(qr.Rows[0][1].Raw()) != "" { + datasize = string(qr.Rows[0][1].Raw()) + "MB" + } + } + buff := bytes.NewBuffer(make([]byte, 0, 256)) + fmt.Fprintf(buff, `{"name": "%s","tables": "%s", "datasize":"%s"}`, backend, tables, datasize) + bs.Backends = append(bs.Backends, buff.String()) + } + + sql := "select count(0), round((sum(data_length) + sum(index_length)) / 1024/ 1024, 0) from information_schema.TABLES where table_schema not in ('sys', 'information_schema', 'mysql', 'performance_schema')" + backends := spanner.scatter.Backends() + for _, backend := range backends { + qr, err := spanner.ExecuteOnThisBackend(backend, sql) + if err != nil { + log.Error("proxy.show.execute.on.this.backend[%x].error:%+v", backend, err) + } else { + backShowFunc(backend, qr) + } + } + + // backup node + if scatter.HasBackup() { + qr, err := spanner.ExecuteOnBackup("information_schema", sql) + if err != nil { + log.Error("proxy.show.execute.on.backend.error:%+v", err) + } else { + backShowFunc(scatter.BackupBackend(), qr) + } + } + + sort.Strings(bs.Backends) + if b, err := json.MarshalIndent(bs, "", "\t\t\t"); err != nil { + backendsJSON = []byte(err.Error()) + } else { + backendsJSON = b + } + qr.Rows = append(qr.Rows, []sqltypes.Value{ + 
sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte(varname)), + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte(backendsJSON)), + }) + + return qr, nil +} + +// handleShowQueryz used to handle the query "SHOW QUERYZ". +func (spanner *Spanner) handleShowQueryz(session *driver.Session, query string, node sqlparser.Statement) (*sqltypes.Result, error) { + qr := &sqltypes.Result{} + qr.Fields = []*querypb.Field{ + {Name: "ConnID", Type: querypb.Type_INT64}, + {Name: "Host", Type: querypb.Type_VARCHAR}, + {Name: "Start", Type: querypb.Type_VARCHAR}, + {Name: "Duration", Type: querypb.Type_INT32}, + {Name: "Query", Type: querypb.Type_VARCHAR}, + } + rows := spanner.scatter.Queryz().GetQueryzRows() + for _, row := range rows { + row := []sqltypes.Value{ + sqltypes.MakeTrusted(querypb.Type_INT64, []byte(fmt.Sprintf("%v", uint64(row.ConnID)))), + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte(row.Address)), + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte(row.Start.Format("20060102150405.000"))), + sqltypes.MakeTrusted(querypb.Type_INT32, []byte(fmt.Sprintf("%v", row.Duration))), + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte(row.Query)), + } + qr.Rows = append(qr.Rows, row) + } + return qr, nil +} + +// handleShowTxnz used to handle the query "SHOW TXNZ". 
+func (spanner *Spanner) handleShowTxnz(session *driver.Session, query string, node sqlparser.Statement) (*sqltypes.Result, error) { + qr := &sqltypes.Result{} + qr.Fields = []*querypb.Field{ + {Name: "TxnID", Type: querypb.Type_INT64}, + {Name: "Start", Type: querypb.Type_VARCHAR}, + {Name: "Duration", Type: querypb.Type_INT32}, + {Name: "XaState", Type: querypb.Type_VARCHAR}, + {Name: "TxnState", Type: querypb.Type_VARCHAR}, + } + + rows := spanner.scatter.Txnz().GetTxnzRows() + for _, row := range rows { + row := []sqltypes.Value{ + sqltypes.MakeTrusted(querypb.Type_INT64, []byte(fmt.Sprintf("%v", uint64(row.TxnID)))), + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte(row.Start.Format("20060102150405.000"))), + sqltypes.MakeTrusted(querypb.Type_INT32, []byte(fmt.Sprintf("%v", row.Duration))), + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte(row.XaState)), + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte(row.State)), + } + qr.Rows = append(qr.Rows, row) + } + return qr, nil +} + +func (spanner *Spanner) handleShowVersions(session *driver.Session, query string, node sqlparser.Statement) (*sqltypes.Result, error) { + qr := &sqltypes.Result{} + qr.Fields = []*querypb.Field{ + {Name: "Versions", Type: querypb.Type_VARCHAR}, + } + + build := build.GetInfo() + row := []sqltypes.Value{ + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte(fmt.Sprintf("radon:%+v", build))), + } + qr.Rows = append(qr.Rows, row) + return qr, nil +} + +func (spanner *Spanner) handleJDBCShows(session *driver.Session, query string, node sqlparser.Statement) (*sqltypes.Result, error) { + return spanner.ExecuteSingle(query) +} diff --git a/src/proxy/show_test.go b/src/proxy/show_test.go new file mode 100644 index 00000000..604a0bc1 --- /dev/null +++ b/src/proxy/show_test.go @@ -0,0 +1,407 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. 
+ * + */ + +package proxy + +import ( + "fmt" + "strings" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/xelabs/go-mysqlstack/driver" + querypb "github.com/xelabs/go-mysqlstack/sqlparser/depends/query" + "github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes" + "github.com/xelabs/go-mysqlstack/xlog" +) + +func TestProxyShowDatabases(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedbs, proxy, cleanup := MockProxy(log) + defer cleanup() + address := proxy.Address() + + // fakedbs. + { + fakedbs.AddQueryPattern("use .*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("show databases", &sqltypes.Result{}) + } + + // show databases. + { + client, err := driver.NewConn("mock", "mock", address, "test", "utf8") + assert.Nil(t, err) + defer client.Close() + query := "show databases" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } +} + +func TestProxyShowEngines(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedbs, proxy, cleanup := MockProxy(log) + defer cleanup() + address := proxy.Address() + + // fakedbs. + { + fakedbs.AddQueryPattern("use .*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("show engines", &sqltypes.Result{}) + } + + // show databases. + { + client, err := driver.NewConn("mock", "mock", address, "test", "utf8") + assert.Nil(t, err) + defer client.Close() + query := "show engines" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } +} + +func TestProxyShowCreateDatabase(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedbs, proxy, cleanup := MockProxy(log) + defer cleanup() + address := proxy.Address() + + // fakedbs. + { + fakedbs.AddQueryPattern("use .*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("show create database xx", &sqltypes.Result{}) + } + + // show databases. 
+ { + client, err := driver.NewConn("mock", "mock", address, "test", "utf8") + assert.Nil(t, err) + defer client.Close() + query := "show create database xx" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } +} + +func TestProxyShowTables(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedbs, proxy, cleanup := MockProxy(log) + defer cleanup() + address := proxy.Address() + + // fakedbs. + { + fakedbs.AddQueryPattern("use .*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("show .*", &sqltypes.Result{}) + } + + // show tables. + { + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + defer client.Close() + query := "show tables from test" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } + + // show tables error with null database. + { + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + defer client.Close() + query := "show tables" + _, err = client.FetchAll(query, -1) + assert.NotNil(t, err) + } +} + +func TestProxyShowCreateTable(t *testing.T) { + r1 := &sqltypes.Result{ + Fields: []*querypb.Field{ + { + Name: "table", + Type: querypb.Type_VARCHAR, + }, + { + Name: "create table", + Type: querypb.Type_VARCHAR, + }, + }, + Rows: [][]sqltypes.Value{ + { + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte("t1_0000")), + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte("show create table t1_0000")), + }, + }, + } + + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedbs, proxy, cleanup := MockProxy(log) + defer cleanup() + address := proxy.Address() + + // fakedbs. + { + fakedbs.AddQueryPattern("use .*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("create .*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("show create .*", r1) + } + + // create test table. 
+ { + client, err := driver.NewConn("mock", "mock", address, "test", "utf8") + assert.Nil(t, err) + query := "create table t1(id int, b int) partition by hash(id)" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + client.Quit() + } + + // show create table. + { + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + defer client.Close() + query := "show create table test.t1" + qr, err := client.FetchAll(query, -1) + assert.Nil(t, err) + want := "[t1 show create table t1]" + got := fmt.Sprintf("%+v", qr.Rows[0]) + assert.Equal(t, want, got) + } +} + +func TestProxyShowProcesslist(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedbs, proxy, scleanup := MockProxy(log) + defer scleanup() + address := proxy.Address() + + // fakedbs. + { + fakedbs.AddQueryPattern("use .*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("create table .*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("select * .*", &sqltypes.Result{}) + fakedbs.AddQueryDelay("select * from test.t1_0002", &sqltypes.Result{}, 3000) + fakedbs.AddQueryDelay("select * from test.t1_0004", &sqltypes.Result{}, 3000) + } + + // create test table. + { + client, err := driver.NewConn("mock", "mock", address, "test", "utf8") + assert.Nil(t, err) + query := "create table t1(id int, b int) partition by hash(id)" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + client.Quit() + } + + var wg sync.WaitGroup + var clients []driver.Conn + nums := 10 + // long query. + { + for i := 0; i < nums; i++ { + client, err := driver.NewConn("mock", "mock", address, "test", "utf8") + assert.Nil(t, err) + wg.Add(1) + go func(c driver.Conn) { + defer wg.Done() + query := "select * from t1" + _, err = client.FetchAll(query, -1) + }(client) + clients = append(clients, client) + } + + client, err := driver.NewConn("mock", "mock", address, "test", "utf8") + assert.Nil(t, err) + clients = append(clients, client) + _ = clients + } + + // show processlist. 
+ { + time.Sleep(time.Second) + show, err := driver.NewConn("mock", "mock", address, "test", "utf8") + assert.Nil(t, err) + _, err = show.FetchAll("show processlist", -1) + assert.Nil(t, err) + } + + // show queryz. + { + show, err := driver.NewConn("mock", "mock", address, "test", "utf8") + assert.Nil(t, err) + qr, err := show.FetchAll("show queryz", -1) + assert.Nil(t, err) + log.Info("%+v", qr.Rows) + } + + // show txnz. + { + show, err := driver.NewConn("mock", "mock", address, "test", "utf8") + assert.Nil(t, err) + qr, err := show.FetchAll("show txnz", -1) + assert.Nil(t, err) + log.Info("%+v", qr.Rows) + } + wg.Wait() +} + +func TestProxyShowStatus(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedbs, proxy, cleanup := MockProxy(log) + defer cleanup() + address := proxy.Address() + + // fakedbs. + { + fakedbs.AddQueryPattern("use .*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("create table .*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("select * .*", &sqltypes.Result{}) + } + + // create test table. + { + client, err := driver.NewConn("mock", "mock", address, "test", "utf8") + assert.Nil(t, err) + query := "create table t1(id int, b int) partition by hash(id)" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + client.Quit() + } + + // show status. + { + show, err := driver.NewConn("mock", "mock", address, "test", "utf8") + assert.Nil(t, err) + qr, err := show.FetchAll("show status", -1) + assert.Nil(t, err) + want := `{"max-connections":1024,"max-result-size":1073741824,"ddl-timeout":36000000,"query-timeout":300000,"twopc-enable":false,"allow-ip":null,"audit-log-mode":"N","readonly":false,"throttle":0}` + got := string(qr.Rows[1][1].Raw()) + assert.Equal(t, want, got) + } +} + +func TestProxyShowStatusWithBackup(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedbs, proxy, cleanup := MockProxyWithBackup(log) + defer cleanup() + address := proxy.Address() + + // fakedbs. 
+ { + fakedbs.AddQueryPattern("use .*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("create table .*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("select * .*", &sqltypes.Result{}) + } + + // create test table. + { + client, err := driver.NewConn("mock", "mock", address, "test", "utf8") + assert.Nil(t, err) + query := "create table t1(id int, b int) partition by hash(id)" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + client.Quit() + } + + // show status. + { + show, err := driver.NewConn("mock", "mock", address, "test", "utf8") + assert.Nil(t, err) + qr, err := show.FetchAll("show status", -1) + assert.Nil(t, err) + want := `{"max-connections":1024,"max-result-size":1073741824,"ddl-timeout":36000000,"query-timeout":300000,"twopc-enable":false,"allow-ip":null,"audit-log-mode":"N","readonly":false,"throttle":0}` + got := string(qr.Rows[1][1].Raw()) + assert.Equal(t, want, got) + } +} + +func TestProxyShowVersions(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedbs, proxy, cleanup := MockProxy(log) + defer cleanup() + address := proxy.Address() + + // fakedbs. + { + fakedbs.AddQueryPattern("use .*", &sqltypes.Result{}) + } + + // show versions. + { + show, err := driver.NewConn("mock", "mock", address, "test", "utf8") + assert.Nil(t, err) + qr, err := show.FetchAll("show versions", -1) + assert.Nil(t, err) + got := string(qr.Rows[0][0].Raw()) + assert.True(t, strings.Contains(got, "GoVersion")) + } +} + +func TestProxyShowWarnings(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedbs, proxy, cleanup := MockProxy(log) + defer cleanup() + address := proxy.Address() + + querys := []string{"show warnings", "show variables"} + // fakedbs. + { + fakedbs.AddQueryPattern("use .*", &sqltypes.Result{}) + for _, query := range querys { + fakedbs.AddQuery(query, &sqltypes.Result{}) + } + } + + // show versions. 
+ { + for _, query := range querys { + show, err := driver.NewConn("mock", "mock", address, "test", "utf8") + assert.Nil(t, err) + qr, err := show.FetchAll(query, -1) + assert.Nil(t, err) + + want := &sqltypes.Result{} + assert.Equal(t, want, qr) + } + } +} + +func TestProxyShowUnsupports(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedbs, proxy, cleanup := MockProxy(log) + defer cleanup() + address := proxy.Address() + + // fakedbs. + { + fakedbs.AddQueryPattern("use .*", &sqltypes.Result{}) + } + querys := []string{ + "show test", + } + + // show test. + { + show, err := driver.NewConn("mock", "mock", address, "test", "utf8") + assert.Nil(t, err) + for _, query := range querys { + _, err = show.FetchAll(query, -1) + assert.NotNil(t, err) + want := fmt.Sprintf("unsupported.query:%s (errno 1105) (sqlstate HY000)", query) + got := err.Error() + assert.Equal(t, want, got) + } + } +} diff --git a/src/proxy/spanner.go b/src/proxy/spanner.go new file mode 100644 index 00000000..b845d624 --- /dev/null +++ b/src/proxy/spanner.go @@ -0,0 +1,110 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package proxy + +import ( + "audit" + "backend" + "binlog" + "config" + "router" + "xbase" + "xbase/sync2" + + "github.com/xelabs/go-mysqlstack/driver" + "github.com/xelabs/go-mysqlstack/xlog" +) + +// Spanner tuple. +type Spanner struct { + log *xlog.Log + audit *audit.Audit + conf *config.Config + router *router.Router + scatter *backend.Scatter + binlog *binlog.Binlog + sessions *Sessions + iptable *IPTable + throttle *xbase.Throttle + backupRelay *BackupRelay + diskChecker *DiskCheck + readonly sync2.AtomicBool +} + +// NewSpanner creates a new spanner. 
// NewSpanner creates a new spanner.
// The spanner is the SQL dispatch core of the proxy; it holds the router,
// the backend scatter, the binlog and the session registry. backupRelay
// and diskChecker are not created here — they are built in Init.
func NewSpanner(log *xlog.Log, conf *config.Config,
	iptable *IPTable, router *router.Router, scatter *backend.Scatter, binlog *binlog.Binlog, sessions *Sessions, audit *audit.Audit, throttle *xbase.Throttle) *Spanner {
	return &Spanner{
		log:      log,
		conf:     conf,
		audit:    audit,
		iptable:  iptable,
		router:   router,
		scatter:  scatter,
		binlog:   binlog,
		sessions: sessions,
		throttle: throttle,
	}
}

// Init used to init the async worker.
// It creates and starts the backup relay and the disk checker; on any
// failure the spanner is left partially initialized and the error is
// returned to the caller.
func (spanner *Spanner) Init() error {
	log := spanner.log
	conf := spanner.conf

	backupRelay := NewBackupRelay(log, conf.Binlog, spanner)
	if err := backupRelay.Init(); err != nil {
		return err
	}
	spanner.backupRelay = backupRelay

	diskChecker := NewDiskCheck(log, conf.Binlog.LogDir)
	if err := diskChecker.Init(); err != nil {
		return err
	}
	spanner.diskChecker = diskChecker
	return nil
}

// Close used to close spanner.
// Must only be called after a successful Init: it dereferences
// backupRelay and diskChecker, which are nil before Init.
func (spanner *Spanner) Close() error {
	spanner.backupRelay.Close()
	spanner.diskChecker.Close()
	spanner.log.Info("spanner.closed...")
	return nil
}

// ReadOnly returns the readonly or not.
func (spanner *Spanner) ReadOnly() bool {
	return spanner.readonly.Get()
}

// SetReadOnly used to set readonly.
func (spanner *Spanner) SetReadOnly(val bool) {
	spanner.readonly.Set(val)
}

// NewSession impl: registers a newly accepted client session.
func (spanner *Spanner) NewSession(s *driver.Session) {
	spanner.sessions.Add(s)
}

// SessionClosed impl: unregisters a closed client session.
func (spanner *Spanner) SessionClosed(s *driver.Session) {
	spanner.sessions.Remove(s)
}

// BackupRelay returns BackupRelay tuple.
func (spanner *Spanner) BackupRelay() *BackupRelay {
	return spanner.backupRelay
}

// isTwoPC reports whether two-phase commit is enabled in the proxy config.
func (spanner *Spanner) isTwoPC() bool {
	return spanner.conf.Proxy.TwopcEnable
}
+ * + */ + +package proxy + +import ( + "github.com/xelabs/go-mysqlstack/driver" + + "github.com/xelabs/go-mysqlstack/sqlparser" + "github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes" +) + +// handleUpdate used to handle the update command. +func (spanner *Spanner) handleUpdate(session *driver.Session, query string, node sqlparser.Statement) (*sqltypes.Result, error) { + database := session.Schema() + return spanner.Execute(session, database, query, node) +} diff --git a/src/proxy/update_test.go b/src/proxy/update_test.go new file mode 100644 index 00000000..8d2a2d21 --- /dev/null +++ b/src/proxy/update_test.go @@ -0,0 +1,51 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package proxy + +import ( + "fakedb" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/xelabs/go-mysqlstack/driver" + "github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes" + "github.com/xelabs/go-mysqlstack/xlog" +) + +func TestProxyUpdate(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedbs, proxy, cleanup := MockProxy(log) + defer cleanup() + address := proxy.Address() + + // fakedbs. + { + fakedbs.AddQueryPattern("create table .*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("update .*", &sqltypes.Result{}) + } + + // create test table. + { + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "create table test.t1(id int, b int) partition by hash(id)" + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } + + // Delete. 
+ { + client, err := driver.NewConn("mock", "mock", address, "", "utf8") + assert.Nil(t, err) + query := "update test.t1 set b =1 where id > 5" + fakedbs.AddQuery(query, fakedb.Result3) + _, err = client.FetchAll(query, -1) + assert.Nil(t, err) + } +} diff --git a/src/proxy/usedb.go b/src/proxy/usedb.go new file mode 100644 index 00000000..f1640ab4 --- /dev/null +++ b/src/proxy/usedb.go @@ -0,0 +1,34 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package proxy + +import ( + "github.com/xelabs/go-mysqlstack/driver" + + "github.com/xelabs/go-mysqlstack/sqlparser" + "github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes" +) + +// handleUseDB used to handle the UseDB command. +// Here, we will send a fake query 'SELECT 1' to the backend and check the 'USE DB'. +func (spanner *Spanner) handleUseDB(session *driver.Session, query string, node sqlparser.Statement) (*sqltypes.Result, error) { + usedb := node.(*sqlparser.Use) + db := usedb.DBName.String() + router := spanner.router + // Check the database ACL. + if err := router.DatabaseACL(db); err != nil { + return nil, err + } + + if _, err := spanner.ExecuteSingle(query); err != nil { + return nil, err + } + session.SetSchema(db) + return &sqltypes.Result{}, nil +} diff --git a/src/proxy/usedb_test.go b/src/proxy/usedb_test.go new file mode 100644 index 00000000..92d0e4ec --- /dev/null +++ b/src/proxy/usedb_test.go @@ -0,0 +1,67 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package proxy + +import ( + "errors" + "fakedb" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/xelabs/go-mysqlstack/driver" + "github.com/xelabs/go-mysqlstack/xlog" +) + +func TestProxyUseDB(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + fakedbs, proxy, cleanup := MockProxy(log) + defer cleanup() + + // Client. 
+ client, err := driver.NewConn("mock", "mock", proxy.Address(), "", "utf8") + assert.Nil(t, err) + + // Use test. + { + query := "use test" + fakedbs.AddQuery(query, fakedb.Result3) + _, err := client.FetchAll(query, -1) + assert.Nil(t, err) + + want := 1 + got := fakedbs.GetQueryCalledNum(query) + assert.Equal(t, want, got) + } + + // Use mysql. + { + query := "use mysql" + fakedbs.AddQuery(query, fakedb.Result3) + _, err := client.FetchAll(query, -1) + want := "Access denied; lacking privileges for database mysql (errno 1227) (sqlstate 42000)" + got := err.Error() + assert.Equal(t, want, got) + } + + // Use test error. + { + query := "use test" + fakedbs.AddQueryError(query, errors.New("mock use test error")) + _, err := client.FetchAll(query, -1) + want := "mock use test error (errno 1105) (sqlstate HY000)" + got := err.Error() + assert.Equal(t, want, got) + } + + // test db not exists. + { + _, err := driver.NewConn("mock", "mock", proxy.Address(), "xx", "utf8") + assert.NotNil(t, err) + } +} diff --git a/src/radon/radon.go b/src/radon/radon.go new file mode 100644 index 00000000..09092571 --- /dev/null +++ b/src/radon/radon.go @@ -0,0 +1,76 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. 
+ * + */ + +package main + +import ( + "build" + "config" + "ctl" + "flag" + "fmt" + "os" + "os/signal" + "proxy" + "runtime" + "syscall" + + "github.com/xelabs/go-mysqlstack/xlog" +) + +var ( + flag_conf string +) + +func init() { + flag.StringVar(&flag_conf, "c", "", "radon config file") + flag.StringVar(&flag_conf, "config", "", "radon config file") +} + +func usage() { + fmt.Println("Usage: " + os.Args[0] + " [-c|--config] ") +} + +func main() { + runtime.GOMAXPROCS(runtime.NumCPU()) + log := xlog.NewStdLog(xlog.Level(xlog.DEBUG)) + + build := build.GetInfo() + fmt.Printf("radon:[%+v]\n", build) + + // config + flag.Usage = func() { usage() } + flag.Parse() + if flag_conf == "" { + usage() + os.Exit(0) + } + + conf, err := config.LoadConfig(flag_conf) + if err != nil { + log.Panic("radon.load.config.error[%v]", err) + } + log.SetLevel(conf.Log.Level) + + // Proxy. + proxy := proxy.NewProxy(log, flag_conf, conf) + proxy.Start() + + // Admin portal. + admin := ctl.NewAdmin(log, proxy) + admin.Start() + + // Handle SIGINT and SIGTERM. + ch := make(chan os.Signal) + signal.Notify(ch, syscall.SIGINT, syscall.SIGTERM) + log.Info("radon.signal:%+v", <-ch) + + // Stop the proxy and httpserver. + proxy.Stop() + admin.Stop() +} diff --git a/src/router/acl.go b/src/router/acl.go new file mode 100644 index 00000000..d598e869 --- /dev/null +++ b/src/router/acl.go @@ -0,0 +1,40 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package router + +import ( + "strings" +) + +var ( + systemDatabases = []string{"SYS", "MYSQL", "INFORMATION_SCHEMA", "PERFORMANCE_SCHEMA"} +) + +// DatabaseACL tuple. +type DatabaseACL struct { + acls map[string]string +} + +// NewDatabaseACL creates new database acl. +func NewDatabaseACL() *DatabaseACL { + acls := make(map[string]string) + for _, db := range systemDatabases { + acls[db] = db + } + return &DatabaseACL{acls} +} + +// Allow used to check to see if the db is system database. 
+func (acl *DatabaseACL) Allow(db string) bool { + db = strings.ToUpper(db) + if _, ok := acl.acls[db]; !ok { + return true + } + return false +} diff --git a/src/router/api.go b/src/router/api.go new file mode 100644 index 00000000..3fc9274d --- /dev/null +++ b/src/router/api.go @@ -0,0 +1,144 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package router + +import ( + "config" + + "github.com/pkg/errors" +) + +// RDatabase tuple. +type RDatabase struct { + DB string + Tables []*Table +} + +// Rule tuple. +type Rule struct { + Schemas []RDatabase +} + +// Rules returns router's schemas. +func (r *Router) Rules() *Rule { + r.mu.RLock() + defer r.mu.RUnlock() + rule := &Rule{} + + for key, schema := range r.Schemas { + rdb := RDatabase{DB: key} + for _, v := range schema.Tables { + rdb.Tables = append(rdb.Tables, v) + } + rule.Schemas = append(rule.Schemas, rdb) + } + return rule +} + +// PartitionRuleShift used to shift a rule from backend to another. +// The processes as: +// 1. change the backend in memory. +// 2. flush the table config to disk. +// 3. reload the config to memory. +// Note: +// If the reload fails, panic it since the config is in chaos. 
+func (r *Router) PartitionRuleShift(fromBackend string, toBackend string, database string, partitionTable string) error { + log := r.log + + log.Warning("router.partition.rule.shift.from[%s].to[%s].database[%s].partitionTable[%s]", fromBackend, toBackend, database, partitionTable) + table, err := r.changeTheRuleBackend(fromBackend, toBackend, database, partitionTable) + if err != nil { + log.Error("router.partition.rule.shift.changeTheRuleBackend.error:%+v", err) + return err + } + log.Warning("router.partition.rule.shift.change.the.rule.done") + + log.Warning("router.partition.rule.shift.RefreshTable.prepare") + if err := r.RefreshTable(database, table); err != nil { + log.Panic("router.partition.rule.shift.RefreshTable.error:%+v", err) + return err + } + log.Warning("router.partition.rule.shift.RefreshTable.done") + return nil +} + +// +// 1. Find the table config and partition config. +// 2. Change the backend. +// 3. Write tableconfig to disk. +func (r *Router) changeTheRuleBackend(fromBackend string, toBackend string, database string, partitionTable string) (string, error) { + var table string + var tableConfig *config.TableConfig + var partitionConfig *config.PartitionConfig + + log := r.log + r.mu.RLock() + defer r.mu.RUnlock() + + if fromBackend == toBackend { + return "", errors.Errorf("router.rule.change.from[%s].cant.equal.to[%s]", fromBackend, toBackend) + } + + schema, ok := r.Schemas[database] + if !ok { + return "", errors.Errorf("router.rule.change.cant.found.database:%s", database) + } + + // 1. Find the table config. 
+ found := false + for _, v := range schema.Tables { + if found { + break + } + for _, partition := range v.TableConfig.Partitions { + if (partition.Backend == fromBackend) && (partition.Table == partitionTable) { + log.Warning("router.rule[%s:%s].change.from[%s].to[%s].found:%+v", database, partitionTable, fromBackend, toBackend, partition) + + found = true + table = v.Name + tableConfig = v.TableConfig + partitionConfig = partition + break + } + } + } + if !found { + return "", errors.Errorf("router.rule.change.cant.found.backend[%s]+table:[%s]", fromBackend, partitionTable) + } + + // 2. Change the backend to to-backend. + partitionConfig.Backend = toBackend + + // 3. Flush table config to disk. + if err := r.writeFrmData(database, table, tableConfig); err != nil { + // Memory config reset. + partitionConfig.Backend = fromBackend + return "", err + } + + // 4. Update the version. + if err := config.UpdateVersion(r.metadir); err != nil { + log.Panicf("change.the.rule.table.update.version.error:%v", err) + return "", err + } + return table, nil +} + +// ReLoad used to re-load the config files from disk to cache. +func (r *Router) ReLoad() error { + log := r.log + + // Clear the cache. + log.Warning("router.reload.clear...") + r.clear() + + // ReLoad the meta from disk. + log.Warning("router.reload.load.meta.from.disk...") + return r.LoadConfig() +} diff --git a/src/router/api_test.go b/src/router/api_test.go new file mode 100644 index 00000000..11a3faaa --- /dev/null +++ b/src/router/api_test.go @@ -0,0 +1,195 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. 
+ * + */ + +package router + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/xelabs/go-mysqlstack/xlog" +) + +func TestApiRules(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + router, cleanup := MockNewRouter(log) + defer cleanup() + + // add router of sbtest.A + { + err := router.add("sbtest", MockTableAConfig()) + assert.Nil(t, err) + + tConf, err := router.TableConfig("sbtest", "A") + assert.Nil(t, err) + assert.NotNil(t, tConf) + } + rules := router.Rules() + want := "sbtest" + got := rules.Schemas[0].DB + assert.Equal(t, want, got) +} + +func TestApiPartitionRuleShift(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + router, cleanup := MockNewRouter(log) + defer cleanup() + + // add router of sbtest.A + { + err := router.add("sbtest", MockTableAConfig()) + assert.Nil(t, err) + + tConf, err := router.TableConfig("sbtest", "A") + assert.Nil(t, err) + assert.NotNil(t, tConf) + } + + // Shift backend from backend8 to backend88 for sbtest/A8. + { + from := "backend8" + to := "backend88" + database := "sbtest" + table := "A8" + err := router.PartitionRuleShift(from, to, database, table) + assert.Nil(t, err) + want := `{ + "Schemas": { + "sbtest": { + "DB": "sbtest", + "Tables": { + "A": { + "Name": "A", + "ShardKey": "id", + "Partition": { + "Segments": [ + { + "Table": "A0", + "Backend": "backend0", + "Range": { + "Start": 0, + "End": 2 + } + }, + { + "Table": "A2", + "Backend": "backend2", + "Range": { + "Start": 2, + "End": 4 + } + }, + { + "Table": "A4", + "Backend": "backend4", + "Range": { + "Start": 4, + "End": 8 + } + }, + { + "Table": "A8", + "Backend": "backend88", + "Range": { + "Start": 8, + "End": 4096 + } + } + ] + } + } + } + } + } +}` + got := router.JSON() + assert.Equal(t, want, got) + } + + // Drop. 
+ { + err := router.DropDatabase("sbtest") + assert.Nil(t, err) + } +} + +func TestApiPartitionRuleShiftErrors(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + router, cleanup := MockNewRouter(log) + defer cleanup() + + // add router of sbtest.A + { + err := router.add("sbtest", MockTableAConfig()) + assert.Nil(t, err) + + tConf, err := router.TableConfig("sbtest", "A") + assert.Nil(t, err) + assert.NotNil(t, tConf) + } + + // from == to. + { + from := "backend8" + to := "backend8" + database := "sbtest" + table := "A8" + err := router.PartitionRuleShift(from, to, database, table) + want := "router.rule.change.from[backend8].cant.equal.to[backend8]" + got := err.Error() + assert.Equal(t, want, got) + } + + // database can't found. + { + from := "backend8" + to := "backend88" + database := "sbtestx" + table := "A8" + err := router.PartitionRuleShift(from, to, database, table) + want := "router.rule.change.cant.found.database:sbtestx" + got := err.Error() + assert.Equal(t, want, got) + } + + // table can't found. 
+ { + from := "backend8" + to := "backend88" + database := "sbtest" + table := "A88" + err := router.PartitionRuleShift(from, to, database, table) + want := "router.rule.change.cant.found.backend[backend8]+table:[A88]" + got := err.Error() + assert.Equal(t, want, got) + } +} + +func TestApiReLoad(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + router, cleanup := MockNewRouter(log) + defer cleanup() + + // add router of sbtest.A + { + + backends := []string{"backend1", "backend2", "backend3"} + err := router.CreateTable("sbtest", "t1", "id", backends) + assert.Nil(t, err) + } + + for i := 0; i < 13; i++ { + err := router.ReLoad() + assert.Nil(t, err) + } + + rules := router.Rules() + want := "sbtest" + got := rules.Schemas[0].DB + assert.Equal(t, want, got) +} diff --git a/src/router/compute.go b/src/router/compute.go new file mode 100644 index 00000000..21087454 --- /dev/null +++ b/src/router/compute.go @@ -0,0 +1,71 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package router + +import ( + "config" + "fmt" + "sort" + + "github.com/pkg/errors" +) + +// HashUniform used to uniform the hash slots to backends. +func (r *Router) HashUniform(table, shardkey string, backends []string) (*config.TableConfig, error) { + if table == "" { + return nil, errors.New("table.cant.be.null") + } + if shardkey == "" { + return nil, errors.New("shard.key.cant.be.null") + } + + slots := r.conf.Slots + blocks := r.conf.Blocks + nums := len(backends) + if nums == 0 { + return nil, errors.New("router.compute.backends.is.null") + } + if nums >= slots { + return nil, errors.Errorf("router.compute.backends[%d].too.many:[max:%d]", nums, slots) + } + + // sort backends. 
+ sort.Strings(backends) + tableConf := &config.TableConfig{ + Name: table, + ShardType: "HASH", + ShardKey: shardkey, + Partitions: make([]*config.PartitionConfig, 0, 16), + } + + slotsPerShard := slots / nums + tablesPerShard := slotsPerShard / blocks + for s := 0; s < nums; s++ { + for i := 0; i < tablesPerShard; i++ { + step := s * slotsPerShard + min := i*blocks + step + max := (i+1)*blocks + step + if i == tablesPerShard-1 { + if s == nums-1 { + max = slots + } else { + max = step + slotsPerShard + } + } + name := s*tablesPerShard + i + partConf := &config.PartitionConfig{ + Table: fmt.Sprintf("%s_%04d", table, name), + Segment: fmt.Sprintf("%d-%d", min, max), + Backend: backends[s], + } + tableConf.Partitions = append(tableConf.Partitions, partConf) + } + } + return tableConf, nil +} diff --git a/src/router/compute_test.go b/src/router/compute_test.go new file mode 100644 index 00000000..4a848207 --- /dev/null +++ b/src/router/compute_test.go @@ -0,0 +1,241 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. 
+ * + */ + +package router + +import ( + "config" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/xelabs/go-mysqlstack/xlog" +) + +func TestRouterCompute(t *testing.T) { + datas := `{ + "name": "t1", + "shardtype": "HASH", + "shardkey": "id", + "partitions": [ + { + "table": "t1_0000", + "segment": "0-128", + "backend": "192.168.0.1" + }, + { + "table": "t1_0001", + "segment": "128-256", + "backend": "192.168.0.1" + }, + { + "table": "t1_0002", + "segment": "256-384", + "backend": "192.168.0.1" + }, + { + "table": "t1_0003", + "segment": "384-512", + "backend": "192.168.0.1" + }, + { + "table": "t1_0004", + "segment": "512-640", + "backend": "192.168.0.1" + }, + { + "table": "t1_0005", + "segment": "640-819", + "backend": "192.168.0.1" + }, + { + "table": "t1_0006", + "segment": "819-947", + "backend": "192.168.0.2" + }, + { + "table": "t1_0007", + "segment": "947-1075", + "backend": "192.168.0.2" + }, + { + "table": "t1_0008", + "segment": "1075-1203", + "backend": "192.168.0.2" + }, + { + "table": "t1_0009", + "segment": "1203-1331", + "backend": "192.168.0.2" + }, + { + "table": "t1_0010", + "segment": "1331-1459", + "backend": "192.168.0.2" + }, + { + "table": "t1_0011", + "segment": "1459-1638", + "backend": "192.168.0.2" + }, + { + "table": "t1_0012", + "segment": "1638-1766", + "backend": "192.168.0.3" + }, + { + "table": "t1_0013", + "segment": "1766-1894", + "backend": "192.168.0.3" + }, + { + "table": "t1_0014", + "segment": "1894-2022", + "backend": "192.168.0.3" + }, + { + "table": "t1_0015", + "segment": "2022-2150", + "backend": "192.168.0.3" + }, + { + "table": "t1_0016", + "segment": "2150-2278", + "backend": "192.168.0.3" + }, + { + "table": "t1_0017", + "segment": "2278-2457", + "backend": "192.168.0.3" + }, + { + "table": "t1_0018", + "segment": "2457-2585", + "backend": "192.168.0.4" + }, + { + "table": "t1_0019", + "segment": "2585-2713", + "backend": "192.168.0.4" + }, + { + "table": "t1_0020", + "segment": 
"2713-2841", + "backend": "192.168.0.4" + }, + { + "table": "t1_0021", + "segment": "2841-2969", + "backend": "192.168.0.4" + }, + { + "table": "t1_0022", + "segment": "2969-3097", + "backend": "192.168.0.4" + }, + { + "table": "t1_0023", + "segment": "3097-3276", + "backend": "192.168.0.4" + }, + { + "table": "t1_0024", + "segment": "3276-3404", + "backend": "192.168.0.5" + }, + { + "table": "t1_0025", + "segment": "3404-3532", + "backend": "192.168.0.5" + }, + { + "table": "t1_0026", + "segment": "3532-3660", + "backend": "192.168.0.5" + }, + { + "table": "t1_0027", + "segment": "3660-3788", + "backend": "192.168.0.5" + }, + { + "table": "t1_0028", + "segment": "3788-3916", + "backend": "192.168.0.5" + }, + { + "table": "t1_0029", + "segment": "3916-4096", + "backend": "192.168.0.5" + } + ] +}` + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + router, cleanup := MockNewRouter(log) + defer cleanup() + assert.NotNil(t, router) + + backends := []string{ + "192.168.0.1", + "192.168.0.2", + "192.168.0.3", + "192.168.0.4", + "192.168.0.5", + } + got, err := router.HashUniform("t1", "id", backends) + assert.Nil(t, err) + //config.WriteConfig("/tmp/c.json", got) + want, err := config.ReadTableConfig(datas) + assert.Nil(t, err) + assert.Equal(t, want, got) +} + +func TestRouterComputeHashError(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + router, cleanup := MockNewRouter(log) + defer cleanup() + // backends is NULL. + { + assert.NotNil(t, router) + backends := []string{} + _, err := router.HashUniform("t1", "id", backends) + assert.NotNil(t, err) + } + // backends is too manys. 
+ { + assert.NotNil(t, router) + backends := []string{} + for i := 0; i < router.conf.Slots; i++ { + backends = append(backends, fmt.Sprintf("%d", i)) + } + _, err := router.HashUniform("t1", "id", backends) + assert.NotNil(t, err) + } +} + +func TestRouterComputeHashError1(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + router, cleanup := MockNewRouter(log) + defer cleanup() + + // Table is null. + { + assert.NotNil(t, router) + backends := []string{"backend1"} + _, err := router.HashUniform("", "id", backends) + assert.NotNil(t, err) + } + + // Shardkey is null. + { + assert.NotNil(t, router) + backends := []string{"backend1"} + _, err := router.HashUniform("t1", "", backends) + assert.NotNil(t, err) + } +} diff --git a/src/router/frm.go b/src/router/frm.go new file mode 100644 index 00000000..b9914467 --- /dev/null +++ b/src/router/frm.go @@ -0,0 +1,256 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package router + +import ( + "config" + "fmt" + "io/ioutil" + "os" + "path" +) + +// writeFrmData used to write table's json schema to file. +// The file name is : [schema-dir]/[database]/[table].json. +// If the [schema-dir]/[database] directoryis not exists, we will create it first. +func (r *Router) writeFrmData(db string, table string, tconf *config.TableConfig) error { + log := r.log + dir := path.Join(r.metadir, db) + log.Warning("frm.write.data[db:%s, table:%s]", db, table) + // Create dir. + if _, err := os.Stat(dir); os.IsNotExist(err) { + if x := os.MkdirAll(dir, os.ModePerm); x != nil { + log.Error("frm.write.mkdir[%v].error:%v", dir, err) + return x + } + } + file := path.Join(dir, fmt.Sprintf("%s.json", table)) + if err := config.WriteConfig(file, tconf); err != nil { + log.Error("frm.write.to.file[%v].error:%v", file, err) + return err + } + return nil +} + +// readFrmData used to read json file to TableConfig. 
+func (r *Router) readFrmData(file string) (*config.TableConfig, error) { + log := r.log + data, err := ioutil.ReadFile(file) + if err != nil { + log.Error("frm.read.from.file[%v].error:%v", file, err) + return nil, err + } + conf, err := config.ReadTableConfig(string(data)) + if err != nil { + log.Error("frm.read.parse.json.file[%v].error:%v", file, err) + return nil, err + } + return conf, nil +} + +// removeFrmData used to remove table json file. +func (r *Router) removeFrmData(db string, table string) error { + log := r.log + dir := path.Join(r.metadir, db) + file := path.Join(dir, fmt.Sprintf("%s.json", table)) + log.Warning("frm.remove.file[%v].for.[db:%s, table:%s]", db, table, file) + return os.Remove(file) +} + +// loadTable used to add a table read from the json file. +func (r *Router) loadTableFromFile(db, file string) error { + log := r.log + log.Warning("frm.load.table.from.file:%v", file) + + conf, err := r.readFrmData(file) + if err != nil { + log.Error("frm.load.table.read.file[%v].error:%+v", file, err) + return err + } + if err := r.add(db, conf); err != nil { + log.Error("frm.load.table.add.router[%v].error:%+v", file, err) + return err + } + return nil +} + +// loadTable used to add a table read from the json file. +func (r *Router) loadTable(db string, table string) error { + log := r.log + log.Warning("frm.load.table[db:%s, table:%s]", db, table) + + dir := path.Join(r.metadir, db) + file := path.Join(dir, fmt.Sprintf("%s.json", table)) + return r.loadTableFromFile(db, file) +} + +// DropDatabase used to remove a database-schema from the schemas +// and remove all the table-schema files who belongs to this database. +func (r *Router) DropDatabase(db string) error { + r.mu.Lock() + // remove + delete(r.Schemas, db) + r.mu.Unlock() + + log := r.log + // Delete database dir. 
+ dir := path.Join(r.metadir, db) + log.Warning("frm.drop.database.file[%v]", dir) + if err := os.RemoveAll(dir); err != nil { + r.log.Error("frm.drop.database[%v].error:%v", dir, err) + return err + } + + if err := config.UpdateVersion(r.metadir); err != nil { + log.Panicf("frm.drop.database.update.version.error:%v", err) + return err + } + return nil +} + +// CreateTable used to add a table to router and flush the schema to disk. +// Lock. +func (r *Router) CreateTable(db, table, shardKey string, backends []string) error { + r.mu.Lock() + defer r.mu.Unlock() + + log := r.log + // Compute the shards config. + tableConf, err := r.HashUniform(table, shardKey, backends) + if err != nil { + log.Error("frm.create.table[%s.%s].compute.error:%v", db, table, err) + return err + } + // add config to router. + if err := r.add(db, tableConf); err != nil { + log.Error("frm.create.add.route.error:%v", err) + return err + } + if err := r.writeFrmData(db, table, tableConf); err != nil { + log.Error("frm.create.table[db:%v, table:%v].file.error:%+v", db, tableConf.Name, err) + return err + } + + if err := config.UpdateVersion(r.metadir); err != nil { + log.Panicf("frm.create.table.update.version.error:%v", err) + return err + } + return nil +} + +// AddForTest used to add table config for test. +func (r *Router) AddForTest(db string, confs ...*config.TableConfig) error { + r.mu.Lock() + defer r.mu.Unlock() + + log := r.log + // add config to router. + for _, conf := range confs { + if err := r.add(db, conf); err != nil { + log.Error("frm.for.test.addroute.error:%v", err) + return err + } + } + return nil +} + +// DropTable used to remove a table from router and remove the schema file from disk. 
func (r *Router) DropTable(db, table string) error {
	r.mu.Lock()
	defer r.mu.Unlock()

	log := r.log
	// Remove the in-memory route first.
	if err := r.remove(db, table); err != nil {
		log.Error("frm.drop.table[%s.%s].remove.route.error:%v", db, table, err)
		return err
	}
	// Then delete the on-disk [metadir]/[db]/[table].json file.
	if err := r.removeFrmData(db, table); err != nil {
		log.Error("frm.drop.table[%s.%s].remove.frmdata.error:%v", db, table, err)
		return err
	}

	// Bump the metadata version; panic on failure since the on-disk meta
	// is now inconsistent with memory.
	if err := config.UpdateVersion(r.metadir); err != nil {
		log.Panicf("frm.drop.table.update.version.error:%v", err)
		return err
	}
	return nil
}

// RefreshTable used to re-update the table from file.
// Lock: takes the router lock; callers must not already hold it.
func (r *Router) RefreshTable(db, table string) error {
	r.mu.Lock()
	defer r.mu.Unlock()

	log := r.log
	// Drop the cached route, then re-read it from the json file.
	if err := r.remove(db, table); err != nil {
		log.Error("frm.refresh.table[%s.%s].remove.route.error:%v", db, table, err)
		return err
	}
	if err := r.loadTable(db, table); err != nil {
		log.Error("frm.refresh.table[%s.%s].load.table.error:%v", db, table, err)
		return err
	}
	return nil
}

// LoadConfig used to load all schemas stored in metadir.
// Any IO error during directory or file reading is returned to the caller.
func (r *Router) LoadConfig() error {
	log := r.log
	r.mu.Lock()
	defer r.mu.Unlock()

	// Clear the router first.
	r.clear()

	// Check the schemadir, create it if not exists.
	if _, err := os.Stat(r.metadir); os.IsNotExist(err) {
		if x := os.MkdirAll(r.metadir, os.ModePerm); x != nil {
			log.Error("router.load.create.dir[%v].error:%v", r.metadir, x)
			return x
		}
		// Fresh metadir: nothing to load yet.
		return nil
	}

	// Map of database name -> json schema files found under [metadir]/[db]/.
	frms := make(map[string][]string)
	files, err := ioutil.ReadDir(r.metadir)
	if err != nil {
		log.Error("router.load.readdir[%v].error:%v", r.metadir, err)
		return err
	}
	for _, f := range files {
		if f.IsDir() {
			dbName := f.Name()
			jsons := []string{}
			subdir := path.Join(r.metadir, dbName)
			subFiles, err := ioutil.ReadDir(subdir)
			if err != nil {
				log.Error("router.load.readsubdir[%v].error:%v", subdir, err)
				return err
			}
			for _, subFile := range subFiles {
				if !subFile.IsDir() {
					jsons = append(jsons, path.Join(subdir, subFile.Name()))
				}
			}
			frms[dbName] = jsons
		}
	}

	// Load every collected table schema into the router cache.
	for k, v := range frms {
		for _, file := range v {
			if err := r.loadTableFromFile(k, file); err != nil {
				log.Error("router.load.table..from.file[%v].error:%+v", file, err)
				return err
			}
		}
	}
	return nil
}
+ * + */ + +package router + +import ( + "fmt" + "os" + "path" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/xelabs/go-mysqlstack/xlog" +) + +func checkFileExistsForTest(db, table string) bool { + file := path.Join(_mockRouterSchemaDir, db, fmt.Sprintf("%s.json", table)) + if _, err := os.Stat(file); err != nil { + return false + } + return true +} + +func makeFileBrokenForTest(db, table string) { + file := path.Join(_mockRouterSchemaDir, db, fmt.Sprintf("%s.json", table)) + fd, err := os.OpenFile(file, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0644) + if err != nil { + panic(err) + } + fd.Write([]byte("wtf")) + fd.Close() +} + +func TestFrmTable(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + router, cleanup := MockNewRouter(log) + defer cleanup() + + // Add 1. + { + backends := []string{"backend1", "backend2", "backend3"} + err := router.CreateTable("test", "t1", "id", backends) + assert.Nil(t, err) + assert.True(t, checkFileExistsForTest("test", "t1")) + } + + // Add 2. + { + backends := []string{"backend1", "backend2"} + err := router.CreateTable("test", "t2", "id", backends) + assert.Nil(t, err) + assert.True(t, checkFileExistsForTest("test", "t2")) + } + + // Add 2. + { + backends := []string{"backend1", "backend2"} + err := router.CreateTable("test", "t2", "id", backends) + assert.NotNil(t, err) + } + + // Remove 2. + { + err := router.DropTable("test", "t2") + assert.Nil(t, err) + assert.False(t, checkFileExistsForTest("test", "t2")) + } + + // Refresh table. + { + { + err := router.RefreshTable("test", "t1") + assert.Nil(t, err) + } + + { + err := router.RefreshTable("test", "t2") + assert.NotNil(t, err) + } + } +} + +func TestFrmTableError(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + router, cleanup := MockNewRouter(log) + defer cleanup() + + // Add 1. 
+ { + backends := []string{"backend1", "backend2", "backend3"} + err := router.CreateTable("", "t1", "id", backends) + assert.NotNil(t, err) + } + + // Add 2. + { + backends := []string{"backend1", "backend2"} + err := router.CreateTable("test", "", "id", backends) + assert.NotNil(t, err) + } + + // Add 2. + { + backends := []string{"backend1", "backend2"} + err := router.CreateTable("test", "t2", "", backends) + assert.NotNil(t, err) + } + + // Drop table. + { + err := router.DropTable("testxx", "t2") + assert.NotNil(t, err) + } + + // Add 1. + { + backends := []string{"backend1", "backend2", "backend3"} + err := router.CreateTable("test", "t1", "id", backends) + assert.Nil(t, err) + assert.True(t, checkFileExistsForTest("test", "t1")) + } + + // Drop table. + { + router.metadir = "/u00000000001/" + err := router.DropTable("test", "t1") + assert.NotNil(t, err) + } +} + +func TestFrmDropDatabase(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + router, cleanup := MockNewRouter(log) + defer cleanup() + + // Add 1. + { + backends := []string{"backend1", "backend2", "backend3"} + err := router.CreateTable("test", "t1", "id", backends) + assert.Nil(t, err) + assert.True(t, checkFileExistsForTest("test", "t1")) + } + + // Add 2. + { + backends := []string{"backend1", "backend2"} + err := router.CreateTable("test", "t2", "id", backends) + assert.Nil(t, err) + assert.True(t, checkFileExistsForTest("test", "t2")) + } + + { + err := router.DropDatabase("test") + assert.Nil(t, err) + assert.False(t, checkFileExistsForTest("test", "t1")) + assert.False(t, checkFileExistsForTest("test", "t2")) + } +} + +func TestFrmLoad(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + router, cleanup := MockNewRouter(log) + defer cleanup() + + // Add 1. 
+ { + backends := []string{"backend1", "backend2", "backend3"} + err := router.CreateTable("test", "t1", "id", backends) + assert.Nil(t, err) + assert.True(t, checkFileExistsForTest("test", "t1")) + } + + // Add 2. + { + backends := []string{"backend1", "backend2"} + err := router.CreateTable("test", "t2", "id", backends) + assert.Nil(t, err) + assert.True(t, checkFileExistsForTest("test", "t2")) + } + + { + router1, _ := MockNewRouter(log) + assert.NotNil(t, router1) + + // load. + err := router1.LoadConfig() + assert.Nil(t, err) + assert.Equal(t, router, router1) + + // load again. + err = router1.LoadConfig() + assert.Nil(t, err) + assert.Equal(t, router, router1) + } +} + +func TestFrmReadFrmError(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + router, cleanup := MockNewRouter(log) + defer cleanup() + { + _, err := router.readFrmData("/u10000/xx.xx") + assert.NotNil(t, err) + } +} + +func TestFrmWriteFrmError(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + router, cleanup := MockNewRouter(log) + defer cleanup() + { + router.metadir = "/u100000/xx" + err := router.writeFrmData("test", "t1", nil) + assert.NotNil(t, err) + } +} + +func TestFrmReadFileBroken(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + router, cleanup := MockNewRouter(log) + defer cleanup() + + // Add 1. + { + backends := []string{"backend1", "backend2", "backend3"} + err := router.CreateTable("test", "t1", "id", backends) + assert.Nil(t, err) + assert.True(t, checkFileExistsForTest("test", "t1")) + // Make file broken. + makeFileBrokenForTest("test", "t1") + } + + // Refresh table. 
+ { + { + err := router.RefreshTable("test", "t1") + assert.NotNil(t, err) + } + + { + err := router.RefreshTable("test", "t2") + assert.NotNil(t, err) + } + } +} + +func TestFrmAddTableForTest(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + router, cleanup := MockNewRouter(log) + defer cleanup() + + err := router.AddForTest("test", nil) + assert.NotNil(t, err) +} diff --git a/src/router/hash.go b/src/router/hash.go new file mode 100644 index 00000000..ef0ee66c --- /dev/null +++ b/src/router/hash.go @@ -0,0 +1,177 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package router + +import ( + "bytes" + "config" + "fmt" + "sort" + "strconv" + "strings" + + "github.com/pkg/errors" + jump "github.com/renstrom/go-jump-consistent-hash" + + "github.com/xelabs/go-mysqlstack/common" + "github.com/xelabs/go-mysqlstack/sqlparser" + "github.com/xelabs/go-mysqlstack/xlog" +) + +// HashRange tuple. +// [Start, End) +type HashRange struct { + Start int + End int +} + +// String returns start-end info. +func (r *HashRange) String() string { + return fmt.Sprintf("[%v-%v)", r.Start, r.End) +} + +// Less impl. +func (r *HashRange) Less(b KeyRange) bool { + v := b.(*HashRange) + return r.Start < v.Start +} + +// Hash tuple. +type Hash struct { + log *xlog.Log + + // hash slots + slots int + + // hash method + typ MethodType + + // table config + conf *config.TableConfig + + // Partition map + partitions map[int]Segment + Segments []Segment `json:",omitempty"` +} + +// NewHash creates new hash. 
+func NewHash(log *xlog.Log, slots int, conf *config.TableConfig) *Hash { + return &Hash{ + log: log, + conf: conf, + slots: slots, + typ: methodTypeHash, + partitions: make(map[int]Segment), + Segments: make([]Segment, 0, 16), + } +} + +// Build used to build hash bitmap from schema config +func (h *Hash) Build() error { + var err error + var start, end int + + for _, part := range h.conf.Partitions { + segments := strings.Split(part.Segment, "-") + if len(segments) != 2 { + return errors.Errorf("hash.partition.segment.malformed[%v]", part.Segment) + } + + // parse partition spec + if start, err = strconv.Atoi(segments[0]); err != nil { + return errors.Errorf("hash.partition.segment.malformed[%v].start.can.not.parser.to.int", part.Segment) + } + if end, err = strconv.Atoi(segments[1]); err != nil { + return errors.Errorf("hash.partition.segment.malformed[%v].end.can.not.parser.to.int", part.Segment) + } + if end <= start { + return errors.Errorf("hash.partition.segment.malformed[%v].start[%v]>=end[%v]", part.Segment, start, end) + } + + partition := Segment{ + Table: part.Table, + Backend: part.Backend, + Range: &HashRange{ + Start: start, + End: end, + }, + } + + // bitmap + for i := start; i < end; i++ { + if _, ok := h.partitions[i]; ok { + return errors.Errorf("hash.partition.segment[%v].overlapped[%v]", part.Segment, i) + } + h.partitions[i] = partition + } + + // Segments + h.Segments = append(h.Segments, partition) + } + + if len(h.partitions) != h.slots { + return errors.Errorf("hash.partition.last.segment[%v].upper.bound.must.be[%v]", len(h.partitions), h.slots) + } + sort.Sort(Segments(h.Segments)) + return nil +} + +// Clear used to clean hash partitions +func (h *Hash) Clear() error { + for k := range h.partitions { + delete(h.partitions, k) + } + return nil +} + +// Lookup used to lookup partition(s) through the sharding-key range +// Hash.Lookup only supports the type uint64/string +func (h *Hash) Lookup(start *sqlparser.SQLVal, end 
*sqlparser.SQLVal) ([]Segment, error) { + // if open interval we returns all partitions + if start == nil || end == nil { + return h.Segments, nil + } + + // Check item types. + if start.Type != end.Type { + return nil, errors.Errorf("hash.lookup.key.type.must.be.same:[%v!=%v]", start.Type, end.Type) + } + + // Hash just handle the equal + if bytes.Equal(start.Val, end.Val) { + valStr := common.BytesToString(start.Val) + switch start.Type { + case sqlparser.IntVal: + unsigned, err := strconv.ParseInt(valStr, 0, 64) + if err != nil { + return nil, errors.Errorf("hash.lookup.start.key.parser.uint64.error:[%v]", err) + } + idx := int(jump.Hash(uint64(unsigned), int32(h.slots))) + return []Segment{h.partitions[idx]}, nil + case sqlparser.FloatVal: + unsigned, err := strconv.ParseFloat(valStr, 64) + if err != nil { + return nil, errors.Errorf("hash.lookup.start.key.parser.float.error:[%v]", err) + } + idx := int(jump.Hash(uint64(unsigned), int32(h.slots))) + return []Segment{h.partitions[idx]}, nil + case sqlparser.StrVal: + idx := int(jump.HashString(valStr, int32(h.slots), jump.CRC64)) + return []Segment{h.partitions[idx]}, nil + default: + return nil, errors.Errorf("hash.unsupported.key.type:[%v]", start.Type) + } + } + return h.Segments, nil +} + +// Type returns the hash type. +func (h *Hash) Type() MethodType { + return h.typ +} diff --git a/src/router/hash_test.go b/src/router/hash_test.go new file mode 100644 index 00000000..51480646 --- /dev/null +++ b/src/router/hash_test.go @@ -0,0 +1,246 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. 
+ * + */ + +package router + +import ( + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/xelabs/go-mysqlstack/sqlparser" + "github.com/xelabs/go-mysqlstack/xlog" +) + +var ( + _mockHashSlots = 4096 +) + +func TestHash(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + hash := NewHash(log, _mockHashSlots, MockTableAConfig()) + { + err := hash.Build() + assert.Nil(t, err) + assert.Equal(t, string(hash.Type()), methodTypeHash) + assert.Equal(t, hash.slots, 4096) + assert.Equal(t, len(hash.partitions), 4096) + } + + { + err := hash.Clear() + assert.Nil(t, err) + err = hash.Build() + assert.Nil(t, err) + } +} + +func TestHashOverlap(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + hash := NewHash(log, _mockHashSlots, MockTableOverlapConfig()) + err := hash.Build() + { + want := "hash.partition.segment[7-9].overlapped[7]" + got := err.Error() + assert.Equal(t, want, got) + } +} + +func TestHashInvalid(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + hash := NewHash(log, _mockHashSlots, MockTableInvalidConfig()) + err := hash.Build() + { + want := "hash.partition.segment.malformed[8-x].end.can.not.parser.to.int" + got := err.Error() + assert.Equal(t, want, got) + } +} + +func TestHashGreaterThan(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + hash := NewHash(log, _mockHashSlots, MockTableGreaterThanConfig()) + err := hash.Build() + { + want := "hash.partition.segment.malformed[10-8].start[10]>=end[8]" + got := err.Error() + assert.Equal(t, want, got) + } +} + +func TestHash64(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + hash := NewHash(log, _mockHashSlots, MockTable64Config()) + err := hash.Build() + { + want := "hash.partition.last.segment[64].upper.bound.must.be[4096]" + got := err.Error() + assert.Equal(t, want, got) + } +} + +func TestHashLookup(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + hash := NewHash(log, _mockHashSlots, 
MockTableAConfig()) + { + err := hash.Build() + assert.Nil(t, err) + } + + intVal := sqlparser.NewIntVal([]byte("-65536")) + floatVal := sqlparser.NewFloatVal([]byte("65536.99999")) + strVal := sqlparser.NewStrVal([]byte("shardkey")) + { + parts, err := hash.Lookup(strVal, strVal) + assert.Nil(t, err) + assert.Equal(t, 1, len(parts)) + } + + // int + { + parts, err := hash.Lookup(intVal, intVal) + assert.Nil(t, err) + assert.Equal(t, 1, len(parts)) + assert.Equal(t, "A8", parts[0].Table) + assert.Equal(t, "backend8", parts[0].Backend) + } + + // float + { + parts, err := hash.Lookup(floatVal, floatVal) + assert.Nil(t, err) + assert.Equal(t, 1, len(parts)) + assert.Equal(t, "A8", parts[0].Table) + assert.Equal(t, "backend8", parts[0].Backend) + } + + // str + { + parts, err := hash.Lookup(strVal, strVal) + assert.Nil(t, err) + assert.Equal(t, 1, len(parts)) + assert.Equal(t, "A8", parts[0].Table) + assert.Equal(t, "backend8", parts[0].Backend) + } + + // [nil, endKey] + { + parts, err := hash.Lookup(nil, strVal) + assert.Nil(t, err) + assert.Equal(t, 4, len(parts)) + } + + // [nil, nil] + { + parts, err := hash.Lookup(nil, nil) + assert.Nil(t, err) + assert.Equal(t, 4, len(parts)) + } + + // [start, end) + { + s := sqlparser.NewIntVal([]byte("16")) + e := sqlparser.NewIntVal([]byte("17")) + + parts, err := hash.Lookup(s, e) + assert.Nil(t, err) + assert.Equal(t, 4, len(parts)) + } +} + +func TestHashBuildError(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + { + hash := NewHash(log, _mockHashSlots, MockTableSegmentErr1Config()) + err := hash.Build() + want := "hash.partition.segment.malformed[0]" + got := err.Error() + assert.Equal(t, want, got) + } + + { + hash := NewHash(log, _mockHashSlots, MockTableSegmentStartErrConfig()) + err := hash.Build() + want := "hash.partition.segment.malformed[x-0].start.can.not.parser.to.int" + got := err.Error() + assert.Equal(t, want, got) + } + + { + hash := NewHash(log, _mockHashSlots, 
MockTableSegmentEndErrConfig()) + err := hash.Build() + want := "hash.partition.segment.malformed[0-x].end.can.not.parser.to.int" + got := err.Error() + assert.Equal(t, want, got) + } +} + +func TestHashLookupError(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + hash := NewHash(log, _mockHashSlots, MockTableAConfig()) + { + err := hash.Build() + assert.Nil(t, err) + } + + intVal := sqlparser.NewIntVal([]byte("65536")) + strVal := sqlparser.NewStrVal([]byte("shardkey")) + hexVal := sqlparser.NewHexNum([]byte("3.1415926")) + { + _, err := hash.Lookup(strVal, intVal) + want := "hash.lookup.key.type.must.be.same:[0!=1]" + got := err.Error() + assert.Equal(t, want, got) + } + + { + intVal := sqlparser.NewIntVal([]byte("65536x")) + _, err := hash.Lookup(intVal, intVal) + want := "hash.lookup.start.key.parser.uint64.error:[strconv.ParseInt: parsing \"65536x\": invalid syntax]" + got := err.Error() + assert.Equal(t, want, got) + } + + { + floatVal := sqlparser.NewFloatVal([]byte("65536.x")) + _, err := hash.Lookup(floatVal, floatVal) + want := "hash.lookup.start.key.parser.float.error:[strconv.ParseFloat: parsing \"65536.x\": invalid syntax]" + got := err.Error() + assert.Equal(t, want, got) + } + + { + _, err := hash.Lookup(hexVal, hexVal) + want := "hash.unsupported.key.type:[3]" + got := err.Error() + assert.Equal(t, want, got) + } +} + +func TestHashLookupBench(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + hash := NewHash(log, _mockHashSlots, MockTableAConfig()) + { + err := hash.Build() + assert.Nil(t, err) + } + + { + N := 1000000 + now := time.Now() + for i := 0; i < N; i++ { + intVal := sqlparser.NewIntVal([]byte(fmt.Sprintf("%d", i))) + _, err := hash.Lookup(intVal, intVal) + assert.Nil(t, err) + } + + took := time.Since(now) + fmt.Printf(" LOOP\t%v COST %v, avg:%v/s\n", N, took, (int64(N)/(took.Nanoseconds()/1e6))*1000) + } +} diff --git a/src/router/mock.go b/src/router/mock.go new file mode 100644 index 00000000..7dbcc8f2 
--- /dev/null +++ b/src/router/mock.go @@ -0,0 +1,303 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package router + +import ( + "config" + "os" + + "github.com/xelabs/go-mysqlstack/xlog" +) + +var ( + // MockDefaultConfig config. + MockDefaultConfig = []*config.PartitionConfig{ + &config.PartitionConfig{ + Table: "A2", + Segment: "2-4", + Backend: "backend2", + }, + &config.PartitionConfig{ + Table: "A4", + Segment: "4-8", + Backend: "backend4", + }, + } +) + +// MockTableAConfig config. +func MockTableAConfig() *config.TableConfig { + mock := &config.TableConfig{ + Name: "A", + ShardType: "HASH", + ShardKey: "id", + Partitions: MockDefaultConfig, + } + S02 := &config.PartitionConfig{ + Table: "A0", + Segment: "0-2", + Backend: "backend0", + } + S81024 := &config.PartitionConfig{ + Table: "A8", + Segment: "8-4096", + Backend: "backend8", + } + + mock.Partitions = append(mock.Partitions, S02, S81024) + return mock +} + +// MockTableMConfig config. +func MockTableMConfig() *config.TableConfig { + mock := &config.TableConfig{ + Name: "A", + ShardType: "HASH", + ShardKey: "id", + Partitions: make([]*config.PartitionConfig, 0, 16), + } + + S032 := &config.PartitionConfig{ + Table: "A1", + Segment: "0-32", + Backend: "backend1", + } + + S3264 := &config.PartitionConfig{ + Table: "A2", + Segment: "32-64", + Backend: "backend2", + } + + S6496 := &config.PartitionConfig{ + Table: "A3", + Segment: "64-96", + Backend: "backend3", + } + + S96256 := &config.PartitionConfig{ + Table: "A4", + Segment: "96-256", + Backend: "backend4", + } + + S256512 := &config.PartitionConfig{ + Table: "A5", + Segment: "256-512", + Backend: "backend5", + } + + S5121024 := &config.PartitionConfig{ + Table: "A6", + Segment: "512-4096", + Backend: "backend6", + } + + mock.Partitions = append(mock.Partitions, S032, S3264, S6496, S96256, S256512, S5121024) + return mock +} + +// MockTableBConfig config. 
+func MockTableBConfig() *config.TableConfig { + mock := &config.TableConfig{ + Name: "B", + ShardType: "HASH", + ShardKey: "id", + Partitions: make([]*config.PartitionConfig, 0, 16), + } + S0512 := &config.PartitionConfig{ + Table: "B0", + Segment: "0-512", + Backend: "backend0", + } + S11024 := &config.PartitionConfig{ + Table: "B1", + Segment: "512-4096", + Backend: "backend512", + } + + mock.Partitions = append(mock.Partitions, S0512, S11024) + return mock +} + +// MockTableNULLConfig config. +func MockTableNULLConfig() *config.TableConfig { + mock := &config.TableConfig{ + Name: "B", + ShardType: "HASH", + ShardKey: "id", + } + return mock +} + +// MockTableSegmentErr1Config config. +func MockTableSegmentErr1Config() *config.TableConfig { + mock := &config.TableConfig{ + Name: "A", + ShardType: "HASH", + ShardKey: "id", + Partitions: make([]*config.PartitionConfig, 0, 16), + } + + S032 := &config.PartitionConfig{ + Table: "A1", + Segment: "0", + Backend: "backend1", + } + mock.Partitions = append(mock.Partitions, S032) + return mock +} + +// MockTableSegmentStartErrConfig config. +func MockTableSegmentStartErrConfig() *config.TableConfig { + mock := &config.TableConfig{ + Name: "A", + ShardType: "HASH", + ShardKey: "id", + Partitions: make([]*config.PartitionConfig, 0, 16), + } + + S032 := &config.PartitionConfig{ + Table: "A1", + Segment: "x-0", + Backend: "backend1", + } + mock.Partitions = append(mock.Partitions, S032) + return mock +} + +// MockTableSegmentEndErrConfig config. +func MockTableSegmentEndErrConfig() *config.TableConfig { + mock := &config.TableConfig{ + Name: "A", + ShardType: "HASH", + ShardKey: "id", + Partitions: make([]*config.PartitionConfig, 0, 16), + } + + S032 := &config.PartitionConfig{ + Table: "A1", + Segment: "0-x", + Backend: "backend1", + } + mock.Partitions = append(mock.Partitions, S032) + return mock +} + +// MockTable64Config config. 
+func MockTable64Config() *config.TableConfig {
+	mock := &config.TableConfig{
+		Name:       "A",
+		ShardKey:   "id",
+		Partitions: MockDefaultConfig,
+	}
+	S02 := &config.PartitionConfig{
+		Table:   "A1",
+		Segment: "0-2",
+		Backend: "backend1",
+	}
+	S864 := &config.PartitionConfig{
+		Table:   "A4",
+		Segment: "8-64",
+		Backend: "backend2",
+	}
+
+	mock.Partitions = append(mock.Partitions, S02, S864)
+	return mock
+}
+
+// MockTableOverlapConfig config.
+func MockTableOverlapConfig() *config.TableConfig {
+	mock := &config.TableConfig{
+		Name:       "A",
+		ShardKey:   "id",
+		Partitions: MockDefaultConfig,
+	}
+
+	S79 := &config.PartitionConfig{
+		Table:   "A1",
+		Segment: "7-9",
+		Backend: "backend1",
+	}
+
+	mock.Partitions = append(mock.Partitions, S79)
+	return mock
+}
+
+// MockTableInvalidConfig config.
+func MockTableInvalidConfig() *config.TableConfig {
+	mock := &config.TableConfig{
+		Name:       "A",
+		ShardKey:   "id",
+		Partitions: MockDefaultConfig,
+	}
+
+	S8X := &config.PartitionConfig{
+		Table:   "A1",
+		Segment: "8-x",
+		Backend: "backend1",
+	}
+
+	mock.Partitions = append(mock.Partitions, S8X)
+	return mock
+}
+
+// MockTableGreaterThanConfig config.
+func MockTableGreaterThanConfig() *config.TableConfig {
+	mock := &config.TableConfig{
+		Name:       "A",
+		ShardKey:   "id",
+		Partitions: MockDefaultConfig,
+	}
+
+	S108 := &config.PartitionConfig{
+		Table:   "A1",
+		Segment: "10-8",
+		Backend: "backend1",
+	}
+
+	mock.Partitions = append(mock.Partitions, S108)
+	return mock
+}
+
+// MockTableE1Config config, unsupported shardtype.
+func MockTableE1Config() *config.TableConfig {
+	mock := &config.TableConfig{
+		Name:       "E1",
+		ShardType:  "Range",
+		ShardKey:   "id",
+		Partitions: MockDefaultConfig,
+	}
+	S02 := &config.PartitionConfig{
+		Table:   "A1",
+		Segment: "0-2",
+		Backend: "backend1",
+	}
+	S81024 := &config.PartitionConfig{
+		Table:   "A4",
+		Segment: "8-4096",
+		Backend: "backend2",
+	}
+
+	mock.Partitions = append(mock.Partitions, S02, S81024)
+	return mock
+}
+
+var (
+	_mockRouterSchemaDir = "/tmp/router_test"
+)
+
+// MockNewRouter mocks router.
+func MockNewRouter(log *xlog.Log) (*Router, func()) {
+	return NewRouter(log, _mockRouterSchemaDir, config.DefaultRouterConfig()), func() {
+		if err := os.RemoveAll(_mockRouterSchemaDir); err != nil {
+			panic(err)
+		}
+	}
+}
diff --git a/src/router/partition.go b/src/router/partition.go
new file mode 100644
index 00000000..b84a5660
--- /dev/null
+++ b/src/router/partition.go
@@ -0,0 +1,49 @@
+/*
+ * Radon
+ *
+ * Copyright 2018 The Radon Authors.
+ * Code is licensed under the GPLv3.
+ *
+ */
+
+package router
+
+import (
+	"github.com/xelabs/go-mysqlstack/sqlparser"
+)
+
+// KeyRange tuple.
+type KeyRange interface {
+	String() string
+	Less(KeyRange) bool
+}
+
+// Segments slice.
+type Segments []Segment
+
+// Len impl.
+func (q Segments) Len() int { return len(q) }
+
+// Swap impl.
+func (q Segments) Swap(i, j int) { q[i], q[j] = q[j], q[i] }
+
+// Less impl.
+func (q Segments) Less(i, j int) bool {
+	return q[i].Range.Less(q[j].Range)
+}
+
+// Segment tuple.
+type Segment struct {
+	// Segment table name.
+	Table string `json:",omitempty"`
+	// Segment backend name.
+	Backend string `json:",omitempty"`
+	// key range of this segment.
+	Range KeyRange `json:",omitempty"`
+}
+
+// Partition interface.
+type Partition interface { + Build() error + Lookup(start *sqlparser.SQLVal, end *sqlparser.SQLVal) ([]Segment, error) +} diff --git a/src/router/router.go b/src/router/router.go new file mode 100644 index 00000000..8d951f90 --- /dev/null +++ b/src/router/router.go @@ -0,0 +1,256 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package router + +import ( + "config" + "encoding/json" + "sync" + + "github.com/pkg/errors" + + "github.com/xelabs/go-mysqlstack/sqldb" + "github.com/xelabs/go-mysqlstack/sqlparser" + "github.com/xelabs/go-mysqlstack/xlog" +) + +// Table tuple. +type Table struct { + // Table name + Name string `json:",omitempty"` + // Shard key + ShardKey string `json:",omitempty"` + // partition method + Partition Partition `json:",omitempty"` + // table config. + TableConfig *config.TableConfig `json:"-"` +} + +// Schema tuple. +type Schema struct { + // database name + DB string `json:",omitempty"` + // tables map, key is table name + Tables map[string]*Table `json:",omitempty"` +} + +// Router tuple. +type Router struct { + log *xlog.Log + mu sync.RWMutex + metadir string + dbACL *DatabaseACL + conf *config.RouterConfig + + // schemas map, key is database name + Schemas map[string]*Schema `json:",omitempty"` +} + +// NewRouter creates the new router. 
+func NewRouter(log *xlog.Log, metadir string, conf *config.RouterConfig) *Router {
+	route := &Router{
+		log:     log,
+		metadir: metadir,
+		conf:    conf,
+		dbACL:   NewDatabaseACL(),
+		Schemas: make(map[string]*Schema),
+	}
+	return route
+}
+
+// add used to add a table router to schema map
+func (r *Router) add(db string, tbl *config.TableConfig) error {
+	var ok bool
+	var schema *Schema
+	var table *Table
+
+	if db == "" {
+		return errors.New("db.can't.be.null")
+	}
+	if tbl == nil {
+		return errors.New("table.config..can't.be.nil")
+	}
+
+	// schema
+	if schema, ok = r.Schemas[db]; !ok {
+		schema = &Schema{DB: db, Tables: make(map[string]*Table)}
+		r.Schemas[db] = schema
+	}
+
+	// table
+	if _, ok = schema.Tables[tbl.Name]; !ok {
+		table = &Table{
+			Name:        tbl.Name,
+			ShardKey:    tbl.ShardKey,
+			TableConfig: tbl,
+		}
+		schema.Tables[tbl.Name] = table
+	} else {
+		return errors.Errorf("router.add.db[%v].table[%v].exists", db, tbl.Name)
+	}
+
+	// methods
+	switch tbl.ShardType {
+	case methodTypeHash:
+		hash := NewHash(r.log, r.conf.Slots, tbl)
+		if err := hash.Build(); err != nil {
+			return err
+		}
+		table.Partition = hash
+	default:
+		return errors.Errorf("router.unsupport.shardtype:[%v]", tbl.ShardType)
+	}
+
+	return nil
+}
+
+// remove used to remove a table router from schema map
+func (r *Router) remove(db string, table string) error {
+	var ok bool
+	var schema *Schema
+
+	// schema
+	if schema, ok = r.Schemas[db]; !ok {
+		return errors.Errorf("router.can.not.find.db[%v]", db)
+	}
+	// table
+	if _, ok = schema.Tables[table]; !ok {
+		return errors.Errorf("router.can.not.find.table[%v]", table)
+	}
+	// remove
+	delete(schema.Tables, table)
+	return nil
+}
+
+// clear used to reset Schemas to new.
+func (r *Router) clear() {
+	r.Schemas = make(map[string]*Schema)
+}
+
+// DatabaseACL used to check whether the database is a system database.
+func (r *Router) DatabaseACL(database string) error { + if ok := r.dbACL.Allow(database); !ok { + r.log.Warning("router.database.acl.check.fail[db:%s]", database) + return sqldb.NewSQLError(sqldb.ER_SPECIFIC_ACCESS_DENIED_ERROR, "Access denied; lacking privileges for database %s", database) + } + return nil +} + +func (r *Router) getTable(database string, tableName string) (*Table, error) { + var ok bool + var schema *Schema + var table *Table + + // lock + r.mu.RLock() + defer r.mu.RUnlock() + + if database == "" { + return nil, sqldb.NewSQLError(sqldb.ER_NO_DB_ERROR, "") + } + if tableName == "" { + return nil, sqldb.NewSQLError(sqldb.ER_NO_SUCH_TABLE, "", tableName) + } + + // schema + if schema, ok = r.Schemas[database]; !ok { + r.log.Error("router.can.not.find.db[%v]", database) + return nil, sqldb.NewSQLError(sqldb.ER_NO_SUCH_TABLE, "", database+"."+tableName) + } + + // table + if table, ok = schema.Tables[tableName]; !ok { + r.log.Error("router.can.not.find.table[%v]", tableName) + return nil, sqldb.NewSQLError(sqldb.ER_NO_SUCH_TABLE, "", tableName) + } + return table, nil +} + +// ShardKey used to lookup shardkey from given database and table name +func (r *Router) ShardKey(database string, tableName string) (string, error) { + table, err := r.getTable(database, tableName) + if err != nil { + return "", err + } + return table.ShardKey, nil +} + +// TableConfig returns the config by database and tableName. 
+func (r *Router) TableConfig(database string, tableName string) (*config.TableConfig, error) { + table, err := r.getTable(database, tableName) + if err != nil { + return nil, err + } + return table.TableConfig, nil +} + +// Lookup used to lookup a router(partition table name and backend) through db&table +func (r *Router) Lookup(database string, tableName string, startKey *sqlparser.SQLVal, endKey *sqlparser.SQLVal) ([]Segment, error) { + var ok bool + var err error + var schema *Schema + var table *Table + + // lock + r.mu.RLock() + defer r.mu.RUnlock() + + if database == "" { + return nil, sqldb.NewSQLError(sqldb.ER_NO_DB_ERROR, "") + } + if tableName == "" { + return nil, sqldb.NewSQLError(sqldb.ER_NO_SUCH_TABLE, "", tableName) + } + + // schema + if schema, ok = r.Schemas[database]; !ok { + r.log.Error("router.can.not.find.db[%v]", database) + return nil, sqldb.NewSQLError(sqldb.ER_BAD_DB_ERROR, "", database) + } + + // table + if table, ok = schema.Tables[tableName]; !ok { + r.log.Error("router.can.not.find.table[%v]", tableName) + return nil, sqldb.NewSQLError(sqldb.ER_NO_SUCH_TABLE, "", tableName) + } + + // router info + partInfos, err := table.Partition.Lookup(startKey, endKey) + if err != nil { + r.log.Error("router.partition.lookup.error:%+v", err) + return nil, err + } + return partInfos, nil +} + +// Tables returns all the tables. +func (r *Router) Tables() map[string][]string { + r.mu.RLock() + defer r.mu.RUnlock() + + list := make(map[string][]string) + for _, schema := range r.Schemas { + db := schema.DB + tables := make([]string, 0, 16) + for _, table := range schema.Tables { + tables = append(tables, table.Name) + } + list[db] = tables + } + return list +} + +// JSON returns the info of router. 
+func (r *Router) JSON() string { + bout, err := json.MarshalIndent(r, "", "\t") + if err != nil { + return err.Error() + } + return string(bout) +} diff --git a/src/router/router_test.go b/src/router/router_test.go new file mode 100644 index 00000000..0163065e --- /dev/null +++ b/src/router/router_test.go @@ -0,0 +1,303 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package router + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/xelabs/go-mysqlstack/sqlparser" + "github.com/xelabs/go-mysqlstack/xlog" +) + +func TestRouter(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + router, cleanup := MockNewRouter(log) + defer cleanup() + assert.NotNil(t, router) +} + +func TestRouteradd(t *testing.T) { + results := []string{`{ + "Schemas": { + "sbtest": { + "DB": "sbtest", + "Tables": { + "A": { + "Name": "A", + "ShardKey": "id", + "Partition": { + "Segments": [ + { + "Table": "A0", + "Backend": "backend0", + "Range": { + "Start": 0, + "End": 2 + } + }, + { + "Table": "A2", + "Backend": "backend2", + "Range": { + "Start": 2, + "End": 4 + } + }, + { + "Table": "A4", + "Backend": "backend4", + "Range": { + "Start": 4, + "End": 8 + } + }, + { + "Table": "A8", + "Backend": "backend8", + "Range": { + "Start": 8, + "End": 4096 + } + } + ] + } + } + } + } + } +}`} + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + router, cleanup := MockNewRouter(log) + defer cleanup() + assert.NotNil(t, router) + + // router + { + err := router.add("sbtest", MockTableAConfig()) + assert.Nil(t, err) + want := results[0] + got := router.JSON() + log.Debug(got) + assert.Equal(t, want, got) + } + + // add same routers + { + err := router.add("sbtest", MockTableAConfig()) + want := "router.add.db[sbtest].table[A].exists" + got := err.Error() + assert.Equal(t, want, got) + } + + // unsupport shardtype + { + err := router.add("sbtest", MockTableE1Config()) + want := 
"router.unsupport.shardtype:[Range]" + got := err.Error() + assert.Equal(t, want, got) + } +} + +func TestRouterremove(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + router, cleanup := MockNewRouter(log) + defer cleanup() + assert.NotNil(t, router) + + // router + { + err := router.remove("sbtest", MockTableAConfig().Name) + want := "router.can.not.find.db[sbtest]" + got := err.Error() + assert.Equal(t, want, got) + } + + // add router of sbtest.A + { + err := router.add("sbtest", MockTableAConfig()) + assert.Nil(t, err) + + strVal := sqlparser.NewStrVal([]byte("shardkey")) + _, err = router.Lookup("sbtest", "A", strVal, strVal) + assert.Nil(t, err) + } + + // remove router of xx.A + { + err := router.remove("xx", MockTableAConfig().Name) + want := "router.can.not.find.db[xx]" + got := err.Error() + assert.Equal(t, want, got) + } + + // remove router of sbtest.E1(invalid router) + { + err := router.remove("sbtest", MockTableE1Config().Name) + want := "router.can.not.find.table[E1]" + got := err.Error() + assert.Equal(t, want, got) + } + + // remove router of sbtest.A + { + err := router.remove("sbtest", MockTableAConfig().Name) + assert.Nil(t, err) + + strVal := sqlparser.NewStrVal([]byte("shardkey")) + _, err = router.Lookup("sbtest", "A", strVal, strVal) + want := "Table 'A' doesn't exist (errno 1146) (sqlstate 42S02)" + got := err.Error() + assert.Equal(t, want, got) + } +} + +func TestRouterLookup(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + router, cleanup := MockNewRouter(log) + defer cleanup() + assert.NotNil(t, router) + + // add router of sbtest.A + { + err := router.add("sbtest", MockTableAConfig()) + assert.Nil(t, err) + + strVal := sqlparser.NewStrVal([]byte("shardkey")) + _, err = router.Lookup("sbtest", "A", strVal, strVal) + assert.Nil(t, err) + } +} + +func TestRouterLookupError(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + router, cleanup := MockNewRouter(log) + defer cleanup() + 
assert.NotNil(t, router) + + // add router of sbtest.A + { + err := router.add("sbtest", MockTableAConfig()) + assert.Nil(t, err) + + // database error + { + strVal := sqlparser.NewStrVal([]byte("shardkey")) + _, err = router.Lookup("xx", "A", strVal, strVal) + want := "Unknown database 'xx' (errno 1049) (sqlstate 42000)" + got := err.Error() + assert.Equal(t, want, got) + } + + // database is NULL + { + strVal := sqlparser.NewStrVal([]byte("shardkey")) + _, err = router.Lookup("", "A", strVal, strVal) + want := "No database selected (errno 1046) (sqlstate 3D000)" + got := err.Error() + assert.Equal(t, want, got) + } + } +} + +func TestRouterShardKey(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + router, cleanup := MockNewRouter(log) + defer cleanup() + assert.NotNil(t, router) + + // add router of sbtest.A + { + err := router.add("sbtest", MockTableAConfig()) + assert.Nil(t, err) + + shardKey, err := router.ShardKey("sbtest", "A") + assert.Nil(t, err) + assert.Equal(t, "id", shardKey) + } +} + +func TestRouterShardKeyError(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + router, cleanup := MockNewRouter(log) + defer cleanup() + assert.NotNil(t, router) + + // add router of sbtest.A + { + err := router.add("sbtest", MockTableAConfig()) + assert.Nil(t, err) + + // database error + { + _, err = router.ShardKey("xx", "A") + want := "Table 'xx.A' doesn't exist (errno 1146) (sqlstate 42S02)" + got := err.Error() + assert.Equal(t, want, got) + } + + // table error + { + _, err = router.ShardKey("sbtest", "x") + want := "Table 'x' doesn't exist (errno 1146) (sqlstate 42S02)" + got := err.Error() + assert.Equal(t, want, got) + } + + // database is NULL + { + _, err = router.ShardKey("", "A") + want := "No database selected (errno 1046) (sqlstate 3D000)" + got := err.Error() + assert.Equal(t, want, got) + } + } +} + +func TestRouterDatabaseACL(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + router, cleanup := 
MockNewRouter(log) + defer cleanup() + assert.NotNil(t, router) + + // Not ok. + { + sysDB := []string{"SYS", "MYSQL", "performance_schema", "information_schema"} + for _, sys := range sysDB { + err := router.DatabaseACL(sys) + assert.NotNil(t, err) + } + } + + // OK. + { + sysDB := []string{"SYS1", "MYSQL1", "performance_schema1", "information_schema1"} + for _, sys := range sysDB { + err := router.DatabaseACL(sys) + assert.Nil(t, err) + } + } +} + +func TestRouterTableConfig(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + router, cleanup := MockNewRouter(log) + defer cleanup() + assert.NotNil(t, router) + + // add router of sbtest.A + { + err := router.add("sbtest", MockTableAConfig()) + assert.Nil(t, err) + + tConf, err := router.TableConfig("sbtest", "A") + assert.Nil(t, err) + assert.NotNil(t, tConf) + } +} diff --git a/src/router/types.go b/src/router/types.go new file mode 100644 index 00000000..daabf985 --- /dev/null +++ b/src/router/types.go @@ -0,0 +1,17 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package router + +// MethodType type. +type MethodType string + +const ( + // methodTypeHash type. + methodTypeHash = "HASH" +) diff --git a/src/syncer/meta.go b/src/syncer/meta.go new file mode 100644 index 00000000..1136a7bb --- /dev/null +++ b/src/syncer/meta.go @@ -0,0 +1,187 @@ +/* + * Radon + * + * Copyright (c) 2017 QingCloud.com. + * Code is licensed under the GPLv3. + * + */ + +package syncer + +import ( + "config" + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path" + "path/filepath" + "strings" + "time" + "xbase" + + "github.com/xelabs/go-mysqlstack/common" + "github.com/xelabs/go-mysqlstack/xlog" +) + +const ( + // metaRestURL url. + metaRestURL = "v1/meta/metas" + + // versionRestURL url. + versionRestURL = "v1/meta/versions" +) + +// Meta tuple. +type Meta struct { + Metas map[string]string `json:"metas"` +} + +// readFile used to read file from disk. 
+func readFile(log *xlog.Log, file string) (string, error) { + data, err := ioutil.ReadFile(file) + if err != nil { + log.Error("syncer.meta.json.read.file[%s].error:%+v", file, err) + return "", err + } + return common.BytesToString(data), nil +} + +// writeFile used to write file to disk. +func writeFile(log *xlog.Log, file string, data string) error { + err := xbase.WriteFile(file, common.StringToBytes(data)) + if err != nil { + log.Error("syncer.write.file[%s].error:%+v", file, err) + return err + } + return nil +} + +// MetaVersion returns the meta version. +func (s *Syncer) MetaVersion() int64 { + s.mu.Lock() + defer s.mu.Unlock() + return config.ReadVersion(s.metadir) +} + +// MetaVersionCheck used to check the version is synced or not. +func (s *Syncer) MetaVersionCheck() (bool, []string) { + s.mu.Lock() + defer s.mu.Unlock() + + log := s.log + maxVer := int64(0) + self := s.peer.self + peers := s.peer.Clone() + for _, peer := range peers { + if peer != self { + versionURL := "http://" + path.Join(peer, versionRestURL) + peerVerStr, err := xbase.HTTPGet(versionURL) + if err != nil { + log.Error("syncer.check.version.get[%s].error:%+v", peerVerStr, err) + continue + } + + version := &config.Version{} + if err := json.Unmarshal([]byte(peerVerStr), version); err != nil { + log.Error("syncer.version.unmarshal[%s].error:%+v", peerVerStr, err) + return false, s.peer.peers + } + peerVer := version.Ts + if peerVer > maxVer { + maxVer = peerVer + } + } + } + + selfVer := config.ReadVersion(s.metadir) + if maxVer > selfVer { + return false, s.peer.peers + } + return true, s.peer.peers +} + +// MetaJSON used to get the meta(in json) from the metadir. 
+func (s *Syncer) MetaJSON() (*Meta, error) { + s.mu.Lock() + defer s.mu.Unlock() + + log := s.log + meta := &Meta{ + Metas: make(map[string]string), + } + + if err := filepath.Walk(s.metadir, func(path string, info os.FileInfo, err error) error { + if err != nil { + log.Error("syncer.meta.json.walk.read.file[%s].error:%+v", path, err) + return err + } + + if !info.IsDir() { + file := strings.TrimPrefix(strings.TrimPrefix(path, s.metadir), "/") + data, err := readFile(log, path) + if err != nil { + log.Error("syncer.meta.json.walk.read.file[%s].error:%+v", path, err) + return err + } + meta.Metas[file] = data + } + return nil + }); err != nil { + return nil, err + } + log.Warning("syncer.get.meta.json:%+v", meta.Metas) + return meta, nil +} + +// MetaRebuild use to re-build the metadir infos from the meta json. +func (s *Syncer) MetaRebuild(meta *Meta) { + s.mu.Lock() + defer s.mu.Unlock() + + log := s.log + baseDir := path.Dir(strings.TrimSuffix(s.metadir, "/")) + backupName := fmt.Sprintf("_backup_%s_%v", path.Base(s.metadir), time.Now().UTC().Format("20060102150405.000")) + backupMetaDir := path.Join(baseDir, backupName) + log.Warning("syncer.meta.rebuild.mv.metadir.from[%s].to[%s]...", s.metadir, backupMetaDir) + if err := os.Rename(s.metadir, backupMetaDir); err != nil { + log.Panicf("syncer.rebuild.rename.metadir.from[%s].to[%s].error:%v", s.metadir, backupMetaDir, err) + } + + log.Warning("syncer.meta.rebuild.json:%+v", meta.Metas) + for name, data := range meta.Metas { + file := path.Join(s.metadir, name) + dir := filepath.Dir(file) + if _, err := os.Stat(dir); os.IsNotExist(err) { + log.Warning("syncer.meta.rebuild.mkdir[%s]...", dir) + if x := os.MkdirAll(dir, 0777); x != nil { + log.Panicf("syncer.meta.rebuild.mkdir[%v].error:%v", dir, x) + } + } + if err := writeFile(log, file, data); err != nil { + log.Panicf("syncer.meta.rebuild.mkdir[%v].error:%v", dir, err) + } + log.Warning("syncer.meta.rebuild.create.file[%s].done...", file) + } + 
log.Warning("syncer.meta.rebuild.all.done...") +} + +// MetaReload used to reload the config from metadir. +func (s *Syncer) MetaReload() error { + s.mu.Lock() + defer s.mu.Unlock() + + log := s.log + log.Warning("syncer.meta.reload.prepare...") + if err := s.scatter.LoadConfig(); err != nil { + log.Panicf("syncer.meta.scatter.load.config.error:%+v", err) + } + if err := s.router.LoadConfig(); err != nil { + log.Panicf("syncer.meta.router.load.config.error:%+v", err) + } + if err := s.peer.LoadConfig(); err != nil { + log.Panicf("syncer.meta.peer.load.config.error:%+v", err) + } + log.Warning("syncer.meta.reload.done...") + return nil +} diff --git a/src/syncer/meta_test.go b/src/syncer/meta_test.go new file mode 100644 index 00000000..aa756675 --- /dev/null +++ b/src/syncer/meta_test.go @@ -0,0 +1,134 @@ +/* + * Radon + * + * Copyright (c) 2017 QingCloud.com. + * Code is licensed under the GPLv3. + * + */ + +package syncer + +import ( + "config" + "os" + "path" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/xelabs/go-mysqlstack/xlog" +) + +const ( + testMetadir = "/tmp/radon_syncer_meta_test" +) + +func testRemoveMetadir() { + os.RemoveAll(testMetadir) + + deleteFiles := func(p string, f os.FileInfo, err error) (e error) { + if strings.HasPrefix(f.Name(), "_backup_radon") { + os.RemoveAll(p) + } + return + } + filepath.Walk(path.Dir(testMetadir), deleteFiles) +} + +func TestMeta(t *testing.T) { + defer testRemoveMetadir() + + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + syncer := NewSyncer(log, testMetadir, "", nil, nil) + assert.NotNil(t, syncer) + + err := syncer.Init() + assert.Nil(t, err) + + meta := &Meta{ + Metas: make(map[string]string), + } + + // Rebuild. 
+ { + meta.Metas["backends.json"] = ("backends.json") + meta.Metas["version.json"] = ("12345") + meta.Metas["sbtest/t1.json"] = ("t1.json") + meta.Metas["sbtest/t2.json"] = ("t2.json") + meta.Metas["sbtest/t3.json"] = ("t2.json") + syncer.MetaRebuild(meta) + + hasBackup := false + checkFiles := func(p string, f os.FileInfo, err error) (e error) { + if strings.HasPrefix(f.Name(), "_backup_radon") { + hasBackup = true + return + } + return + } + filepath.Walk(path.Dir(testMetadir), checkFiles) + assert.True(t, hasBackup) + } + + // MetaJson. + { + got, err := syncer.MetaJSON() + assert.Nil(t, err) + assert.Equal(t, meta, got) + } + + // MetaVersion. + { + ver := syncer.MetaVersion() + assert.True(t, ver == 0) + } +} + +func TestMetaError(t *testing.T) { + defer testRemoveMetadir() + metadir := "/xx/radon_syncer_meta_test" + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + syncer := NewSyncer(log, metadir, "", nil, nil) + assert.NotNil(t, syncer) + + // MetaJson. + { + _, err := syncer.MetaJSON() + assert.NotNil(t, err) + } +} + +func TestMetaFileError(t *testing.T) { + defer testRemoveMetadir() + file := "/xx/radon_syncer_meta_test.xx" + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + { + _, err := readFile(log, file) + assert.NotNil(t, err) + } + + { + err := writeFile(log, file, "") + assert.NotNil(t, err) + } +} + +func TestMetaVersionCheck(t *testing.T) { + defer testRemoveMetadir() + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + syncers, cleanup := mockSyncer(log, 3) + assert.NotNil(t, syncers) + defer cleanup() + + syncer0 := syncers[0] + syncer2 := syncers[2] + config.UpdateVersion(syncer2.metadir) + checked, _ := syncer0.MetaVersionCheck() + assert.False(t, checked) + + time.Sleep(time.Second * 2) + checked, _ = syncer0.MetaVersionCheck() + assert.True(t, checked) +} diff --git a/src/syncer/mock.go b/src/syncer/mock.go new file mode 100644 index 00000000..ef6471fa --- /dev/null +++ b/src/syncer/mock.go @@ -0,0 +1,147 @@ +package syncer + +import ( + 
"backend" + "config" + "context" + "crypto/sha1" + "fmt" + "net/http" + "os" + "path/filepath" + "router" + "time" + + "github.com/ant0ine/go-json-rest/rest" + "github.com/xelabs/go-mysqlstack/xlog" +) + +func mockSyncer(log *xlog.Log, n int) ([]*Syncer, func()) { + var peers []string + var httpServers []*http.Server + var syncers []*Syncer + for i := 0; i < n; i++ { + metadir := fmt.Sprintf("/tmp/radon_test_syncer_meta%d", i) + os.Mkdir(metadir, 0777) + peerAddr := fmt.Sprintf("127.0.0.1:%d", 8081+i) + + // scatter. + conf1 := backend.MockBackendConfigDefault(fmt.Sprintf("node%d", i), peerAddr) + scatter := backend.NewScatter(log, metadir) + if err := scatter.Add(conf1); err != nil { + log.Panicf("mock.syncer.error:%+v", err) + } + scatter.FlushConfig() + + // router. + router := router.NewRouter(log, metadir, config.DefaultRouterConfig()) + db := fmt.Sprintf("sbtest%d", i) + tbl := fmt.Sprintf("t%d", i) + if err := router.CreateTable(db, tbl, "id", []string{peerAddr}); err != nil { + log.Panicf("mock.syncer.error:%+v", err) + } + + syncer := NewSyncer(log, metadir, peerAddr, router, scatter) + syncer.Init() + syncers = append(syncers, syncer) + peers = append(peers, peerAddr) + httpSvr := mockHTTP(log, syncer, mockVersions, mockMetas) + httpServers = append(httpServers, httpSvr) + } + + // Add peers for each syncer. + for _, syncer := range syncers { + for _, peer := range peers { + if err := syncer.AddPeer(peer); err != nil { + log.Panicf("mock.syncer.error:%+v", err) + } + } + } + + return syncers, func() { + // Check the SHA of the syncers's metadir. 
+ var oldSha1 [20]byte + for i := 0; i < n; i++ { + syncer := syncers[i] + sha1 := mockSHA(log, syncer) + if i != 0 { + if oldSha1 != sha1 { + log.Panic("syncer.mock.check.sha.error:oldsha1[%+v],sha1:[%+v]", oldSha1, sha1) + } + } + oldSha1 = sha1 + syncer.Close() + os.RemoveAll(syncer.metadir + "/") + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + httpServers[i].Shutdown(ctx) + } + } +} + +type mockHandler func(log *xlog.Log, syncer *Syncer) rest.HandlerFunc + +func mockHTTP(log *xlog.Log, syncer *Syncer, version mockHandler, metas mockHandler) *http.Server { + api := rest.NewApi() + api.Use(rest.DefaultDevStack...) + + router, err := rest.MakeRouter( + rest.Get("/v1/meta/versions", version(log, syncer)), + rest.Get("/v1/meta/metas", metas(log, syncer)), + ) + if err != nil { + log.Panicf("mock.rest.make.router.error:%+v", err) + } + api.SetApp(router) + handlers := api.MakeHandler() + h := &http.Server{Addr: syncer.peer.self, Handler: handlers} + go func() { + if err := h.ListenAndServe(); err != nil { + log.Error("mock.rest.error:%+v", err) + return + } + }() + time.Sleep(time.Millisecond * 100) + return h +} + +func mockVersions(log *xlog.Log, syncer *Syncer) rest.HandlerFunc { + f := func(w rest.ResponseWriter, r *rest.Request) { + version := &config.Version{ + Ts: config.ReadVersion(syncer.metadir), + } + log.Debug("syncer.mock.version.handle.call:%+v.", version) + w.WriteJson(version) + } + return f +} + +func mockMetas(log *xlog.Log, syncer *Syncer) rest.HandlerFunc { + f := func(w rest.ResponseWriter, r *rest.Request) { + meta, err := syncer.MetaJSON() + if err != nil { + log.Panicf("mock.metas.meta.json.error:%+v", err) + } + log.Debug("syncer.mock.metas.handle.call:%+v.", meta) + w.WriteJson(meta) + } + return f +} + +func mockSHA(log *xlog.Log, syncer *Syncer) [20]byte { + var datas []byte + if err := filepath.Walk(syncer.metadir, func(path string, info os.FileInfo, err error) error { + if !info.IsDir() { + 
data, err := readFile(log, path) + if err != nil { + log.Panicf("mock.sha.read.error:%+v", err) + } + datas = append(datas, []byte(data)...) + } + return nil + }); err != nil { + log.Panicf("mock.sha.read.error:%+v", err) + } + return sha1.Sum(datas) +} diff --git a/src/syncer/peer.go b/src/syncer/peer.go new file mode 100644 index 00000000..dd2329c0 --- /dev/null +++ b/src/syncer/peer.go @@ -0,0 +1,148 @@ +/* + * Radon + * + * Copyright (c) 2017 QingCloud.com. + * Code is licensed under the GPLv3. + * + */ + +package syncer + +import ( + "encoding/json" + "errors" + "os" + "path" + "sync" + + "github.com/xelabs/go-mysqlstack/xlog" +) + +const ( + // peersJSONFile file name. + peersJSONFile = "peers.json" +) + +// Peer tuple. +type Peer struct { + log *xlog.Log + metadir string + peers []string + + // host:port + self string + mu sync.Mutex +} + +// NewPeer creates a new peer. +func NewPeer(log *xlog.Log, metadir string, self string) *Peer { + return &Peer{ + log: log, + self: self, + metadir: metadir, + } +} + +// LoadConfig used to load peers info from peersJSONFile. +func (p *Peer) LoadConfig() error { + p.mu.Lock() + defer p.mu.Unlock() + + log := p.log + file := path.Join(p.metadir, peersJSONFile) + if _, err := os.Stat(file); os.IsNotExist(err) { + // peers.json does not exists. + p.peers = append(p.peers, p.self) + return nil + } + peers, err := p.readJSON() + if err != nil { + return err + } + p.peers = peers + log.Warning("syncer.peer.load[%+v]", p.peers) + return p.writeJSON(p.peers) +} + +// Clone used to copy peers info. +func (p *Peer) Clone() []string { + p.mu.Lock() + defer p.mu.Unlock() + + var peers []string + peers = append(peers, p.peers...) + return peers +} + +// Add used to add a new peer to the peer list. 
+func (p *Peer) Add(peer string) error { + p.mu.Lock() + defer p.mu.Unlock() + + p.log.Warning("peer.add[%s]", peer) + if peer == "" { + return errors.New("add.peer.can.not.be.empty") + } + + for i := range p.peers { + if p.peers[i] == peer { + return nil + } + } + p.peers = append(p.peers, peer) + return p.writeJSON(p.peers) +} + +// Remove used to remove a peer from the peer list. +func (p *Peer) Remove(peer string) error { + p.mu.Lock() + defer p.mu.Unlock() + + p.log.Warning("peer.remove[%s]", peer) + for i := range p.peers { + if p.peers[i] == peer { + p.peers = append(p.peers[:i], p.peers[i+1:]...) + return p.writeJSON(p.peers) + } + } + return nil +} + +func (p *Peer) readJSON() ([]string, error) { + var peers []string + log := p.log + + file := path.Join(p.metadir, peersJSONFile) + buf, err := readFile(log, file) + if err != nil { + log.Error("syncer.peer.read.json[%s].error:%+v", file, err) + return nil, err + } + + err = json.Unmarshal([]byte(buf), &peers) + if err != nil { + log.Error("syncer.peer.unmarshal.json[%s].error:%+v", file, err) + return nil, err + } + log.Warning("syncer.peer.read.json[%s].peers[%+v]", file, peers) + return peers, nil +} + +func (p *Peer) writeJSON(peers []string) error { + log := p.log + + file := path.Join(p.metadir, peersJSONFile) + log.Warning("syncer.peer.write.json[%s].peers[%+v]", file, peers) + + peersJSON, err := json.Marshal(peers) + if err != nil { + log.Error("syncer.peer.marshal.json[%s].error:%+v", file, err) + return err + } + + if err := writeFile(log, file, string(peersJSON)); err != nil { + log.Error("syncer.peer.write.json[%s].error:%+v", file, err) + return err + } + return nil +} diff --git a/src/syncer/peer_test.go b/src/syncer/peer_test.go new file mode 100644 index 00000000..02f1e9ca --- /dev/null +++ b/src/syncer/peer_test.go @@ -0,0 +1,85 @@ +/* + * Radon + * + * Copyright (c) 2017 QingCloud.com. + * Code is licensed under the GPLv3. 
+ * + */ + +package syncer + +import ( + "os" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/xelabs/go-mysqlstack/xlog" +) + +func TestPeer(t *testing.T) { + defer testRemoveMetadir() + defer os.RemoveAll("/tmp/peers.json") + log := xlog.NewStdLog(xlog.Level(xlog.DEBUG)) + peer := NewPeer(log, "/tmp/", "192.168.0.1") + assert.NotNil(t, peer) + err := peer.LoadConfig() + assert.Nil(t, err) + + // Add peers. + { + peer.Add("192.168.0.2") + peer.Add("192.168.0.3") + peer.Add("192.168.0.4") + + want := []string{ + "192.168.0.1", + "192.168.0.2", + "192.168.0.3", + "192.168.0.4", + } + got := peer.peers + assert.Equal(t, want, got) + } + + // Remove peers. + { + peer.Remove("192.168.0.3") + peer.Remove("192.168.0.4") + + want := []string{ + "192.168.0.1", + "192.168.0.2", + } + got := peer.peers + assert.Equal(t, want, got) + } + + // Load. + { + err := peer.LoadConfig() + assert.Nil(t, err) + want := []string{ + "192.168.0.1", + "192.168.0.2", + } + got := peer.peers + assert.Equal(t, want, got) + } +} + +func TestPeerError(t *testing.T) { + defer testRemoveMetadir() + log := xlog.NewStdLog(xlog.Level(xlog.DEBUG)) + peer := NewPeer(log, "/", "192.168.0.1") + { + err := peer.Add("192.168.0.2") + assert.NotNil(t, err) + } + + // Add empty peer. + { + err := peer.Add("") + assert.NotNil(t, err) + } + +} diff --git a/src/syncer/syncer.go b/src/syncer/syncer.go new file mode 100644 index 00000000..2b4353cb --- /dev/null +++ b/src/syncer/syncer.go @@ -0,0 +1,161 @@ +/* + * Radon + * + * Copyright (c) 2017 QingCloud.com. + * Code is licensed under the GPLv3. + * + */ + +package syncer + +import ( + "backend" + "config" + "encoding/json" + "os" + "path" + "router" + "sync" + "time" + "xbase" + + "github.com/xelabs/go-mysqlstack/xlog" +) + +// Syncer tuple. 
+type Syncer struct { + mu sync.RWMutex + wg sync.WaitGroup + log *xlog.Log + done chan bool + peer *Peer + metadir string + ticker *time.Ticker + router *router.Router + scatter *backend.Scatter +} + +// NewSyncer creates the new syncer. +func NewSyncer(log *xlog.Log, metadir string, peerAddr string, router *router.Router, scatter *backend.Scatter) *Syncer { + return &Syncer{ + log: log, + metadir: metadir, + router: router, + scatter: scatter, + done: make(chan bool), + peer: NewPeer(log, metadir, peerAddr), + ticker: time.NewTicker(time.Duration(time.Millisecond * 500)), // 0.5s + } +} + +// Init used to load the peers from the file and start the check thread. +func (s *Syncer) Init() error { + log := s.log + + log.Info("syncer.init.metadir:%v", s.metadir) + if err := os.MkdirAll(s.metadir, os.ModePerm); err != nil { + return err + } + + // Peers. + if err := s.peer.LoadConfig(); err != nil { + return err + } + log.Info("syncer.init.peers:%v", s.peer.peers) + + s.wg.Add(1) + go func() { + defer s.wg.Done() + defer s.ticker.Stop() + + for { + select { + case <-s.ticker.C: + s.check() + case <-s.done: + return + } + } + }() + log.Info("syncer.init.done") + return nil +} + +// Close used to close the syncer. +func (s *Syncer) Close() { + close(s.done) + s.wg.Wait() +} + +// AddPeer used to add new peer to syncer. +func (s *Syncer) AddPeer(peer string) error { + return s.peer.Add(peer) +} + +// RemovePeer used to remove peer from syncer. +func (s *Syncer) RemovePeer(peer string) error { + return s.peer.Remove(peer) +} + +// Peers returns all the peers. +func (s *Syncer) Peers() []string { + return s.peer.Clone() +} + +// RLock used to acquire the lock of syncer. +func (s *Syncer) RLock() { + s.mu.RLock() +} + +// RUnlock used to release the lock of syncer. 
+func (s *Syncer) RUnlock() { + s.mu.RUnlock() +} + +func (s *Syncer) check() { + log := s.log + maxVer := int64(0) + maxPeer := "" + self := s.peer.self + peers := s.peer.Clone() + for _, peer := range peers { + if peer != self { + versionURL := "http://" + path.Join(peer, versionRestURL) + peerVerStr, err := xbase.HTTPGet(versionURL) + if err != nil { + log.Error("syncer.check.version.get[%s].error:%+v", peerVerStr, err) + continue + } + + version := &config.Version{} + if err := json.Unmarshal([]byte(peerVerStr), version); err != nil { + log.Error("syncer.version.unmarshal[%s].error:%+v", peerVerStr, err) + return + } + peerVer := version.Ts + if peerVer > maxVer { + maxVer = peerVer + maxPeer = peer + } + } + } + + selfVer := config.ReadVersion(s.metadir) + if maxVer > selfVer { + log.Warning("syncer.version[%v,%s].larger.than.self[%v, %s]", maxVer, maxPeer, selfVer, self) + metaURL := "http://" + path.Join(maxPeer, metaRestURL) + metaStr, err := xbase.HTTPGet(metaURL) + if err != nil { + log.Error("syncer.check.meta.get[%s].error:%+v", metaStr, err) + return + } + + meta := &Meta{} + if err := json.Unmarshal([]byte(metaStr), meta); err != nil { + log.Error("syncer.check.meta.unmarshal[%s].error:%+v", metaStr, err) + return + } + s.MetaRebuild(meta) + s.MetaReload() + } +} diff --git a/src/syncer/syncer_test.go b/src/syncer/syncer_test.go new file mode 100644 index 00000000..c4ef2644 --- /dev/null +++ b/src/syncer/syncer_test.go @@ -0,0 +1,77 @@ +/* + * Radon + * + * Copyright (c) 2017 QingCloud.com. + * Code is licensed under the GPLv3. 
+ * + */ + +package syncer + +import ( + "testing" + "time" + + "github.com/fortytw2/leaktest" + "github.com/stretchr/testify/assert" + "github.com/xelabs/go-mysqlstack/xlog" +) + +func TestSyncer(t *testing.T) { + defer leaktest.Check(t)() + defer testRemoveMetadir() + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + syncers, cleanup := mockSyncer(log, 3) + assert.NotNil(t, syncers) + time.Sleep(time.Second * 2) + defer cleanup() +} + +func TestSyncerLock(t *testing.T) { + defer leaktest.Check(t)() + defer testRemoveMetadir() + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + syncers, cleanup := mockSyncer(log, 1) + assert.NotNil(t, syncers) + defer cleanup() + + syncers[0].RLock() + time.Sleep(10000) + syncers[0].RUnlock() +} + +func TestSyncerAddRemovePeers(t *testing.T) { + defer leaktest.Check(t)() + defer testRemoveMetadir() + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + syncers, cleanup := mockSyncer(log, 1) + assert.NotNil(t, syncers) + defer cleanup() + + syncer := syncers[0] + + // Add. + { + syncer.AddPeer("127.0.0.1:9901") + syncer.AddPeer("127.0.0.1:9902") + + want := []string{"127.0.0.1:8081", "127.0.0.1:9901", "127.0.0.1:9902"} + got := syncer.peer.peers + assert.Equal(t, want, got) + } + + // Remove. 
+ { + syncer.RemovePeer("127.0.0.1:9901") + + want := []string{"127.0.0.1:8081", "127.0.0.1:9902"} + got := syncer.peer.peers + assert.Equal(t, want, got) + } + + { + want := []string{"127.0.0.1:8081", "127.0.0.1:9902"} + got := syncer.Peers() + assert.Equal(t, want, got) + } +} diff --git a/src/vendor/github.com/ant0ine/go-json-rest/.travis.yml b/src/vendor/github.com/ant0ine/go-json-rest/.travis.yml new file mode 100644 index 00000000..f6ca1c97 --- /dev/null +++ b/src/vendor/github.com/ant0ine/go-json-rest/.travis.yml @@ -0,0 +1,9 @@ +sudo: false +language: go +go: + - 1.3 + - 1.4 + - 1.5 + - 1.6 + - 1.7 + - 1.8 diff --git a/src/vendor/github.com/ant0ine/go-json-rest/LICENSE b/src/vendor/github.com/ant0ine/go-json-rest/LICENSE new file mode 100644 index 00000000..7800c4b8 --- /dev/null +++ b/src/vendor/github.com/ant0ine/go-json-rest/LICENSE @@ -0,0 +1,9 @@ +Copyright (c) 2013-2016 Antoine Imbert + +The MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/src/vendor/github.com/ant0ine/go-json-rest/README.md b/src/vendor/github.com/ant0ine/go-json-rest/README.md new file mode 100644 index 00000000..b57663d0 --- /dev/null +++ b/src/vendor/github.com/ant0ine/go-json-rest/README.md @@ -0,0 +1,1806 @@ + +# Go-Json-Rest + +*A quick and easy way to setup a RESTful JSON API* + +[![godoc](https://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/ant0ine/go-json-rest/rest) [![license](https://img.shields.io/badge/license-MIT-red.svg?style=flat)](https://raw.githubusercontent.com/ant0ine/go-json-rest/master/LICENSE) [![build](https://img.shields.io/travis/ant0ine/go-json-rest.svg?style=flat)](https://travis-ci.org/ant0ine/go-json-rest) + + +**Go-Json-Rest** is a thin layer on top of `net/http` that helps building RESTful JSON APIs easily. It provides fast and scalable request routing using a Trie based implementation, helpers to deal with JSON requests and responses, and middlewares for functionalities like CORS, Auth, Gzip, Status ... 
+ + +## Table of content + +- [Features](#features) +- [Install](#install) +- [Vendoring](#vendoring) +- [Middlewares](#middlewares) +- [Examples](#examples) + - [Basics](#basics) + - [Hello World!](#hello-world) + - [Lookup](#lookup) + - [Countries](#countries) + - [Users](#users) + - [Applications](#applications) + - [API and static files](#api-and-static-files) + - [GORM](#gorm) + - [CORS](#cors) + - [JSONP](#jsonp) + - [Basic Auth](#basic-auth) + - [Force HTTPS](#forcessl) + - [Status](#status) + - [Status Auth](#status-auth) + - [Advanced](#advanced) + - [JWT](#jwt) + - [Streaming](#streaming) + - [Non JSON payload](#non-json-payload) + - [API Versioning](#api-versioning) + - [Statsd](#statsd) + - [NewRelic](#newrelic) + - [Graceful Shutdown](#graceful-shutdown) + - [SPDY](#spdy) + - [Google App Engine](#gae) + - [Websocket](#websocket) +- [External Documentation](#external-documentation) +- [Version 3 release notes](#version-3-release-notes) +- [Migration guide from v2 to v3](#migration-guide-from-v2-to-v3) +- [Version 2 release notes](#version-2-release-notes) +- [Migration guide from v1 to v2](#migration-guide-from-v1-to-v2) +- [Thanks](#thanks) + + +## Features + +- Many examples. +- Fast and scalable URL routing. It implements the classic route description syntax using a Trie data structure. +- Architecture based on a router(App) sitting on top of a stack of Middlewares. +- The Middlewares implement functionalities like Logging, Gzip, CORS, Auth, Status, ... +- Implemented as a `net/http` Handler. This standard interface allows combinations with other Handlers. +- Test package to help writing tests for your API. +- Monitoring statistics inspired by Memcached. + + +## Install + +This package is "go-gettable", just do: + + go get github.com/ant0ine/go-json-rest/rest + + +## Vendoring + +The recommended way of using this library in your project is to use the **"vendoring"** method, +where this library code is copied in your repository at a specific revision. 
+[This page](https://nathany.com/go-packages/) is a good summary of package management in Go. + + +## Middlewares + +Core Middlewares: + +| Name | Description | +|------|-------------| +| **AccessLogApache** | Access log inspired by Apache mod_log_config | +| **AccessLogJson** | Access log with records as JSON | +| **AuthBasic** | Basic HTTP auth | +| **ContentTypeChecker** | Verify the request content type | +| **Cors** | CORS server side implementation | +| **Gzip** | Compress the responses | +| **If** | Conditionally execute a Middleware at runtime | +| **JsonIndent** | Easy to read JSON | +| **Jsonp** | Response as JSONP | +| **PoweredBy** | Manage the X-Powered-By response header | +| **Recorder** | Record the status code and content length in the Env | +| **Status** | Memecached inspired stats about the requests | +| **Timer** | Keep track of the elapsed time in the Env | + +Third Party Middlewares: + +| Name | Description | +|------|-------------| +| **[Statsd](https://github.com/ant0ine/go-json-rest-middleware-statsd)** | Send stats to a statsd server | +| **[JWT](https://github.com/StephanDollberg/go-json-rest-middleware-jwt)** | Provides authentication via Json Web Tokens | +| **[AuthToken](https://github.com/grayj/go-json-rest-middleware-tokenauth)** | Provides a Token Auth implementation | +| **[ForceSSL](https://github.com/jadengore/go-json-rest-middleware-force-ssl)** | Forces SSL on requests | +| **[SecureRedirect](https://github.com/clyphub/go-json-rest-middleware)** | Redirect clients from HTTP to HTTPS | + +*If you have a Go-Json-Rest compatible middleware, feel free to submit a PR to add it in this list, and in the examples.* + + +## Examples + +All the following examples can be found in dedicated examples repository: https://github.com/ant0ine/go-json-rest-examples + +### Basics + +First examples to try, as an introduction to go-json-rest. + +#### Hello World! + +Tradition! 
+ +curl demo: +``` sh +curl -i http://127.0.0.1:8080/ +``` + + +code: +``` go +package main + +import ( + "github.com/ant0ine/go-json-rest/rest" + "log" + "net/http" +) + +func main() { + api := rest.NewApi() + api.Use(rest.DefaultDevStack...) + api.SetApp(rest.AppSimple(func(w rest.ResponseWriter, r *rest.Request) { + w.WriteJson(map[string]string{"Body": "Hello World!"}) + })) + log.Fatal(http.ListenAndServe(":8080", api.MakeHandler())) +} + +``` + +#### Lookup + +Demonstrate how to use the relaxed placeholder (notation `#paramName`). +This placeholder matches everything until the first `/`, including `.` + +curl demo: +``` +curl -i http://127.0.0.1:8080/lookup/google.com +curl -i http://127.0.0.1:8080/lookup/notadomain +``` + +code: +``` go +package main + +import ( + "github.com/ant0ine/go-json-rest/rest" + "log" + "net" + "net/http" +) + +func main() { + api := rest.NewApi() + api.Use(rest.DefaultDevStack...) + router, err := rest.MakeRouter( + rest.Get("/lookup/#host", func(w rest.ResponseWriter, req *rest.Request) { + ip, err := net.LookupIP(req.PathParam("host")) + if err != nil { + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } + w.WriteJson(&ip) + }), + ) + if err != nil { + log.Fatal(err) + } + api.SetApp(router) + log.Fatal(http.ListenAndServe(":8080", api.MakeHandler())) +} + +``` + +#### Countries + +Demonstrate simple POST GET and DELETE operations + +curl demo: +``` +curl -i -H 'Content-Type: application/json' \ + -d '{"Code":"FR","Name":"France"}' http://127.0.0.1:8080/countries +curl -i -H 'Content-Type: application/json' \ + -d '{"Code":"US","Name":"United States"}' http://127.0.0.1:8080/countries +curl -i http://127.0.0.1:8080/countries/FR +curl -i http://127.0.0.1:8080/countries/US +curl -i http://127.0.0.1:8080/countries +curl -i -X DELETE http://127.0.0.1:8080/countries/FR +curl -i http://127.0.0.1:8080/countries +curl -i -X DELETE http://127.0.0.1:8080/countries/US +curl -i http://127.0.0.1:8080/countries +``` + 
+code: +``` go +package main + +import ( + "github.com/ant0ine/go-json-rest/rest" + "log" + "net/http" + "sync" +) + +func main() { + api := rest.NewApi() + api.Use(rest.DefaultDevStack...) + router, err := rest.MakeRouter( + rest.Get("/countries", GetAllCountries), + rest.Post("/countries", PostCountry), + rest.Get("/countries/:code", GetCountry), + rest.Delete("/countries/:code", DeleteCountry), + ) + if err != nil { + log.Fatal(err) + } + api.SetApp(router) + log.Fatal(http.ListenAndServe(":8080", api.MakeHandler())) +} + +type Country struct { + Code string + Name string +} + +var store = map[string]*Country{} + +var lock = sync.RWMutex{} + +func GetCountry(w rest.ResponseWriter, r *rest.Request) { + code := r.PathParam("code") + + lock.RLock() + var country *Country + if store[code] != nil { + country = &Country{} + *country = *store[code] + } + lock.RUnlock() + + if country == nil { + rest.NotFound(w, r) + return + } + w.WriteJson(country) +} + +func GetAllCountries(w rest.ResponseWriter, r *rest.Request) { + lock.RLock() + countries := make([]Country, len(store)) + i := 0 + for _, country := range store { + countries[i] = *country + i++ + } + lock.RUnlock() + w.WriteJson(&countries) +} + +func PostCountry(w rest.ResponseWriter, r *rest.Request) { + country := Country{} + err := r.DecodeJsonPayload(&country) + if err != nil { + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } + if country.Code == "" { + rest.Error(w, "country code required", 400) + return + } + if country.Name == "" { + rest.Error(w, "country name required", 400) + return + } + lock.Lock() + store[country.Code] = &country + lock.Unlock() + w.WriteJson(&country) +} + +func DeleteCountry(w rest.ResponseWriter, r *rest.Request) { + code := r.PathParam("code") + lock.Lock() + delete(store, code) + lock.Unlock() + w.WriteHeader(http.StatusOK) +} + +``` + +#### Users + +Demonstrate how to use Method Values. 
+ +Method Values have been [introduced in Go 1.1](https://golang.org/doc/go1.1#method_values). + +This shows how to map a Route to a method of an instantiated object (i.e: receiver of the method) + +curl demo: +``` +curl -i -H 'Content-Type: application/json' \ + -d '{"Name":"Antoine"}' http://127.0.0.1:8080/users +curl -i http://127.0.0.1:8080/users/0 +curl -i -X PUT -H 'Content-Type: application/json' \ + -d '{"Name":"Antoine Imbert"}' http://127.0.0.1:8080/users/0 +curl -i -X DELETE http://127.0.0.1:8080/users/0 +curl -i http://127.0.0.1:8080/users +``` + +code: +``` go +package main + +import ( + "fmt" + "github.com/ant0ine/go-json-rest/rest" + "log" + "net/http" + "sync" +) + +func main() { + + users := Users{ + Store: map[string]*User{}, + } + + api := rest.NewApi() + api.Use(rest.DefaultDevStack...) + router, err := rest.MakeRouter( + rest.Get("/users", users.GetAllUsers), + rest.Post("/users", users.PostUser), + rest.Get("/users/:id", users.GetUser), + rest.Put("/users/:id", users.PutUser), + rest.Delete("/users/:id", users.DeleteUser), + ) + if err != nil { + log.Fatal(err) + } + api.SetApp(router) + log.Fatal(http.ListenAndServe(":8080", api.MakeHandler())) +} + +type User struct { + Id string + Name string +} + +type Users struct { + sync.RWMutex + Store map[string]*User +} + +func (u *Users) GetAllUsers(w rest.ResponseWriter, r *rest.Request) { + u.RLock() + users := make([]User, len(u.Store)) + i := 0 + for _, user := range u.Store { + users[i] = *user + i++ + } + u.RUnlock() + w.WriteJson(&users) +} + +func (u *Users) GetUser(w rest.ResponseWriter, r *rest.Request) { + id := r.PathParam("id") + u.RLock() + var user *User + if u.Store[id] != nil { + user = &User{} + *user = *u.Store[id] + } + u.RUnlock() + if user == nil { + rest.NotFound(w, r) + return + } + w.WriteJson(user) +} + +func (u *Users) PostUser(w rest.ResponseWriter, r *rest.Request) { + user := User{} + err := r.DecodeJsonPayload(&user) + if err != nil { + rest.Error(w, err.Error(), 
http.StatusInternalServerError) + return + } + u.Lock() + id := fmt.Sprintf("%d", len(u.Store)) // stupid + user.Id = id + u.Store[id] = &user + u.Unlock() + w.WriteJson(&user) +} + +func (u *Users) PutUser(w rest.ResponseWriter, r *rest.Request) { + id := r.PathParam("id") + u.Lock() + if u.Store[id] == nil { + rest.NotFound(w, r) + u.Unlock() + return + } + user := User{} + err := r.DecodeJsonPayload(&user) + if err != nil { + rest.Error(w, err.Error(), http.StatusInternalServerError) + u.Unlock() + return + } + user.Id = id + u.Store[id] = &user + u.Unlock() + w.WriteJson(&user) +} + +func (u *Users) DeleteUser(w rest.ResponseWriter, r *rest.Request) { + id := r.PathParam("id") + u.Lock() + delete(u.Store, id) + u.Unlock() + w.WriteHeader(http.StatusOK) +} + +``` + + +### Applications + +Common use cases, found in many applications. + +#### API and static files + +Combine Go-Json-Rest with other handlers. + +`api.MakeHandler()` is a valid `http.Handler`, and can be combined with other handlers. +In this example the api handler is used under the `/api/` prefix, while a FileServer is instantiated under the `/static/` prefix. + +curl demo: +``` +curl -i http://127.0.0.1:8080/api/message +curl -i http://127.0.0.1:8080/static/main.go +``` + +code: +``` go +package main + +import ( + "github.com/ant0ine/go-json-rest/rest" + "log" + "net/http" +) + +func main() { + api := rest.NewApi() + api.Use(rest.DefaultDevStack...) 
+ + router, err := rest.MakeRouter( + rest.Get("/message", func(w rest.ResponseWriter, req *rest.Request) { + w.WriteJson(map[string]string{"Body": "Hello World!"}) + }), + ) + if err != nil { + log.Fatal(err) + } + api.SetApp(router) + + http.Handle("/api/", http.StripPrefix("/api", api.MakeHandler())) + + http.Handle("/static/", http.StripPrefix("/static", http.FileServer(http.Dir(".")))) + + log.Fatal(http.ListenAndServe(":8080", nil)) +} + +``` + +#### GORM + +Demonstrate basic CRUD operation using a store based on MySQL and GORM + +[GORM](https://github.com/jinzhu/gorm) is simple ORM library for Go. +In this example the same struct is used both as the GORM model and as the JSON model. + +curl demo: +``` +curl -i -H 'Content-Type: application/json' \ + -d '{"Message":"this is a test"}' http://127.0.0.1:8080/reminders +curl -i http://127.0.0.1:8080/reminders/1 +curl -i http://127.0.0.1:8080/reminders +curl -i -X PUT -H 'Content-Type: application/json' \ + -d '{"Message":"is updated"}' http://127.0.0.1:8080/reminders/1 +curl -i -X DELETE http://127.0.0.1:8080/reminders/1 +``` + +code: +``` go +package main + +import ( + "github.com/ant0ine/go-json-rest/rest" + _ "github.com/go-sql-driver/mysql" + "github.com/jinzhu/gorm" + "log" + "net/http" + "time" +) + +func main() { + + i := Impl{} + i.InitDB() + i.InitSchema() + + api := rest.NewApi() + api.Use(rest.DefaultDevStack...) 
+ router, err := rest.MakeRouter( + rest.Get("/reminders", i.GetAllReminders), + rest.Post("/reminders", i.PostReminder), + rest.Get("/reminders/:id", i.GetReminder), + rest.Put("/reminders/:id", i.PutReminder), + rest.Delete("/reminders/:id", i.DeleteReminder), + ) + if err != nil { + log.Fatal(err) + } + api.SetApp(router) + log.Fatal(http.ListenAndServe(":8080", api.MakeHandler())) +} + +type Reminder struct { + Id int64 `json:"id"` + Message string `sql:"size:1024" json:"message"` + CreatedAt time.Time `json:"createdAt"` + UpdatedAt time.Time `json:"updatedAt"` + DeletedAt time.Time `json:"-"` +} + +type Impl struct { + DB *gorm.DB +} + +func (i *Impl) InitDB() { + var err error + i.DB, err = gorm.Open("mysql", "gorm:gorm@/gorm?charset=utf8&parseTime=True") + if err != nil { + log.Fatalf("Got error when connect database, the error is '%v'", err) + } + i.DB.LogMode(true) +} + +func (i *Impl) InitSchema() { + i.DB.AutoMigrate(&Reminder{}) +} + +func (i *Impl) GetAllReminders(w rest.ResponseWriter, r *rest.Request) { + reminders := []Reminder{} + i.DB.Find(&reminders) + w.WriteJson(&reminders) +} + +func (i *Impl) GetReminder(w rest.ResponseWriter, r *rest.Request) { + id := r.PathParam("id") + reminder := Reminder{} + if i.DB.First(&reminder, id).Error != nil { + rest.NotFound(w, r) + return + } + w.WriteJson(&reminder) +} + +func (i *Impl) PostReminder(w rest.ResponseWriter, r *rest.Request) { + reminder := Reminder{} + if err := r.DecodeJsonPayload(&reminder); err != nil { + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } + if err := i.DB.Save(&reminder).Error; err != nil { + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } + w.WriteJson(&reminder) +} + +func (i *Impl) PutReminder(w rest.ResponseWriter, r *rest.Request) { + + id := r.PathParam("id") + reminder := Reminder{} + if i.DB.First(&reminder, id).Error != nil { + rest.NotFound(w, r) + return + } + + updated := Reminder{} + if err := 
r.DecodeJsonPayload(&updated); err != nil { + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + reminder.Message = updated.Message + + if err := i.DB.Save(&reminder).Error; err != nil { + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } + w.WriteJson(&reminder) +} + +func (i *Impl) DeleteReminder(w rest.ResponseWriter, r *rest.Request) { + id := r.PathParam("id") + reminder := Reminder{} + if i.DB.First(&reminder, id).Error != nil { + rest.NotFound(w, r) + return + } + if err := i.DB.Delete(&reminder).Error; err != nil { + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } + w.WriteHeader(http.StatusOK) +} + +``` + +#### CORS + +Demonstrate how to setup CorsMiddleware around all the API endpoints. + +curl demo: +``` +curl -i http://127.0.0.1:8080/countries +``` + +code: +``` go +package main + +import ( + "github.com/ant0ine/go-json-rest/rest" + "log" + "net/http" +) + +func main() { + api := rest.NewApi() + api.Use(rest.DefaultDevStack...) + api.Use(&rest.CorsMiddleware{ + RejectNonCorsRequests: false, + OriginValidator: func(origin string, request *rest.Request) bool { + return origin == "http://my.other.host" + }, + AllowedMethods: []string{"GET", "POST", "PUT"}, + AllowedHeaders: []string{ + "Accept", "Content-Type", "X-Custom-Header", "Origin"}, + AccessControlAllowCredentials: true, + AccessControlMaxAge: 3600, + }) + router, err := rest.MakeRouter( + rest.Get("/countries", GetAllCountries), + ) + if err != nil { + log.Fatal(err) + } + api.SetApp(router) + log.Fatal(http.ListenAndServe(":8080", api.MakeHandler())) +} + +type Country struct { + Code string + Name string +} + +func GetAllCountries(w rest.ResponseWriter, r *rest.Request) { + w.WriteJson( + []Country{ + Country{ + Code: "FR", + Name: "France", + }, + Country{ + Code: "US", + Name: "United States", + }, + }, + ) +} + +``` + +#### JSONP + +Demonstrate how to use the JSONP middleware. 
+ +curl demo: +``` sh +curl -i http://127.0.0.1:8080/ +curl -i http://127.0.0.1:8080/?cb=parseResponse +``` + +code: +``` go +package main + +import ( + "github.com/ant0ine/go-json-rest/rest" + "log" + "net/http" +) + +func main() { + api := rest.NewApi() + api.Use(rest.DefaultDevStack...) + api.Use(&rest.JsonpMiddleware{ + CallbackNameKey: "cb", + }) + api.SetApp(rest.AppSimple(func(w rest.ResponseWriter, r *rest.Request) { + w.WriteJson(map[string]string{"Body": "Hello World!"}) + })) + log.Fatal(http.ListenAndServe(":8080", api.MakeHandler())) +} + +``` + +#### Basic Auth + +Demonstrate how to setup AuthBasicMiddleware as a pre-routing middleware. + +curl demo: +``` +curl -i http://127.0.0.1:8080/ +curl -i -u admin:admin http://127.0.0.1:8080/ +``` + +code: +``` go +package main + +import ( + "github.com/ant0ine/go-json-rest/rest" + "log" + "net/http" +) + +func main() { + api := rest.NewApi() + api.Use(rest.DefaultDevStack...) + api.Use(&rest.AuthBasicMiddleware{ + Realm: "test zone", + Authenticator: func(userId string, password string) bool { + if userId == "admin" && password == "admin" { + return true + } + return false + }, + }) + api.SetApp(rest.AppSimple(func(w rest.ResponseWriter, r *rest.Request) { + w.WriteJson(map[string]string{"Body": "Hello World!"}) + })) + log.Fatal(http.ListenAndServe(":8080", api.MakeHandler())) +} + +``` + +#### ForceSSL + +Demonstrate how to use the [ForceSSL Middleware](https://github.com/jadengore/go-json-rest-middleware-force-ssl) to force HTTPS on requests to a `go-json-rest` API. + +For the purposes of this demo, we are using HTTP for all requests and checking the `X-Forwarded-Proto` header to see if it is set to HTTPS (many routers set this to show what type of connection the client is using, such as Heroku). To do a true HTTPS test, make sure and use [`http.ListenAndServeTLS`](https://golang.org/pkg/net/http/#ListenAndServeTLS) with a valid certificate and key file. 
+ +Additional documentation for the ForceSSL middleware can be found [here](https://github.com/jadengore/go-json-rest-middleware-force-ssl). + +curl demo: +``` sh +curl -i 127.0.0.1:8080/ +curl -H "X-Forwarded-Proto:https" -i 127.0.0.1:8080/ +``` + +code: +``` go +package main + +import ( + "github.com/ant0ine/go-json-rest/rest" + "github.com/jadengore/go-json-rest-middleware-force-ssl" + "log" + "net/http" +) + +func main() { + api := rest.NewApi() + api.Use(&forceSSL.Middleware{ + TrustXFPHeader: true, + Enable301Redirects: false, + }) + api.SetApp(rest.AppSimple(func(w rest.ResponseWriter, r *rest.Request) { + w.WriteJson(map[string]string{"body": "Hello World!"}) + })) + + // For the purposes of this demo, only HTTP connections accepted. + // For true HTTPS, use ListenAndServeTLS. + // https://golang.org/pkg/net/http/#ListenAndServeTLS + log.Fatal(http.ListenAndServe(":8080", api.MakeHandler())) +} + +``` + +#### Status + +Demonstrate how to setup a `/.status` endpoint + +Inspired by memcached "stats", this optional feature can be enabled to help monitoring the service. +This example shows how to enable the stats, and how to setup the `/.status` route. + +curl demo: +``` +curl -i http://127.0.0.1:8080/.status +curl -i http://127.0.0.1:8080/.status +... +``` + +Output example: +``` +{ + "Pid": 21732, + "UpTime": "1m15.926272s", + "UpTimeSec": 75.926272, + "Time": "2013-03-04 08:00:27.152986 +0000 UTC", + "TimeUnix": 1362384027, + "StatusCodeCount": { + "200": 53, + "404": 11 + }, + "TotalCount": 64, + "TotalResponseTime": "16.777ms", + "TotalResponseTimeSec": 0.016777, + "AverageResponseTime": "262.14us", + "AverageResponseTimeSec": 0.00026214 +} +``` + +code: +``` go +package main + +import ( + "github.com/ant0ine/go-json-rest/rest" + "log" + "net/http" +) + +func main() { + api := rest.NewApi() + statusMw := &rest.StatusMiddleware{} + api.Use(statusMw) + api.Use(rest.DefaultDevStack...) 
+ router, err := rest.MakeRouter( + rest.Get("/.status", func(w rest.ResponseWriter, r *rest.Request) { + w.WriteJson(statusMw.GetStatus()) + }), + ) + if err != nil { + log.Fatal(err) + } + api.SetApp(router) + log.Fatal(http.ListenAndServe(":8080", api.MakeHandler())) +} + +``` + +#### Status Auth + +Demonstrate how to setup a /.status endpoint protected with basic authentication. + +This is a good use case of middleware applied to only one API endpoint. + +curl demo: +``` +curl -i http://127.0.0.1:8080/countries +curl -i http://127.0.0.1:8080/.status +curl -i -u admin:admin http://127.0.0.1:8080/.status +... +``` + +code: +``` go +package main + +import ( + "github.com/ant0ine/go-json-rest/rest" + "log" + "net/http" +) + +func main() { + api := rest.NewApi() + statusMw := &rest.StatusMiddleware{} + api.Use(statusMw) + api.Use(rest.DefaultDevStack...) + auth := &rest.AuthBasicMiddleware{ + Realm: "test zone", + Authenticator: func(userId string, password string) bool { + if userId == "admin" && password == "admin" { + return true + } + return false + }, + } + router, err := rest.MakeRouter( + rest.Get("/countries", GetAllCountries), + rest.Get("/.status", auth.MiddlewareFunc( + func(w rest.ResponseWriter, r *rest.Request) { + w.WriteJson(statusMw.GetStatus()) + }, + )), + ) + if err != nil { + log.Fatal(err) + } + api.SetApp(router) + log.Fatal(http.ListenAndServe(":8080", api.MakeHandler())) +} + +type Country struct { + Code string + Name string +} + +func GetAllCountries(w rest.ResponseWriter, r *rest.Request) { + w.WriteJson( + []Country{ + Country{ + Code: "FR", + Name: "France", + }, + Country{ + Code: "US", + Name: "United States", + }, + }, + ) +} + +``` + + +### Advanced + +More advanced use cases. + +#### JWT + +Demonstrates how to use the [Json Web Token Auth Middleware](https://github.com/StephanDollberg/go-json-rest-middleware-jwt) to authenticate via a JWT token. 
+ +curl demo: +``` sh +curl -d '{"username": "admin", "password": "admin"}' -H "Content-Type:application/json" http://localhost:8080/api/login +curl -H "Authorization:Bearer TOKEN_RETURNED_FROM_ABOVE" http://localhost:8080/api/auth_test +curl -H "Authorization:Bearer TOKEN_RETURNED_FROM_ABOVE" http://localhost:8080/api/refresh_token +``` + +code: +``` go +package main + +import ( + "log" + "net/http" + "time" + + "github.com/StephanDollberg/go-json-rest-middleware-jwt" + "github.com/ant0ine/go-json-rest/rest" +) + +func handle_auth(w rest.ResponseWriter, r *rest.Request) { + w.WriteJson(map[string]string{"authed": r.Env["REMOTE_USER"].(string)}) +} + +func main() { + jwt_middleware := &jwt.JWTMiddleware{ + Key: []byte("secret key"), + Realm: "jwt auth", + Timeout: time.Hour, + MaxRefresh: time.Hour * 24, + Authenticator: func(userId string, password string) bool { + return userId == "admin" && password == "admin" + }} + + api := rest.NewApi() + api.Use(rest.DefaultDevStack...) + // we use the IfMiddleware to remove certain paths from needing authentication + api.Use(&rest.IfMiddleware{ + Condition: func(request *rest.Request) bool { + return request.URL.Path != "/login" + }, + IfTrue: jwt_middleware, + }) + api_router, _ := rest.MakeRouter( + rest.Post("/login", jwt_middleware.LoginHandler), + rest.Get("/auth_test", handle_auth), + rest.Get("/refresh_token", jwt_middleware.RefreshHandler), + ) + api.SetApp(api_router) + + http.Handle("/api/", http.StripPrefix("/api", api.MakeHandler())) + + log.Fatal(http.ListenAndServe(":8080", nil)) +} + +``` + +#### Streaming + +Demonstrate a streaming REST API, where the data is "flushed" to the client ASAP. + +The stream format is a Line Delimited JSON. 
+ +curl demo: +``` +curl -i http://127.0.0.1:8080/stream +``` + +Output: +``` +HTTP/1.1 200 OK +Content-Type: application/json +Date: Sun, 16 Feb 2014 00:39:19 GMT +Transfer-Encoding: chunked + +{"Name":"thing #1"} +{"Name":"thing #2"} +{"Name":"thing #3"} +``` + +code: +``` go +package main + +import ( + "fmt" + "github.com/ant0ine/go-json-rest/rest" + "log" + "net/http" + "time" +) + +func main() { + api := rest.NewApi() + api.Use(&rest.AccessLogApacheMiddleware{}) + api.Use(rest.DefaultCommonStack...) + router, err := rest.MakeRouter( + rest.Get("/stream", StreamThings), + ) + if err != nil { + log.Fatal(err) + } + api.SetApp(router) + log.Fatal(http.ListenAndServe(":8080", api.MakeHandler())) +} + +type Thing struct { + Name string +} + +func StreamThings(w rest.ResponseWriter, r *rest.Request) { + cpt := 0 + for { + cpt++ + w.WriteJson( + &Thing{ + Name: fmt.Sprintf("thing #%d", cpt), + }, + ) + w.(http.ResponseWriter).Write([]byte("\n")) + // Flush the buffer to client + w.(http.Flusher).Flush() + // wait 3 seconds + time.Sleep(time.Duration(3) * time.Second) + } +} + +``` + +#### Non JSON payload + +Exceptional use of non JSON payloads. + +The ResponseWriter implementation provided by go-json-rest is designed +to build JSON responses. In order to serve different kind of content, +it is recommended to either: +a) use another server and configure CORS + (see the cors/ example) +b) combine the api.MakeHandler() with another http.Handler + (see api-and-static/ example) + +That been said, exceptionally, it can be convenient to return a +different content type on a JSON endpoint. In this case, setting the +Content-Type and using the type assertion to access the Write method +is enough. As shown in this example. + +curl demo: +``` +curl -i http://127.0.0.1:8080/message.txt +``` + +code: +``` go +package main + +import ( + "github.com/ant0ine/go-json-rest/rest" + "log" + "net/http" +) + +func main() { + api := rest.NewApi() + api.Use(rest.DefaultDevStack...) 
+ router, err := rest.MakeRouter( + rest.Get("/message.txt", func(w rest.ResponseWriter, req *rest.Request) { + w.Header().Set("Content-Type", "text/plain") + w.(http.ResponseWriter).Write([]byte("Hello World!")) + }), + ) + if err != nil { + log.Fatal(err) + } + api.SetApp(router) + log.Fatal(http.ListenAndServe(":8080", api.MakeHandler())) +} + +``` + +#### API Versioning + +First, API versioning is not easy and you may want to favor a mechanism that uses only backward compatible changes and deprecation cycles. + +That been said, here is an example of API versioning using [Semver](http://semver.org/) + +It defines a middleware that parses the version, checks a min and a max, and makes it available in the `request.Env`. + +curl demo: +``` sh +curl -i http://127.0.0.1:8080/api/1.0.0/message +curl -i http://127.0.0.1:8080/api/2.0.0/message +curl -i http://127.0.0.1:8080/api/2.0.1/message +curl -i http://127.0.0.1:8080/api/0.0.1/message +curl -i http://127.0.0.1:8080/api/4.0.1/message + +``` + +code: +``` go +package main + +import ( + "github.com/ant0ine/go-json-rest/rest" + "github.com/coreos/go-semver/semver" + "log" + "net/http" +) + +type SemVerMiddleware struct { + MinVersion string + MaxVersion string +} + +func (mw *SemVerMiddleware) MiddlewareFunc(handler rest.HandlerFunc) rest.HandlerFunc { + + minVersion, err := semver.NewVersion(mw.MinVersion) + if err != nil { + panic(err) + } + + maxVersion, err := semver.NewVersion(mw.MaxVersion) + if err != nil { + panic(err) + } + + return func(writer rest.ResponseWriter, request *rest.Request) { + + version, err := semver.NewVersion(request.PathParam("version")) + if err != nil { + rest.Error( + writer, + "Invalid version: "+err.Error(), + http.StatusBadRequest, + ) + return + } + + if version.LessThan(*minVersion) { + rest.Error( + writer, + "Min supported version is "+minVersion.String(), + http.StatusBadRequest, + ) + return + } + + if maxVersion.LessThan(*version) { + rest.Error( + writer, + "Max supported 
version is "+maxVersion.String(), + http.StatusBadRequest, + ) + return + } + + request.Env["VERSION"] = version + handler(writer, request) + } +} + +func main() { + + svmw := SemVerMiddleware{ + MinVersion: "1.0.0", + MaxVersion: "3.0.0", + } + api := rest.NewApi() + api.Use(rest.DefaultDevStack...) + router, err := rest.MakeRouter( + rest.Get("/#version/message", svmw.MiddlewareFunc( + func(w rest.ResponseWriter, req *rest.Request) { + version := req.Env["VERSION"].(*semver.Version) + if version.Major == 2 { + // https://en.wikipedia.org/wiki/Second-system_effect + w.WriteJson(map[string]string{ + "Body": "Hello broken World!", + }) + } else { + w.WriteJson(map[string]string{ + "Body": "Hello World!", + }) + } + }, + )), + ) + if err != nil { + log.Fatal(err) + } + api.SetApp(router) + http.Handle("/api/", http.StripPrefix("/api", api.MakeHandler())) + log.Fatal(http.ListenAndServe(":8080", nil)) +} + +``` + +#### Statsd + +Demonstrate how to use the [Statsd Middleware](https://github.com/ant0ine/go-json-rest-middleware-statsd) to collect statistics about the requests/reponses. +This middleware is based on the [g2s](https://github.com/peterbourgon/g2s) statsd client. + +curl demo: +``` sh +# start statsd server +# monitor network +ngrep -d any port 8125 + +curl -i http://127.0.0.1:8080/message +curl -i http://127.0.0.1:8080/doesnotexist + +``` + +code: +``` go +package main + +import ( + "github.com/ant0ine/go-json-rest-middleware-statsd" + "github.com/ant0ine/go-json-rest/rest" + "log" + "net/http" + "time" +) + +func main() { + api := rest.NewApi() + api.Use(&statsd.StatsdMiddleware{}) + api.Use(rest.DefaultDevStack...) 
+ api.SetApp(rest.AppSimple(func(w rest.ResponseWriter, req *rest.Request) { + + // take more than 1ms so statsd can report it + time.Sleep(100 * time.Millisecond) + + w.WriteJson(map[string]string{"Body": "Hello World!"}) + })) + log.Fatal(http.ListenAndServe(":8080", api.MakeHandler())) +} + +``` + +#### NewRelic + +NewRelic integration based on the GoRelic plugin: [github.com/yvasiyarov/gorelic](https://github.com/yvasiyarov/gorelic) + +curl demo: +``` sh +curl -i http://127.0.0.1:8080/ +``` + +code: +``` go +package main + +import ( + "github.com/ant0ine/go-json-rest/rest" + "github.com/yvasiyarov/go-metrics" + "github.com/yvasiyarov/gorelic" + "log" + "net/http" + "time" +) + +type NewRelicMiddleware struct { + License string + Name string + Verbose bool + agent *gorelic.Agent +} + +func (mw *NewRelicMiddleware) MiddlewareFunc(handler rest.HandlerFunc) rest.HandlerFunc { + + mw.agent = gorelic.NewAgent() + mw.agent.NewrelicLicense = mw.License + mw.agent.HTTPTimer = metrics.NewTimer() + mw.agent.Verbose = mw.Verbose + mw.agent.NewrelicName = mw.Name + mw.agent.CollectHTTPStat = true + mw.agent.Run() + + return func(writer rest.ResponseWriter, request *rest.Request) { + + handler(writer, request) + + // the timer middleware keeps track of the time + startTime := request.Env["START_TIME"].(*time.Time) + mw.agent.HTTPTimer.UpdateSince(*startTime) + } +} + +func main() { + api := rest.NewApi() + api.Use(rest.DefaultDevStack...) + api.Use(&NewRelicMiddleware{ + License: "", + Name: "", + Verbose: true, + }) + api.SetApp(rest.AppSimple(func(w rest.ResponseWriter, r *rest.Request) { + w.WriteJson(map[string]string{"Body": "Hello World!"}) + })) + log.Fatal(http.ListenAndServe(":8080", api.MakeHandler())) +} + +``` + +#### Graceful Shutdown + +This example uses [https://github.com/tylerb/graceful](https://github.com/tylerb/graceful) to try to be nice with the clients waiting for responses during a server shutdown (or restart). 
+The HTTP response takes 10 seconds to be completed, printing a message on the wire every second. +10 seconds is also the timeout set for the graceful shutdown. +You can play with these numbers to show that the server waits for the responses to complete. + +curl demo: +``` sh +curl -i http://127.0.0.1:8080/message +``` + +code: +``` go +package main + +import ( + "fmt" + "github.com/ant0ine/go-json-rest/rest" + "gopkg.in/tylerb/graceful.v1" + "log" + "net/http" + "time" +) + +func main() { + api := rest.NewApi() + api.Use(rest.DefaultDevStack...) + router, err := rest.MakeRouter( + rest.Get("/message", func(w rest.ResponseWriter, req *rest.Request) { + for cpt := 1; cpt <= 10; cpt++ { + + // wait 1 second + time.Sleep(time.Duration(1) * time.Second) + + w.WriteJson(map[string]string{ + "Message": fmt.Sprintf("%d seconds", cpt), + }) + w.(http.ResponseWriter).Write([]byte("\n")) + + // Flush the buffer to client + w.(http.Flusher).Flush() + } + }), + ) + if err != nil { + log.Fatal(err) + } + api.SetApp(router) + + server := &graceful.Server{ + Timeout: 10 * time.Second, + Server: &http.Server{ + Addr: ":8080", + Handler: api.MakeHandler(), + }, + } + + log.Fatal(server.ListenAndServe()) +} + +``` + +#### SPDY + +Demonstrate how to use SPDY with https://github.com/shykes/spdy-go + +For a command line client, install spdycat from: +https://github.com/tatsuhiro-t/spdylay + +spdycat demo: +``` +spdycat -v --no-tls -2 http://localhost:8080/users/0 +``` + +code: +``` go +package main + +import ( + "github.com/ant0ine/go-json-rest/rest" + "github.com/shykes/spdy-go" + "log" +) + +type User struct { + Id string + Name string +} + +func GetUser(w rest.ResponseWriter, req *rest.Request) { + user := User{ + Id: req.PathParam("id"), + Name: "Antoine", + } + w.WriteJson(&user) +} + +func main() { + api := rest.NewApi() + api.Use(rest.DefaultDevStack...) 
+ router, err := rest.MakeRouter( + rest.Get("/users/:id", GetUser), + ) + if err != nil { + log.Fatal(err) + } + api.SetApp(router) + log.Fatal(spdy.ListenAndServeTCP(":8080", api.MakeHandler())) +} + +``` + +#### GAE + +Demonstrate a simple Google App Engine app + +Here are my steps to make it work with the GAE SDK. +(Probably not the best ones) + +Assuming that go-json-rest is installed using "go get" +and that the GAE SDK is also installed. + +Setup: + * copy this examples/gae/ dir outside of the go-json-rest/ tree + * cd gae/ + * mkdir -p github.com/ant0ine + * cp -r $GOPATH/src/github.com/ant0ine/go-json-rest github.com/ant0ine/go-json-rest + * rm -rf github.com/ant0ine/go-json-rest/examples/ + * path/to/google_appengine/dev_appserver.py . + +curl demo: +``` +curl -i http://127.0.0.1:8080/message +``` + +code: +``` go +package gaehelloworld + +import ( + "github.com/ant0ine/go-json-rest/rest" + "log" + "net/http" +) + +func init() { + api := rest.NewApi() + api.Use(rest.DefaultDevStack...) 
+ router, err := rest.MakeRouter( + &rest.Get("/message", func(w rest.ResponseWriter, req *rest.Request) { + w.WriteJson(map[string]string{"Body": "Hello World!"}) + }), + ) + if err != nil { + log.Fatal(err) + } + api.SetApp(router) + http.Handle("/", api.MakeHandler()) +} + +``` + +#### Websocket + +Demonstrate how to run websocket in go-json-rest + +go client demo: +```go +origin := "http://localhost:8080/" +url := "ws://localhost:8080/ws" +ws, err := websocket.Dial(url, "", origin) +if err != nil { + log.Fatal(err) +} +if _, err := ws.Write([]byte("hello, world\n")); err != nil { + log.Fatal(err) +} +var msg = make([]byte, 512) +var n int +if n, err = ws.Read(msg); err != nil { + log.Fatal(err) +} +log.Printf("Received: %s.", msg[:n]) +``` + +code: +``` go +package main + +import ( + "io" + "log" + "net/http" + + "github.com/ant0ine/go-json-rest/rest" + "golang.org/x/net/websocket" +) + +func main() { + wsHandler := websocket.Handler(func(ws *websocket.Conn) { + io.Copy(ws, ws) + }) + + router, err := rest.MakeRouter( + rest.Get("/ws", func(w rest.ResponseWriter, r *rest.Request) { + wsHandler.ServeHTTP(w.(http.ResponseWriter), r.Request) + }), + ) + if err != nil { + log.Fatal(err) + } + + api := rest.NewApi() + api.Use(rest.DefaultDevStack...) + api.SetApp(router) + log.Fatal(http.ListenAndServe(":8080", api.MakeHandler())) +} + +``` + + + +## External Documentation + +- [Online Documentation (godoc.org)](https://godoc.org/github.com/ant0ine/go-json-rest/rest) + +Old v1 blog posts: + +- [(Blog Post) Introducing Go-Json-Rest] (https://www.ant0ine.com/post/introducing-go-json-rest.html) +- [(Blog Post) Better URL Routing ?] (https://www.ant0ine.com/post/better-url-routing-golang.html) + + +## Version 3 release notes + +### What's New in v3 + +* Public Middlewares. (12 included in the package) +* A new App interface. (the router being the provided App) +* A new Api object that manages the Middlewares and the App. +* Optional and interchangeable App/router. 
+ +### Here is for instance the new minimal "Hello World!" + +```go +api := rest.NewApi() +api.Use(rest.DefaultDevStack...) +api.SetApp(rest.AppSimple(func(w rest.ResponseWriter, r *rest.Request) { + w.WriteJson(map[string]string{"Body": "Hello World!"}) +})) +http.ListenAndServe(":8080", api.MakeHandler()) +``` + +*All 19 examples have been updated to use the new API. [See here](https://github.com/ant0ine/go-json-rest#examples)* + +### Deprecating the ResourceHandler + +V3 is about deprecating the ResourceHandler in favor of a new API that exposes the Middlewares. As a consequence, all the Middlewares are now public, and the new Api object helps putting them together as a stack. Some default stack configurations are offered. The router is now an App that sits on top of the stack of Middlewares. Which means that the router is no longer required to use Go-Json-Rest. + +*Design ideas and discussion [See here](https://github.com/ant0ine/go-json-rest/issues/110)* + + +## Migration guide from v2 to v3 + +V3 introduces an API change (see [Semver](http://semver.org/)). But it was possible to maintain backward compatibility, and so, ResourceHandler still works. +ResourceHandler does the same thing as in V2, **but it is now considered as deprecated, and will be removed in a few months**. In the meantime, it logs a +deprecation warning. + +### How to map the ResourceHandler options to the new stack of middlewares ? + +* `EnableGzip bool`: Just include GzipMiddleware in the stack of middlewares. +* `DisableJsonIndent bool`: Just don't include JsonIndentMiddleware in the stack of middlewares. +* `EnableStatusService bool`: Include StatusMiddleware in the stack and keep a reference to it to access GetStatus(). +* `EnableResponseStackTrace bool`: Same exact option but moved to RecoverMiddleware. +* `EnableLogAsJson bool`: Include AccessLogJsonMiddleware, and possibly remove AccessLogApacheMiddleware. 
+* `EnableRelaxedContentType bool`: Just don't include ContentTypeCheckerMiddleware.
+* `OuterMiddlewares []Middleware`: You are now building the full stack, OuterMiddlewares are the first in the list.
+* `PreRoutingMiddlewares []Middleware`: You are now building the full stack, PreRoutingMiddlewares are the last in the list.
+* `Logger *log.Logger`: Same option but moved to AccessLogApacheMiddleware and AccessLogJsonMiddleware.
+* `LoggerFormat AccessLogFormat`: Same exact option but moved to AccessLogApacheMiddleware.
+* `DisableLogger bool`: Just don't include any access log middleware.
+* `ErrorLogger *log.Logger`: Same exact option but moved to RecoverMiddleware.
+* `XPoweredBy string`: Same exact option but moved to PoweredByMiddleware.
+* `DisableXPoweredBy bool`: Just don't include PoweredByMiddleware.
+
+
+## Version 2 release notes
+
+* Middlewares, the notion of middleware is now formally defined. They can be set up as global pre-routing Middlewares wrapping all the endpoints, or on a per endpoint basis.
+In fact the internal code of **go-json-rest** is itself implemented with Middlewares, they are just hidden behind configuration boolean flags to make these very common options even easier to use.
+
+* A new ResponseWriter. This is now an interface, and allows Middlewares to wrap the writer. The provided writer implements, in addition to *rest.ResponseWriter*, *http.Flusher*, *http.CloseNotifier*, *http.Hijacker*, and *http.ResponseWriter*. A lot more Go-ish, and very similar to `net/http`.
+
+* The AuthBasic and CORS Middlewares have been added. More to come in the future.
+
+* Faster, more tasks are performed at init time, and less for each request.
+
+* New documentation, with more examples.
+
+* A lot of other small improvements. See the [Migration guide to v2](#migration-guide-from-v1-to-v2)
+
+
+## Migration guide from v1 to v2
+
+**Go-Json-Rest** follows [Semver](http://semver.org/) and a few breaking changes have been introduced with the v2. 
+ + +#### The import path has changed to `github.com/ant0ine/go-json-rest/rest` + +This is more conform to Go style, and makes [goimports](https://godoc.org/golang.org/x/tools/cmd/goimports) work. + +This: +``` go +import ( + "github.com/ant0ine/go-json-rest" +) +``` +has to be changed to this: +``` go +import ( + "github.com/ant0ine/go-json-rest/rest" +) +``` + + +#### rest.ResponseWriter is now an interface + +This change allows the `ResponseWriter` to be wrapped, like the one of the `net/http` package. +This is much more powerful, and allows the creation of Middlewares that wrap the writer. The gzip option, for instance, uses this to encode the payload (see gzip.go). + +This: +``` go +func (w *rest.ResponseWriter, req *rest.Request) { + ... +} +``` +has to be changed to this: +``` go +func (w rest.ResponseWriter, req *rest.Request) { + ... +} +``` + + +#### SetRoutes now takes pointers to Route + +Instead of copying Route structures everywhere, pointers are now used. This is more elegant, more efficient, and will allow more sophisticated Route manipulations in the future (like reverse route resolution). + +This: +``` go +handler.SetRoutes( + rest.Route{ + // ... + }, +) +``` +has to be changed to this: +``` go +handler.SetRoutes( + &rest.Route{ + // ... + }, +) +``` + + +#### The notion of Middleware is now formally defined + +A middleware is an object satisfying this interface: +``` go +type Middleware interface { + MiddlewareFunc(handler HandlerFunc) HandlerFunc +} +``` + +Code using PreRoutingMiddleware will have to be adapted to provide a list of Middleware objects. +See the [Basic Auth example](https://github.com/ant0ine/go-json-rest-examples/blob/master/auth-basic/main.go). + + +#### Flush(), CloseNotify() and Write() are not directly exposed anymore + +They used to be public methods of the ResponseWriter. The implementation is still there but a type assertion of the corresponding interface is now necessary. 
+Regarding these features, a rest.ResponseWriter now behaves exactly as the http.ResponseWriter implementation provided by net/http.
+
+This:
+``` go
+writer.Flush()
+```
+has to be changed to this:
+``` go
+writer.(http.Flusher).Flush()
+```
+
+
+#### The /.status endpoint is not created automatically anymore
+
+The route has to be manually defined.
+See the [Status example](https://github.com/ant0ine/go-json-rest-examples/blob/master/status/main.go).
+This is more flexible (the route is customizable), and allows combination with Middlewares.
+See for instance how to [protect this status endpoint with the AuthBasic middleware](https://github.com/ant0ine/go-json-rest-examples/blob/master/status-auth/main.go).
+
+
+#### Request utility methods have changed
+
+Overall, they provide the same features, but with two methods instead of three, better names, and without the confusing `UriForWithParams`.
+
+- `func (r *Request) UriBase() url.URL` is now `func (r *Request) BaseUrl() *url.URL`. Note the pointer as the returned value.
+
+- `func (r *Request) UriForWithParams(path string, parameters map[string][]string) url.URL` is now `func (r *Request) UrlFor(path string, queryParams map[string][]string) *url.URL`.
+
+- `func (r *Request) UriFor(path string) url.URL` has been removed. 
+ + +## Thanks + +- [Franck Cuny](https://github.com/franckcuny) +- [Yann Kerhervé](https://github.com/yannk) +- [Ask Bjørn Hansen](https://github.com/abh) +- [Paul Lam](https://github.com/Quantisan) +- [Thanabodee Charoenpiriyakij](https://github.com/wingyplus) +- [Sebastien Estienne](https://github.com/sebest) +- [Edward Bramanti](https://github.com/jadengore) + + +Copyright (c) 2013-2016 Antoine Imbert + +[MIT License](https://github.com/ant0ine/go-json-rest/blob/master/LICENSE) + +[![Analytics](https://ga-beacon.appspot.com/UA-309210-4/go-json-rest/master/readme)](https://github.com/igrigorik/ga-beacon) + diff --git a/src/vendor/github.com/ant0ine/go-json-rest/rest/access_log_apache.go b/src/vendor/github.com/ant0ine/go-json-rest/rest/access_log_apache.go new file mode 100644 index 00000000..1832b527 --- /dev/null +++ b/src/vendor/github.com/ant0ine/go-json-rest/rest/access_log_apache.go @@ -0,0 +1,236 @@ +package rest + +import ( + "bytes" + "fmt" + "log" + "net" + "os" + "strings" + "text/template" + "time" +) + +// TODO Future improvements: +// * support %{strftime}t ? +// * support %{
}o to print headers

// AccessLogFormat defines the format of the access log record.
// This implementation is a subset of Apache mod_log_config.
// (See http://httpd.apache.org/docs/2.0/mod/mod_log_config.html)
//
// %b content length in bytes, - if 0
// %B content length in bytes
// %D response elapsed time in microseconds
// %h remote address
// %H server protocol
// %l identd logname, not supported, -
// %m http method
// %P process id
// %q query string
// %r first line of the request
// %s status code
// %S status code preceded by a terminal color
// %t time of the request
// %T response elapsed time in seconds, 3 decimals
// %u remote user, - if missing
// %{User-Agent}i user agent, - if missing
// %{Referer}i referer, - if missing
//
// Some predefined formats are provided as constants.
type AccessLogFormat string

const (
	// CommonLogFormat is the Common Log Format (CLF).
	CommonLogFormat = "%h %l %u %t \"%r\" %s %b"

	// CombinedLogFormat is the NCSA extended/combined log format.
	CombinedLogFormat = "%h %l %u %t \"%r\" %s %b \"%{Referer}i\" \"%{User-Agent}i\""

	// DefaultLogFormat is the default format, colored output and response time, convenient for development.
	DefaultLogFormat = "%t %S\033[0m \033[36;1m%Dμs\033[0m \"%r\" \033[1;30m%u \"%{User-Agent}i\"\033[0m"
)

// AccessLogApacheMiddleware produces the access log following a format inspired by Apache
// mod_log_config. It depends on TimerMiddleware and RecorderMiddleware that should be in the wrapped
// middlewares. It also uses request.Env["REMOTE_USER"].(string) set by the auth middlewares.
type AccessLogApacheMiddleware struct {

	// Logger points to the logger object used by this middleware, it defaults to
	// log.New(os.Stderr, "", 0).
	Logger *log.Logger

	// Format defines the format of the access log record. See AccessLogFormat for the details.
	// It defaults to DefaultLogFormat.
+	Format AccessLogFormat
+
+	textTemplate *template.Template
+}
+
+// MiddlewareFunc makes AccessLogApacheMiddleware implement the Middleware interface.
+func (mw *AccessLogApacheMiddleware) MiddlewareFunc(h HandlerFunc) HandlerFunc {
+
+	// set the default Logger
+	if mw.Logger == nil {
+		mw.Logger = log.New(os.Stderr, "", 0)
+	}
+
+	// set default format
+	if mw.Format == "" {
+		mw.Format = DefaultLogFormat
+	}
+
+	mw.convertFormat()
+
+	return func(w ResponseWriter, r *Request) {
+
+		// call the handler
+		h(w, r)
+
+		util := &accessLogUtil{w, r}
+
+		mw.Logger.Print(mw.executeTextTemplate(util))
+	}
+}
+
+var apacheAdapter = strings.NewReplacer(
+	"%b", "{{.BytesWritten | dashIf0}}",
+	"%B", "{{.BytesWritten}}",
+	"%D", "{{.ResponseTime | microseconds}}",
+	"%h", "{{.ApacheRemoteAddr}}",
+	"%H", "{{.R.Proto}}",
+	"%l", "-",
+	"%m", "{{.R.Method}}",
+	"%P", "{{.Pid}}",
+	"%q", "{{.ApacheQueryString}}",
+	"%r", "{{.R.Method}} {{.R.URL.RequestURI}} {{.R.Proto}}",
+	"%s", "{{.StatusCode}}",
+	"%S", "\033[{{.StatusCode | statusCodeColor}}m{{.StatusCode}}",
+	"%t", "{{if .StartTime}}{{.StartTime.Format \"02/Jan/2006:15:04:05 -0700\"}}{{end}}",
+	"%T", "{{if .ResponseTime}}{{.ResponseTime.Seconds | printf \"%.3f\"}}{{end}}",
+	"%u", "{{.RemoteUser | dashIfEmptyStr}}",
+	"%{User-Agent}i", "{{.R.UserAgent | dashIfEmptyStr}}",
+	"%{Referer}i", "{{.R.Referer | dashIfEmptyStr}}",
+)
+
+// Convert the Apache access log format into a text/template
+func (mw *AccessLogApacheMiddleware) convertFormat() {
+
+	tmplText := apacheAdapter.Replace(string(mw.Format))
+
+	funcMap := template.FuncMap{
+		"dashIfEmptyStr": func(value string) string {
+			if value == "" {
+				return "-"
+			}
+			return value
+		},
+		"dashIf0": func(value int64) string {
+			if value == 0 {
+				return "-"
+			}
+			return fmt.Sprintf("%d", value)
+		},
+		"microseconds": func(dur *time.Duration) string {
+			if dur != nil {
+				return fmt.Sprintf("%d", dur.Nanoseconds()/1000)
+			}
+			return ""
+		},
+		"statusCodeColor": func(statusCode int) string {
+			if statusCode >= 400 && statusCode < 500 {
+				return "1;33"
+			} else if statusCode >= 500 {
+				return "0;31"
+			}
+			return "0;32"
+		},
+	}
+
+	var err error
+	mw.textTemplate, err = template.New("accessLog").Funcs(funcMap).Parse(tmplText)
+	if err != nil {
+		panic(err)
+	}
+}
+
+// Execute the text template with the data derived from the request, and return a string.
+func (mw *AccessLogApacheMiddleware) executeTextTemplate(util *accessLogUtil) string {
+	buf := bytes.NewBufferString("")
+	err := mw.textTemplate.Execute(buf, util)
+	if err != nil {
+		panic(err)
+	}
+	return buf.String()
+}
+
+// accessLogUtil provides a collection of utility functions that derive data from the Request object.
+// This object is used to provide data to the Apache Style template and the JSON log record.
+type accessLogUtil struct {
+	W ResponseWriter
+	R *Request
+}
+
+// As stored by the auth middlewares.
+func (u *accessLogUtil) RemoteUser() string {
+	if u.R.Env["REMOTE_USER"] != nil {
+		return u.R.Env["REMOTE_USER"].(string)
+	}
+	return ""
+}
+
+// If qs exists then return it with a leading "?", apache log style.
+func (u *accessLogUtil) ApacheQueryString() string {
+	if u.R.URL.RawQuery != "" {
+		return "?" + u.R.URL.RawQuery
+	}
+	return ""
+}
+
+// When the request entered the timer middleware.
+func (u *accessLogUtil) StartTime() *time.Time {
+	if u.R.Env["START_TIME"] != nil {
+		return u.R.Env["START_TIME"].(*time.Time)
+	}
+	return nil
+}
+
+// If remoteAddr is set then return it without the port number, apache log style.
+func (u *accessLogUtil) ApacheRemoteAddr() string {
+	remoteAddr := u.R.RemoteAddr
+	if remoteAddr != "" {
+		if ip, _, err := net.SplitHostPort(remoteAddr); err == nil {
+			return ip
+		}
+	}
+	return ""
+}
+
+// As recorded by the recorder middleware.
+func (u *accessLogUtil) StatusCode() int { + if u.R.Env["STATUS_CODE"] != nil { + return u.R.Env["STATUS_CODE"].(int) + } + return 0 +} + +// As mesured by the timer middleware. +func (u *accessLogUtil) ResponseTime() *time.Duration { + if u.R.Env["ELAPSED_TIME"] != nil { + return u.R.Env["ELAPSED_TIME"].(*time.Duration) + } + return nil +} + +// Process id. +func (u *accessLogUtil) Pid() int { + return os.Getpid() +} + +// As recorded by the recorder middleware. +func (u *accessLogUtil) BytesWritten() int64 { + if u.R.Env["BYTES_WRITTEN"] != nil { + return u.R.Env["BYTES_WRITTEN"].(int64) + } + return 0 +} diff --git a/src/vendor/github.com/ant0ine/go-json-rest/rest/access_log_apache_test.go b/src/vendor/github.com/ant0ine/go-json-rest/rest/access_log_apache_test.go new file mode 100644 index 00000000..64127442 --- /dev/null +++ b/src/vendor/github.com/ant0ine/go-json-rest/rest/access_log_apache_test.go @@ -0,0 +1,78 @@ +package rest + +import ( + "bytes" + "github.com/ant0ine/go-json-rest/rest/test" + "log" + "regexp" + "testing" +) + +func TestAccessLogApacheMiddleware(t *testing.T) { + + api := NewApi() + + // the middlewares stack + buffer := bytes.NewBufferString("") + api.Use(&AccessLogApacheMiddleware{ + Logger: log.New(buffer, "", 0), + Format: CommonLogFormat, + textTemplate: nil, + }) + api.Use(&TimerMiddleware{}) + api.Use(&RecorderMiddleware{}) + + // a simple app + api.SetApp(AppSimple(func(w ResponseWriter, r *Request) { + w.WriteJson(map[string]string{"Id": "123"}) + })) + + // wrap all + handler := api.MakeHandler() + + req := test.MakeSimpleRequest("GET", "http://localhost/", nil) + req.RemoteAddr = "127.0.0.1:1234" + recorded := test.RunRequest(t, handler, req) + recorded.CodeIs(200) + recorded.ContentTypeIsJson() + + // log tests, eg: '127.0.0.1 - - 29/Nov/2014:22:28:34 +0000 "GET / HTTP/1.1" 200 12' + apacheCommon := regexp.MustCompile(`127.0.0.1 - - \d{2}/\w{3}/\d{4}:\d{2}:\d{2}:\d{2} [+\-]\d{4}\ "GET / HTTP/1.1" 200 12`) + + if 
!apacheCommon.Match(buffer.Bytes()) { + t.Errorf("Got: %s", buffer.String()) + } +} + +func TestAccessLogApacheMiddlewareMissingData(t *testing.T) { + + api := NewApi() + + // the uncomplete middlewares stack + buffer := bytes.NewBufferString("") + api.Use(&AccessLogApacheMiddleware{ + Logger: log.New(buffer, "", 0), + Format: CommonLogFormat, + textTemplate: nil, + }) + + // a simple app + api.SetApp(AppSimple(func(w ResponseWriter, r *Request) { + w.WriteJson(map[string]string{"Id": "123"}) + })) + + // wrap all + handler := api.MakeHandler() + + req := test.MakeSimpleRequest("GET", "http://localhost/", nil) + recorded := test.RunRequest(t, handler, req) + recorded.CodeIs(200) + recorded.ContentTypeIsJson() + + // not much to log when the Env data is missing, but this should still work + apacheCommon := regexp.MustCompile(` - - "GET / HTTP/1.1" 0 -`) + + if !apacheCommon.Match(buffer.Bytes()) { + t.Errorf("Got: %s", buffer.String()) + } +} diff --git a/src/vendor/github.com/ant0ine/go-json-rest/rest/access_log_json.go b/src/vendor/github.com/ant0ine/go-json-rest/rest/access_log_json.go new file mode 100644 index 00000000..a6bc175f --- /dev/null +++ b/src/vendor/github.com/ant0ine/go-json-rest/rest/access_log_json.go @@ -0,0 +1,88 @@ +package rest + +import ( + "encoding/json" + "log" + "os" + "time" +) + +// AccessLogJsonMiddleware produces the access log with records written as JSON. This middleware +// depends on TimerMiddleware and RecorderMiddleware that must be in the wrapped middlewares. It +// also uses request.Env["REMOTE_USER"].(string) set by the auth middlewares. +type AccessLogJsonMiddleware struct { + + // Logger points to the logger object used by this middleware, it defaults to + // log.New(os.Stderr, "", 0). + Logger *log.Logger +} + +// MiddlewareFunc makes AccessLogJsonMiddleware implement the Middleware interface. 
+func (mw *AccessLogJsonMiddleware) MiddlewareFunc(h HandlerFunc) HandlerFunc { + + // set the default Logger + if mw.Logger == nil { + mw.Logger = log.New(os.Stderr, "", 0) + } + + return func(w ResponseWriter, r *Request) { + + // call the handler + h(w, r) + + mw.Logger.Printf("%s", makeAccessLogJsonRecord(r).asJson()) + } +} + +// AccessLogJsonRecord is the data structure used by AccessLogJsonMiddleware to create the JSON +// records. (Public for documentation only, no public method uses it) +type AccessLogJsonRecord struct { + Timestamp *time.Time + StatusCode int + ResponseTime *time.Duration + HttpMethod string + RequestURI string + RemoteUser string + UserAgent string +} + +func makeAccessLogJsonRecord(r *Request) *AccessLogJsonRecord { + + var timestamp *time.Time + if r.Env["START_TIME"] != nil { + timestamp = r.Env["START_TIME"].(*time.Time) + } + + var statusCode int + if r.Env["STATUS_CODE"] != nil { + statusCode = r.Env["STATUS_CODE"].(int) + } + + var responseTime *time.Duration + if r.Env["ELAPSED_TIME"] != nil { + responseTime = r.Env["ELAPSED_TIME"].(*time.Duration) + } + + var remoteUser string + if r.Env["REMOTE_USER"] != nil { + remoteUser = r.Env["REMOTE_USER"].(string) + } + + return &AccessLogJsonRecord{ + Timestamp: timestamp, + StatusCode: statusCode, + ResponseTime: responseTime, + HttpMethod: r.Method, + RequestURI: r.URL.RequestURI(), + RemoteUser: remoteUser, + UserAgent: r.UserAgent(), + } +} + +func (r *AccessLogJsonRecord) asJson() []byte { + b, err := json.Marshal(r) + if err != nil { + panic(err) + } + return b +} diff --git a/src/vendor/github.com/ant0ine/go-json-rest/rest/access_log_json_test.go b/src/vendor/github.com/ant0ine/go-json-rest/rest/access_log_json_test.go new file mode 100644 index 00000000..9085fcb8 --- /dev/null +++ b/src/vendor/github.com/ant0ine/go-json-rest/rest/access_log_json_test.go @@ -0,0 +1,53 @@ +package rest + +import ( + "bytes" + "encoding/json" + "github.com/ant0ine/go-json-rest/rest/test" + "log" + 
"testing" +) + +func TestAccessLogJsonMiddleware(t *testing.T) { + + api := NewApi() + + // the middlewares stack + buffer := bytes.NewBufferString("") + api.Use(&AccessLogJsonMiddleware{ + Logger: log.New(buffer, "", 0), + }) + api.Use(&TimerMiddleware{}) + api.Use(&RecorderMiddleware{}) + + // a simple app + api.SetApp(AppSimple(func(w ResponseWriter, r *Request) { + w.WriteJson(map[string]string{"Id": "123"}) + })) + + // wrap all + handler := api.MakeHandler() + + req := test.MakeSimpleRequest("GET", "http://localhost/", nil) + req.RemoteAddr = "127.0.0.1:1234" + recorded := test.RunRequest(t, handler, req) + recorded.CodeIs(200) + recorded.ContentTypeIsJson() + + // log tests + decoded := &AccessLogJsonRecord{} + err := json.Unmarshal(buffer.Bytes(), decoded) + if err != nil { + t.Fatal(err) + } + + if decoded.StatusCode != 200 { + t.Errorf("StatusCode 200 expected, got %d", decoded.StatusCode) + } + if decoded.RequestURI != "/" { + t.Errorf("RequestURI / expected, got %s", decoded.RequestURI) + } + if decoded.HttpMethod != "GET" { + t.Errorf("HttpMethod GET expected, got %s", decoded.HttpMethod) + } +} diff --git a/src/vendor/github.com/ant0ine/go-json-rest/rest/api.go b/src/vendor/github.com/ant0ine/go-json-rest/rest/api.go new file mode 100644 index 00000000..6295430c --- /dev/null +++ b/src/vendor/github.com/ant0ine/go-json-rest/rest/api.go @@ -0,0 +1,83 @@ +package rest + +import ( + "net/http" +) + +// Api defines a stack of Middlewares and an App. +type Api struct { + stack []Middleware + app App +} + +// NewApi makes a new Api object. The Middleware stack is empty, and the App is nil. +func NewApi() *Api { + return &Api{ + stack: []Middleware{}, + app: nil, + } +} + +// Use pushes one or multiple middlewares to the stack for middlewares +// maintained in the Api object. +func (api *Api) Use(middlewares ...Middleware) { + api.stack = append(api.stack, middlewares...) +} + +// SetApp sets the App in the Api object. 
+func (api *Api) SetApp(app App) { + api.app = app +} + +// MakeHandler wraps all the Middlewares of the stack and the App together, and returns an +// http.Handler ready to be used. If the Middleware stack is empty the App is used directly. If the +// App is nil, a HandlerFunc that does nothing is used instead. +func (api *Api) MakeHandler() http.Handler { + var appFunc HandlerFunc + if api.app != nil { + appFunc = api.app.AppFunc() + } else { + appFunc = func(w ResponseWriter, r *Request) {} + } + return http.HandlerFunc( + adapterFunc( + WrapMiddlewares(api.stack, appFunc), + ), + ) +} + +// Defines a stack of middlewares convenient for development. Among other things: +// console friendly logging, JSON indentation, error stack strace in the response. +var DefaultDevStack = []Middleware{ + &AccessLogApacheMiddleware{}, + &TimerMiddleware{}, + &RecorderMiddleware{}, + &PoweredByMiddleware{}, + &RecoverMiddleware{ + EnableResponseStackTrace: true, + }, + &JsonIndentMiddleware{}, + &ContentTypeCheckerMiddleware{}, +} + +// Defines a stack of middlewares convenient for production. Among other things: +// Apache CombinedLogFormat logging, gzip compression. +var DefaultProdStack = []Middleware{ + &AccessLogApacheMiddleware{ + Format: CombinedLogFormat, + }, + &TimerMiddleware{}, + &RecorderMiddleware{}, + &PoweredByMiddleware{}, + &RecoverMiddleware{}, + &GzipMiddleware{}, + &ContentTypeCheckerMiddleware{}, +} + +// Defines a stack of middlewares that should be common to most of the middleware stacks. 
+var DefaultCommonStack = []Middleware{ + &TimerMiddleware{}, + &RecorderMiddleware{}, + &PoweredByMiddleware{}, + &RecoverMiddleware{}, +} diff --git a/src/vendor/github.com/ant0ine/go-json-rest/rest/api_test.go b/src/vendor/github.com/ant0ine/go-json-rest/rest/api_test.go new file mode 100644 index 00000000..269edfcf --- /dev/null +++ b/src/vendor/github.com/ant0ine/go-json-rest/rest/api_test.go @@ -0,0 +1,97 @@ +package rest + +import ( + "github.com/ant0ine/go-json-rest/rest/test" + "testing" +) + +func TestApiNoAppNoMiddleware(t *testing.T) { + + api := NewApi() + if api == nil { + t.Fatal("Api object must be instantiated") + } + + handler := api.MakeHandler() + if handler == nil { + t.Fatal("the http.Handler must have been created") + } + + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("GET", "http://localhost/", nil)) + recorded.CodeIs(200) +} + +func TestApiSimpleAppNoMiddleware(t *testing.T) { + + api := NewApi() + api.SetApp(AppSimple(func(w ResponseWriter, r *Request) { + w.WriteJson(map[string]string{"Id": "123"}) + })) + + handler := api.MakeHandler() + if handler == nil { + t.Fatal("the http.Handler must have been created") + } + + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("GET", "http://localhost/", nil)) + recorded.CodeIs(200) + recorded.ContentTypeIsJson() + recorded.BodyIs(`{"Id":"123"}`) +} + +func TestDevStack(t *testing.T) { + + api := NewApi() + api.Use(DefaultDevStack...) + api.SetApp(AppSimple(func(w ResponseWriter, r *Request) { + w.WriteJson(map[string]string{"Id": "123"}) + })) + + handler := api.MakeHandler() + if handler == nil { + t.Fatal("the http.Handler must have been created") + } + + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("GET", "http://localhost/", nil)) + recorded.CodeIs(200) + recorded.ContentTypeIsJson() + recorded.BodyIs("{\n \"Id\": \"123\"\n}") +} + +func TestProdStack(t *testing.T) { + + api := NewApi() + api.Use(DefaultProdStack...) 
+ api.SetApp(AppSimple(func(w ResponseWriter, r *Request) { + w.WriteJson(map[string]string{"Id": "123"}) + })) + + handler := api.MakeHandler() + if handler == nil { + t.Fatal("the http.Handler must have been created") + } + + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("GET", "http://localhost/", nil)) + recorded.CodeIs(200) + recorded.ContentTypeIsJson() + recorded.ContentEncodingIsGzip() +} + +func TestCommonStack(t *testing.T) { + + api := NewApi() + api.Use(DefaultCommonStack...) + api.SetApp(AppSimple(func(w ResponseWriter, r *Request) { + w.WriteJson(map[string]string{"Id": "123"}) + })) + + handler := api.MakeHandler() + if handler == nil { + t.Fatal("the http.Handler must have been created") + } + + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("GET", "http://localhost/", nil)) + recorded.CodeIs(200) + recorded.ContentTypeIsJson() + recorded.BodyIs(`{"Id":"123"}`) +} diff --git a/src/vendor/github.com/ant0ine/go-json-rest/rest/auth_basic.go b/src/vendor/github.com/ant0ine/go-json-rest/rest/auth_basic.go new file mode 100644 index 00000000..dbf254cf --- /dev/null +++ b/src/vendor/github.com/ant0ine/go-json-rest/rest/auth_basic.go @@ -0,0 +1,100 @@ +package rest + +import ( + "encoding/base64" + "errors" + "log" + "net/http" + "strings" +) + +// AuthBasicMiddleware provides a simple AuthBasic implementation. On failure, a 401 HTTP response +//is returned. On success, the wrapped middleware is called, and the userId is made available as +// request.Env["REMOTE_USER"].(string) +type AuthBasicMiddleware struct { + + // Realm name to display to the user. Required. + Realm string + + // Callback function that should perform the authentication of the user based on userId and + // password. Must return true on success, false on failure. Required. + Authenticator func(userId string, password string) bool + + // Callback function that should perform the authorization of the authenticated user. 
Called + // only after an authentication success. Must return true on success, false on failure. + // Optional, default to success. + Authorizator func(userId string, request *Request) bool +} + +// MiddlewareFunc makes AuthBasicMiddleware implement the Middleware interface. +func (mw *AuthBasicMiddleware) MiddlewareFunc(handler HandlerFunc) HandlerFunc { + + if mw.Realm == "" { + log.Fatal("Realm is required") + } + + if mw.Authenticator == nil { + log.Fatal("Authenticator is required") + } + + if mw.Authorizator == nil { + mw.Authorizator = func(userId string, request *Request) bool { + return true + } + } + + return func(writer ResponseWriter, request *Request) { + + authHeader := request.Header.Get("Authorization") + if authHeader == "" { + mw.unauthorized(writer) + return + } + + providedUserId, providedPassword, err := mw.decodeBasicAuthHeader(authHeader) + + if err != nil { + Error(writer, "Invalid authentication", http.StatusBadRequest) + return + } + + if !mw.Authenticator(providedUserId, providedPassword) { + mw.unauthorized(writer) + return + } + + if !mw.Authorizator(providedUserId, request) { + mw.unauthorized(writer) + return + } + + request.Env["REMOTE_USER"] = providedUserId + + handler(writer, request) + } +} + +func (mw *AuthBasicMiddleware) unauthorized(writer ResponseWriter) { + writer.Header().Set("WWW-Authenticate", "Basic realm="+mw.Realm) + Error(writer, "Not Authorized", http.StatusUnauthorized) +} + +func (mw *AuthBasicMiddleware) decodeBasicAuthHeader(header string) (user string, password string, err error) { + + parts := strings.SplitN(header, " ", 2) + if !(len(parts) == 2 && parts[0] == "Basic") { + return "", "", errors.New("Invalid authentication") + } + + decoded, err := base64.StdEncoding.DecodeString(parts[1]) + if err != nil { + return "", "", errors.New("Invalid base64") + } + + creds := strings.SplitN(string(decoded), ":", 2) + if len(creds) != 2 { + return "", "", errors.New("Invalid authentication") + } + + return creds[0], 
creds[1], nil +} diff --git a/src/vendor/github.com/ant0ine/go-json-rest/rest/auth_basic_test.go b/src/vendor/github.com/ant0ine/go-json-rest/rest/auth_basic_test.go new file mode 100644 index 00000000..8206ca04 --- /dev/null +++ b/src/vendor/github.com/ant0ine/go-json-rest/rest/auth_basic_test.go @@ -0,0 +1,78 @@ +package rest + +import ( + "encoding/base64" + "github.com/ant0ine/go-json-rest/rest/test" + "testing" +) + +func TestAuthBasic(t *testing.T) { + + // the middleware to test + authMiddleware := &AuthBasicMiddleware{ + Realm: "test zone", + Authenticator: func(userId string, password string) bool { + if userId == "admin" && password == "admin" { + return true + } + return false + }, + Authorizator: func(userId string, request *Request) bool { + if request.Method == "GET" { + return true + } + return false + }, + } + + // api for testing failure + apiFailure := NewApi() + apiFailure.Use(authMiddleware) + apiFailure.SetApp(AppSimple(func(w ResponseWriter, r *Request) { + t.Error("Should never be executed") + })) + handler := apiFailure.MakeHandler() + + // simple request fails + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("GET", "http://localhost/", nil)) + recorded.CodeIs(401) + recorded.ContentTypeIsJson() + + // auth with wrong cred and right method fails + wrongCredReq := test.MakeSimpleRequest("GET", "http://localhost/", nil) + encoded := base64.StdEncoding.EncodeToString([]byte("admin:AdmIn")) + wrongCredReq.Header.Set("Authorization", "Basic "+encoded) + recorded = test.RunRequest(t, handler, wrongCredReq) + recorded.CodeIs(401) + recorded.ContentTypeIsJson() + + // auth with right cred and wrong method fails + rightCredReq := test.MakeSimpleRequest("POST", "http://localhost/", nil) + encoded = base64.StdEncoding.EncodeToString([]byte("admin:admin")) + rightCredReq.Header.Set("Authorization", "Basic "+encoded) + recorded = test.RunRequest(t, handler, rightCredReq) + recorded.CodeIs(401) + recorded.ContentTypeIsJson() + + // api for 
testing success + apiSuccess := NewApi() + apiSuccess.Use(authMiddleware) + apiSuccess.SetApp(AppSimple(func(w ResponseWriter, r *Request) { + if r.Env["REMOTE_USER"] == nil { + t.Error("REMOTE_USER is nil") + } + user := r.Env["REMOTE_USER"].(string) + if user != "admin" { + t.Error("REMOTE_USER is expected to be 'admin'") + } + w.WriteJson(map[string]string{"Id": "123"}) + })) + + // auth with right cred and right method succeeds + rightCredReq = test.MakeSimpleRequest("GET", "http://localhost/", nil) + encoded = base64.StdEncoding.EncodeToString([]byte("admin:admin")) + rightCredReq.Header.Set("Authorization", "Basic "+encoded) + recorded = test.RunRequest(t, apiSuccess.MakeHandler(), rightCredReq) + recorded.CodeIs(200) + recorded.ContentTypeIsJson() +} diff --git a/src/vendor/github.com/ant0ine/go-json-rest/rest/content_type_checker.go b/src/vendor/github.com/ant0ine/go-json-rest/rest/content_type_checker.go new file mode 100644 index 00000000..1d878773 --- /dev/null +++ b/src/vendor/github.com/ant0ine/go-json-rest/rest/content_type_checker.go @@ -0,0 +1,40 @@ +package rest + +import ( + "mime" + "net/http" + "strings" +) + +// ContentTypeCheckerMiddleware verifies the request Content-Type header and returns a +// StatusUnsupportedMediaType (415) HTTP error response if it's incorrect. The expected +// Content-Type is 'application/json' if the content is non-null. Note: If a charset parameter +// exists, it MUST be UTF-8. +type ContentTypeCheckerMiddleware struct{} + +// MiddlewareFunc makes ContentTypeCheckerMiddleware implement the Middleware interface. 
+func (mw *ContentTypeCheckerMiddleware) MiddlewareFunc(handler HandlerFunc) HandlerFunc { + + return func(w ResponseWriter, r *Request) { + + mediatype, params, _ := mime.ParseMediaType(r.Header.Get("Content-Type")) + charset, ok := params["charset"] + if !ok { + charset = "UTF-8" + } + + // per net/http doc, means that the length is known and non-null + if r.ContentLength > 0 && + !(mediatype == "application/json" && strings.ToUpper(charset) == "UTF-8") { + + Error(w, + "Bad Content-Type or charset, expected 'application/json'", + http.StatusUnsupportedMediaType, + ) + return + } + + // call the wrapped handler + handler(w, r) + } +} diff --git a/src/vendor/github.com/ant0ine/go-json-rest/rest/content_type_checker_test.go b/src/vendor/github.com/ant0ine/go-json-rest/rest/content_type_checker_test.go new file mode 100644 index 00000000..0f819bc3 --- /dev/null +++ b/src/vendor/github.com/ant0ine/go-json-rest/rest/content_type_checker_test.go @@ -0,0 +1,48 @@ +package rest + +import ( + "github.com/ant0ine/go-json-rest/rest/test" + "testing" +) + +func TestContentTypeCheckerMiddleware(t *testing.T) { + + api := NewApi() + + // the middleware to test + api.Use(&ContentTypeCheckerMiddleware{}) + + // a simple app + api.SetApp(AppSimple(func(w ResponseWriter, r *Request) { + w.WriteJson(map[string]string{"Id": "123"}) + })) + + // wrap all + handler := api.MakeHandler() + + // no payload, no content length, no check + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("GET", "http://localhost/", nil)) + recorded.CodeIs(200) + + // JSON payload with correct content type + recorded = test.RunRequest(t, handler, test.MakeSimpleRequest("POST", "http://localhost/", map[string]string{"Id": "123"})) + recorded.CodeIs(200) + + // JSON payload with correct content type specifying the utf-8 charset + req := test.MakeSimpleRequest("POST", "http://localhost/", map[string]string{"Id": "123"}) + req.Header.Set("Content-Type", "application/json; charset=utf-8") + recorded 
= test.RunRequest(t, handler, req) + recorded.CodeIs(200) + + // JSON payload with incorrect content type + req = test.MakeSimpleRequest("POST", "http://localhost/", map[string]string{"Id": "123"}) + req.Header.Set("Content-Type", "text/x-json") + recorded = test.RunRequest(t, handler, req) + recorded.CodeIs(415) + + // JSON payload with correct content type but incorrect charset + req = test.MakeSimpleRequest("POST", "http://localhost/", map[string]string{"Id": "123"}) + req.Header.Set("Content-Type", "application/json; charset=ISO-8859-1") + recorded = test.RunRequest(t, handler, req) + recorded.CodeIs(415) +} diff --git a/src/vendor/github.com/ant0ine/go-json-rest/rest/cors.go b/src/vendor/github.com/ant0ine/go-json-rest/rest/cors.go new file mode 100644 index 00000000..5b005432 --- /dev/null +++ b/src/vendor/github.com/ant0ine/go-json-rest/rest/cors.go @@ -0,0 +1,135 @@ +package rest + +import ( + "net/http" + "strconv" + "strings" +) + +// Possible improvements: +// If AllowedMethods["*"] then Access-Control-Allow-Methods is set to the requested methods +// If AllowedHeaderss["*"] then Access-Control-Allow-Headers is set to the requested headers +// Put some presets in AllowedHeaders +// Put some presets in AccessControlExposeHeaders + +// CorsMiddleware provides a configurable CORS implementation. +type CorsMiddleware struct { + allowedMethods map[string]bool + allowedMethodsCsv string + allowedHeaders map[string]bool + allowedHeadersCsv string + + // Reject non CORS requests if true. See CorsInfo.IsCors. + RejectNonCorsRequests bool + + // Function excecuted for every CORS requests to validate the Origin. (Required) + // Must return true if valid, false if invalid. + // For instance: simple equality, regexp, DB lookup, ... + OriginValidator func(origin string, request *Request) bool + + // List of allowed HTTP methods. Note that the comparison will be made in + // uppercase to avoid common mistakes. 
And that the + // Access-Control-Allow-Methods response header also uses uppercase. + // (see CorsInfo.AccessControlRequestMethod) + AllowedMethods []string + + // List of allowed HTTP Headers. Note that the comparison will be made with + // noarmalized names (http.CanonicalHeaderKey). And that the response header + // also uses normalized names. + // (see CorsInfo.AccessControlRequestHeaders) + AllowedHeaders []string + + // List of headers used to set the Access-Control-Expose-Headers header. + AccessControlExposeHeaders []string + + // User to se the Access-Control-Allow-Credentials response header. + AccessControlAllowCredentials bool + + // Used to set the Access-Control-Max-Age response header, in seconds. + AccessControlMaxAge int +} + +// MiddlewareFunc makes CorsMiddleware implement the Middleware interface. +func (mw *CorsMiddleware) MiddlewareFunc(handler HandlerFunc) HandlerFunc { + + // precompute as much as possible at init time + + mw.allowedMethods = map[string]bool{} + normedMethods := []string{} + for _, allowedMethod := range mw.AllowedMethods { + normed := strings.ToUpper(allowedMethod) + mw.allowedMethods[normed] = true + normedMethods = append(normedMethods, normed) + } + mw.allowedMethodsCsv = strings.Join(normedMethods, ",") + + mw.allowedHeaders = map[string]bool{} + normedHeaders := []string{} + for _, allowedHeader := range mw.AllowedHeaders { + normed := http.CanonicalHeaderKey(allowedHeader) + mw.allowedHeaders[normed] = true + normedHeaders = append(normedHeaders, normed) + } + mw.allowedHeadersCsv = strings.Join(normedHeaders, ",") + + return func(writer ResponseWriter, request *Request) { + + corsInfo := request.GetCorsInfo() + + // non CORS requests + if !corsInfo.IsCors { + if mw.RejectNonCorsRequests { + Error(writer, "Non CORS request", http.StatusForbidden) + return + } + // continue, execute the wrapped middleware + handler(writer, request) + return + } + + // Validate the Origin + if mw.OriginValidator(corsInfo.Origin, 
request) == false { + Error(writer, "Invalid Origin", http.StatusForbidden) + return + } + + if corsInfo.IsPreflight { + + // check the request methods + if mw.allowedMethods[corsInfo.AccessControlRequestMethod] == false { + Error(writer, "Invalid Preflight Request", http.StatusForbidden) + return + } + + // check the request headers + for _, requestedHeader := range corsInfo.AccessControlRequestHeaders { + if mw.allowedHeaders[requestedHeader] == false { + Error(writer, "Invalid Preflight Request", http.StatusForbidden) + return + } + } + + writer.Header().Set("Access-Control-Allow-Methods", mw.allowedMethodsCsv) + writer.Header().Set("Access-Control-Allow-Headers", mw.allowedHeadersCsv) + writer.Header().Set("Access-Control-Allow-Origin", corsInfo.Origin) + if mw.AccessControlAllowCredentials == true { + writer.Header().Set("Access-Control-Allow-Credentials", "true") + } + writer.Header().Set("Access-Control-Max-Age", strconv.Itoa(mw.AccessControlMaxAge)) + writer.WriteHeader(http.StatusOK) + return + } + + // Non-preflight requests + for _, exposed := range mw.AccessControlExposeHeaders { + writer.Header().Add("Access-Control-Expose-Headers", exposed) + } + writer.Header().Set("Access-Control-Allow-Origin", corsInfo.Origin) + if mw.AccessControlAllowCredentials == true { + writer.Header().Set("Access-Control-Allow-Credentials", "true") + } + // continure, execute the wrapped middleware + handler(writer, request) + return + } +} diff --git a/src/vendor/github.com/ant0ine/go-json-rest/rest/cors_test.go b/src/vendor/github.com/ant0ine/go-json-rest/rest/cors_test.go new file mode 100644 index 00000000..09bbbc40 --- /dev/null +++ b/src/vendor/github.com/ant0ine/go-json-rest/rest/cors_test.go @@ -0,0 +1,43 @@ +package rest + +import ( + "net/http" + "testing" + + "github.com/ant0ine/go-json-rest/rest/test" +) + +func TestCorsMiddlewareEmptyAccessControlRequestHeaders(t *testing.T) { + api := NewApi() + + // the middleware to test + api.Use(&CorsMiddleware{ + 
OriginValidator: func(_ string, _ *Request) bool { + return true + }, + AllowedMethods: []string{ + "GET", + "POST", + "PUT", + }, + AllowedHeaders: []string{ + "Origin", + "Referer", + }, + }) + + // wrap all + handler := api.MakeHandler() + + req, _ := http.NewRequest("OPTIONS", "http://localhost", nil) + req.Header.Set("Origin", "http://another.host") + req.Header.Set("Access-Control-Request-Method", "PUT") + req.Header.Set("Access-Control-Request-Headers", "") + + recorded := test.RunRequest(t, handler, req) + t.Logf("recorded: %+v\n", recorded.Recorder) + recorded.CodeIs(200) + recorded.HeaderIs("Access-Control-Allow-Methods", "GET,POST,PUT") + recorded.HeaderIs("Access-Control-Allow-Headers", "Origin,Referer") + recorded.HeaderIs("Access-Control-Allow-Origin", "http://another.host") +} diff --git a/src/vendor/github.com/ant0ine/go-json-rest/rest/doc.go b/src/vendor/github.com/ant0ine/go-json-rest/rest/doc.go new file mode 100644 index 00000000..fa6f5b2c --- /dev/null +++ b/src/vendor/github.com/ant0ine/go-json-rest/rest/doc.go @@ -0,0 +1,47 @@ +// A quick and easy way to setup a RESTful JSON API +// +// http://ant0ine.github.io/go-json-rest/ +// +// Go-Json-Rest is a thin layer on top of net/http that helps building RESTful JSON APIs easily. +// It provides fast and scalable request routing using a Trie based implementation, helpers to deal +// with JSON requests and responses, and middlewares for functionalities like CORS, Auth, Gzip, +// Status, ... +// +// Example: +// +// package main +// +// import ( +// "github.com/ant0ine/go-json-rest/rest" +// "log" +// "net/http" +// ) +// +// type User struct { +// Id string +// Name string +// } +// +// func GetUser(w rest.ResponseWriter, req *rest.Request) { +// user := User{ +// Id: req.PathParam("id"), +// Name: "Antoine", +// } +// w.WriteJson(&user) +// } +// +// func main() { +// api := rest.NewApi() +// api.Use(rest.DefaultDevStack...) 
+// router, err := rest.MakeRouter( +// rest.Get("/users/:id", GetUser), +// ) +// if err != nil { +// log.Fatal(err) +// } +// api.SetApp(router) +// log.Fatal(http.ListenAndServe(":8080", api.MakeHandler())) +// } +// +// +package rest diff --git a/src/vendor/github.com/ant0ine/go-json-rest/rest/gzip.go b/src/vendor/github.com/ant0ine/go-json-rest/rest/gzip.go new file mode 100644 index 00000000..0fafc054 --- /dev/null +++ b/src/vendor/github.com/ant0ine/go-json-rest/rest/gzip.go @@ -0,0 +1,132 @@ +package rest + +import ( + "bufio" + "compress/gzip" + "net" + "net/http" + "strings" +) + +// GzipMiddleware is responsible for compressing the payload with gzip and setting the proper +// headers when supported by the client. It must be wrapped by TimerMiddleware for the +// compression time to be captured. And It must be wrapped by RecorderMiddleware for the +// compressed BYTES_WRITTEN to be captured. +type GzipMiddleware struct{} + +// MiddlewareFunc makes GzipMiddleware implement the Middleware interface. +func (mw *GzipMiddleware) MiddlewareFunc(h HandlerFunc) HandlerFunc { + return func(w ResponseWriter, r *Request) { + // gzip support enabled + canGzip := strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") + // client accepts gzip ? + writer := &gzipResponseWriter{w, false, canGzip, nil} + defer func() { + // need to close gzip writer + if writer.gzipWriter != nil { + writer.gzipWriter.Close() + } + }() + // call the handler with the wrapped writer + h(writer, r) + } +} + +// Private responseWriter intantiated by the gzip middleware. +// It encodes the payload with gzip and set the proper headers. +// It implements the following interfaces: +// ResponseWriter +// http.ResponseWriter +// http.Flusher +// http.CloseNotifier +// http.Hijacker +type gzipResponseWriter struct { + ResponseWriter + wroteHeader bool + canGzip bool + gzipWriter *gzip.Writer +} + +// Set the right headers for gzip encoded responses. 
+func (w *gzipResponseWriter) WriteHeader(code int) { + + // Always set the Vary header, even if this particular request + // is not gzipped. + w.Header().Add("Vary", "Accept-Encoding") + + if w.canGzip { + w.Header().Set("Content-Encoding", "gzip") + } + + w.ResponseWriter.WriteHeader(code) + w.wroteHeader = true +} + +// Make sure the local Write is called. +func (w *gzipResponseWriter) WriteJson(v interface{}) error { + b, err := w.EncodeJson(v) + if err != nil { + return err + } + _, err = w.Write(b) + if err != nil { + return err + } + return nil +} + +// Make sure the local WriteHeader is called, and call the parent Flush. +// Provided in order to implement the http.Flusher interface. +func (w *gzipResponseWriter) Flush() { + if !w.wroteHeader { + w.WriteHeader(http.StatusOK) + } + flusher := w.ResponseWriter.(http.Flusher) + flusher.Flush() +} + +// Call the parent CloseNotify. +// Provided in order to implement the http.CloseNotifier interface. +func (w *gzipResponseWriter) CloseNotify() <-chan bool { + notifier := w.ResponseWriter.(http.CloseNotifier) + return notifier.CloseNotify() +} + +// Provided in order to implement the http.Hijacker interface. +func (w *gzipResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { + hijacker := w.ResponseWriter.(http.Hijacker) + return hijacker.Hijack() +} + +// Make sure the local WriteHeader is called, and encode the payload if necessary. +// Provided in order to implement the http.ResponseWriter interface. +func (w *gzipResponseWriter) Write(b []byte) (int, error) { + + if !w.wroteHeader { + w.WriteHeader(http.StatusOK) + } + + writer := w.ResponseWriter.(http.ResponseWriter) + + if w.canGzip { + // Write can be called multiple times for a given response. + // (see the streaming example: + // https://github.com/ant0ine/go-json-rest-examples/tree/master/streaming) + // The gzipWriter is instantiated only once, and flushed after + // each write. 
+ if w.gzipWriter == nil { + w.gzipWriter = gzip.NewWriter(writer) + } + count, errW := w.gzipWriter.Write(b) + errF := w.gzipWriter.Flush() + if errW != nil { + return count, errW + } + if errF != nil { + return count, errF + } + return count, nil + } + + return writer.Write(b) +} diff --git a/src/vendor/github.com/ant0ine/go-json-rest/rest/gzip_test.go b/src/vendor/github.com/ant0ine/go-json-rest/rest/gzip_test.go new file mode 100644 index 00000000..06a7e6fc --- /dev/null +++ b/src/vendor/github.com/ant0ine/go-json-rest/rest/gzip_test.go @@ -0,0 +1,68 @@ +package rest + +import ( + "github.com/ant0ine/go-json-rest/rest/test" + "testing" +) + +func TestGzipEnabled(t *testing.T) { + + api := NewApi() + + // the middleware to test + api.Use(&GzipMiddleware{}) + + // router app with success and error paths + router, err := MakeRouter( + Get("/ok", func(w ResponseWriter, r *Request) { + w.WriteJson(map[string]string{"Id": "123"}) + }), + Get("/error", func(w ResponseWriter, r *Request) { + Error(w, "gzipped error", 500) + }), + ) + if err != nil { + t.Fatal(err) + } + + api.SetApp(router) + + // wrap all + handler := api.MakeHandler() + + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("GET", "http://localhost/ok", nil)) + recorded.CodeIs(200) + recorded.ContentTypeIsJson() + recorded.ContentEncodingIsGzip() + recorded.HeaderIs("Vary", "Accept-Encoding") + + recorded = test.RunRequest(t, handler, test.MakeSimpleRequest("GET", "http://localhost/error", nil)) + recorded.CodeIs(500) + recorded.ContentTypeIsJson() + recorded.ContentEncodingIsGzip() + recorded.HeaderIs("Vary", "Accept-Encoding") +} + +func TestGzipDisabled(t *testing.T) { + + api := NewApi() + + // router app with success and error paths + router, err := MakeRouter( + Get("/ok", func(w ResponseWriter, r *Request) { + w.WriteJson(map[string]string{"Id": "123"}) + }), + ) + if err != nil { + t.Fatal(err) + } + + api.SetApp(router) + handler := api.MakeHandler() + + recorded := 
test.RunRequest(t, handler, test.MakeSimpleRequest("GET", "http://localhost/ok", nil)) + recorded.CodeIs(200) + recorded.ContentTypeIsJson() + recorded.HeaderIs("Content-Encoding", "") + recorded.HeaderIs("Vary", "") +} diff --git a/src/vendor/github.com/ant0ine/go-json-rest/rest/if.go b/src/vendor/github.com/ant0ine/go-json-rest/rest/if.go new file mode 100644 index 00000000..daa37d1a --- /dev/null +++ b/src/vendor/github.com/ant0ine/go-json-rest/rest/if.go @@ -0,0 +1,53 @@ +package rest + +import ( + "log" +) + +// IfMiddleware evaluates at runtime a condition based on the current request, and decides to +// execute one of the other Middleware based on this boolean. +type IfMiddleware struct { + + // Runtime condition that decides of the execution of IfTrue of IfFalse. + Condition func(r *Request) bool + + // Middleware to run when the condition is true. Note that the middleware is initialized + // weather if will be used or not. (Optional, pass-through if not set) + IfTrue Middleware + + // Middleware to run when the condition is false. Note that the middleware is initialized + // weather if will be used or not. (Optional, pass-through if not set) + IfFalse Middleware +} + +// MiddlewareFunc makes TimerMiddleware implement the Middleware interface. 
+func (mw *IfMiddleware) MiddlewareFunc(h HandlerFunc) HandlerFunc { + + if mw.Condition == nil { + log.Fatal("IfMiddleware Condition is required") + } + + var ifTrueHandler HandlerFunc + if mw.IfTrue != nil { + ifTrueHandler = mw.IfTrue.MiddlewareFunc(h) + } else { + ifTrueHandler = h + } + + var ifFalseHandler HandlerFunc + if mw.IfFalse != nil { + ifFalseHandler = mw.IfFalse.MiddlewareFunc(h) + } else { + ifFalseHandler = h + } + + return func(w ResponseWriter, r *Request) { + + if mw.Condition(r) { + ifTrueHandler(w, r) + } else { + ifFalseHandler(w, r) + } + + } +} diff --git a/src/vendor/github.com/ant0ine/go-json-rest/rest/if_test.go b/src/vendor/github.com/ant0ine/go-json-rest/rest/if_test.go new file mode 100644 index 00000000..fca57f48 --- /dev/null +++ b/src/vendor/github.com/ant0ine/go-json-rest/rest/if_test.go @@ -0,0 +1,51 @@ +package rest + +import ( + "github.com/ant0ine/go-json-rest/rest/test" + "testing" +) + +func TestIfMiddleware(t *testing.T) { + + api := NewApi() + + // the middleware to test + api.Use(&IfMiddleware{ + Condition: func(r *Request) bool { + if r.URL.Path == "/true" { + return true + } + return false + }, + IfTrue: MiddlewareSimple(func(handler HandlerFunc) HandlerFunc { + return func(w ResponseWriter, r *Request) { + r.Env["TRUE_MIDDLEWARE"] = true + handler(w, r) + } + }), + IfFalse: MiddlewareSimple(func(handler HandlerFunc) HandlerFunc { + return func(w ResponseWriter, r *Request) { + r.Env["FALSE_MIDDLEWARE"] = true + handler(w, r) + } + }), + }) + + // a simple app + api.SetApp(AppSimple(func(w ResponseWriter, r *Request) { + w.WriteJson(r.Env) + })) + + // wrap all + handler := api.MakeHandler() + + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("GET", "http://localhost/", nil)) + recorded.CodeIs(200) + recorded.ContentTypeIsJson() + recorded.BodyIs("{\"FALSE_MIDDLEWARE\":true}") + + recorded = test.RunRequest(t, handler, test.MakeSimpleRequest("GET", "http://localhost/true", nil)) + recorded.CodeIs(200) + 
recorded.ContentTypeIsJson() + recorded.BodyIs("{\"TRUE_MIDDLEWARE\":true}") +} diff --git a/src/vendor/github.com/ant0ine/go-json-rest/rest/json_indent.go b/src/vendor/github.com/ant0ine/go-json-rest/rest/json_indent.go new file mode 100644 index 00000000..ad9a5ca3 --- /dev/null +++ b/src/vendor/github.com/ant0ine/go-json-rest/rest/json_indent.go @@ -0,0 +1,113 @@ +package rest + +import ( + "bufio" + "encoding/json" + "net" + "net/http" +) + +// JsonIndentMiddleware provides JSON encoding with indentation. +// It could be convenient to use it during development. +// It works by "subclassing" the responseWriter provided by the wrapping middleware, +// replacing the writer.EncodeJson and writer.WriteJson implementations, +// and making the parent implementations ignored. +type JsonIndentMiddleware struct { + + // prefix string, as in json.MarshalIndent + Prefix string + + // indentation string, as in json.MarshalIndent + Indent string +} + +// MiddlewareFunc makes JsonIndentMiddleware implement the Middleware interface. +func (mw *JsonIndentMiddleware) MiddlewareFunc(handler HandlerFunc) HandlerFunc { + + if mw.Indent == "" { + mw.Indent = " " + } + + return func(w ResponseWriter, r *Request) { + + writer := &jsonIndentResponseWriter{w, false, mw.Prefix, mw.Indent} + // call the wrapped handler + handler(writer, r) + } +} + +// Private responseWriter intantiated by the middleware. +// It implements the following interfaces: +// ResponseWriter +// http.ResponseWriter +// http.Flusher +// http.CloseNotifier +// http.Hijacker +type jsonIndentResponseWriter struct { + ResponseWriter + wroteHeader bool + prefix string + indent string +} + +// Replace the parent EncodeJson to provide indentation. +func (w *jsonIndentResponseWriter) EncodeJson(v interface{}) ([]byte, error) { + b, err := json.MarshalIndent(v, w.prefix, w.indent) + if err != nil { + return nil, err + } + return b, nil +} + +// Make sure the local EncodeJson and local Write are called. 
+// Does not call the parent WriteJson. +func (w *jsonIndentResponseWriter) WriteJson(v interface{}) error { + b, err := w.EncodeJson(v) + if err != nil { + return err + } + _, err = w.Write(b) + if err != nil { + return err + } + return nil +} + +// Call the parent WriteHeader. +func (w *jsonIndentResponseWriter) WriteHeader(code int) { + w.ResponseWriter.WriteHeader(code) + w.wroteHeader = true +} + +// Make sure the local WriteHeader is called, and call the parent Flush. +// Provided in order to implement the http.Flusher interface. +func (w *jsonIndentResponseWriter) Flush() { + if !w.wroteHeader { + w.WriteHeader(http.StatusOK) + } + flusher := w.ResponseWriter.(http.Flusher) + flusher.Flush() +} + +// Call the parent CloseNotify. +// Provided in order to implement the http.CloseNotifier interface. +func (w *jsonIndentResponseWriter) CloseNotify() <-chan bool { + notifier := w.ResponseWriter.(http.CloseNotifier) + return notifier.CloseNotify() +} + +// Provided in order to implement the http.Hijacker interface. +func (w *jsonIndentResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { + hijacker := w.ResponseWriter.(http.Hijacker) + return hijacker.Hijack() +} + +// Make sure the local WriteHeader is called, and call the parent Write. +// Provided in order to implement the http.ResponseWriter interface. 
+func (w *jsonIndentResponseWriter) Write(b []byte) (int, error) { + if !w.wroteHeader { + w.WriteHeader(http.StatusOK) + } + writer := w.ResponseWriter.(http.ResponseWriter) + return writer.Write(b) +} diff --git a/src/vendor/github.com/ant0ine/go-json-rest/rest/json_indent_test.go b/src/vendor/github.com/ant0ine/go-json-rest/rest/json_indent_test.go new file mode 100644 index 00000000..58924e09 --- /dev/null +++ b/src/vendor/github.com/ant0ine/go-json-rest/rest/json_indent_test.go @@ -0,0 +1,28 @@ +package rest + +import ( + "github.com/ant0ine/go-json-rest/rest/test" + "testing" +) + +func TestJsonIndentMiddleware(t *testing.T) { + + api := NewApi() + + // the middleware to test + api.Use(&JsonIndentMiddleware{}) + + // a simple app + api.SetApp(AppSimple(func(w ResponseWriter, r *Request) { + w.WriteJson(map[string]string{"Id": "123"}) + })) + + // wrap all + handler := api.MakeHandler() + + req := test.MakeSimpleRequest("GET", "http://localhost/", nil) + recorded := test.RunRequest(t, handler, req) + recorded.CodeIs(200) + recorded.ContentTypeIsJson() + recorded.BodyIs("{\n \"Id\": \"123\"\n}") +} diff --git a/src/vendor/github.com/ant0ine/go-json-rest/rest/jsonp.go b/src/vendor/github.com/ant0ine/go-json-rest/rest/jsonp.go new file mode 100644 index 00000000..6071b503 --- /dev/null +++ b/src/vendor/github.com/ant0ine/go-json-rest/rest/jsonp.go @@ -0,0 +1,116 @@ +package rest + +import ( + "bufio" + "net" + "net/http" +) + +// JsonpMiddleware provides JSONP responses on demand, based on the presence +// of a query string argument specifying the callback name. +type JsonpMiddleware struct { + + // Name of the query string parameter used to specify the + // the name of the JS callback used for the padding. + // Defaults to "callback". + CallbackNameKey string +} + +// MiddlewareFunc returns a HandlerFunc that implements the middleware. 
+func (mw *JsonpMiddleware) MiddlewareFunc(h HandlerFunc) HandlerFunc { + + if mw.CallbackNameKey == "" { + mw.CallbackNameKey = "callback" + } + + return func(w ResponseWriter, r *Request) { + + callbackName := r.URL.Query().Get(mw.CallbackNameKey) + // TODO validate the callbackName ? + + if callbackName != "" { + // the client request JSONP, instantiate JsonpMiddleware. + writer := &jsonpResponseWriter{w, false, callbackName} + // call the handler with the wrapped writer + h(writer, r) + } else { + // do nothing special + h(w, r) + } + + } +} + +// Private responseWriter intantiated by the JSONP middleware. +// It adds the padding to the payload and set the proper headers. +// It implements the following interfaces: +// ResponseWriter +// http.ResponseWriter +// http.Flusher +// http.CloseNotifier +// http.Hijacker +type jsonpResponseWriter struct { + ResponseWriter + wroteHeader bool + callbackName string +} + +// Overwrite the Content-Type to be text/javascript +func (w *jsonpResponseWriter) WriteHeader(code int) { + + w.Header().Set("Content-Type", "text/javascript") + + w.ResponseWriter.WriteHeader(code) + w.wroteHeader = true +} + +// Make sure the local Write is called. +func (w *jsonpResponseWriter) WriteJson(v interface{}) error { + b, err := w.EncodeJson(v) + if err != nil { + return err + } + // JSONP security fix (http://miki.it/blog/2014/7/8/abusing-jsonp-with-rosetta-flash/) + w.Header().Set("Content-Disposition", "filename=f.txt") + w.Header().Set("X-Content-Type-Options", "nosniff") + w.Write([]byte("/**/" + w.callbackName + "(")) + w.Write(b) + w.Write([]byte(")")) + return nil +} + +// Make sure the local WriteHeader is called, and call the parent Flush. +// Provided in order to implement the http.Flusher interface. +func (w *jsonpResponseWriter) Flush() { + if !w.wroteHeader { + w.WriteHeader(http.StatusOK) + } + flusher := w.ResponseWriter.(http.Flusher) + flusher.Flush() +} + +// Call the parent CloseNotify. 
+// Provided in order to implement the http.CloseNotifier interface. +func (w *jsonpResponseWriter) CloseNotify() <-chan bool { + notifier := w.ResponseWriter.(http.CloseNotifier) + return notifier.CloseNotify() +} + +// Provided in order to implement the http.Hijacker interface. +func (w *jsonpResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { + hijacker := w.ResponseWriter.(http.Hijacker) + return hijacker.Hijack() +} + +// Make sure the local WriteHeader is called. +// Provided in order to implement the http.ResponseWriter interface. +func (w *jsonpResponseWriter) Write(b []byte) (int, error) { + + if !w.wroteHeader { + w.WriteHeader(http.StatusOK) + } + + writer := w.ResponseWriter.(http.ResponseWriter) + + return writer.Write(b) +} diff --git a/src/vendor/github.com/ant0ine/go-json-rest/rest/jsonp_test.go b/src/vendor/github.com/ant0ine/go-json-rest/rest/jsonp_test.go new file mode 100644 index 00000000..e556d8f8 --- /dev/null +++ b/src/vendor/github.com/ant0ine/go-json-rest/rest/jsonp_test.go @@ -0,0 +1,47 @@ +package rest + +import ( + "testing" + + "github.com/ant0ine/go-json-rest/rest/test" +) + +func TestJsonpMiddleware(t *testing.T) { + + api := NewApi() + + // the middleware to test + api.Use(&JsonpMiddleware{}) + + // router app with success and error paths + router, err := MakeRouter( + Get("/ok", func(w ResponseWriter, r *Request) { + w.WriteJson(map[string]string{"Id": "123"}) + }), + Get("/error", func(w ResponseWriter, r *Request) { + Error(w, "jsonp error", 500) + }), + ) + if err != nil { + t.Fatal(err) + } + + api.SetApp(router) + + // wrap all + handler := api.MakeHandler() + + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("GET", "http://localhost/ok?callback=parseResponse", nil)) + recorded.CodeIs(200) + recorded.HeaderIs("Content-Type", "text/javascript") + recorded.HeaderIs("Content-Disposition", "filename=f.txt") + recorded.HeaderIs("X-Content-Type-Options", "nosniff") + 
recorded.BodyIs("/**/parseResponse({\"Id\":\"123\"})") + + recorded = test.RunRequest(t, handler, test.MakeSimpleRequest("GET", "http://localhost/error?callback=parseResponse", nil)) + recorded.CodeIs(500) + recorded.HeaderIs("Content-Type", "text/javascript") + recorded.HeaderIs("Content-Disposition", "filename=f.txt") + recorded.HeaderIs("X-Content-Type-Options", "nosniff") + recorded.BodyIs("/**/parseResponse({\"Error\":\"jsonp error\"})") +} diff --git a/src/vendor/github.com/ant0ine/go-json-rest/rest/middleware.go b/src/vendor/github.com/ant0ine/go-json-rest/rest/middleware.go new file mode 100644 index 00000000..ba03fb8c --- /dev/null +++ b/src/vendor/github.com/ant0ine/go-json-rest/rest/middleware.go @@ -0,0 +1,72 @@ +package rest + +import ( + "net/http" +) + +// HandlerFunc defines the handler function. It is the go-json-rest equivalent of http.HandlerFunc. +type HandlerFunc func(ResponseWriter, *Request) + +// App defines the interface that an object should implement to be used as an app in this framework +// stack. The App is the top element of the stack, the other elements being middlewares. +type App interface { + AppFunc() HandlerFunc +} + +// AppSimple is an adapter type that makes it easy to write an App with a simple function. +// eg: rest.NewApi(rest.AppSimple(func(w rest.ResponseWriter, r *rest.Request) { ... })) +type AppSimple HandlerFunc + +// AppFunc makes AppSimple implement the App interface. +func (as AppSimple) AppFunc() HandlerFunc { + return HandlerFunc(as) +} + +// Middleware defines the interface that objects must implement in order to wrap a HandlerFunc and +// be used in the middleware stack. +type Middleware interface { + MiddlewareFunc(handler HandlerFunc) HandlerFunc +} + +// MiddlewareSimple is an adapter type that makes it easy to write a Middleware with a simple +// function. eg: api.Use(rest.MiddlewareSimple(func(h HandlerFunc) Handlerfunc { ... 
})) +type MiddlewareSimple func(handler HandlerFunc) HandlerFunc + +// MiddlewareFunc makes MiddlewareSimple implement the Middleware interface. +func (ms MiddlewareSimple) MiddlewareFunc(handler HandlerFunc) HandlerFunc { + return ms(handler) +} + +// WrapMiddlewares calls the MiddlewareFunc methods in the reverse order and returns an HandlerFunc +// ready to be executed. This can be used to wrap a set of middlewares, post routing, on a per Route +// basis. +func WrapMiddlewares(middlewares []Middleware, handler HandlerFunc) HandlerFunc { + wrapped := handler + for i := len(middlewares) - 1; i >= 0; i-- { + wrapped = middlewares[i].MiddlewareFunc(wrapped) + } + return wrapped +} + +// Handle the transition between net/http and go-json-rest objects. +// It intanciates the rest.Request and rest.ResponseWriter, ... +func adapterFunc(handler HandlerFunc) http.HandlerFunc { + + return func(origWriter http.ResponseWriter, origRequest *http.Request) { + + // instantiate the rest objects + request := &Request{ + origRequest, + nil, + map[string]interface{}{}, + } + + writer := &responseWriter{ + origWriter, + false, + } + + // call the wrapped handler + handler(writer, request) + } +} diff --git a/src/vendor/github.com/ant0ine/go-json-rest/rest/middleware_test.go b/src/vendor/github.com/ant0ine/go-json-rest/rest/middleware_test.go new file mode 100644 index 00000000..63b3d1a8 --- /dev/null +++ b/src/vendor/github.com/ant0ine/go-json-rest/rest/middleware_test.go @@ -0,0 +1,57 @@ +package rest + +import ( + "testing" +) + +type testMiddleware struct { + name string +} + +func (mw *testMiddleware) MiddlewareFunc(handler HandlerFunc) HandlerFunc { + return func(w ResponseWriter, r *Request) { + if r.Env["BEFORE"] == nil { + r.Env["BEFORE"] = mw.name + } else { + r.Env["BEFORE"] = r.Env["BEFORE"].(string) + mw.name + } + handler(w, r) + if r.Env["AFTER"] == nil { + r.Env["AFTER"] = mw.name + } else { + r.Env["AFTER"] = r.Env["AFTER"].(string) + mw.name + } + } +} + +func 
TestWrapMiddlewares(t *testing.T) { + + a := &testMiddleware{"A"} + b := &testMiddleware{"B"} + c := &testMiddleware{"C"} + + app := func(w ResponseWriter, r *Request) { + // do nothing + } + + handlerFunc := WrapMiddlewares([]Middleware{a, b, c}, app) + + // fake request + r := &Request{ + nil, + nil, + map[string]interface{}{}, + } + + handlerFunc(nil, r) + + before := r.Env["BEFORE"].(string) + if before != "ABC" { + t.Error("middleware executed in the wrong order, expected ABC") + } + + after := r.Env["AFTER"].(string) + if after != "CBA" { + t.Error("middleware executed in the wrong order, expected CBA") + } +} diff --git a/src/vendor/github.com/ant0ine/go-json-rest/rest/powered_by.go b/src/vendor/github.com/ant0ine/go-json-rest/rest/powered_by.go new file mode 100644 index 00000000..3b22ccfd --- /dev/null +++ b/src/vendor/github.com/ant0ine/go-json-rest/rest/powered_by.go @@ -0,0 +1,29 @@ +package rest + +const xPoweredByDefault = "go-json-rest" + +// PoweredByMiddleware adds the "X-Powered-By" header to the HTTP response. +type PoweredByMiddleware struct { + + // If specified, used as the value for the "X-Powered-By" response header. + // Defaults to "go-json-rest". + XPoweredBy string +} + +// MiddlewareFunc makes PoweredByMiddleware implement the Middleware interface. 
+func (mw *PoweredByMiddleware) MiddlewareFunc(h HandlerFunc) HandlerFunc { + + poweredBy := xPoweredByDefault + if mw.XPoweredBy != "" { + poweredBy = mw.XPoweredBy + } + + return func(w ResponseWriter, r *Request) { + + w.Header().Add("X-Powered-By", poweredBy) + + // call the handler + h(w, r) + + } +} diff --git a/src/vendor/github.com/ant0ine/go-json-rest/rest/powered_by_test.go b/src/vendor/github.com/ant0ine/go-json-rest/rest/powered_by_test.go new file mode 100644 index 00000000..9d1ca34a --- /dev/null +++ b/src/vendor/github.com/ant0ine/go-json-rest/rest/powered_by_test.go @@ -0,0 +1,30 @@ +package rest + +import ( + "github.com/ant0ine/go-json-rest/rest/test" + "testing" +) + +func TestPoweredByMiddleware(t *testing.T) { + + api := NewApi() + + // the middleware to test + api.Use(&PoweredByMiddleware{ + XPoweredBy: "test", + }) + + // a simple app + api.SetApp(AppSimple(func(w ResponseWriter, r *Request) { + w.WriteJson(map[string]string{"Id": "123"}) + })) + + // wrap all + handler := api.MakeHandler() + + req := test.MakeSimpleRequest("GET", "http://localhost/", nil) + recorded := test.RunRequest(t, handler, req) + recorded.CodeIs(200) + recorded.ContentTypeIsJson() + recorded.HeaderIs("X-Powered-By", "test") +} diff --git a/src/vendor/github.com/ant0ine/go-json-rest/rest/recorder.go b/src/vendor/github.com/ant0ine/go-json-rest/rest/recorder.go new file mode 100644 index 00000000..20502e94 --- /dev/null +++ b/src/vendor/github.com/ant0ine/go-json-rest/rest/recorder.go @@ -0,0 +1,100 @@ +package rest + +import ( + "bufio" + "net" + "net/http" +) + +// RecorderMiddleware keeps a record of the HTTP status code of the response, +// and the number of bytes written. +// The result is available to the wrapping handlers as request.Env["STATUS_CODE"].(int), +// and as request.Env["BYTES_WRITTEN"].(int64) +type RecorderMiddleware struct{} + +// MiddlewareFunc makes RecorderMiddleware implement the Middleware interface. 
+func (mw *RecorderMiddleware) MiddlewareFunc(h HandlerFunc) HandlerFunc { + return func(w ResponseWriter, r *Request) { + + writer := &recorderResponseWriter{w, 0, false, 0} + + // call the handler + h(writer, r) + + r.Env["STATUS_CODE"] = writer.statusCode + r.Env["BYTES_WRITTEN"] = writer.bytesWritten + } +} + +// Private responseWriter intantiated by the recorder middleware. +// It keeps a record of the HTTP status code of the response. +// It implements the following interfaces: +// ResponseWriter +// http.ResponseWriter +// http.Flusher +// http.CloseNotifier +// http.Hijacker +type recorderResponseWriter struct { + ResponseWriter + statusCode int + wroteHeader bool + bytesWritten int64 +} + +// Record the status code. +func (w *recorderResponseWriter) WriteHeader(code int) { + w.ResponseWriter.WriteHeader(code) + if w.wroteHeader { + return + } + w.statusCode = code + w.wroteHeader = true +} + +// Make sure the local Write is called. +func (w *recorderResponseWriter) WriteJson(v interface{}) error { + b, err := w.EncodeJson(v) + if err != nil { + return err + } + _, err = w.Write(b) + if err != nil { + return err + } + return nil +} + +// Make sure the local WriteHeader is called, and call the parent Flush. +// Provided in order to implement the http.Flusher interface. +func (w *recorderResponseWriter) Flush() { + if !w.wroteHeader { + w.WriteHeader(http.StatusOK) + } + flusher := w.ResponseWriter.(http.Flusher) + flusher.Flush() +} + +// Call the parent CloseNotify. +// Provided in order to implement the http.CloseNotifier interface. +func (w *recorderResponseWriter) CloseNotify() <-chan bool { + notifier := w.ResponseWriter.(http.CloseNotifier) + return notifier.CloseNotify() +} + +// Provided in order to implement the http.Hijacker interface. 
+func (w *recorderResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { + hijacker := w.ResponseWriter.(http.Hijacker) + return hijacker.Hijack() +} + +// Make sure the local WriteHeader is called, and call the parent Write. +// Provided in order to implement the http.ResponseWriter interface. +func (w *recorderResponseWriter) Write(b []byte) (int, error) { + if !w.wroteHeader { + w.WriteHeader(http.StatusOK) + } + writer := w.ResponseWriter.(http.ResponseWriter) + written, err := writer.Write(b) + w.bytesWritten += int64(written) + return written, err +} diff --git a/src/vendor/github.com/ant0ine/go-json-rest/rest/recorder_test.go b/src/vendor/github.com/ant0ine/go-json-rest/rest/recorder_test.go new file mode 100644 index 00000000..c3dabd27 --- /dev/null +++ b/src/vendor/github.com/ant0ine/go-json-rest/rest/recorder_test.go @@ -0,0 +1,134 @@ +package rest + +import ( + "testing" + + "github.com/ant0ine/go-json-rest/rest/test" +) + +func TestRecorderMiddleware(t *testing.T) { + + api := NewApi() + + // a middleware carrying the Env tests + api.Use(MiddlewareSimple(func(handler HandlerFunc) HandlerFunc { + return func(w ResponseWriter, r *Request) { + + handler(w, r) + + if r.Env["STATUS_CODE"] == nil { + t.Error("STATUS_CODE is nil") + } + statusCode := r.Env["STATUS_CODE"].(int) + if statusCode != 200 { + t.Errorf("STATUS_CODE = 200 expected, got %d", statusCode) + } + + if r.Env["BYTES_WRITTEN"] == nil { + t.Error("BYTES_WRITTEN is nil") + } + bytesWritten := r.Env["BYTES_WRITTEN"].(int64) + // '{"Id":"123"}' => 12 chars + if bytesWritten != 12 { + t.Errorf("BYTES_WRITTEN 12 expected, got %d", bytesWritten) + } + } + })) + + // the middleware to test + api.Use(&RecorderMiddleware{}) + + // a simple app + api.SetApp(AppSimple(func(w ResponseWriter, r *Request) { + w.WriteJson(map[string]string{"Id": "123"}) + })) + + // wrap all + handler := api.MakeHandler() + + req := test.MakeSimpleRequest("GET", "http://localhost/", nil) + recorded := 
test.RunRequest(t, handler, req) + recorded.CodeIs(200) + recorded.ContentTypeIsJson() +} + +// See how many bytes are written when gzipping +func TestRecorderAndGzipMiddleware(t *testing.T) { + + api := NewApi() + + // a middleware carrying the Env tests + api.Use(MiddlewareSimple(func(handler HandlerFunc) HandlerFunc { + return func(w ResponseWriter, r *Request) { + + handler(w, r) + + if r.Env["BYTES_WRITTEN"] == nil { + t.Error("BYTES_WRITTEN is nil") + } + bytesWritten := r.Env["BYTES_WRITTEN"].(int64) + // Yes, the gzipped version actually takes more space. + if bytesWritten != 41 { + t.Errorf("BYTES_WRITTEN 41 expected, got %d", bytesWritten) + } + } + })) + + // the middlewares to test + api.Use(&RecorderMiddleware{}) + api.Use(&GzipMiddleware{}) + + // a simple app + api.SetApp(AppSimple(func(w ResponseWriter, r *Request) { + w.WriteJson(map[string]string{"Id": "123"}) + })) + + // wrap all + handler := api.MakeHandler() + + req := test.MakeSimpleRequest("GET", "http://localhost/", nil) + // "Accept-Encoding", "gzip" is set by test.MakeSimpleRequest + recorded := test.RunRequest(t, handler, req) + recorded.CodeIs(200) + recorded.ContentTypeIsJson() +} + +//Underlying net/http only allows you to set the status code once +func TestRecorderMiddlewareReportsSameStatusCodeAsResponse(t *testing.T) { + api := NewApi() + const firstCode = 400 + const secondCode = 500 + + // a middleware carrying the Env tests + api.Use(MiddlewareSimple(func(handler HandlerFunc) HandlerFunc { + return func(w ResponseWriter, r *Request) { + + handler(w, r) + + if r.Env["STATUS_CODE"] == nil { + t.Error("STATUS_CODE is nil") + } + statusCode := r.Env["STATUS_CODE"].(int) + if statusCode != firstCode { + t.Errorf("STATUS_CODE = %d expected, got %d", firstCode, statusCode) + } + } + })) + + // the middleware to test + api.Use(&RecorderMiddleware{}) + + // a simple app + api.SetApp(AppSimple(func(w ResponseWriter, r *Request) { + w.WriteHeader(firstCode) + w.WriteHeader(secondCode) + 
})) + + // wrap all + handler := api.MakeHandler() + + req := test.MakeSimpleRequest("GET", "http://localhost/", nil) + recorded := test.RunRequest(t, handler, req) + recorded.CodeIs(firstCode) + recorded.ContentTypeIsJson() +} diff --git a/src/vendor/github.com/ant0ine/go-json-rest/rest/recover.go b/src/vendor/github.com/ant0ine/go-json-rest/rest/recover.go new file mode 100644 index 00000000..99f15158 --- /dev/null +++ b/src/vendor/github.com/ant0ine/go-json-rest/rest/recover.go @@ -0,0 +1,74 @@ +package rest + +import ( + "encoding/json" + "fmt" + "log" + "net/http" + "os" + "runtime/debug" +) + +// RecoverMiddleware catches the panic errors that occur in the wrapped HandleFunc, +// and convert them to 500 responses. +type RecoverMiddleware struct { + + // Custom logger used for logging the panic errors, + // optional, defaults to log.New(os.Stderr, "", 0) + Logger *log.Logger + + // If true, the log records will be printed as JSON. Convenient for log parsing. + EnableLogAsJson bool + + // If true, when a "panic" happens, the error string and the stack trace will be + // printed in the 500 response body. + EnableResponseStackTrace bool +} + +// MiddlewareFunc makes RecoverMiddleware implement the Middleware interface. 
+func (mw *RecoverMiddleware) MiddlewareFunc(h HandlerFunc) HandlerFunc { + + // set the default Logger + if mw.Logger == nil { + mw.Logger = log.New(os.Stderr, "", 0) + } + + return func(w ResponseWriter, r *Request) { + + // catch user code's panic, and convert to http response + defer func() { + if reco := recover(); reco != nil { + trace := debug.Stack() + + // log the trace + message := fmt.Sprintf("%s\n%s", reco, trace) + mw.logError(message) + + // write error response + if mw.EnableResponseStackTrace { + Error(w, message, http.StatusInternalServerError) + } else { + Error(w, "Internal Server Error", http.StatusInternalServerError) + } + } + }() + + // call the handler + h(w, r) + } +} + +func (mw *RecoverMiddleware) logError(message string) { + if mw.EnableLogAsJson { + record := map[string]string{ + "error": message, + } + b, err := json.Marshal(&record) + if err != nil { + panic(err) + } + mw.Logger.Printf("%s", b) + } else { + mw.Logger.Print(message) + } +} diff --git a/src/vendor/github.com/ant0ine/go-json-rest/rest/recover_test.go b/src/vendor/github.com/ant0ine/go-json-rest/rest/recover_test.go new file mode 100644 index 00000000..953ae5a6 --- /dev/null +++ b/src/vendor/github.com/ant0ine/go-json-rest/rest/recover_test.go @@ -0,0 +1,43 @@ +package rest + +import ( + "github.com/ant0ine/go-json-rest/rest/test" + "io/ioutil" + "log" + "testing" +) + +func TestRecoverMiddleware(t *testing.T) { + + api := NewApi() + + // the middleware to test + api.Use(&RecoverMiddleware{ + Logger: log.New(ioutil.Discard, "", 0), + EnableLogAsJson: false, + EnableResponseStackTrace: true, + }) + + // a simple app that fails + api.SetApp(AppSimple(func(w ResponseWriter, r *Request) { + panic("test") + })) + + // wrap all + handler := api.MakeHandler() + + req := test.MakeSimpleRequest("GET", "http://localhost/", nil) + recorded := test.RunRequest(t, handler, req) + recorded.CodeIs(500) + recorded.ContentTypeIsJson() + + // payload + payload := map[string]string{} + err 
:= recorded.DecodeJsonPayload(&payload) + if err != nil { + t.Fatal(err) + } + if payload["Error"] == "" { + t.Errorf("Expected an error message, got: %v", payload) + } +} diff --git a/src/vendor/github.com/ant0ine/go-json-rest/rest/request.go b/src/vendor/github.com/ant0ine/go-json-rest/rest/request.go new file mode 100644 index 00000000..c4eb3814 --- /dev/null +++ b/src/vendor/github.com/ant0ine/go-json-rest/rest/request.go @@ -0,0 +1,148 @@ +package rest + +import ( + "encoding/json" + "errors" + "io/ioutil" + "net/http" + "net/url" + "strings" +) + +var ( + // ErrJsonPayloadEmpty is returned when the JSON payload is empty. + ErrJsonPayloadEmpty = errors.New("JSON payload is empty") +) + +// Request inherits from http.Request, and provides additional methods. +type Request struct { + *http.Request + + // Map of parameters that have been matched in the URL Path. + PathParams map[string]string + + // Environment used by middlewares to communicate. + Env map[string]interface{} +} + +// PathParam provides a convenient access to the PathParams map. +func (r *Request) PathParam(name string) string { + return r.PathParams[name] +} + +// DecodeJsonPayload reads the request body and decodes the JSON using json.Unmarshal. +func (r *Request) DecodeJsonPayload(v interface{}) error { + content, err := ioutil.ReadAll(r.Body) + r.Body.Close() + if err != nil { + return err + } + if len(content) == 0 { + return ErrJsonPayloadEmpty + } + err = json.Unmarshal(content, v) + if err != nil { + return err + } + return nil +} + +// BaseUrl returns a new URL object with the Host and Scheme taken from the request. 
+// (without the trailing slash in the host) +func (r *Request) BaseUrl() *url.URL { + scheme := r.URL.Scheme + if scheme == "" { + scheme = "http" + } + + // HTTP sometimes gives the default scheme as HTTP even when used with TLS + // Check if TLS is not nil and given back https scheme + if scheme == "http" && r.TLS != nil { + scheme = "https" + } + + host := r.Host + if len(host) > 0 && host[len(host)-1] == '/' { + host = host[:len(host)-1] + } + + return &url.URL{ + Scheme: scheme, + Host: host, + } +} + +// UrlFor returns the URL object from UriBase with the Path set to path, and the query +// string built with queryParams. +func (r *Request) UrlFor(path string, queryParams map[string][]string) *url.URL { + baseUrl := r.BaseUrl() + baseUrl.Path = path + if queryParams != nil { + query := url.Values{} + for k, v := range queryParams { + for _, vv := range v { + query.Add(k, vv) + } + } + baseUrl.RawQuery = query.Encode() + } + return baseUrl +} + +// CorsInfo contains the CORS request info derived from a rest.Request. +type CorsInfo struct { + IsCors bool + IsPreflight bool + Origin string + OriginUrl *url.URL + + // The header value is converted to uppercase to avoid common mistakes. + AccessControlRequestMethod string + + // The header values are normalized with http.CanonicalHeaderKey. + AccessControlRequestHeaders []string +} + +// GetCorsInfo derives CorsInfo from Request. 
+func (r *Request) GetCorsInfo() *CorsInfo { + + origin := r.Header.Get("Origin") + + var originUrl *url.URL + var isCors bool + + if origin == "" { + isCors = false + } else if origin == "null" { + isCors = true + } else { + var err error + originUrl, err = url.ParseRequestURI(origin) + isCors = err == nil && r.Host != originUrl.Host + } + + reqMethod := r.Header.Get("Access-Control-Request-Method") + + reqHeaders := []string{} + rawReqHeaders := r.Header[http.CanonicalHeaderKey("Access-Control-Request-Headers")] + for _, rawReqHeader := range rawReqHeaders { + if len(rawReqHeader) == 0 { + continue + } + // net/http does not handle comma delimited headers for us + for _, reqHeader := range strings.Split(rawReqHeader, ",") { + reqHeaders = append(reqHeaders, http.CanonicalHeaderKey(strings.TrimSpace(reqHeader))) + } + } + + isPreflight := isCors && r.Method == "OPTIONS" && reqMethod != "" + + return &CorsInfo{ + IsCors: isCors, + IsPreflight: isPreflight, + Origin: origin, + OriginUrl: originUrl, + AccessControlRequestMethod: strings.ToUpper(reqMethod), + AccessControlRequestHeaders: reqHeaders, + } +} diff --git a/src/vendor/github.com/ant0ine/go-json-rest/rest/request_test.go b/src/vendor/github.com/ant0ine/go-json-rest/rest/request_test.go new file mode 100644 index 00000000..1467c925 --- /dev/null +++ b/src/vendor/github.com/ant0ine/go-json-rest/rest/request_test.go @@ -0,0 +1,213 @@ +package rest + +import ( + "crypto/tls" + "io" + "net/http" + "strings" + "testing" +) + +func defaultRequest(method string, urlStr string, body io.Reader, t *testing.T) *Request { + origReq, err := http.NewRequest(method, urlStr, body) + if err != nil { + t.Fatal(err) + } + return &Request{ + origReq, + nil, + map[string]interface{}{}, + } +} + +func TestRequestEmptyJson(t *testing.T) { + req := defaultRequest("POST", "http://localhost", strings.NewReader(""), t) + err := req.DecodeJsonPayload(nil) + if err != ErrJsonPayloadEmpty { + t.Error("Expected ErrJsonPayloadEmpty") + } 
+} + +func TestRequestBaseUrl(t *testing.T) { + req := defaultRequest("GET", "http://localhost", nil, t) + urlBase := req.BaseUrl() + urlString := urlBase.String() + + expected := "http://localhost" + if urlString != expected { + t.Error(expected + " was the expected URL base, but instead got " + urlString) + } +} + +func TestRequestUrlScheme(t *testing.T) { + req := defaultRequest("GET", "https://localhost", nil, t) + urlBase := req.BaseUrl() + + expected := "https" + if urlBase.Scheme != expected { + t.Error(expected + " was the expected scheme, but instead got " + urlBase.Scheme) + } +} + +func TestRequestUrlSchemeHTTP(t *testing.T) { + req := defaultRequest("GET", "http://localhost", nil, t) + urlBase := req.BaseUrl() + + expected := "http" + if urlBase.Scheme != expected { + t.Error(expected + " was the expected scheme, but instead got " + urlBase.Scheme) + } +} + +func TestRequestUrlSchemeHTTP2TLS(t *testing.T) { + req := defaultRequest("GET", "http://localhost", nil, t) + req.Proto = "HTTP" + req.ProtoMajor = 2 + req.ProtoMinor = 0 + req.TLS = &tls.ConnectionState{} + urlBase := req.BaseUrl() + + expected := "https" + if urlBase.Scheme != expected { + t.Error(expected + " was the expected scheme, but instead got " + urlBase.Scheme) + } +} + +func TestRequestUrlFor(t *testing.T) { + req := defaultRequest("GET", "http://localhost", nil, t) + + path := "/foo/bar" + + urlObj := req.UrlFor(path, nil) + if urlObj.Path != path { + t.Error(path + " was expected to be the path, but got " + urlObj.Path) + } + + expected := "http://localhost/foo/bar" + if urlObj.String() != expected { + t.Error(expected + " was expected, but the returned URL was " + urlObj.String()) + } +} + +func TestRequestUrlForQueryString(t *testing.T) { + req := defaultRequest("GET", "http://localhost", nil, t) + + params := map[string][]string{ + "id": {"foo", "bar"}, + } + + urlObj := req.UrlFor("/foo/bar", params) + + expected := "http://localhost/foo/bar?id=foo&id=bar" + if urlObj.String() != 
expected { + t.Error(expected + " was expected, but the returned URL was " + urlObj.String()) + } +} + +func TestCorsInfoSimpleCors(t *testing.T) { + req := defaultRequest("GET", "http://localhost", nil, t) + req.Request.Header.Set("Origin", "http://another.host") + + corsInfo := req.GetCorsInfo() + if corsInfo == nil { + t.Error("Expected non nil CorsInfo") + } + if corsInfo.IsCors == false { + t.Error("This is a CORS request") + } + if corsInfo.IsPreflight == true { + t.Error("This is not a Preflight request") + } +} + +func TestCorsInfoNullOrigin(t *testing.T) { + req := defaultRequest("GET", "http://localhost", nil, t) + req.Request.Header.Set("Origin", "null") + + corsInfo := req.GetCorsInfo() + if corsInfo == nil { + t.Error("Expected non nil CorsInfo") + } + if corsInfo.IsCors == false { + t.Error("This is a CORS request") + } + if corsInfo.IsPreflight == true { + t.Error("This is not a Preflight request") + } + if corsInfo.OriginUrl != nil { + t.Error("OriginUrl cannot be set") + } +} + +func TestCorsInfoPreflightCors(t *testing.T) { + req := defaultRequest("OPTIONS", "http://localhost", nil, t) + req.Request.Header.Set("Origin", "http://another.host") + + corsInfo := req.GetCorsInfo() + if corsInfo == nil { + t.Error("Expected non nil CorsInfo") + } + if corsInfo.IsCors == false { + t.Error("This is a CORS request") + } + if corsInfo.IsPreflight == true { + t.Error("This is NOT a Preflight request") + } + + // Preflight must have the Access-Control-Request-Method header + req.Request.Header.Set("Access-Control-Request-Method", "PUT") + corsInfo = req.GetCorsInfo() + if corsInfo == nil { + t.Error("Expected non nil CorsInfo") + } + if corsInfo.IsCors == false { + t.Error("This is a CORS request") + } + if corsInfo.IsPreflight == false { + t.Error("This is a Preflight request") + } + if corsInfo.Origin != "http://another.host" { + t.Error("Origin must be identical to the header value") + } + if corsInfo.OriginUrl == nil { + t.Error("OriginUrl must be set") + 
} +} + +func TestCorsInfoEmptyAccessControlRequestHeaders(t *testing.T) { + req := defaultRequest("OPTIONS", "http://localhost", nil, t) + req.Request.Header.Set("Origin", "http://another.host") + + // make it a preflight request + req.Request.Header.Set("Access-Control-Request-Method", "PUT") + + // WebKit based browsers may send `Access-Control-Request-Headers:` with + // no value, in which case, the header will be present in requests + // Header map, but its value is an empty string. + req.Request.Header.Set("Access-Control-Request-Headers", "") + corsInfo := req.GetCorsInfo() + if corsInfo == nil { + t.Error("Expected non nil CorsInfo") + } + if corsInfo.IsCors == false { + t.Error("This is a CORS request") + } + if len(corsInfo.AccessControlRequestHeaders) > 0 { + t.Error("Access-Control-Request-Headers should have been removed") + } + + req.Request.Header.Set("Access-Control-Request-Headers", "") + corsInfo = req.GetCorsInfo() + if corsInfo == nil { + t.Error("Expected non nil CorsInfo") + } + if corsInfo.IsCors == false { + t.Error("This is a CORS request") + } + if corsInfo.IsPreflight == false { + t.Error("This is a Preflight request") + } + if len(corsInfo.AccessControlRequestHeaders) > 0 { + t.Error("Empty Access-Control-Request-Headers header should have been removed") + } +} diff --git a/src/vendor/github.com/ant0ine/go-json-rest/rest/response.go b/src/vendor/github.com/ant0ine/go-json-rest/rest/response.go new file mode 100644 index 00000000..52529f16 --- /dev/null +++ b/src/vendor/github.com/ant0ine/go-json-rest/rest/response.go @@ -0,0 +1,127 @@ +package rest + +import ( + "bufio" + "encoding/json" + "net" + "net/http" +) + +// A ResponseWriter interface dedicated to JSON HTTP response. +// Note, the responseWriter object instantiated by the framework also implements many other interfaces +// accessible by type assertion: http.ResponseWriter, http.Flusher, http.CloseNotifier, http.Hijacker. 
+type ResponseWriter interface { + + // Identical to the http.ResponseWriter interface + Header() http.Header + + // Use EncodeJson to generate the payload, write the headers with http.StatusOK if + // they are not already written, then write the payload. + // The Content-Type header is set to "application/json", unless already specified. + WriteJson(v interface{}) error + + // Encode the data structure to JSON, mainly used to wrap ResponseWriter in + // middlewares. + EncodeJson(v interface{}) ([]byte, error) + + // Similar to the http.ResponseWriter interface, with additional JSON related + // headers set. + WriteHeader(int) +} + +// This allows to customize the field name used in the error response payload. +// It defaults to "Error" for compatibility reason, but can be changed before starting the server. +// eg: rest.ErrorFieldName = "errorMessage" +var ErrorFieldName = "Error" + +// Error produces an error response in JSON with the following structure, '{"Error":"My error message"}' +// The standard plain text net/http Error helper can still be called like this: +// http.Error(w, "error message", code) +func Error(w ResponseWriter, error string, code int) { + w.WriteHeader(code) + err := w.WriteJson(map[string]string{ErrorFieldName: error}) + if err != nil { + panic(err) + } +} + +// NotFound produces a 404 response with the following JSON, '{"Error":"Resource not found"}' +// The standard plain text net/http NotFound helper can still be called like this: +// http.NotFound(w, r.Request) +func NotFound(w ResponseWriter, r *Request) { + Error(w, "Resource not found", http.StatusNotFound) +} + +// Private responseWriter intantiated by the resource handler. 
+// It implements the following interfaces: +// ResponseWriter +// http.ResponseWriter +// http.Flusher +// http.CloseNotifier +// http.Hijacker +type responseWriter struct { + http.ResponseWriter + wroteHeader bool +} + +func (w *responseWriter) WriteHeader(code int) { + if w.Header().Get("Content-Type") == "" { + // Per spec, UTF-8 is the default, and the charset parameter should not + // be necessary. But some clients (eg: Chrome) think otherwise. + // Since json.Marshal produces UTF-8, setting the charset parameter is a + // safe option. + w.Header().Set("Content-Type", "application/json; charset=utf-8") + } + w.ResponseWriter.WriteHeader(code) + w.wroteHeader = true +} + +func (w *responseWriter) EncodeJson(v interface{}) ([]byte, error) { + b, err := json.Marshal(v) + if err != nil { + return nil, err + } + return b, nil +} + +// Encode the object in JSON and call Write. +func (w *responseWriter) WriteJson(v interface{}) error { + b, err := w.EncodeJson(v) + if err != nil { + return err + } + _, err = w.Write(b) + if err != nil { + return err + } + return nil +} + +// Provided in order to implement the http.ResponseWriter interface. +func (w *responseWriter) Write(b []byte) (int, error) { + if !w.wroteHeader { + w.WriteHeader(http.StatusOK) + } + return w.ResponseWriter.Write(b) +} + +// Provided in order to implement the http.Flusher interface. +func (w *responseWriter) Flush() { + if !w.wroteHeader { + w.WriteHeader(http.StatusOK) + } + flusher := w.ResponseWriter.(http.Flusher) + flusher.Flush() +} + +// Provided in order to implement the http.CloseNotifier interface. +func (w *responseWriter) CloseNotify() <-chan bool { + notifier := w.ResponseWriter.(http.CloseNotifier) + return notifier.CloseNotify() +} + +// Provided in order to implement the http.Hijacker interface. 
+func (w *responseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { + hijacker := w.ResponseWriter.(http.Hijacker) + return hijacker.Hijack() +} diff --git a/src/vendor/github.com/ant0ine/go-json-rest/rest/response_test.go b/src/vendor/github.com/ant0ine/go-json-rest/rest/response_test.go new file mode 100644 index 00000000..ba13f38a --- /dev/null +++ b/src/vendor/github.com/ant0ine/go-json-rest/rest/response_test.go @@ -0,0 +1,68 @@ +package rest + +import ( + "testing" + + "github.com/ant0ine/go-json-rest/rest/test" +) + +func TestResponseNotIndent(t *testing.T) { + + writer := responseWriter{ + nil, + false, + } + + got, err := writer.EncodeJson(map[string]bool{"test": true}) + if err != nil { + t.Error(err.Error()) + } + gotStr := string(got) + expected := "{\"test\":true}" + if gotStr != expected { + t.Error(expected + " was the expected, but instead got " + gotStr) + } +} + +// The following tests could instantiate only the reponseWriter, +// but using the Api object allows to use the rest/test utilities, +// and make the tests easier to write. 
+ +func TestWriteJsonResponse(t *testing.T) { + + api := NewApi() + api.SetApp(AppSimple(func(w ResponseWriter, r *Request) { + w.WriteJson(map[string]string{"Id": "123"}) + })) + + recorded := test.RunRequest(t, api.MakeHandler(), test.MakeSimpleRequest("GET", "http://localhost/", nil)) + recorded.CodeIs(200) + recorded.ContentTypeIsJson() + recorded.BodyIs("{\"Id\":\"123\"}") +} + +func TestErrorResponse(t *testing.T) { + + api := NewApi() + api.SetApp(AppSimple(func(w ResponseWriter, r *Request) { + Error(w, "test", 500) + })) + + recorded := test.RunRequest(t, api.MakeHandler(), test.MakeSimpleRequest("GET", "http://localhost/", nil)) + recorded.CodeIs(500) + recorded.ContentTypeIsJson() + recorded.BodyIs("{\"Error\":\"test\"}") +} + +func TestNotFoundResponse(t *testing.T) { + + api := NewApi() + api.SetApp(AppSimple(func(w ResponseWriter, r *Request) { + NotFound(w, r) + })) + + recorded := test.RunRequest(t, api.MakeHandler(), test.MakeSimpleRequest("GET", "http://localhost/", nil)) + recorded.CodeIs(404) + recorded.ContentTypeIsJson() + recorded.BodyIs("{\"Error\":\"Resource not found\"}") +} diff --git a/src/vendor/github.com/ant0ine/go-json-rest/rest/route.go b/src/vendor/github.com/ant0ine/go-json-rest/rest/route.go new file mode 100644 index 00000000..efb94a75 --- /dev/null +++ b/src/vendor/github.com/ant0ine/go-json-rest/rest/route.go @@ -0,0 +1,107 @@ +package rest + +import ( + "strings" +) + +// Route defines a route as consumed by the router. It can be instantiated directly, or using one +// of the shortcut methods: rest.Get, rest.Post, rest.Put, rest.Patch and rest.Delete. +type Route struct { + + // Any HTTP method. It will be used as uppercase to avoid common mistakes. + HttpMethod string + + // A string like "/resource/:id.json". + // Placeholders supported are: + // :paramName that matches any char to the first '/' or '.' 
+ // #paramName that matches any char to the first '/' + // *paramName that matches everything to the end of the string + // (placeholder names must be unique per PathExp) + PathExp string + + // Code that will be executed when this route is taken. + Func HandlerFunc +} + +// MakePath generates the path corresponding to this Route and the provided path parameters. +// This is used for reverse route resolution. +func (route *Route) MakePath(pathParams map[string]string) string { + path := route.PathExp + for paramName, paramValue := range pathParams { + paramPlaceholder := ":" + paramName + relaxedPlaceholder := "#" + paramName + splatPlaceholder := "*" + paramName + r := strings.NewReplacer(paramPlaceholder, paramValue, splatPlaceholder, paramValue, relaxedPlaceholder, paramValue) + path = r.Replace(path) + } + return path +} + +// Head is a shortcut method that instantiates a HEAD route. See the Route object the parameters definitions. +// Equivalent to &Route{"HEAD", pathExp, handlerFunc} +func Head(pathExp string, handlerFunc HandlerFunc) *Route { + return &Route{ + HttpMethod: "HEAD", + PathExp: pathExp, + Func: handlerFunc, + } +} + +// Get is a shortcut method that instantiates a GET route. See the Route object the parameters definitions. +// Equivalent to &Route{"GET", pathExp, handlerFunc} +func Get(pathExp string, handlerFunc HandlerFunc) *Route { + return &Route{ + HttpMethod: "GET", + PathExp: pathExp, + Func: handlerFunc, + } +} + +// Post is a shortcut method that instantiates a POST route. See the Route object the parameters definitions. +// Equivalent to &Route{"POST", pathExp, handlerFunc} +func Post(pathExp string, handlerFunc HandlerFunc) *Route { + return &Route{ + HttpMethod: "POST", + PathExp: pathExp, + Func: handlerFunc, + } +} + +// Put is a shortcut method that instantiates a PUT route. See the Route object the parameters definitions. 
+// Equivalent to &Route{"PUT", pathExp, handlerFunc} +func Put(pathExp string, handlerFunc HandlerFunc) *Route { + return &Route{ + HttpMethod: "PUT", + PathExp: pathExp, + Func: handlerFunc, + } +} + +// Patch is a shortcut method that instantiates a PATCH route. See the Route object the parameters definitions. +// Equivalent to &Route{"PATCH", pathExp, handlerFunc} +func Patch(pathExp string, handlerFunc HandlerFunc) *Route { + return &Route{ + HttpMethod: "PATCH", + PathExp: pathExp, + Func: handlerFunc, + } +} + +// Delete is a shortcut method that instantiates a DELETE route. Equivalent to &Route{"DELETE", pathExp, handlerFunc} +func Delete(pathExp string, handlerFunc HandlerFunc) *Route { + return &Route{ + HttpMethod: "DELETE", + PathExp: pathExp, + Func: handlerFunc, + } +} + +// Options is a shortcut method that instantiates an OPTIONS route. See the Route object the parameters definitions. +// Equivalent to &Route{"OPTIONS", pathExp, handlerFunc} +func Options(pathExp string, handlerFunc HandlerFunc) *Route { + return &Route{ + HttpMethod: "OPTIONS", + PathExp: pathExp, + Func: handlerFunc, + } +} diff --git a/src/vendor/github.com/ant0ine/go-json-rest/rest/route_test.go b/src/vendor/github.com/ant0ine/go-json-rest/rest/route_test.go new file mode 100644 index 00000000..5ea63b8b --- /dev/null +++ b/src/vendor/github.com/ant0ine/go-json-rest/rest/route_test.go @@ -0,0 +1,88 @@ +package rest + +import ( + "testing" +) + +func TestReverseRouteResolution(t *testing.T) { + + noParam := &Route{"GET", "/", nil} + got := noParam.MakePath(nil) + expected := "/" + if got != expected { + t.Errorf("expected %s, got %s", expected, got) + } + + twoParams := &Route{"GET", "/:id.:format", nil} + got = twoParams.MakePath( + map[string]string{ + "id": "123", + "format": "json", + }, + ) + expected = "/123.json" + if got != expected { + t.Errorf("expected %s, got %s", expected, got) + } + + splatParam := &Route{"GET", "/:id.*format", nil} + got = splatParam.MakePath( + 
map[string]string{ + "id": "123", + "format": "tar.gz", + }, + ) + expected = "/123.tar.gz" + if got != expected { + t.Errorf("expected %s, got %s", expected, got) + } + + relaxedParam := &Route{"GET", "/#file", nil} + got = relaxedParam.MakePath( + map[string]string{ + "file": "a.txt", + }, + ) + expected = "/a.txt" + if got != expected { + t.Errorf("expected %s, got %s", expected, got) + } +} + +func TestShortcutMethods(t *testing.T) { + + r := Head("/", nil) + if r.HttpMethod != "HEAD" { + t.Errorf("expected HEAD, got %s", r.HttpMethod) + } + + r = Get("/", nil) + if r.HttpMethod != "GET" { + t.Errorf("expected GET, got %s", r.HttpMethod) + } + + r = Post("/", nil) + if r.HttpMethod != "POST" { + t.Errorf("expected POST, got %s", r.HttpMethod) + } + + r = Put("/", nil) + if r.HttpMethod != "PUT" { + t.Errorf("expected PUT, got %s", r.HttpMethod) + } + + r = Patch("/", nil) + if r.HttpMethod != "PATCH" { + t.Errorf("expected PATCH, got %s", r.HttpMethod) + } + + r = Delete("/", nil) + if r.HttpMethod != "DELETE" { + t.Errorf("expected DELETE, got %s", r.HttpMethod) + } + + r = Options("/", nil) + if r.HttpMethod != "OPTIONS" { + t.Errorf("expected OPTIONS, got %s", r.HttpMethod) + } +} diff --git a/src/vendor/github.com/ant0ine/go-json-rest/rest/router.go b/src/vendor/github.com/ant0ine/go-json-rest/rest/router.go new file mode 100644 index 00000000..f7ab7130 --- /dev/null +++ b/src/vendor/github.com/ant0ine/go-json-rest/rest/router.go @@ -0,0 +1,194 @@ +package rest + +import ( + "errors" + "github.com/ant0ine/go-json-rest/rest/trie" + "net/http" + "net/url" + "strings" +) + +type router struct { + Routes []*Route + + disableTrieCompression bool + index map[*Route]int + trie *trie.Trie +} + +// MakeRouter returns the router app. Given a set of Routes, it dispatches the request to the +// HandlerFunc of the first route that matches. The order of the Routes matters. 
+func MakeRouter(routes ...*Route) (App, error) { + r := &router{ + Routes: routes, + } + err := r.start() + if err != nil { + return nil, err + } + return r, nil +} + +// Handle the REST routing and run the user code. +func (rt *router) AppFunc() HandlerFunc { + return func(writer ResponseWriter, request *Request) { + + // find the route + route, params, pathMatched := rt.findRouteFromURL(request.Method, request.URL) + if route == nil { + + if pathMatched { + // no route found, but path was matched: 405 Method Not Allowed + Error(writer, "Method not allowed", http.StatusMethodNotAllowed) + return + } + + // no route found, the path was not matched: 404 Not Found + NotFound(writer, request) + return + } + + // a route was found, set the PathParams + request.PathParams = params + + // run the user code + handler := route.Func + handler(writer, request) + } +} + +// This is run for each new request, perf is important. +func escapedPath(urlObj *url.URL) string { + // the escape method of url.URL should be public + // that would avoid this split. + parts := strings.SplitN(urlObj.RequestURI(), "?", 2) + return parts[0] +} + +var preEscape = strings.NewReplacer("*", "__SPLAT_PLACEHOLDER__", "#", "__RELAXED_PLACEHOLDER__") + +var postEscape = strings.NewReplacer("__SPLAT_PLACEHOLDER__", "*", "__RELAXED_PLACEHOLDER__", "#") + +// This is run at init time only. 
+func escapedPathExp(pathExp string) (string, error) { + + // PathExp validation + if pathExp == "" { + return "", errors.New("empty PathExp") + } + if pathExp[0] != '/' { + return "", errors.New("PathExp must start with /") + } + if strings.Contains(pathExp, "?") { + return "", errors.New("PathExp must not contain the query string") + } + + // Get the right escaping + // XXX a bit hacky + + pathExp = preEscape.Replace(pathExp) + + urlObj, err := url.Parse(pathExp) + if err != nil { + return "", err + } + + // get the same escaping as find requests + pathExp = urlObj.RequestURI() + + pathExp = postEscape.Replace(pathExp) + + return pathExp, nil +} + +// This validates the Routes and prepares the Trie data structure. +// It must be called once the Routes are defined and before trying to find Routes. +// The order matters, if multiple Routes match, the first defined will be used. +func (rt *router) start() error { + + rt.trie = trie.New() + rt.index = map[*Route]int{} + + for i, route := range rt.Routes { + + // work with the PathExp urlencoded. + pathExp, err := escapedPathExp(route.PathExp) + if err != nil { + return err + } + + // insert in the Trie + err = rt.trie.AddRoute( + strings.ToUpper(route.HttpMethod), // work with the HttpMethod in uppercase + pathExp, + route, + ) + if err != nil { + return err + } + + // index + rt.index[route] = i + } + + if rt.disableTrieCompression == false { + rt.trie.Compress() + } + + return nil +} + +// return the result that has the route defined the earliest +func (rt *router) ofFirstDefinedRoute(matches []*trie.Match) *trie.Match { + minIndex := -1 + var bestMatch *trie.Match + + for _, result := range matches { + route := result.Route.(*Route) + routeIndex := rt.index[route] + if minIndex == -1 || routeIndex < minIndex { + minIndex = routeIndex + bestMatch = result + } + } + + return bestMatch +} + +// Return the first matching Route and the corresponding parameters for a given URL object. 
+func (rt *router) findRouteFromURL(httpMethod string, urlObj *url.URL) (*Route, map[string]string, bool) { + + // lookup the routes in the Trie + matches, pathMatched := rt.trie.FindRoutesAndPathMatched( + strings.ToUpper(httpMethod), // work with the httpMethod in uppercase + escapedPath(urlObj), // work with the path urlencoded + ) + + // short cuts + if len(matches) == 0 { + // no route found + return nil, nil, pathMatched + } + + if len(matches) == 1 { + // one route found + return matches[0].Route.(*Route), matches[0].Params, pathMatched + } + + // multiple routes found, pick the first defined + result := rt.ofFirstDefinedRoute(matches) + return result.Route.(*Route), result.Params, pathMatched +} + +// Parse the url string (complete or just the path) and return the first matching Route and the corresponding parameters. +func (rt *router) findRoute(httpMethod, urlStr string) (*Route, map[string]string, bool, error) { + + // parse the url + urlObj, err := url.Parse(urlStr) + if err != nil { + return nil, nil, false, err + } + + route, params, pathMatched := rt.findRouteFromURL(httpMethod, urlObj) + return route, params, pathMatched, nil +} diff --git a/src/vendor/github.com/ant0ine/go-json-rest/rest/router_benchmark_test.go b/src/vendor/github.com/ant0ine/go-json-rest/rest/router_benchmark_test.go new file mode 100644 index 00000000..59add299 --- /dev/null +++ b/src/vendor/github.com/ant0ine/go-json-rest/rest/router_benchmark_test.go @@ -0,0 +1,143 @@ +package rest + +import ( + "fmt" + "net/url" + "regexp" + "testing" +) + +func routes() []*Route { + // simulate the routes of a real but reasonable app. 
+ // 6 + 10 * (5 + 2) + 1 = 77 routes + routePaths := []string{ + "/", + "/signin", + "/signout", + "/profile", + "/settings", + "/upload/*file", + } + for i := 0; i < 10; i++ { + for j := 0; j < 5; j++ { + routePaths = append(routePaths, fmt.Sprintf("/resource%d/:id/property%d", i, j)) + } + routePaths = append(routePaths, fmt.Sprintf("/resource%d/:id", i)) + routePaths = append(routePaths, fmt.Sprintf("/resource%d", i)) + } + routePaths = append(routePaths, "/*") + + routes := []*Route{} + for _, path := range routePaths { + routes = append(routes, &Route{ + HttpMethod: "GET", + PathExp: path, + }) + } + return routes +} + +func requestUrls() []*url.URL { + // simulate a few requests + urlStrs := []string{ + "http://example.org/", + "http://example.org/resource9/123", + "http://example.org/resource9/123/property1", + "http://example.org/doesnotexist", + } + urlObjs := []*url.URL{} + for _, urlStr := range urlStrs { + urlObj, _ := url.Parse(urlStr) + urlObjs = append(urlObjs, urlObj) + } + return urlObjs +} + +func BenchmarkNoCompression(b *testing.B) { + + b.StopTimer() + + r := router{ + Routes: routes(), + disableTrieCompression: true, + } + r.start() + urlObjs := requestUrls() + + b.StartTimer() + + for i := 0; i < b.N; i++ { + for _, urlObj := range urlObjs { + r.findRouteFromURL("GET", urlObj) + } + } +} + +func BenchmarkCompression(b *testing.B) { + + b.StopTimer() + + r := router{ + Routes: routes(), + } + r.start() + urlObjs := requestUrls() + + b.StartTimer() + + for i := 0; i < b.N; i++ { + for _, urlObj := range urlObjs { + r.findRouteFromURL("GET", urlObj) + } + } +} + +func BenchmarkRegExpLoop(b *testing.B) { + // reference benchmark using the usual RegExps + Loop strategy + + b.StopTimer() + + routes := routes() + urlObjs := requestUrls() + + // build the route regexps + r1, err := regexp.Compile(":[^/\\.]*") + if err != nil { + panic(err) + } + r2, err := regexp.Compile("\\*.*") + if err != nil { + panic(err) + } + routeRegexps := 
[]*regexp.Regexp{} + for _, route := range routes { + + // generate the regexp string + regStr := r2.ReplaceAllString(route.PathExp, "([^/\\.]+)") + regStr = r1.ReplaceAllString(regStr, "(.+)") + regStr = "^" + regStr + "$" + + // compile it + reg, err := regexp.Compile(regStr) + if err != nil { + panic(err) + } + + routeRegexps = append(routeRegexps, reg) + } + + b.StartTimer() + + for i := 0; i < b.N; i++ { + // do it for a few urls + for _, urlObj := range urlObjs { + // stop at the first route that matches + for index, reg := range routeRegexps { + if reg.FindAllString(urlObj.Path, 1) != nil { + _ = routes[index] + break + } + } + } + } +} diff --git a/src/vendor/github.com/ant0ine/go-json-rest/rest/router_test.go b/src/vendor/github.com/ant0ine/go-json-rest/rest/router_test.go new file mode 100644 index 00000000..6dfc5219 --- /dev/null +++ b/src/vendor/github.com/ant0ine/go-json-rest/rest/router_test.go @@ -0,0 +1,438 @@ +package rest + +import ( + "net/url" + "strings" + "testing" + + "github.com/ant0ine/go-json-rest/rest/test" +) + +func TestFindRouteAPI(t *testing.T) { + + r := router{ + Routes: []*Route{ + { + HttpMethod: "GET", + PathExp: "/", + }, + }, + } + + err := r.start() + if err != nil { + t.Fatal(err) + } + + // full url string + input := "http://example.org/" + route, params, pathMatched, err := r.findRoute("GET", input) + if err != nil { + t.Fatal(err) + } + if route.PathExp != "/" { + t.Error("Expected PathExp to be /") + } + if len(params) != 0 { + t.Error("Expected 0 param") + } + if pathMatched != true { + t.Error("Expected pathMatched to be true") + } + + // part of the url string + input = "/" + route, params, pathMatched, err = r.findRoute("GET", input) + if err != nil { + t.Fatal(err) + } + if route.PathExp != "/" { + t.Error("Expected PathExp to be /") + } + if len(params) != 0 { + t.Error("Expected 0 param") + } + if pathMatched != true { + t.Error("Expected pathMatched to be true") + } + + // url object + urlObj, err := 
url.Parse("http://example.org/") + if err != nil { + t.Fatal(err) + } + route, params, pathMatched = r.findRouteFromURL("GET", urlObj) + if route.PathExp != "/" { + t.Error("Expected PathExp to be /") + } + if len(params) != 0 { + t.Error("Expected 0 param") + } + if pathMatched != true { + t.Error("Expected pathMatched to be true") + } +} + +func TestNoRoute(t *testing.T) { + + r := router{ + Routes: []*Route{}, + } + + err := r.start() + if err != nil { + t.Fatal(err) + } + + input := "http://example.org/notfound" + route, params, pathMatched, err := r.findRoute("GET", input) + if err != nil { + t.Fatal(err) + } + + if route != nil { + t.Error("should not be able to find a route") + } + if params != nil { + t.Error("params must be nil too") + } + if pathMatched != false { + t.Error("Expected pathMatched to be false") + } +} + +func TestEmptyPathExp(t *testing.T) { + + r := router{ + Routes: []*Route{ + { + HttpMethod: "GET", + PathExp: "", + }, + }, + } + + err := r.start() + if err == nil || !strings.Contains(err.Error(), "empty") { + t.Error("expected the empty PathExp error") + } +} + +func TestInvalidPathExp(t *testing.T) { + + r := router{ + Routes: []*Route{ + { + HttpMethod: "GET", + PathExp: "invalid", + }, + }, + } + + err := r.start() + if err == nil || !strings.Contains(err.Error(), "/") { + t.Error("expected the / PathExp error") + } +} + +func TestUrlEncodedFind(t *testing.T) { + + r := router{ + Routes: []*Route{ + { + HttpMethod: "GET", + PathExp: "/with space", // not urlencoded + }, + }, + } + + err := r.start() + if err != nil { + t.Fatal(err) + } + + input := "http://example.org/with%20space" // urlencoded + route, _, pathMatched, err := r.findRoute("GET", input) + if err != nil { + t.Fatal(err) + } + if route.PathExp != "/with space" { + t.Error("Expected PathExp to be /with space") + } + if pathMatched != true { + t.Error("Expected pathMatched to be true") + } +} + +func TestWithQueryString(t *testing.T) { + + r := router{ + Routes: []*Route{ 
+ { + HttpMethod: "GET", + PathExp: "/r/:id", + }, + }, + } + + err := r.start() + if err != nil { + t.Fatal(err) + } + + input := "http://example.org/r/123?arg=value" + route, params, pathMatched, err := r.findRoute("GET", input) + if err != nil { + t.Fatal(err) + } + if route == nil { + t.Fatal("Expected a match") + } + if params["id"] != "123" { + t.Errorf("expected 123, got %s", params["id"]) + } + if pathMatched != true { + t.Error("Expected pathMatched to be true") + } +} + +func TestNonUrlEncodedFind(t *testing.T) { + + r := router{ + Routes: []*Route{ + { + HttpMethod: "GET", + PathExp: "/with%20space", // urlencoded + }, + }, + } + + err := r.start() + if err != nil { + t.Fatal(err) + } + + input := "http://example.org/with space" // not urlencoded + route, _, pathMatched, err := r.findRoute("GET", input) + if err != nil { + t.Fatal(err) + } + if route.PathExp != "/with%20space" { + t.Errorf("Expected PathExp to be %s", "/with20space") + } + if pathMatched != true { + t.Error("Expected pathMatched to be true") + } +} + +func TestDuplicatedRoute(t *testing.T) { + + r := router{ + Routes: []*Route{ + { + HttpMethod: "GET", + PathExp: "/", + }, + { + HttpMethod: "GET", + PathExp: "/", + }, + }, + } + + err := r.start() + if err == nil { + t.Error("expected the duplicated route error") + } +} + +func TestSplatUrlEncoded(t *testing.T) { + + r := router{ + Routes: []*Route{ + { + HttpMethod: "GET", + PathExp: "/r/*rest", + }, + }, + } + + err := r.start() + if err != nil { + t.Fatal(err) + } + + input := "http://example.org/r/123" + route, params, pathMatched, err := r.findRoute("GET", input) + if err != nil { + t.Fatal(err) + } + if route == nil { + t.Fatal("Expected a match") + } + if params["rest"] != "123" { + t.Error("Expected rest to be 123") + } + if pathMatched != true { + t.Error("Expected pathMatched to be true") + } +} + +func TestRouteOrder(t *testing.T) { + + r := router{ + Routes: []*Route{ + { + HttpMethod: "GET", + PathExp: "/r/:id", + }, + { + 
HttpMethod: "GET", + PathExp: "/r/*rest", + }, + }, + } + + err := r.start() + if err != nil { + t.Fatal(err) + } + + input := "http://example.org/r/123" + route, params, pathMatched, err := r.findRoute("GET", input) + if err != nil { + t.Fatal(err) + } + if route == nil { + t.Fatal("Expected one route to be matched") + } + if route.PathExp != "/r/:id" { + t.Errorf("both match, expected the first defined, got %s", route.PathExp) + } + if params["id"] != "123" { + t.Error("Expected id to be 123") + } + if pathMatched != true { + t.Error("Expected pathMatched to be true") + } +} + +func TestRelaxedPlaceholder(t *testing.T) { + + r := router{ + Routes: []*Route{ + { + HttpMethod: "GET", + PathExp: "/r/:id", + }, + { + HttpMethod: "GET", + PathExp: "/r/#filename", + }, + }, + } + + err := r.start() + if err != nil { + t.Fatal(err) + } + + input := "http://example.org/r/a.txt" + route, params, pathMatched, err := r.findRoute("GET", input) + if err != nil { + t.Fatal(err) + } + if route == nil { + t.Fatal("Expected one route to be matched") + } + if route.PathExp != "/r/#filename" { + t.Errorf("expected the second route, got %s", route.PathExp) + } + if params["filename"] != "a.txt" { + t.Error("Expected filename to be a.txt") + } + if pathMatched != true { + t.Error("Expected pathMatched to be true") + } +} + +func TestSimpleExample(t *testing.T) { + + r := router{ + Routes: []*Route{ + { + HttpMethod: "GET", + PathExp: "/resources/:id", + }, + { + HttpMethod: "GET", + PathExp: "/resources", + }, + }, + } + + err := r.start() + if err != nil { + t.Fatal(err) + } + + input := "http://example.org/resources/123" + route, params, pathMatched, err := r.findRoute("GET", input) + if err != nil { + t.Fatal(err) + } + + if route.PathExp != "/resources/:id" { + t.Error("Expected PathExp to be /resources/:id") + } + if params["id"] != "123" { + t.Error("Expected id to be 123") + } + if pathMatched != true { + t.Error("Expected pathMatched to be true") + } +} + +func 
TestHttpResponseLayer(t *testing.T) { + + api := NewApi() + router, err := MakeRouter( + Get("/r/:id", func(w ResponseWriter, r *Request) { + id := r.PathParam("id") + w.WriteJson(map[string]string{"Id": id}) + }), + Post("/r/:id", func(w ResponseWriter, r *Request) { + // JSON echo + data := map[string]string{} + err := r.DecodeJsonPayload(&data) + if err != nil { + t.Fatal(err) + } + w.WriteJson(data) + }), + ) + if err != nil { + t.Fatal(err) + } + api.SetApp(router) + + handler := api.MakeHandler() + + // valid get resource + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("GET", "http://1.2.3.4/r/123", nil)) + recorded.CodeIs(200) + recorded.ContentTypeIsJson() + recorded.BodyIs(`{"Id":"123"}`) + + // auto 405 on undefined route (wrong method) + recorded = test.RunRequest(t, handler, test.MakeSimpleRequest("DELETE", "http://1.2.3.4/r/123", nil)) + recorded.CodeIs(405) + recorded.ContentTypeIsJson() + recorded.BodyIs(`{"Error":"Method not allowed"}`) + + // auto 404 on undefined route (wrong path) + recorded = test.RunRequest(t, handler, test.MakeSimpleRequest("GET", "http://1.2.3.4/s/123", nil)) + recorded.CodeIs(404) + recorded.ContentTypeIsJson() + recorded.BodyIs(`{"Error":"Resource not found"}`) +} diff --git a/src/vendor/github.com/ant0ine/go-json-rest/rest/status.go b/src/vendor/github.com/ant0ine/go-json-rest/rest/status.go new file mode 100644 index 00000000..6b6b5d11 --- /dev/null +++ b/src/vendor/github.com/ant0ine/go-json-rest/rest/status.go @@ -0,0 +1,129 @@ +package rest + +import ( + "fmt" + "log" + "os" + "sync" + "time" +) + +// StatusMiddleware keeps track of various stats about the processed requests. +// It depends on request.Env["STATUS_CODE"] and request.Env["ELAPSED_TIME"], +// recorderMiddleware and timerMiddleware must be in the wrapped middlewares. 
+type StatusMiddleware struct { + lock sync.RWMutex + start time.Time + pid int + responseCounts map[string]int + totalResponseTime time.Time +} + +// MiddlewareFunc makes StatusMiddleware implement the Middleware interface. +func (mw *StatusMiddleware) MiddlewareFunc(h HandlerFunc) HandlerFunc { + + mw.start = time.Now() + mw.pid = os.Getpid() + mw.responseCounts = map[string]int{} + mw.totalResponseTime = time.Time{} + + return func(w ResponseWriter, r *Request) { + + // call the handler + h(w, r) + + if r.Env["STATUS_CODE"] == nil { + log.Fatal("StatusMiddleware: Env[\"STATUS_CODE\"] is nil, " + + "RecorderMiddleware may not be in the wrapped Middlewares.") + } + statusCode := r.Env["STATUS_CODE"].(int) + + if r.Env["ELAPSED_TIME"] == nil { + log.Fatal("StatusMiddleware: Env[\"ELAPSED_TIME\"] is nil, " + + "TimerMiddleware may not be in the wrapped Middlewares.") + } + responseTime := r.Env["ELAPSED_TIME"].(*time.Duration) + + mw.lock.Lock() + mw.responseCounts[fmt.Sprintf("%d", statusCode)]++ + mw.totalResponseTime = mw.totalResponseTime.Add(*responseTime) + mw.lock.Unlock() + } +} + +// Status contains stats and status information. It is returned by GetStatus. +// These information can be made available as an API endpoint, see the "status" +// example to install the following status route. 
+// GET /.status returns something like: +// +// { +// "Pid": 21732, +// "UpTime": "1m15.926272s", +// "UpTimeSec": 75.926272, +// "Time": "2013-03-04 08:00:27.152986 +0000 UTC", +// "TimeUnix": 1362384027, +// "StatusCodeCount": { +// "200": 53, +// "404": 11 +// }, +// "TotalCount": 64, +// "TotalResponseTime": "16.777ms", +// "TotalResponseTimeSec": 0.016777, +// "AverageResponseTime": "262.14us", +// "AverageResponseTimeSec": 0.00026214 +// } +type Status struct { + Pid int + UpTime string + UpTimeSec float64 + Time string + TimeUnix int64 + StatusCodeCount map[string]int + TotalCount int + TotalResponseTime string + TotalResponseTimeSec float64 + AverageResponseTime string + AverageResponseTimeSec float64 +} + +// GetStatus computes and returns a Status object based on the request informations accumulated +// since the start of the process. +func (mw *StatusMiddleware) GetStatus() *Status { + + mw.lock.RLock() + + now := time.Now() + + uptime := now.Sub(mw.start) + + totalCount := 0 + for _, count := range mw.responseCounts { + totalCount += count + } + + totalResponseTime := mw.totalResponseTime.Sub(time.Time{}) + + averageResponseTime := time.Duration(0) + if totalCount > 0 { + avgNs := int64(totalResponseTime) / int64(totalCount) + averageResponseTime = time.Duration(avgNs) + } + + status := &Status{ + Pid: mw.pid, + UpTime: uptime.String(), + UpTimeSec: uptime.Seconds(), + Time: now.String(), + TimeUnix: now.Unix(), + StatusCodeCount: mw.responseCounts, + TotalCount: totalCount, + TotalResponseTime: totalResponseTime.String(), + TotalResponseTimeSec: totalResponseTime.Seconds(), + AverageResponseTime: averageResponseTime.String(), + AverageResponseTimeSec: averageResponseTime.Seconds(), + } + + mw.lock.RUnlock() + + return status +} diff --git a/src/vendor/github.com/ant0ine/go-json-rest/rest/status_test.go b/src/vendor/github.com/ant0ine/go-json-rest/rest/status_test.go new file mode 100644 index 00000000..c2b93c4a --- /dev/null +++ 
b/src/vendor/github.com/ant0ine/go-json-rest/rest/status_test.go @@ -0,0 +1,54 @@ +package rest + +import ( + "github.com/ant0ine/go-json-rest/rest/test" + "testing" +) + +func TestStatusMiddleware(t *testing.T) { + + api := NewApi() + + // the middlewares + status := &StatusMiddleware{} + api.Use(status) + api.Use(&TimerMiddleware{}) + api.Use(&RecorderMiddleware{}) + + // an app that return the Status + api.SetApp(AppSimple(func(w ResponseWriter, r *Request) { + w.WriteJson(status.GetStatus()) + })) + + // wrap all + handler := api.MakeHandler() + + // one request + recorded := test.RunRequest(t, handler, test.MakeSimpleRequest("GET", "http://localhost/1", nil)) + recorded.CodeIs(200) + recorded.ContentTypeIsJson() + + // another request + recorded = test.RunRequest(t, handler, test.MakeSimpleRequest("GET", "http://localhost/2", nil)) + recorded.CodeIs(200) + recorded.ContentTypeIsJson() + + // payload + payload := map[string]interface{}{} + err := recorded.DecodeJsonPayload(&payload) + if err != nil { + t.Fatal(err) + } + + if payload["Pid"] == nil { + t.Error("Expected a non nil Pid") + } + + if payload["TotalCount"].(float64) != 1 { + t.Errorf("TotalCount 1 Expected, got: %f", payload["TotalCount"].(float64)) + } + + if payload["StatusCodeCount"].(map[string]interface{})["200"].(float64) != 1 { + t.Errorf("StatusCodeCount 200 1 Expected, got: %f", payload["StatusCodeCount"].(map[string]interface{})["200"].(float64)) + } +} diff --git a/src/vendor/github.com/ant0ine/go-json-rest/rest/test/doc.go b/src/vendor/github.com/ant0ine/go-json-rest/rest/test/doc.go new file mode 100644 index 00000000..f58d50eb --- /dev/null +++ b/src/vendor/github.com/ant0ine/go-json-rest/rest/test/doc.go @@ -0,0 +1,33 @@ +// Utility functions to help writing tests for a Go-Json-Rest app +// +// Go comes with net/http/httptest to help writing test for an http +// server. When this http server implements a JSON REST API, some basic +// checks end up to be always the same. 
This test package tries to save +// some typing by providing helpers for this particular use case. +// +// package main +// +// import ( +// "github.com/ant0ine/go-json-rest/rest" +// "github.com/ant0ine/go-json-rest/rest/test" +// "testing" +// ) +// +// func TestSimpleRequest(t *testing.T) { +// api := rest.NewApi() +// api.Use(rest.DefaultDevStack...) +// router, err := rest.MakeRouter( +// rest.Get("/r", func(w rest.ResponseWriter, r *rest.Request) { +// w.WriteJson(map[string]string{"Id": "123"}) +// }), +// ) +// if err != nil { +// log.Fatal(err) +// } +// api.SetApp(router) +// recorded := test.RunRequest(t, api.MakeHandler(), +// test.MakeSimpleRequest("GET", "http://1.2.3.4/r", nil)) +// recorded.CodeIs(200) +// recorded.ContentTypeIsJson() +// } +package test diff --git a/src/vendor/github.com/ant0ine/go-json-rest/rest/test/util.go b/src/vendor/github.com/ant0ine/go-json-rest/rest/test/util.go new file mode 100644 index 00000000..9f1b77ae --- /dev/null +++ b/src/vendor/github.com/ant0ine/go-json-rest/rest/test/util.go @@ -0,0 +1,136 @@ +package test + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "mime" + "net/http" + "net/http/httptest" + "strings" + "testing" +) + +// MakeSimpleRequest returns a http.Request. The returned request object can be +// further prepared by adding headers and query string parmaters, for instance. 
+func MakeSimpleRequest(method string, urlStr string, payload interface{}) *http.Request { + var s string + + if payload != nil { + b, err := json.Marshal(payload) + if err != nil { + panic(err) + } + s = fmt.Sprintf("%s", b) + } + + r, err := http.NewRequest(method, urlStr, strings.NewReader(s)) + if err != nil { + panic(err) + } + r.Header.Set("Accept-Encoding", "gzip") + if payload != nil { + r.Header.Set("Content-Type", "application/json") + } + + return r +} + +// CodeIs compares the rescorded status code +func CodeIs(t *testing.T, r *httptest.ResponseRecorder, expectedCode int) { + if r.Code != expectedCode { + t.Errorf("Code %d expected, got: %d", expectedCode, r.Code) + } +} + +// HeaderIs tests the first value for the given headerKey +func HeaderIs(t *testing.T, r *httptest.ResponseRecorder, headerKey, expectedValue string) { + value := r.HeaderMap.Get(headerKey) + if value != expectedValue { + t.Errorf( + "%s: %s expected, got: %s", + headerKey, + expectedValue, + value, + ) + } +} + +func ContentTypeIsJson(t *testing.T, r *httptest.ResponseRecorder) { + + mediaType, params, _ := mime.ParseMediaType(r.HeaderMap.Get("Content-Type")) + charset := params["charset"] + + if mediaType != "application/json" { + t.Errorf( + "Content-Type media type: application/json expected, got: %s", + mediaType, + ) + } + + if charset != "" && strings.ToUpper(charset) != "UTF-8" { + t.Errorf( + "Content-Type charset: must be empty or UTF-8, got: %s", + charset, + ) + } +} + +func ContentEncodingIsGzip(t *testing.T, r *httptest.ResponseRecorder) { + HeaderIs(t, r, "Content-Encoding", "gzip") +} + +func BodyIs(t *testing.T, r *httptest.ResponseRecorder, expectedBody string) { + body := r.Body.String() + if body != expectedBody { + t.Errorf("Body '%s' expected, got: '%s'", expectedBody, body) + } +} + +func DecodeJsonPayload(r *httptest.ResponseRecorder, v interface{}) error { + content, err := ioutil.ReadAll(r.Body) + if err != nil { + return err + } + err = 
json.Unmarshal(content, v) + if err != nil { + return err + } + return nil +} + +type Recorded struct { + T *testing.T + Recorder *httptest.ResponseRecorder +} + +// RunRequest runs a HTTP request through the given handler +func RunRequest(t *testing.T, handler http.Handler, request *http.Request) *Recorded { + recorder := httptest.NewRecorder() + handler.ServeHTTP(recorder, request) + return &Recorded{t, recorder} +} + +func (rd *Recorded) CodeIs(expectedCode int) { + CodeIs(rd.T, rd.Recorder, expectedCode) +} + +func (rd *Recorded) HeaderIs(headerKey, expectedValue string) { + HeaderIs(rd.T, rd.Recorder, headerKey, expectedValue) +} + +func (rd *Recorded) ContentTypeIsJson() { + ContentTypeIsJson(rd.T, rd.Recorder) +} + +func (rd *Recorded) ContentEncodingIsGzip() { + rd.HeaderIs("Content-Encoding", "gzip") +} + +func (rd *Recorded) BodyIs(expectedBody string) { + BodyIs(rd.T, rd.Recorder, expectedBody) +} + +func (rd *Recorded) DecodeJsonPayload(v interface{}) error { + return DecodeJsonPayload(rd.Recorder, v) +} diff --git a/src/vendor/github.com/ant0ine/go-json-rest/rest/timer.go b/src/vendor/github.com/ant0ine/go-json-rest/rest/timer.go new file mode 100644 index 00000000..b2616c80 --- /dev/null +++ b/src/vendor/github.com/ant0ine/go-json-rest/rest/timer.go @@ -0,0 +1,26 @@ +package rest + +import ( + "time" +) + +// TimerMiddleware computes the elapsed time spent during the execution of the wrapped handler. +// The result is available to the wrapping handlers as request.Env["ELAPSED_TIME"].(*time.Duration), +// and as request.Env["START_TIME"].(*time.Time) +type TimerMiddleware struct{} + +// MiddlewareFunc makes TimerMiddleware implement the Middleware interface. 
+func (mw *TimerMiddleware) MiddlewareFunc(h HandlerFunc) HandlerFunc { + return func(w ResponseWriter, r *Request) { + + start := time.Now() + r.Env["START_TIME"] = &start + + // call the handler + h(w, r) + + end := time.Now() + elapsed := end.Sub(start) + r.Env["ELAPSED_TIME"] = &elapsed + } +} diff --git a/src/vendor/github.com/ant0ine/go-json-rest/rest/timer_test.go b/src/vendor/github.com/ant0ine/go-json-rest/rest/timer_test.go new file mode 100644 index 00000000..b790fd5b --- /dev/null +++ b/src/vendor/github.com/ant0ine/go-json-rest/rest/timer_test.go @@ -0,0 +1,58 @@ +package rest + +import ( + "github.com/ant0ine/go-json-rest/rest/test" + "testing" + "time" +) + +func TestTimerMiddleware(t *testing.T) { + + api := NewApi() + + // a middleware carrying the Env tests + api.Use(MiddlewareSimple(func(handler HandlerFunc) HandlerFunc { + return func(w ResponseWriter, r *Request) { + + handler(w, r) + + if r.Env["ELAPSED_TIME"] == nil { + t.Error("ELAPSED_TIME is nil") + } + elapsedTime := r.Env["ELAPSED_TIME"].(*time.Duration) + if elapsedTime.Nanoseconds() <= 0 { + t.Errorf( + "ELAPSED_TIME is expected to be at least 1 nanosecond %d", + elapsedTime.Nanoseconds(), + ) + } + + if r.Env["START_TIME"] == nil { + t.Error("START_TIME is nil") + } + start := r.Env["START_TIME"].(*time.Time) + if start.After(time.Now()) { + t.Errorf( + "START_TIME is expected to be in the past %s", + start.String(), + ) + } + } + })) + + // the middleware to test + api.Use(&TimerMiddleware{}) + + // a simple app + api.SetApp(AppSimple(func(w ResponseWriter, r *Request) { + w.WriteJson(map[string]string{"Id": "123"}) + })) + + // wrap all + handler := api.MakeHandler() + + req := test.MakeSimpleRequest("GET", "http://localhost/", nil) + recorded := test.RunRequest(t, handler, req) + recorded.CodeIs(200) + recorded.ContentTypeIsJson() +} diff --git a/src/vendor/github.com/ant0ine/go-json-rest/rest/trie/impl.go b/src/vendor/github.com/ant0ine/go-json-rest/rest/trie/impl.go new file mode 
100644 index 00000000..2a9fff15 --- /dev/null +++ b/src/vendor/github.com/ant0ine/go-json-rest/rest/trie/impl.go @@ -0,0 +1,426 @@ +// Special Trie implementation for HTTP routing. +// +// This Trie implementation is designed to support strings that includes +// :param and *splat parameters. Strings that are commonly used to represent +// the Path in HTTP routing. This implementation also maintain for each Path +// a map of HTTP Methods associated with the Route. +// +// You probably don't need to use this package directly. +// +package trie + +import ( + "errors" + "fmt" +) + +func splitParam(remaining string) (string, string) { + i := 0 + for len(remaining) > i && remaining[i] != '/' && remaining[i] != '.' { + i++ + } + return remaining[:i], remaining[i:] +} + +func splitRelaxed(remaining string) (string, string) { + i := 0 + for len(remaining) > i && remaining[i] != '/' { + i++ + } + return remaining[:i], remaining[i:] +} + +type node struct { + HttpMethodToRoute map[string]interface{} + + Children map[string]*node + ChildrenKeyLen int + + ParamChild *node + ParamName string + + RelaxedChild *node + RelaxedName string + + SplatChild *node + SplatName string +} + +func (n *node) addRoute(httpMethod, pathExp string, route interface{}, usedParams []string) error { + + if len(pathExp) == 0 { + // end of the path, leaf node, update the map + if n.HttpMethodToRoute == nil { + n.HttpMethodToRoute = map[string]interface{}{ + httpMethod: route, + } + return nil + } else { + if n.HttpMethodToRoute[httpMethod] != nil { + return errors.New("node.Route already set, duplicated path and method") + } + n.HttpMethodToRoute[httpMethod] = route + return nil + } + } + + token := pathExp[0:1] + remaining := pathExp[1:] + var nextNode *node + + if token[0] == ':' { + // :param case + var name string + name, remaining = splitParam(remaining) + + // Check param name is unique + for _, e := range usedParams { + if e == name { + return errors.New( + fmt.Sprintf("A route can't have two 
placeholders with the same name: %s", name), + ) + } + } + usedParams = append(usedParams, name) + + if n.ParamChild == nil { + n.ParamChild = &node{} + n.ParamName = name + } else { + if n.ParamName != name { + return errors.New( + fmt.Sprintf( + "Routes sharing a common placeholder MUST name it consistently: %s != %s", + n.ParamName, + name, + ), + ) + } + } + nextNode = n.ParamChild + } else if token[0] == '#' { + // #param case + var name string + name, remaining = splitRelaxed(remaining) + + // Check param name is unique + for _, e := range usedParams { + if e == name { + return errors.New( + fmt.Sprintf("A route can't have two placeholders with the same name: %s", name), + ) + } + } + usedParams = append(usedParams, name) + + if n.RelaxedChild == nil { + n.RelaxedChild = &node{} + n.RelaxedName = name + } else { + if n.RelaxedName != name { + return errors.New( + fmt.Sprintf( + "Routes sharing a common placeholder MUST name it consistently: %s != %s", + n.RelaxedName, + name, + ), + ) + } + } + nextNode = n.RelaxedChild + } else if token[0] == '*' { + // *splat case + name := remaining + remaining = "" + + // Check param name is unique + for _, e := range usedParams { + if e == name { + return errors.New( + fmt.Sprintf("A route can't have two placeholders with the same name: %s", name), + ) + } + } + + if n.SplatChild == nil { + n.SplatChild = &node{} + n.SplatName = name + } + nextNode = n.SplatChild + } else { + // general case + if n.Children == nil { + n.Children = map[string]*node{} + n.ChildrenKeyLen = 1 + } + if n.Children[token] == nil { + n.Children[token] = &node{} + } + nextNode = n.Children[token] + } + + return nextNode.addRoute(httpMethod, remaining, route, usedParams) +} + +func (n *node) compress() { + // *splat branch + if n.SplatChild != nil { + n.SplatChild.compress() + } + // :param branch + if n.ParamChild != nil { + n.ParamChild.compress() + } + // #param branch + if n.RelaxedChild != nil { + n.RelaxedChild.compress() + } + // main 
branch + if len(n.Children) == 0 { + return + } + // compressable ? + canCompress := true + for _, node := range n.Children { + if node.HttpMethodToRoute != nil || node.SplatChild != nil || node.ParamChild != nil || node.RelaxedChild != nil { + canCompress = false + } + } + // compress + if canCompress { + merged := map[string]*node{} + for key, node := range n.Children { + for gdKey, gdNode := range node.Children { + mergedKey := key + gdKey + merged[mergedKey] = gdNode + } + } + n.Children = merged + n.ChildrenKeyLen++ + n.compress() + // continue + } else { + for _, node := range n.Children { + node.compress() + } + } +} + +func printFPadding(padding int, format string, a ...interface{}) { + for i := 0; i < padding; i++ { + fmt.Print(" ") + } + fmt.Printf(format, a...) +} + +// Private function for now +func (n *node) printDebug(level int) { + level++ + // *splat branch + if n.SplatChild != nil { + printFPadding(level, "*splat\n") + n.SplatChild.printDebug(level) + } + // :param branch + if n.ParamChild != nil { + printFPadding(level, ":param\n") + n.ParamChild.printDebug(level) + } + // #param branch + if n.RelaxedChild != nil { + printFPadding(level, "#relaxed\n") + n.RelaxedChild.printDebug(level) + } + // main branch + for key, node := range n.Children { + printFPadding(level, "\"%s\"\n", key) + node.printDebug(level) + } +} + +// utility for the node.findRoutes recursive method + +type paramMatch struct { + name string + value string +} + +type findContext struct { + paramStack []paramMatch + matchFunc func(httpMethod, path string, node *node) +} + +func newFindContext() *findContext { + return &findContext{ + paramStack: []paramMatch{}, + } +} + +func (fc *findContext) pushParams(name, value string) { + fc.paramStack = append( + fc.paramStack, + paramMatch{name, value}, + ) +} + +func (fc *findContext) popParams() { + fc.paramStack = fc.paramStack[:len(fc.paramStack)-1] +} + +func (fc *findContext) paramsAsMap() map[string]string { + r := 
map[string]string{} + for _, param := range fc.paramStack { + if r[param.name] != "" { + // this is checked at addRoute time, and should never happen. + panic(fmt.Sprintf( + "placeholder %s already found, placeholder names should be unique per route", + param.name, + )) + } + r[param.name] = param.value + } + return r +} + +type Match struct { + // Same Route as in AddRoute + Route interface{} + // map of params matched for this result + Params map[string]string +} + +func (n *node) find(httpMethod, path string, context *findContext) { + + if n.HttpMethodToRoute != nil && path == "" { + context.matchFunc(httpMethod, path, n) + } + + if len(path) == 0 { + return + } + + // *splat branch + if n.SplatChild != nil { + context.pushParams(n.SplatName, path) + n.SplatChild.find(httpMethod, "", context) + context.popParams() + } + + // :param branch + if n.ParamChild != nil { + value, remaining := splitParam(path) + context.pushParams(n.ParamName, value) + n.ParamChild.find(httpMethod, remaining, context) + context.popParams() + } + + // #param branch + if n.RelaxedChild != nil { + value, remaining := splitRelaxed(path) + context.pushParams(n.RelaxedName, value) + n.RelaxedChild.find(httpMethod, remaining, context) + context.popParams() + } + + // main branch + length := n.ChildrenKeyLen + if len(path) < length { + return + } + token := path[0:length] + remaining := path[length:] + if n.Children[token] != nil { + n.Children[token].find(httpMethod, remaining, context) + } +} + +type Trie struct { + root *node +} + +// Instanciate a Trie with an empty node as the root. +func New() *Trie { + return &Trie{ + root: &node{}, + } +} + +// Insert the route in the Trie following or creating the nodes corresponding to the path. +func (t *Trie) AddRoute(httpMethod, pathExp string, route interface{}) error { + return t.root.addRoute(httpMethod, pathExp, route, []string{}) +} + +// Reduce the size of the tree, must be done after the last AddRoute. 
+func (t *Trie) Compress() { + t.root.compress() +} + +// Private function for now. +func (t *Trie) printDebug() { + fmt.Print("\n") + t.root.printDebug(0) + fmt.Print("\n") +} + +// Given a path and an http method, return all the matching routes. +func (t *Trie) FindRoutes(httpMethod, path string) []*Match { + context := newFindContext() + matches := []*Match{} + context.matchFunc = func(httpMethod, path string, node *node) { + if node.HttpMethodToRoute[httpMethod] != nil { + // path and method match, found a route ! + matches = append( + matches, + &Match{ + Route: node.HttpMethodToRoute[httpMethod], + Params: context.paramsAsMap(), + }, + ) + } + } + t.root.find(httpMethod, path, context) + return matches +} + +// Same as FindRoutes, but return in addition a boolean indicating if the path was matched. +// Useful to return 405 +func (t *Trie) FindRoutesAndPathMatched(httpMethod, path string) ([]*Match, bool) { + context := newFindContext() + pathMatched := false + matches := []*Match{} + context.matchFunc = func(httpMethod, path string, node *node) { + pathMatched = true + if node.HttpMethodToRoute[httpMethod] != nil { + // path and method match, found a route ! + matches = append( + matches, + &Match{ + Route: node.HttpMethodToRoute[httpMethod], + Params: context.paramsAsMap(), + }, + ) + } + } + t.root.find(httpMethod, path, context) + return matches, pathMatched +} + +// Given a path, and whatever the http method, return all the matching routes. 
// FindRoutesForPath: given a path, and whatever the http method, return all
// the matching routes.
func (t *Trie) FindRoutesForPath(path string) []*Match {
	context := newFindContext()
	matches := []*Match{}
	context.matchFunc = func(httpMethod, path string, node *node) {
		params := context.paramsAsMap()
		for _, route := range node.HttpMethodToRoute {
			matches = append(
				matches,
				&Match{
					Route:  route,
					Params: params,
				},
			)
		}
	}
	t.root.find("", path, context)
	return matches
}
diff --git a/src/vendor/github.com/ant0ine/go-json-rest/rest/trie/impl_test.go b/src/vendor/github.com/ant0ine/go-json-rest/rest/trie/impl_test.go new file mode 100644 index 00000000..d8fc8b5f --- /dev/null +++ b/src/vendor/github.com/ant0ine/go-json-rest/rest/trie/impl_test.go @@ -0,0 +1,276 @@
package trie

import (
	"testing"
)

// TestPathInsert checks that literal path segments create one node per byte.
func TestPathInsert(t *testing.T) {

	trie := New()
	if trie.root == nil {
		t.Error("Expected to not be nil")
	}

	trie.AddRoute("GET", "/", "1")
	if trie.root.Children["/"] == nil {
		t.Error("Expected to not be nil")
	}

	trie.AddRoute("GET", "/r", "2")
	if trie.root.Children["/"].Children["r"] == nil {
		t.Error("Expected to not be nil")
	}

	trie.AddRoute("GET", "/r/", "3")
	if trie.root.Children["/"].Children["r"].Children["/"] == nil {
		t.Error("Expected to not be nil")
	}
}

// TestTrieCompression checks that Compress merges literal chains into
// multi-byte keys.
func TestTrieCompression(t *testing.T) {

	trie := New()
	trie.AddRoute("GET", "/abc", "3")
	trie.AddRoute("GET", "/adc", "3")

	// before compression
	if trie.root.Children["/"].Children["a"].Children["b"].Children["c"] == nil {
		t.Error("Expected to not be nil")
	}
	if trie.root.Children["/"].Children["a"].Children["d"].Children["c"] == nil {
		t.Error("Expected to not be nil")
	}

	trie.Compress()

	// after compression
	if trie.root.Children["/abc"] == nil {
		t.Errorf("%+v", trie.root)
	}
	if trie.root.Children["/adc"] == nil {
		t.Errorf("%+v", trie.root)
	}
}

// TestParamInsert checks :param node placement and name recording.
func TestParamInsert(t *testing.T) {
	trie := New()

	trie.AddRoute("GET", "/:id/", "")
	if trie.root.Children["/"].ParamChild.Children["/"] == nil {
		t.Error("Expected to not be nil")
	}
	if trie.root.Children["/"].ParamName != "id" {
		t.Error("Expected ParamName to be id")
	}

	trie.AddRoute("GET", "/:id/:property.:format", "")
	if trie.root.Children["/"].ParamChild.Children["/"].ParamChild.Children["."].ParamChild == nil {
		t.Error("Expected to not be nil")
	}
	if trie.root.Children["/"].ParamName != "id" {
		t.Error("Expected ParamName to be id")
	}
	if trie.root.Children["/"].ParamChild.Children["/"].ParamName != "property" {
		t.Error("Expected ParamName to be property")
	}
	if trie.root.Children["/"].ParamChild.Children["/"].ParamChild.Children["."].ParamName != "format" {
		t.Error("Expected ParamName to be format")
	}
}

// TestRelaxedInsert checks #param node placement and name recording.
func TestRelaxedInsert(t *testing.T) {
	trie := New()

	trie.AddRoute("GET", "/#id/", "")
	if trie.root.Children["/"].RelaxedChild.Children["/"] == nil {
		t.Error("Expected to not be nil")
	}
	if trie.root.Children["/"].RelaxedName != "id" {
		t.Error("Expected RelaxedName to be id")
	}
}

// TestSplatInsert checks *splat node placement.
func TestSplatInsert(t *testing.T) {
	trie := New()
	trie.AddRoute("GET", "/*splat", "")
	if trie.root.Children["/"].SplatChild == nil {
		t.Error("Expected to not be nil")
	}
}

// TestDupeInsert checks that a duplicated (path, method) pair errors and the
// first route wins.
func TestDupeInsert(t *testing.T) {
	trie := New()
	trie.AddRoute("GET", "/", "1")
	err := trie.AddRoute("GET", "/", "2")
	if err == nil {
		t.Error("Expected to not be nil")
	}
	if trie.root.Children["/"].HttpMethodToRoute["GET"] != "1" {
		t.Error("Expected to be 1")
	}
}

// isInMatches reports whether a route string appears in matches.
func isInMatches(test string, matches []*Match) bool {
	for _, match := range matches {
		if match.Route.(string) == test {
			return true
		}
	}
	return false
}

// TestFindRoute exercises FindRoutes over literal, :param, *splat and #param
// routes, including captured parameter values.
func TestFindRoute(t *testing.T) {

	trie := New()

	trie.AddRoute("GET", "/", "root")
	trie.AddRoute("GET", "/r/:id", "resource")
	trie.AddRoute("GET", "/r/:id/property", "property")
	trie.AddRoute("GET", "/r/:id/property.*format", "property_format")
	trie.AddRoute("GET", "/user/#username/property", "user_property")

	trie.Compress()

	matches := trie.FindRoutes("GET", "/")
	if len(matches) != 1 {
		t.Errorf("expected one route, got %d", len(matches))
	}
	if !isInMatches("root", matches) {
		t.Error("expected 'root'")
	}

	matches = trie.FindRoutes("GET", "/notfound")
	if len(matches) != 0 {
		t.Errorf("expected zero route, got %d", len(matches))
	}

	matches = trie.FindRoutes("GET", "/r/1")
	if len(matches) != 1 {
		t.Errorf("expected one route, got %d", len(matches))
	}
	if !isInMatches("resource", matches) {
		t.Errorf("expected 'resource', got %+v", matches)
	}
	if matches[0].Params["id"] != "1" {
		t.Error("Expected Params id to be 1")
	}

	matches = trie.FindRoutes("GET", "/r/1/property")
	if len(matches) != 1 {
		t.Errorf("expected one route, got %d", len(matches))
	}
	if !isInMatches("property", matches) {
		t.Error("expected 'property'")
	}
	if matches[0].Params["id"] != "1" {
		t.Error("Expected Params id to be 1")
	}

	matches = trie.FindRoutes("GET", "/r/1/property.json")
	if len(matches) != 1 {
		t.Errorf("expected one route, got %d", len(matches))
	}
	if !isInMatches("property_format", matches) {
		t.Error("expected 'property_format'")
	}
	if matches[0].Params["id"] != "1" {
		t.Error("Expected Params id to be 1")
	}
	if matches[0].Params["format"] != "json" {
		t.Error("Expected Params format to be json")
	}

	matches = trie.FindRoutes("GET", "/user/antoine.imbert/property")
	if len(matches) != 1 {
		t.Errorf("expected one route, got %d", len(matches))
	}
	if !isInMatches("user_property", matches) {
		t.Error("expected 'user_property'")
	}
	if matches[0].Params["username"] != "antoine.imbert" {
		t.Error("Expected Params username to be antoine.imbert")
	}
}

// TestFindRouteMultipleMatches checks that overlapping routes all match.
func TestFindRouteMultipleMatches(t *testing.T) {

	trie := New()

	trie.AddRoute("GET", "/r/1", "resource1")
	trie.AddRoute("GET", "/r/2", "resource2")
	trie.AddRoute("GET", "/r/:id", "resource_generic")
	trie.AddRoute("GET", "/s/*rest", "special_all")
	trie.AddRoute("GET", "/s/:param", "special_generic")
	trie.AddRoute("GET", "/s/#param", "special_relaxed")
	trie.AddRoute("GET", "/", "root")

	trie.Compress()

	matches := trie.FindRoutes("GET", "/r/1")
	if len(matches) != 2 {
		t.Errorf("expected two matches, got %d", len(matches))
	}
	if !isInMatches("resource_generic", matches) {
		t.Error("Expected resource_generic to match")
	}
	if !isInMatches("resource1", matches) {
		t.Error("Expected resource1 to match")
	}

	matches = trie.FindRoutes("GET", "/s/1")
	if len(matches) != 3 {
		t.Errorf("expected two matches, got %d", len(matches))
	}
	if !isInMatches("special_all", matches) {
		t.Error("Expected special_all to match")
	}
	if !isInMatches("special_generic", matches) {
		t.Error("Expected special_generic to match")
	}
	if !isInMatches("special_relaxed", matches) {
		t.Error("Expected special_relaxed to match")
	}
}

// TestConsistentPlaceholderName checks that a shared placeholder position
// must keep the same name across routes.
func TestConsistentPlaceholderName(t *testing.T) {

	trie := New()

	trie.AddRoute("GET", "/r/:id", "oneph")
	err := trie.AddRoute("GET", "/r/:rid/other", "twoph")
	if err == nil {
		t.Error("Should have died on inconsistent placeholder name")
	}

	trie.AddRoute("GET", "/r/#id", "oneph")
	err = trie.AddRoute("GET", "/r/#rid/other", "twoph")
	if err == nil {
		t.Error("Should have died on inconsistent placeholder name")
	}

	trie.AddRoute("GET", "/r/*id", "oneph")
	err = trie.AddRoute("GET", "/r/*rid", "twoph")
	if err == nil {
		t.Error("Should have died on duplicated route")
	}
}

// TestDuplicateName checks that one route cannot reuse a placeholder name.
func TestDuplicateName(t *testing.T) {

	trie := New()

	err := trie.AddRoute("GET", "/r/:id/o/:id", "two")
	if err == nil {
		t.Error("Should have died, this route has two placeholder named `id`")
	}

	err = trie.AddRoute("GET", "/r/:id/o/*id", "two")
	if err == nil {
		t.Error("Should have died, this route has two placeholder named `id`")
	}

	err = trie.AddRoute("GET", "/r/:id/o/#id", "two")
	if err == nil {
		t.Error("Should have died, this route has two placeholder named `id`")
	}
}
diff --git a/src/vendor/github.com/beefsack/go-rate/.gitignore b/src/vendor/github.com/beefsack/go-rate/.gitignore new file mode 100644 index 00000000..1377554e --- /dev/null +++ b/src/vendor/github.com/beefsack/go-rate/.gitignore @@ -0,0 +1 @@
*.swp
diff --git a/src/vendor/github.com/beefsack/go-rate/.travis.yml b/src/vendor/github.com/beefsack/go-rate/.travis.yml new file mode 100644 index 00000000..825dc3f2 --- /dev/null +++ b/src/vendor/github.com/beefsack/go-rate/.travis.yml @@ -0,0 +1,8 @@
language: go

go:
  - 1.0
  - 1.1
  - 1.2
  - 1.3
  - tip
diff --git a/src/vendor/github.com/beefsack/go-rate/LICENSE b/src/vendor/github.com/beefsack/go-rate/LICENSE new file mode 100644 index 00000000..13917f00 --- /dev/null +++ b/src/vendor/github.com/beefsack/go-rate/LICENSE @@ -0,0 +1,675 @@

                    GNU GENERAL PUBLIC LICENSE
                       Version 3, 29 June 2007

 Copyright (C) 2007 Free Software Foundation, Inc.
 Everyone is permitted to copy and distribute verbatim copies
 of this license document, but changing it is not allowed.

                            Preamble

  The GNU General Public License is a free, copyleft license for
software and other kinds of works.

  The licenses for most software and other practical works are designed
to take away your freedom to share and change the works.  By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users.  We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors.  You can apply it to
your programs, too.

  When we speak of free software, we are referring to freedom, not
price.
Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. 
If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. 
Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. 
+ + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. 
Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. 
+ + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. 
+ + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. 
If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. 
+ + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. 
+ + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the 
material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. 
+ + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. 
If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. 
+ + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. 
+ + {one line to give the program's name and a brief idea of what it does.} + Copyright (C) {year} {name of author} + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + {project} Copyright (C) {year} {fullname} + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. 
diff --git a/src/vendor/github.com/beefsack/go-rate/README.md b/src/vendor/github.com/beefsack/go-rate/README.md new file mode 100644 index 00000000..38862052 --- /dev/null +++ b/src/vendor/github.com/beefsack/go-rate/README.md @@ -0,0 +1,151 @@ +go-rate +=============== + +[![Build Status](https://travis-ci.org/beefsack/go-rate.svg?branch=master)](https://travis-ci.org/beefsack/go-rate) +[![GoDoc](https://godoc.org/github.com/beefsack/go-rate?status.svg)](https://godoc.org/github.com/beefsack/go-rate) + +**go-rate** is a rate limiter designed for a range of use cases, +including server side spam protection and preventing saturation of APIs you +consume. + +It is used in production at +[LangTrend](http://langtrend.com/l/Java,PHP,JavaScript) to adhere to the GitHub +API rate limits. + +Usage +----- + +Import `github.com/beefsack/go-rate` and create a new rate limiter with +the `rate.New(limit int, interval time.Duration)` function. + +The rate limiter provides a `Wait()` and a `Try() (bool, time.Duration)` method +for both blocking and non-blocking functionality respectively. + +API documentation available at [godoc.org](http://godoc.org/github.com/beefsack/go-rate). + +Examples +-------- + +### Blocking rate limiting + +This example demonstrates limiting the output rate to 3 times per second. 
+ +```Go +package main + +import ( + "fmt" + "time" + + "github.com/beefsack/go-rate" +) + +func main() { + rl := rate.New(3, time.Second) // 3 times per second + begin := time.Now() + for i := 1; i <= 10; i++ { + rl.Wait() + fmt.Printf("%d started at %s\n", i, time.Now().Sub(begin)) + } + // Output: + // 1 started at 12.584us + // 2 started at 40.13us + // 3 started at 44.92us + // 4 started at 1.000125362s + // 5 started at 1.000143066s + // 6 started at 1.000144707s + // 7 started at 2.000224641s + // 8 started at 2.000240751s + // 9 started at 2.00024244s + // 10 started at 3.000314332s +} +``` + +### Blocking rate limiting with multiple limiters + +This example demonstrates combining rate limiters, one limiting at once per +second, the other limiting at 2 times per 3 seconds. + +```Go +package main + +import ( + "fmt" + "time" + + "github.com/beefsack/go-rate" +) + +func main() { + begin := time.Now() + rl1 := rate.New(1, time.Second) // Once per second + rl2 := rate.New(2, time.Second*3) // 2 times per 3 seconds + for i := 1; i <= 10; i++ { + rl1.Wait() + rl2.Wait() + fmt.Printf("%d started at %s\n", i, time.Now().Sub(begin)) + } + // Output: + // 1 started at 11.197us + // 2 started at 1.00011941s + // 3 started at 3.000105858s + // 4 started at 4.000210639s + // 5 started at 6.000189578s + // 6 started at 7.000289992s + // 7 started at 9.000289942s + // 8 started at 10.00038286s + // 9 started at 12.000386821s + // 10 started at 13.000465465s +} +``` + +### Non-blocking rate limiting + +This example demonstrates non-blocking rate limiting, such as would be used to +limit spam in a chat client. 
+ +```Go +package main + +import ( + "fmt" + "time" + + "github.com/beefsack/go-rate" +) + +var rl = rate.New(3, time.Second) // 3 times per second + +func say(message string) { + if ok, remaining := rl.Try(); ok { + fmt.Printf("You said: %s\n", message) + } else { + fmt.Printf("Spam filter triggered, please wait %s\n", remaining) + } +} + +func main() { + for i := 1; i <= 5; i++ { + say(fmt.Sprintf("Message %d", i)) + } + time.Sleep(time.Second / 2) + say("I waited half a second, is that enough?") + time.Sleep(time.Second / 2) + say("Okay, I waited a second.") + // Output: + // You said: Message 1 + // You said: Message 2 + // You said: Message 3 + // Spam filter triggered, please wait 999.980816ms + // Spam filter triggered, please wait 999.976704ms + // Spam filter triggered, please wait 499.844795ms + // You said: Okay, I waited a second. +} +``` + +Authors +------- + +* [Michael Alexander](https://github.com/beefsack) +* [Geert-Johan Riemer](https://github.com/GeertJohan) +* [Matt T. Proud](https://github.com/matttproud) diff --git a/src/vendor/github.com/beefsack/go-rate/rate.go b/src/vendor/github.com/beefsack/go-rate/rate.go new file mode 100644 index 00000000..bd6b10d2 --- /dev/null +++ b/src/vendor/github.com/beefsack/go-rate/rate.go @@ -0,0 +1,62 @@ +package rate + +import ( + "container/list" + "sync" + "time" +) + +// A RateLimiter limits the rate at which an action can be performed. It +// applies neither smoothing (like one could achieve in a token bucket system) +// nor does it offer any conception of warmup, wherein the rate of actions +// granted are steadily increased until a steady throughput equilibrium is +// reached. +type RateLimiter struct { + limit int + interval time.Duration + mtx sync.Mutex + times list.List +} + +// New creates a new rate limiter for the limit and interval. 
+func New(limit int, interval time.Duration) *RateLimiter { + lim := &RateLimiter{ + limit: limit, + interval: interval, + } + lim.times.Init() + return lim +} + +// Wait blocks if the rate limit has been reached. Wait offers no guarantees +// of fairness for multiple actors if the allowed rate has been temporarily +// exhausted. +func (r *RateLimiter) Wait() { + for { + ok, remaining := r.Try() + if ok { + break + } + time.Sleep(remaining) + } +} + +// Try returns true if under the rate limit, or false if over and the +// remaining time before the rate limit expires. +func (r *RateLimiter) Try() (ok bool, remaining time.Duration) { + r.mtx.Lock() + defer r.mtx.Unlock() + now := time.Now() + if l := r.times.Len(); l < r.limit { + r.times.PushBack(now) + return true, 0 + } + if frnt := r.times.Front(); frnt != nil { + if diff := now.Sub(frnt.Value.(time.Time)); diff < r.interval { + return false, r.interval - diff + } + frnt.Value = now + r.times.MoveToBack(frnt) + } + return true, 0 +} diff --git a/src/vendor/github.com/beefsack/go-rate/rate_test.go b/src/vendor/github.com/beefsack/go-rate/rate_test.go new file mode 100644 index 00000000..b6a3e382 --- /dev/null +++ b/src/vendor/github.com/beefsack/go-rate/rate_test.go @@ -0,0 +1,46 @@ +package rate + +import ( + "testing" + "time" +) + +func TestRateLimiter_Wait_noblock(t *testing.T) { + start := time.Now() + limit := 5 + interval := time.Second * 3 + limiter := New(limit, interval) + for i := 0; i < limit; i++ { + limiter.Wait() + } + if time.Now().Sub(start) >= interval { + t.Error("The limiter blocked when it shouldn't have") + } +} + +func TestRateLimiter_Wait_block(t *testing.T) { + start := time.Now() + limit := 5 + interval := time.Second * 3 + limiter := New(limit, interval) + for i := 0; i < limit+1; i++ { + limiter.Wait() + } + if time.Now().Sub(start) < interval { + t.Error("The limiter didn't block when it should have") + } +} + +func TestRateLimiter_Try(t *testing.T) { + limit := 5 + interval := 
time.Second * 3 + limiter := New(limit, interval) + for i := 0; i < limit; i++ { + if ok, _ := limiter.Try(); !ok { + t.Fatalf("Should have allowed try on attempt %d", i) + } + } + if ok, _ := limiter.Try(); ok { + t.Fatal("Should have not allowed try on final attempt") + } +} diff --git a/src/vendor/github.com/fortytw2/leaktest/.travis.yml b/src/vendor/github.com/fortytw2/leaktest/.travis.yml new file mode 100644 index 00000000..dffa53f3 --- /dev/null +++ b/src/vendor/github.com/fortytw2/leaktest/.travis.yml @@ -0,0 +1,15 @@ +language: go +go: + - 1.5.3 + - 1.6.3 + - 1.7 + - tip + +script: + - go test -v -race -parallel 5 -coverprofile=coverage.txt -covermode=atomic ./ + - go test github.com/fortytw2/leaktest -run ^TestEmptyLeak$ + +before_install: + - pip install --user codecov +after_success: + - codecov diff --git a/src/vendor/github.com/fortytw2/leaktest/LICENSE b/src/vendor/github.com/fortytw2/leaktest/LICENSE new file mode 100644 index 00000000..74487567 --- /dev/null +++ b/src/vendor/github.com/fortytw2/leaktest/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/src/vendor/github.com/fortytw2/leaktest/README.md b/src/vendor/github.com/fortytw2/leaktest/README.md new file mode 100644 index 00000000..778bf46b --- /dev/null +++ b/src/vendor/github.com/fortytw2/leaktest/README.md @@ -0,0 +1,36 @@ +Leaktest [![Build Status](https://travis-ci.org/fortytw2/leaktest.svg?branch=master)](https://travis-ci.org/fortytw2/leaktest) +------ + +Refactored, tested variant of the goroutine leak detector found in both `net/http` tests and the `cockroachdb` +source tree. + +Takes a snapshot of running goroutines at the start of a test, and at the end - +compares the two and *voila*. Ignores runtime/sys goroutines. Doesn't play nice +with `t.Parallel()` right now, but there are plans to do so. 
+ +### Installation + +``` +go get -u github.com/fortytw2/leaktest +``` + +### Example + +This test fails, because it leaks a goroutine :o + +```go +func TestPool(t *testing.T) { + defer leaktest.Check(t)() + + go func() { + for { + time.Sleep(time.Second) + } + }() +} +``` + + +LICENSE +------ +Same BSD-style as Go, see LICENSE diff --git a/src/vendor/github.com/fortytw2/leaktest/leaktest.go b/src/vendor/github.com/fortytw2/leaktest/leaktest.go new file mode 100644 index 00000000..492c0b06 --- /dev/null +++ b/src/vendor/github.com/fortytw2/leaktest/leaktest.go @@ -0,0 +1,92 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package leaktest provides tools to detect leaked goroutines in tests. +// To use it, call "defer util.Check(t)()" at the beginning of each +// test that may use goroutines. +// copied out of the cockroachdb source tree with slight modifications to be +// more re-useable +package leaktest + +import ( + "runtime" + "sort" + "strings" + "time" +) + +// interestingGoroutines returns all goroutines we care about for the purpose +// of leak checking. It excludes testing or runtime ones. +func interestingGoroutines() (gs []string) { + buf := make([]byte, 2<<20) + buf = buf[:runtime.Stack(buf, true)] + for _, g := range strings.Split(string(buf), "\n\n") { + sl := strings.SplitN(g, "\n", 2) + if len(sl) != 2 { + continue + } + stack := strings.TrimSpace(sl[1]) + if strings.HasPrefix(stack, "testing.RunTests") { + continue + } + + if stack == "" || + // Below are the stacks ignored by the upstream leaktest code. 
+ strings.Contains(stack, "testing.Main(") || + strings.Contains(stack, "testing.(*T).Run(") || + strings.Contains(stack, "runtime.goexit") || + strings.Contains(stack, "created by runtime.gc") || + strings.Contains(stack, "interestingGoroutines") || + strings.Contains(stack, "runtime.MHeap_Scavenger") || + strings.Contains(stack, "signal.signal_recv") || + strings.Contains(stack, "sigterm.handler") || + strings.Contains(stack, "runtime_mcall") || + strings.Contains(stack, "goroutine in C code") { + continue + } + gs = append(gs, strings.TrimSpace(g)) + } + sort.Strings(gs) + return +} + +// ErrorReporter is a tiny subset of a testing.TB to make testing not such a +// massive pain +type ErrorReporter interface { + Errorf(format string, args ...interface{}) +} + +// Check snapshots the currently-running goroutines and returns a +// function to be run at the end of tests to see whether any +// goroutines leaked. +func Check(t ErrorReporter) func() { + orig := map[string]bool{} + for _, g := range interestingGoroutines() { + orig[g] = true + } + return func() { + // Loop, waiting for goroutines to shut down. + // Wait up to 5 seconds, but finish as quickly as possible. 
+ deadline := time.Now().Add(5 * time.Second) + for { + var leaked []string + for _, g := range interestingGoroutines() { + if !orig[g] { + leaked = append(leaked, g) + } + } + if len(leaked) == 0 { + return + } + if time.Now().Before(deadline) { + time.Sleep(50 * time.Millisecond) + continue + } + for _, g := range leaked { + t.Errorf("Leaked goroutine: %v", g) + } + return + } + } +} diff --git a/src/vendor/github.com/fortytw2/leaktest/leaktest_test.go b/src/vendor/github.com/fortytw2/leaktest/leaktest_test.go new file mode 100644 index 00000000..1ef26f30 --- /dev/null +++ b/src/vendor/github.com/fortytw2/leaktest/leaktest_test.go @@ -0,0 +1,83 @@ +package leaktest + +import ( + "fmt" + "sync" + "testing" + "time" +) + +type testReporter struct { + failed bool + msg string +} + +func (tr *testReporter) Errorf(format string, args ...interface{}) { + tr.failed = true + tr.msg = fmt.Sprintf(format, args) +} + +var leakyFuncs = []func(){ + // Infinite for loop + func() { + for { + time.Sleep(time.Second) + } + }, + // Select on a channel not referenced by other goroutines. + func() { + c := make(chan struct{}, 0) + select { + case <-c: + } + }, + // Blocked select on channels not referenced by other goroutines. + func() { + c := make(chan struct{}, 0) + c2 := make(chan struct{}, 0) + select { + case <-c: + case c2 <- struct{}{}: + } + }, + // Blocking wait on sync.Mutex that isn't referenced by other goroutines. + func() { + var mu sync.Mutex + mu.Lock() + mu.Lock() + }, + // Blocking wait on sync.RWMutex that isn't referenced by other goroutines. 
+ func() { + var mu sync.RWMutex + mu.RLock() + mu.Lock() + }, + func() { + var mu sync.Mutex + mu.Lock() + c := sync.NewCond(&mu) + c.Wait() + }, +} + +func TestCheck(t *testing.T) { + + // this works because the running goroutine is left running at the + // start of the next test case - so the previous leaks don't affect the + // check for the next one + for i, fn := range leakyFuncs { + checker := &testReporter{} + snapshot := Check(checker) + go fn() + + snapshot() + if !checker.failed { + t.Errorf("didn't catch sleeping goroutine, test #%d", i) + } + } +} + +func TestEmptyLeak(t *testing.T) { + defer Check(t)() + time.Sleep(time.Second) +} diff --git a/src/vendor/github.com/mailru/easyjson/.gitignore b/src/vendor/github.com/mailru/easyjson/.gitignore new file mode 100644 index 00000000..db8c66ed --- /dev/null +++ b/src/vendor/github.com/mailru/easyjson/.gitignore @@ -0,0 +1,4 @@ +.root +*_easyjson.go +*.iml +.idea diff --git a/src/vendor/github.com/mailru/easyjson/.travis.yml b/src/vendor/github.com/mailru/easyjson/.travis.yml new file mode 100644 index 00000000..3e5ac132 --- /dev/null +++ b/src/vendor/github.com/mailru/easyjson/.travis.yml @@ -0,0 +1,8 @@ +language: go + +go: + - tip +install: + - go get github.com/ugorji/go/codec + - go get github.com/pquerna/ffjson/fflib/v1 + - go get github.com/golang/lint/golint diff --git a/src/vendor/github.com/mailru/easyjson/LICENSE b/src/vendor/github.com/mailru/easyjson/LICENSE new file mode 100644 index 00000000..fbff658f --- /dev/null +++ b/src/vendor/github.com/mailru/easyjson/LICENSE @@ -0,0 +1,7 @@ +Copyright (c) 2016 Mail.Ru Group + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is 
furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/src/vendor/github.com/mailru/easyjson/Makefile b/src/vendor/github.com/mailru/easyjson/Makefile new file mode 100644 index 00000000..8e720a08 --- /dev/null +++ b/src/vendor/github.com/mailru/easyjson/Makefile @@ -0,0 +1,54 @@ +PKG=github.com/mailru/easyjson +GOPATH:=$(PWD)/.root:$(GOPATH) +export GOPATH + +all: test + +.root/src/$(PKG): + mkdir -p $@ + for i in $$PWD/* ; do ln -s $$i $@/`basename $$i` ; done + +root: .root/src/$(PKG) + +clean: + rm -rf .root + +build: + go build -i -o .root/bin/easyjson $(PKG)/easyjson + +generate: root build + .root/bin/easyjson -stubs \ + .root/src/$(PKG)/tests/snake.go \ + .root/src/$(PKG)/tests/data.go \ + .root/src/$(PKG)/tests/omitempty.go \ + .root/src/$(PKG)/tests/nothing.go \ + .root/src/$(PKG)/tests/named_type.go + + .root/bin/easyjson -all .root/src/$(PKG)/tests/data.go + .root/bin/easyjson -all .root/src/$(PKG)/tests/nothing.go + .root/bin/easyjson -all .root/src/$(PKG)/tests/errors.go + .root/bin/easyjson -snake_case .root/src/$(PKG)/tests/snake.go + .root/bin/easyjson -omit_empty .root/src/$(PKG)/tests/omitempty.go + .root/bin/easyjson -build_tags=use_easyjson .root/src/$(PKG)/benchmark/data.go + .root/bin/easyjson .root/src/$(PKG)/tests/nested_easy.go + .root/bin/easyjson .root/src/$(PKG)/tests/named_type.go + +test: generate root + go test \ + $(PKG)/tests \ + 
$(PKG)/jlexer \ + $(PKG)/gen \ + $(PKG)/buffer + go test -benchmem -tags use_easyjson -bench . $(PKG)/benchmark + golint -set_exit_status .root/src/$(PKG)/tests/*_easyjson.go + +bench-other: generate root + @go test -benchmem -bench . $(PKG)/benchmark + @go test -benchmem -tags use_ffjson -bench . $(PKG)/benchmark + @go test -benchmem -tags use_codec -bench . $(PKG)/benchmark + +bench-python: + benchmark/ujson.sh + + +.PHONY: root clean generate test build diff --git a/src/vendor/github.com/mailru/easyjson/README.md b/src/vendor/github.com/mailru/easyjson/README.md new file mode 100644 index 00000000..9cb88455 --- /dev/null +++ b/src/vendor/github.com/mailru/easyjson/README.md @@ -0,0 +1,329 @@ +# easyjson [![Build Status](https://travis-ci.org/mailru/easyjson.svg?branch=master)](https://travis-ci.org/mailru/easyjson) [![Go Report Card](https://goreportcard.com/badge/github.com/mailru/easyjson)](https://goreportcard.com/report/github.com/mailru/easyjson) + +Package easyjson provides a fast and easy way to marshal/unmarshal Go structs +to/from JSON without the use of reflection. In performance tests, easyjson +outperforms the standard `encoding/json` package by a factor of 4-5x, and other +JSON encoding packages by a factor of 2-3x. + +easyjson aims to keep generated Go code simple enough so that it can be easily +optimized or fixed. Another goal is to provide users with the ability to +customize the generated code by providing options not available with the +standard `encoding/json` package, such as generating "snake_case" names or +enabling `omitempty` behavior by default. + +## Usage +```sh +# install +go get -u github.com/mailru/easyjson/... + +# run +easyjson -all .go +``` + +The above will generate `_easyjson.go` containing the appropriate marshaler and +unmarshaler funcs for all structs contained in `.go`. + +Please note that easyjson requires a full Go build environment and the `GOPATH` +environment variable to be set. 
This is because easyjson code generation +invokes `go run` on a temporary file (an approach to code generation borrowed +from [ffjson](https://github.com/pquerna/ffjson)). + +## Options +```txt +Usage of easyjson: + -all + generate marshaler/unmarshalers for all structs in a file + -build_tags string + build tags to add to generated file + -leave_temps + do not delete temporary files + -no_std_marshalers + don't generate MarshalJSON/UnmarshalJSON funcs + -noformat + do not run 'gofmt -w' on output file + -omit_empty + omit empty fields by default + -output_filename string + specify the filename of the output + -pkg + process the whole package instead of just the given file + -snake_case + use snake_case names instead of CamelCase by default + -stubs + only generate stubs for marshaler/unmarshaler funcs +``` + +Using `-all` will generate marshalers/unmarshalers for all Go structs in the +file. If `-all` is not provided, then only those structs whose preceeding +comment starts with `easyjson:json` will have marshalers/unmarshalers +generated. For example: + +```go +//easyjson:json +struct A{} +``` + +Additional option notes: + +* `-snake_case` tells easyjson to generate snake\_case field names by default + (unless overridden by a field tag). The CamelCase to snake\_case conversion + algorithm should work in most cases (ie, HTTPVersion will be converted to + "http_version"). + +* `-build_tags` will add the specified build tags to generated Go sources. + +## Generated Marshaler/Unmarshaler Funcs + +For Go struct types, easyjson generates the funcs `MarshalEasyJSON` / +`UnmarshalEasyJSON` for marshaling/unmarshaling JSON. In turn, these satisify +the `easyjson.Marshaler` and `easyjson.Unmarshaler` interfaces and when used in +conjunction with `easyjson.Marshal` / `easyjson.Unmarshal` avoid unnecessary +reflection / type assertions during marshaling/unmarshaling to/from JSON for Go +structs. 
+ +easyjson also generates `MarshalJSON` and `UnmarshalJSON` funcs for Go struct +types compatible with the standard `json.Marshaler` and `json.Unmarshaler` +interfaces. Please be aware that using the standard `json.Marshal` / +`json.Unmarshal` for marshaling/unmarshaling will incur a significant +performance penalty when compared to using `easyjson.Marshal` / +`easyjson.Unmarshal`. + +Additionally, easyjson exposes utility funcs that use the `MarshalEasyJSON` and +`UnmarshalEasyJSON` for marshaling/unmarshaling to and from standard readers +and writers. For example, easyjson provides `easyjson.MarshalToHTTPResponseWriter` +which marshals to the standard `http.ResponseWriter`. Please see the [GoDoc +listing](https://godoc.org/github.com/mailru/easyjson) for the full listing of +utility funcs that are available. + +## Controlling easyjson Marshaling and Unmarshaling Behavior + +Go types can provide their own `MarshalEasyJSON` and `UnmarshalEasyJSON` funcs +that satisify the `easyjson.Marshaler` / `easyjson.Unmarshaler` interfaces. +These will be used by `easyjson.Marshal` and `easyjson.Unmarshal` when defined +for a Go type. + +Go types can also satisify the `easyjson.Optional` interface, which allows the +type to define its own `omitempty` logic. + +## Type Wrappers + +easyjson provides additional type wrappers defined in the `easyjson/opt` +package. These wrap the standard Go primitives and in turn satisify the +easyjson interfaces. + +The `easyjson/opt` type wrappers are useful when needing to distinguish between +a missing value and/or when needing to specifying a default value. Type +wrappers allow easyjson to avoid additional pointers and heap allocations and +can significantly increase performance when used properly. + +## Memory Pooling + +easyjson uses a buffer pool that allocates data in increasing chunks from 128 +to 32768 bytes. Chunks of 512 bytes and larger will be reused with the help of +`sync.Pool`. 
The maximum size of a chunk is bounded to reduce redundant memory +allocation and to allow larger reusable buffers. + +easyjson's custom allocation buffer pool is defined in the `easyjson/buffer` +package, and the default behavior pool behavior can be modified (if necessary) +through a call to `buffer.Init()` prior to any marshaling or unmarshaling. +Please see the [GoDoc listing](https://godoc.org/github.com/mailru/easyjson/buffer) +for more information. + +## Issues, Notes, and Limitations + +* easyjson is still early in its development. As such, there are likely to be + bugs and missing features when compared to `encoding/json`. In the case of a + missing feature or bug, please create a GitHub issue. Pull requests are + welcome! + +* Unlike `encoding/json`, object keys are case-sensitive. Case-insensitive + matching is not currently provided due to the significant performance hit + when doing case-insensitive key matching. In the future, case-insensitive + object key matching may be provided via an option to the generator. + +* easyjson makes use of `unsafe`, which simplifies the code and + provides significant performance benefits by allowing no-copy + conversion from `[]byte` to `string`. That said, `unsafe` is used + only when unmarshaling and parsing JSON, and any `unsafe` operations + / memory allocations done will be safely deallocated by + easyjson. Set the build tag `easyjson_nounsafe` to compile it + without `unsafe`. + +* easyjson is compatible with Google App Engine. The `appengine` build + tag (set by App Engine's environment) will automatically disable the + use of `unsafe`, which is not allowed in App Engine's Standard + Environment. Note that the use with App Engine is still experimental. + +* Floats are formatted using the default precision from Go's `strconv` package. + As such, easyjson will not correctly handle high precision floats when + marshaling/unmarshaling JSON. 
Note, however, that there are very few/limited + uses where this behavior is not sufficient for general use. That said, a + different package may be needed if precise marshaling/unmarshaling of high + precision floats to/from JSON is required. + +* While unmarshaling, the JSON parser does the minimal amount of work needed to + skip over unmatching parens, and as such full validation is not done for the + entire JSON value being unmarshaled/parsed. + +* Currently there is no true streaming support for encoding/decoding as + typically for many uses/protocols the final, marshaled length of the JSON + needs to be known prior to sending the data. Currently this is not possible + with easyjson's architecture. + +## Benchmarks + +Most benchmarks were done using the example +[13kB example JSON](https://dev.twitter.com/rest/reference/get/search/tweets) +(9k after eliminating whitespace). This example is similar to real-world data, +is well-structured, and contains a healthy variety of different types, making +it ideal for JSON serialization benchmarks. + +Note: + +* For small request benchmarks, an 80 byte portion of the above example was + used. + +* For large request marshaling benchmarks, a struct containing 50 regular + samples was used, making a ~500kB output JSON. + +* Benchmarks are showing the results of easyjson's default behaviour, + which makes use of `unsafe`. + +Benchmarks are available in the repository and can be run by invoking `make`. + +### easyjson vs. encoding/json + +easyjson is roughly 5-6 times faster than the standard `encoding/json` for +unmarshaling, and 3-4 times faster for non-concurrent marshaling. Concurrent +marshaling is 6-7x faster if marshaling to a writer. + +### easyjson vs. ffjson + +easyjson uses the same approach for JSON marshaling as +[ffjson](https://github.com/pquerna/ffjson), but takes a significantly +different approach to lexing and parsing JSON during unmarshaling. 
This means +easyjson is roughly 2-3x faster for unmarshaling and 1.5-2x faster for +non-concurrent unmarshaling. + +As of this writing, `ffjson` seems to have issues when used concurrently: +specifically, large request pooling hurts `ffjson`'s performance and causes +scalability issues. These issues with `ffjson` can likely be fixed, but as of +writing remain outstanding/known issues with `ffjson`. + +easyjson and `ffjson` have similar performance for small requests, however +easyjson outperforms `ffjson` by roughly 2-5x times for large requests when +used with a writer. + +### easyjson vs. go/codec + +[go/codec](https://github.com/ugorji/go) provides +compile-time helpers for JSON generation. In this case, helpers do not work +like marshalers as they are encoding-independent. + +easyjson is generally 2x faster than `go/codec` for non-concurrent benchmarks +and about 3x faster for concurrent encoding (without marshaling to a writer). + +In an attempt to measure marshaling performance of `go/codec` (as opposed to +allocations/memcpy/writer interface invocations), a benchmark was done with +resetting length of a byte slice rather than resetting the whole slice to nil. +However, the optimization in this exact form may not be applicable in practice, +since the memory is not freed between marshaling operations. + +### easyjson vs 'ujson' python module + +[ujson](https://github.com/esnme/ultrajson) is using C code for parsing, so it +is interesting to see how plain golang compares to that. It is imporant to note +that the resulting object for python is slower to access, since the library +parses JSON object into dictionaries. + +easyjson is slightly faster for unmarshaling and 2-3x faster than `ujson` for +marshaling. + +### Benchmark Results + +`ffjson` results are from February 4th, 2016, using the latest `ffjson` and go1.6. +`go/codec` results are from March 4th, 2016, using the latest `go/codec` and go1.6. 
+ +#### Unmarshaling + +| lib | json size | MB/s | allocs/op | B/op | +|:---------|:----------|-----:|----------:|------:| +| standard | regular | 22 | 218 | 10229 | +| standard | small | 9.7 | 14 | 720 | +| | | | | | +| easyjson | regular | 125 | 128 | 9794 | +| easyjson | small | 67 | 3 | 128 | +| | | | | | +| ffjson | regular | 66 | 141 | 9985 | +| ffjson | small | 17.6 | 10 | 488 | +| | | | | | +| codec | regular | 55 | 434 | 19299 | +| codec | small | 29 | 7 | 336 | +| | | | | | +| ujson | regular | 103 | N/A | N/A | + +#### Marshaling, one goroutine. + +| lib | json size | MB/s | allocs/op | B/op | +|:----------|:----------|-----:|----------:|------:| +| standard | regular | 75 | 9 | 23256 | +| standard | small | 32 | 3 | 328 | +| standard | large | 80 | 17 | 1.2M | +| | | | | | +| easyjson | regular | 213 | 9 | 10260 | +| easyjson* | regular | 263 | 8 | 742 | +| easyjson | small | 125 | 1 | 128 | +| easyjson | large | 212 | 33 | 490k | +| easyjson* | large | 262 | 25 | 2879 | +| | | | | | +| ffjson | regular | 122 | 153 | 21340 | +| ffjson** | regular | 146 | 152 | 4897 | +| ffjson | small | 36 | 5 | 384 | +| ffjson** | small | 64 | 4 | 128 | +| ffjson | large | 134 | 7317 | 818k | +| ffjson** | large | 125 | 7320 | 827k | +| | | | | | +| codec | regular | 80 | 17 | 33601 | +| codec*** | regular | 108 | 9 | 1153 | +| codec | small | 42 | 3 | 304 | +| codec*** | small | 56 | 1 | 48 | +| codec | large | 73 | 483 | 2.5M | +| codec*** | large | 103 | 451 | 66007 | +| | | | | | +| ujson | regular | 92 | N/A | N/A | + +\* marshaling to a writer, +\*\* using `ffjson.Pool()`, +\*\*\* reusing output slice instead of resetting it to nil + +#### Marshaling, concurrent. 
+ +| lib | json size | MB/s | allocs/op | B/op | +|:----------|:----------|-----:|----------:|------:| +| standard | regular | 252 | 9 | 23257 | +| standard | small | 124 | 3 | 328 | +| standard | large | 289 | 17 | 1.2M | +| | | | | | +| easyjson | regular | 792 | 9 | 10597 | +| easyjson* | regular | 1748 | 8 | 779 | +| easyjson | small | 333 | 1 | 128 | +| easyjson | large | 718 | 36 | 548k | +| easyjson* | large | 2134 | 25 | 4957 | +| | | | | | +| ffjson | regular | 301 | 153 | 21629 | +| ffjson** | regular | 707 | 152 | 5148 | +| ffjson | small | 62 | 5 | 384 | +| ffjson** | small | 282 | 4 | 128 | +| ffjson | large | 438 | 7330 | 1.0M | +| ffjson** | large | 131 | 7319 | 820k | +| | | | | | +| codec | regular | 183 | 17 | 33603 | +| codec*** | regular | 671 | 9 | 1157 | +| codec | small | 147 | 3 | 304 | +| codec*** | small | 299 | 1 | 48 | +| codec | large | 190 | 483 | 2.5M | +| codec*** | large | 752 | 451 | 77574 | + +\* marshaling to a writer, +\*\* using `ffjson.Pool()`, +\*\*\* reusing output slice instead of resetting it to nil diff --git a/src/vendor/github.com/mailru/easyjson/benchmark/codec_test.go b/src/vendor/github.com/mailru/easyjson/benchmark/codec_test.go new file mode 100644 index 00000000..5c77072e --- /dev/null +++ b/src/vendor/github.com/mailru/easyjson/benchmark/codec_test.go @@ -0,0 +1,279 @@ +// +build use_codec + +package benchmark + +import ( + "testing" + + "github.com/ugorji/go/codec" +) + +func BenchmarkCodec_Unmarshal_M(b *testing.B) { + var h codec.Handle = new(codec.JsonHandle) + dec := codec.NewDecoderBytes(nil, h) + + b.SetBytes(int64(len(largeStructText))) + for i := 0; i < b.N; i++ { + var s LargeStruct + dec.ResetBytes(largeStructText) + if err := dec.Decode(&s); err != nil { + b.Error(err) + } + } +} + +func BenchmarkCodec_Unmarshal_S(b *testing.B) { + var h codec.Handle = new(codec.JsonHandle) + dec := codec.NewDecoderBytes(nil, h) + + b.SetBytes(int64(len(smallStructText))) + for i := 0; i < b.N; i++ { + var s 
LargeStruct + dec.ResetBytes(smallStructText) + if err := dec.Decode(&s); err != nil { + b.Error(err) + } + } +} + +func BenchmarkCodec_Marshal_S(b *testing.B) { + var h codec.Handle = new(codec.JsonHandle) + + var out []byte + enc := codec.NewEncoderBytes(&out, h) + + var l int64 + for i := 0; i < b.N; i++ { + enc.ResetBytes(&out) + if err := enc.Encode(&smallStructData); err != nil { + b.Error(err) + } + l = int64(len(out)) + out = nil + } + + b.SetBytes(l) +} + +func BenchmarkCodec_Marshal_M(b *testing.B) { + var h codec.Handle = new(codec.JsonHandle) + + var out []byte + enc := codec.NewEncoderBytes(&out, h) + + var l int64 + for i := 0; i < b.N; i++ { + enc.ResetBytes(&out) + if err := enc.Encode(&largeStructData); err != nil { + b.Error(err) + } + l = int64(len(out)) + out = nil + } + + b.SetBytes(l) +} + +func BenchmarkCodec_Marshal_L(b *testing.B) { + var h codec.Handle = new(codec.JsonHandle) + + var out []byte + enc := codec.NewEncoderBytes(&out, h) + + var l int64 + for i := 0; i < b.N; i++ { + enc.ResetBytes(&out) + if err := enc.Encode(&xlStructData); err != nil { + b.Error(err) + } + l = int64(len(out)) + out = nil + } + + b.SetBytes(l) +} + +func BenchmarkCodec_Marshal_S_Reuse(b *testing.B) { + var h codec.Handle = new(codec.JsonHandle) + + var out []byte + enc := codec.NewEncoderBytes(&out, h) + + var l int64 + for i := 0; i < b.N; i++ { + enc.ResetBytes(&out) + if err := enc.Encode(&smallStructData); err != nil { + b.Error(err) + } + l = int64(len(out)) + out = out[:0] + } + + b.SetBytes(l) +} + +func BenchmarkCodec_Marshal_M_Reuse(b *testing.B) { + var h codec.Handle = new(codec.JsonHandle) + + var out []byte + enc := codec.NewEncoderBytes(&out, h) + + var l int64 + for i := 0; i < b.N; i++ { + enc.ResetBytes(&out) + if err := enc.Encode(&largeStructData); err != nil { + b.Error(err) + } + l = int64(len(out)) + out = out[:0] + } + + b.SetBytes(l) +} + +func BenchmarkCodec_Marshal_L_Reuse(b *testing.B) { + var h codec.Handle = new(codec.JsonHandle) 
+ + var out []byte + enc := codec.NewEncoderBytes(&out, h) + + var l int64 + for i := 0; i < b.N; i++ { + enc.ResetBytes(&out) + if err := enc.Encode(&xlStructData); err != nil { + b.Error(err) + } + l = int64(len(out)) + out = out[:0] + } + + b.SetBytes(l) +} + +func BenchmarkCodec_Marshal_S_Parallel(b *testing.B) { + var l int64 + + b.RunParallel(func(pb *testing.PB) { + var out []byte + + var h codec.Handle = new(codec.JsonHandle) + enc := codec.NewEncoderBytes(&out, h) + + for pb.Next() { + enc.ResetBytes(&out) + if err := enc.Encode(&smallStructData); err != nil { + b.Error(err) + } + l = int64(len(out)) + out = nil + } + }) + + b.SetBytes(l) +} + +func BenchmarkCodec_Marshal_M_Parallel(b *testing.B) { + var l int64 + + b.RunParallel(func(pb *testing.PB) { + var h codec.Handle = new(codec.JsonHandle) + + var out []byte + enc := codec.NewEncoderBytes(&out, h) + + for pb.Next() { + enc.ResetBytes(&out) + if err := enc.Encode(&largeStructData); err != nil { + b.Error(err) + } + l = int64(len(out)) + out = nil + } + }) + b.SetBytes(l) +} + +func BenchmarkCodec_Marshal_L_Parallel(b *testing.B) { + var l int64 + + b.RunParallel(func(pb *testing.PB) { + var h codec.Handle = new(codec.JsonHandle) + + var out []byte + enc := codec.NewEncoderBytes(&out, h) + + for pb.Next() { + enc.ResetBytes(&out) + if err := enc.Encode(&xlStructData); err != nil { + b.Error(err) + } + l = int64(len(out)) + out = nil + } + }) + b.SetBytes(l) +} + +func BenchmarkCodec_Marshal_S_Parallel_Reuse(b *testing.B) { + var l int64 + + b.RunParallel(func(pb *testing.PB) { + var out []byte + + var h codec.Handle = new(codec.JsonHandle) + enc := codec.NewEncoderBytes(&out, h) + + for pb.Next() { + enc.ResetBytes(&out) + if err := enc.Encode(&smallStructData); err != nil { + b.Error(err) + } + l = int64(len(out)) + out = out[:0] + } + }) + + b.SetBytes(l) +} + +func BenchmarkCodec_Marshal_M_Parallel_Reuse(b *testing.B) { + var l int64 + + b.RunParallel(func(pb *testing.PB) { + var h codec.Handle = 
new(codec.JsonHandle) + + var out []byte + enc := codec.NewEncoderBytes(&out, h) + + for pb.Next() { + enc.ResetBytes(&out) + if err := enc.Encode(&largeStructData); err != nil { + b.Error(err) + } + l = int64(len(out)) + out = out[:0] + } + }) + b.SetBytes(l) +} + +func BenchmarkCodec_Marshal_L_Parallel_Reuse(b *testing.B) { + var l int64 + + b.RunParallel(func(pb *testing.PB) { + var h codec.Handle = new(codec.JsonHandle) + + var out []byte + enc := codec.NewEncoderBytes(&out, h) + + for pb.Next() { + enc.ResetBytes(&out) + if err := enc.Encode(&xlStructData); err != nil { + b.Error(err) + } + l = int64(len(out)) + out = out[:0] + } + }) + b.SetBytes(l) +} diff --git a/src/vendor/github.com/mailru/easyjson/benchmark/data.go b/src/vendor/github.com/mailru/easyjson/benchmark/data.go new file mode 100644 index 00000000..d2c689cd --- /dev/null +++ b/src/vendor/github.com/mailru/easyjson/benchmark/data.go @@ -0,0 +1,148 @@ +// Package benchmark provides a simple benchmark for easyjson against default serialization and ffjson. 
+// The data example is taken from https://dev.twitter.com/rest/reference/get/search/tweets +package benchmark + +import ( + "io/ioutil" +) + +var largeStructText, _ = ioutil.ReadFile("example.json") +var xlStructData XLStruct + +func init() { + for i := 0; i < 50; i++ { + xlStructData.Data = append(xlStructData.Data, largeStructData) + } +} + +var smallStructText = []byte(`{"hashtags":[{"indices":[5, 10],"text":"some-text"}],"urls":[],"user_mentions":[]}`) +var smallStructData = Entities{ + Hashtags: []Hashtag{{Indices: []int{5, 10}, Text: "some-text"}}, + Urls: []*string{}, + UserMentions: []*string{}, +} + +type SearchMetadata struct { + CompletedIn float64 `json:"completed_in"` + Count int `json:"count"` + MaxID int `json:"max_id"` + MaxIDStr string `json:"max_id_str"` + NextResults string `json:"next_results"` + Query string `json:"query"` + RefreshURL string `json:"refresh_url"` + SinceID int `json:"since_id"` + SinceIDStr string `json:"since_id_str"` +} + +type Hashtag struct { + Indices []int `json:"indices"` + Text string `json:"text"` +} + +//easyjson:json +type Entities struct { + Hashtags []Hashtag `json:"hashtags"` + Urls []*string `json:"urls"` + UserMentions []*string `json:"user_mentions"` +} + +type UserEntityDescription struct { + Urls []*string `json:"urls"` +} + +type URL struct { + ExpandedURL *string `json:"expanded_url"` + Indices []int `json:"indices"` + URL string `json:"url"` +} + +type UserEntityURL struct { + Urls []URL `json:"urls"` +} + +type UserEntities struct { + Description UserEntityDescription `json:"description"` + URL UserEntityURL `json:"url"` +} + +type User struct { + ContributorsEnabled bool `json:"contributors_enabled"` + CreatedAt string `json:"created_at"` + DefaultProfile bool `json:"default_profile"` + DefaultProfileImage bool `json:"default_profile_image"` + Description string `json:"description"` + Entities UserEntities `json:"entities"` + FavouritesCount int `json:"favourites_count"` + FollowRequestSent *string 
`json:"follow_request_sent"` + FollowersCount int `json:"followers_count"` + Following *string `json:"following"` + FriendsCount int `json:"friends_count"` + GeoEnabled bool `json:"geo_enabled"` + ID int `json:"id"` + IDStr string `json:"id_str"` + IsTranslator bool `json:"is_translator"` + Lang string `json:"lang"` + ListedCount int `json:"listed_count"` + Location string `json:"location"` + Name string `json:"name"` + Notifications *string `json:"notifications"` + ProfileBackgroundColor string `json:"profile_background_color"` + ProfileBackgroundImageURL string `json:"profile_background_image_url"` + ProfileBackgroundImageURLHTTPS string `json:"profile_background_image_url_https"` + ProfileBackgroundTile bool `json:"profile_background_tile"` + ProfileImageURL string `json:"profile_image_url"` + ProfileImageURLHTTPS string `json:"profile_image_url_https"` + ProfileLinkColor string `json:"profile_link_color"` + ProfileSidebarBorderColor string `json:"profile_sidebar_border_color"` + ProfileSidebarFillColor string `json:"profile_sidebar_fill_color"` + ProfileTextColor string `json:"profile_text_color"` + ProfileUseBackgroundImage bool `json:"profile_use_background_image"` + Protected bool `json:"protected"` + ScreenName string `json:"screen_name"` + ShowAllInlineMedia bool `json:"show_all_inline_media"` + StatusesCount int `json:"statuses_count"` + TimeZone string `json:"time_zone"` + URL *string `json:"url"` + UtcOffset int `json:"utc_offset"` + Verified bool `json:"verified"` +} + +type StatusMetadata struct { + IsoLanguageCode string `json:"iso_language_code"` + ResultType string `json:"result_type"` +} + +type Status struct { + Contributors *string `json:"contributors"` + Coordinates *string `json:"coordinates"` + CreatedAt string `json:"created_at"` + Entities Entities `json:"entities"` + Favorited bool `json:"favorited"` + Geo *string `json:"geo"` + ID int64 `json:"id"` + IDStr string `json:"id_str"` + InReplyToScreenName *string 
`json:"in_reply_to_screen_name"` + InReplyToStatusID *string `json:"in_reply_to_status_id"` + InReplyToStatusIDStr *string `json:"in_reply_to_status_id_str"` + InReplyToUserID *string `json:"in_reply_to_user_id"` + InReplyToUserIDStr *string `json:"in_reply_to_user_id_str"` + Metadata StatusMetadata `json:"metadata"` + Place *string `json:"place"` + RetweetCount int `json:"retweet_count"` + Retweeted bool `json:"retweeted"` + Source string `json:"source"` + Text string `json:"text"` + Truncated bool `json:"truncated"` + User User `json:"user"` +} + +//easyjson:json +type LargeStruct struct { + SearchMetadata SearchMetadata `json:"search_metadata"` + Statuses []Status `json:"statuses"` +} + +//easyjson:json +type XLStruct struct { + Data []LargeStruct +} diff --git a/src/vendor/github.com/mailru/easyjson/benchmark/data_codec.go b/src/vendor/github.com/mailru/easyjson/benchmark/data_codec.go new file mode 100644 index 00000000..d2d83fac --- /dev/null +++ b/src/vendor/github.com/mailru/easyjson/benchmark/data_codec.go @@ -0,0 +1,6914 @@ +//+build use_codec +//+build !easyjson_nounsafe +//+build !appengine + +// ************************************************************ +// DO NOT EDIT. +// THIS FILE IS AUTO-GENERATED BY codecgen. 
+// ************************************************************ + +package benchmark + +import ( + "errors" + "fmt" + "reflect" + "runtime" + "unsafe" + + codec1978 "github.com/ugorji/go/codec" +) + +const ( + // ----- content types ---- + codecSelferC_UTF89225 = 1 + codecSelferC_RAW9225 = 0 + // ----- value types used ---- + codecSelferValueTypeArray9225 = 10 + codecSelferValueTypeMap9225 = 9 + // ----- containerStateValues ---- + codecSelfer_containerMapKey9225 = 2 + codecSelfer_containerMapValue9225 = 3 + codecSelfer_containerMapEnd9225 = 4 + codecSelfer_containerArrayElem9225 = 6 + codecSelfer_containerArrayEnd9225 = 7 +) + +var ( + codecSelferBitsize9225 = uint8(reflect.TypeOf(uint(0)).Bits()) + codecSelferOnlyMapOrArrayEncodeToStructErr9225 = errors.New(`only encoded map or array can be decoded into a struct`) +) + +type codecSelferUnsafeString9225 struct { + Data uintptr + Len int +} + +type codecSelfer9225 struct{} + +func init() { + if codec1978.GenVersion != 5 { + _, file, _, _ := runtime.Caller(0) + err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. 
Re-generate file: %v", + 5, codec1978.GenVersion, file) + panic(err) + } + if false { // reference the types, but skip this branch at build/run time + var v0 unsafe.Pointer + _ = v0 + } +} + +func (x *SearchMetadata) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer9225 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [9]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(9) + } else { + yynn2 = 9 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeFloat64(float64(x.CompletedIn)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, string("completed_in")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeFloat64(float64(x.CompletedIn)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeInt(int64(x.Count)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, string("count")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeInt(int64(x.Count)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(x.MaxID)) + } + } else { + 
z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, string("max_id")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeInt(int64(x.MaxID)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(x.MaxIDStr)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, string("max_id_str")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(x.MaxIDStr)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(x.NextResults)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, string("next_results")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(x.NextResults)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(x.Query)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, string("query")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(x.Query)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + yym22 := z.EncBinary() + _ = yym22 + if 
false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(x.RefreshURL)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, string("refresh_url")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(x.RefreshURL)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + yym25 := z.EncBinary() + _ = yym25 + if false { + } else { + r.EncodeInt(int64(x.SinceID)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, string("since_id")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + yym26 := z.EncBinary() + _ = yym26 + if false { + } else { + r.EncodeInt(int64(x.SinceID)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + yym28 := z.EncBinary() + _ = yym28 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(x.SinceIDStr)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, string("since_id_str")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + yym29 := z.EncBinary() + _ = yym29 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(x.SinceIDStr)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd9225) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd9225) + } + } + } +} + +func (x *SearchMetadata) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer9225 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap9225 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + 
z.DecSendContainerState(codecSelfer_containerMapEnd9225) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray9225 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr9225) + } + } +} + +func (x *SearchMetadata) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer9225 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey9225) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3SlcHdr := codecSelferUnsafeString9225{uintptr(unsafe.Pointer(&yys3Slc[0])), len(yys3Slc)} + yys3 := *(*string)(unsafe.Pointer(&yys3SlcHdr)) + z.DecSendContainerState(codecSelfer_containerMapValue9225) + switch yys3 { + case "completed_in": + if r.TryDecodeAsNil() { + x.CompletedIn = 0 + } else { + yyv4 := &x.CompletedIn + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*float64)(yyv4)) = float64(r.DecodeFloat(false)) + } + } + case "count": + if r.TryDecodeAsNil() { + x.Count = 0 + } else { + yyv6 := &x.Count + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*int)(yyv6)) = int(r.DecodeInt(codecSelferBitsize9225)) + } + } + case "max_id": + if r.TryDecodeAsNil() { + x.MaxID = 0 + } else { + yyv8 := &x.MaxID + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*int)(yyv8)) = int(r.DecodeInt(codecSelferBitsize9225)) + } + } + case "max_id_str": + if r.TryDecodeAsNil() { + x.MaxIDStr = "" + } else { + yyv10 := &x.MaxIDStr + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + case 
"next_results": + if r.TryDecodeAsNil() { + x.NextResults = "" + } else { + yyv12 := &x.NextResults + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } + } + case "query": + if r.TryDecodeAsNil() { + x.Query = "" + } else { + yyv14 := &x.Query + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } + } + case "refresh_url": + if r.TryDecodeAsNil() { + x.RefreshURL = "" + } else { + yyv16 := &x.RefreshURL + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*string)(yyv16)) = r.DecodeString() + } + } + case "since_id": + if r.TryDecodeAsNil() { + x.SinceID = 0 + } else { + yyv18 := &x.SinceID + yym19 := z.DecBinary() + _ = yym19 + if false { + } else { + *((*int)(yyv18)) = int(r.DecodeInt(codecSelferBitsize9225)) + } + } + case "since_id_str": + if r.TryDecodeAsNil() { + x.SinceIDStr = "" + } else { + yyv20 := &x.SinceIDStr + yym21 := z.DecBinary() + _ = yym21 + if false { + } else { + *((*string)(yyv20)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd9225) +} + +func (x *SearchMetadata) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer9225 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj22 int + var yyb22 bool + var yyhl22 bool = l >= 0 + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l + } else { + yyb22 = r.CheckBreak() + } + if yyb22 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + x.CompletedIn = 0 + } else { + yyv23 := &x.CompletedIn + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*float64)(yyv23)) = float64(r.DecodeFloat(false)) + } + } + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l + } else { + yyb22 = r.CheckBreak() + } + if yyb22 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + x.Count = 0 + } else { + yyv25 := &x.Count + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*int)(yyv25)) = int(r.DecodeInt(codecSelferBitsize9225)) + } + } + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l + } else { + yyb22 = r.CheckBreak() + } + if yyb22 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + x.MaxID = 0 + } else { + yyv27 := &x.MaxID + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*int)(yyv27)) = int(r.DecodeInt(codecSelferBitsize9225)) + } + } + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l + } else { + yyb22 = r.CheckBreak() + } + if yyb22 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + x.MaxIDStr = "" + } else { + yyv29 := &x.MaxIDStr + yym30 := z.DecBinary() + _ = yym30 + if false { + } else { + *((*string)(yyv29)) = r.DecodeString() + } + } + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l + } else { + yyb22 = r.CheckBreak() + } + if yyb22 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + x.NextResults = "" + } else { + yyv31 := &x.NextResults + yym32 := z.DecBinary() + _ = yym32 + if false { + } else { + *((*string)(yyv31)) = r.DecodeString() + } + } + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l + } else { + yyb22 = r.CheckBreak() + } + if yyb22 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + x.Query = "" + } else { + yyv33 := &x.Query + yym34 := z.DecBinary() + _ = yym34 + if false { + } else { + 
*((*string)(yyv33)) = r.DecodeString() + } + } + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l + } else { + yyb22 = r.CheckBreak() + } + if yyb22 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + x.RefreshURL = "" + } else { + yyv35 := &x.RefreshURL + yym36 := z.DecBinary() + _ = yym36 + if false { + } else { + *((*string)(yyv35)) = r.DecodeString() + } + } + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l + } else { + yyb22 = r.CheckBreak() + } + if yyb22 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + x.SinceID = 0 + } else { + yyv37 := &x.SinceID + yym38 := z.DecBinary() + _ = yym38 + if false { + } else { + *((*int)(yyv37)) = int(r.DecodeInt(codecSelferBitsize9225)) + } + } + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l + } else { + yyb22 = r.CheckBreak() + } + if yyb22 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + x.SinceIDStr = "" + } else { + yyv39 := &x.SinceIDStr + yym40 := z.DecBinary() + _ = yym40 + if false { + } else { + *((*string)(yyv39)) = r.DecodeString() + } + } + for { + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l + } else { + yyb22 = r.CheckBreak() + } + if yyb22 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + z.DecStructFieldNotFound(yyj22-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) +} + +func (x *Hashtag) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer9225 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var 
yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + if x.Indices == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + z.F.EncSliceIntV(x.Indices, false, e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, string("indices")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + if x.Indices == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + z.F.EncSliceIntV(x.Indices, false, e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(x.Text)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, string("text")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(x.Text)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd9225) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd9225) + } + } + } +} + +func (x *Hashtag) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer9225 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap9225 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd9225) + } else { + 
x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray9225 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr9225) + } + } +} + +func (x *Hashtag) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer9225 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey9225) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3SlcHdr := codecSelferUnsafeString9225{uintptr(unsafe.Pointer(&yys3Slc[0])), len(yys3Slc)} + yys3 := *(*string)(unsafe.Pointer(&yys3SlcHdr)) + z.DecSendContainerState(codecSelfer_containerMapValue9225) + switch yys3 { + case "indices": + if r.TryDecodeAsNil() { + x.Indices = nil + } else { + yyv4 := &x.Indices + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + z.F.DecSliceIntX(yyv4, false, d) + } + } + case "text": + if r.TryDecodeAsNil() { + x.Text = "" + } else { + yyv6 := &x.Text + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd9225) +} + +func (x *Hashtag) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer9225 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + x.Indices = nil + } else { + yyv9 := &x.Indices + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + z.F.DecSliceIntX(yyv9, false, d) + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + x.Text = "" + } else { + yyv11 := &x.Text + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) +} + +func (x *Entities) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer9225 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 3 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + if x.Hashtags == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSliceHashtag(([]Hashtag)(x.Hashtags), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, string("hashtags")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) 
+ if x.Hashtags == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSliceHashtag(([]Hashtag)(x.Hashtags), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + if x.Urls == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + h.encSlicePtrtostring(([]*string)(x.Urls), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, string("urls")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + if x.Urls == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + h.encSlicePtrtostring(([]*string)(x.Urls), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + if x.UserMentions == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + h.encSlicePtrtostring(([]*string)(x.UserMentions), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, string("user_mentions")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + if x.UserMentions == nil { + r.EncodeNil() + } else { + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + h.encSlicePtrtostring(([]*string)(x.UserMentions), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd9225) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd9225) + } + } + } +} + +func (x *Entities) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer9225 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap9225 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + 
z.DecSendContainerState(codecSelfer_containerMapEnd9225) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray9225 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr9225) + } + } +} + +func (x *Entities) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer9225 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey9225) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3SlcHdr := codecSelferUnsafeString9225{uintptr(unsafe.Pointer(&yys3Slc[0])), len(yys3Slc)} + yys3 := *(*string)(unsafe.Pointer(&yys3SlcHdr)) + z.DecSendContainerState(codecSelfer_containerMapValue9225) + switch yys3 { + case "hashtags": + if r.TryDecodeAsNil() { + x.Hashtags = nil + } else { + yyv4 := &x.Hashtags + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSliceHashtag((*[]Hashtag)(yyv4), d) + } + } + case "urls": + if r.TryDecodeAsNil() { + x.Urls = nil + } else { + yyv6 := &x.Urls + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + h.decSlicePtrtostring((*[]*string)(yyv6), d) + } + } + case "user_mentions": + if r.TryDecodeAsNil() { + x.UserMentions = nil + } else { + yyv8 := &x.UserMentions + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + h.decSlicePtrtostring((*[]*string)(yyv8), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd9225) +} + +func (x *Entities) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h 
codecSelfer9225 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + x.Hashtags = nil + } else { + yyv11 := &x.Hashtags + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + h.decSliceHashtag((*[]Hashtag)(yyv11), d) + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + x.Urls = nil + } else { + yyv13 := &x.Urls + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + h.decSlicePtrtostring((*[]*string)(yyv13), d) + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + x.UserMentions = nil + } else { + yyv15 := &x.UserMentions + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + h.decSlicePtrtostring((*[]*string)(yyv15), d) + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) +} + +func (x *UserEntityDescription) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer9225 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && 
z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + if x.Urls == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSlicePtrtostring(([]*string)(x.Urls), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, string("urls")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + if x.Urls == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSlicePtrtostring(([]*string)(x.Urls), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd9225) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd9225) + } + } + } +} + +func (x *UserEntityDescription) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer9225 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap9225 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd9225) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray9225 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr9225) + } + } +} + +func (x *UserEntityDescription) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) 
{ + var h codecSelfer9225 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey9225) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3SlcHdr := codecSelferUnsafeString9225{uintptr(unsafe.Pointer(&yys3Slc[0])), len(yys3Slc)} + yys3 := *(*string)(unsafe.Pointer(&yys3SlcHdr)) + z.DecSendContainerState(codecSelfer_containerMapValue9225) + switch yys3 { + case "urls": + if r.TryDecodeAsNil() { + x.Urls = nil + } else { + yyv4 := &x.Urls + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSlicePtrtostring((*[]*string)(yyv4), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd9225) +} + +func (x *UserEntityDescription) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer9225 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + x.Urls = nil + } else { + yyv7 := &x.Urls + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + h.decSlicePtrtostring((*[]*string)(yyv7), d) + } + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) +} + +func (x *URL) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer9225 
+ z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 3 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + if x.ExpandedURL == nil { + r.EncodeNil() + } else { + yy4 := *x.ExpandedURL + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(yy4)) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, string("expanded_url")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + if x.ExpandedURL == nil { + r.EncodeNil() + } else { + yy6 := *x.ExpandedURL + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(yy6)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + if x.Indices == nil { + r.EncodeNil() + } else { + yym9 := z.EncBinary() + _ = yym9 + if false { + } else { + z.F.EncSliceIntV(x.Indices, false, e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, string("indices")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + if x.Indices == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + z.F.EncSliceIntV(x.Indices, false, e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + yym12 := z.EncBinary() + _ = yym12 + if false { + } else { + 
r.EncodeString(codecSelferC_UTF89225, string(x.URL)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, string("url")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(x.URL)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd9225) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd9225) + } + } + } +} + +func (x *URL) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer9225 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap9225 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd9225) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray9225 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr9225) + } + } +} + +func (x *URL) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer9225 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey9225) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3SlcHdr := codecSelferUnsafeString9225{uintptr(unsafe.Pointer(&yys3Slc[0])), len(yys3Slc)} + yys3 := *(*string)(unsafe.Pointer(&yys3SlcHdr)) + z.DecSendContainerState(codecSelfer_containerMapValue9225) + 
switch yys3 { + case "expanded_url": + if r.TryDecodeAsNil() { + if x.ExpandedURL != nil { + x.ExpandedURL = nil + } + } else { + if x.ExpandedURL == nil { + x.ExpandedURL = new(string) + } + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(x.ExpandedURL)) = r.DecodeString() + } + } + case "indices": + if r.TryDecodeAsNil() { + x.Indices = nil + } else { + yyv6 := &x.Indices + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + z.F.DecSliceIntX(yyv6, false, d) + } + } + case "url": + if r.TryDecodeAsNil() { + x.URL = "" + } else { + yyv8 := &x.URL + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd9225) +} + +func (x *URL) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer9225 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + if x.ExpandedURL != nil { + x.ExpandedURL = nil + } + } else { + if x.ExpandedURL == nil { + x.ExpandedURL = new(string) + } + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(x.ExpandedURL)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + x.Indices = nil + } else { + yyv13 := &x.Indices + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + z.F.DecSliceIntX(yyv13, false, d) + } + } + yyj10++ + if 
yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + x.URL = "" + } else { + yyv15 := &x.URL + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) +} + +func (x *UserEntityURL) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer9225 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + if x.Urls == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSliceURL(([]URL)(x.Urls), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, string("urls")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + if x.Urls == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSliceURL(([]URL)(x.Urls), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd9225) + } else { + 
z.EncSendContainerState(codecSelfer_containerMapEnd9225) + } + } + } +} + +func (x *UserEntityURL) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer9225 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap9225 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd9225) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray9225 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr9225) + } + } +} + +func (x *UserEntityURL) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer9225 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey9225) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3SlcHdr := codecSelferUnsafeString9225{uintptr(unsafe.Pointer(&yys3Slc[0])), len(yys3Slc)} + yys3 := *(*string)(unsafe.Pointer(&yys3SlcHdr)) + z.DecSendContainerState(codecSelfer_containerMapValue9225) + switch yys3 { + case "urls": + if r.TryDecodeAsNil() { + x.Urls = nil + } else { + yyv4 := &x.Urls + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSliceURL((*[]URL)(yyv4), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd9225) +} + +func (x *UserEntityURL) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h 
codecSelfer9225 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + x.Urls = nil + } else { + yyv7 := &x.Urls + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + h.decSliceURL((*[]URL)(yyv7), d) + } + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) +} + +func (x *UserEntities) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer9225 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + yy4 := &x.Description + yy4.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, string("description")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + yy6 := &x.Description + yy6.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + yy9 := &x.URL + yy9.CodecEncodeSelf(e) + } else { + 
z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, string("url")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + yy11 := &x.URL + yy11.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd9225) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd9225) + } + } + } +} + +func (x *UserEntities) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer9225 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap9225 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd9225) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray9225 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr9225) + } + } +} + +func (x *UserEntities) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer9225 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey9225) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3SlcHdr := codecSelferUnsafeString9225{uintptr(unsafe.Pointer(&yys3Slc[0])), len(yys3Slc)} + yys3 := *(*string)(unsafe.Pointer(&yys3SlcHdr)) + z.DecSendContainerState(codecSelfer_containerMapValue9225) + switch yys3 { + case "description": + if r.TryDecodeAsNil() { + x.Description = UserEntityDescription{} + } else { + yyv4 := 
&x.Description + yyv4.CodecDecodeSelf(d) + } + case "url": + if r.TryDecodeAsNil() { + x.URL = UserEntityURL{} + } else { + yyv5 := &x.URL + yyv5.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd9225) +} + +func (x *UserEntities) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer9225 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + x.Description = UserEntityDescription{} + } else { + yyv7 := &x.Description + yyv7.CodecDecodeSelf(d) + } + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + x.URL = UserEntityURL{} + } else { + yyv8 := &x.URL + yyv8.CodecDecodeSelf(d) + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) +} + +func (x *User) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer9225 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [39]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || 
yy2arr2 { + r.EncodeArrayStart(39) + } else { + yynn2 = 39 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeBool(bool(x.ContributorsEnabled)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, string("contributors_enabled")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeBool(bool(x.ContributorsEnabled)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(x.CreatedAt)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, string("created_at")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(x.CreatedAt)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeBool(bool(x.DefaultProfile)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, string("default_profile")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeBool(bool(x.DefaultProfile)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeBool(bool(x.DefaultProfileImage)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, 
string("default_profile_image")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeBool(bool(x.DefaultProfileImage)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(x.Description)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, string("description")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(x.Description)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + yy19 := &x.Entities + yy19.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, string("entities")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + yy21 := &x.Entities + yy21.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + yym24 := z.EncBinary() + _ = yym24 + if false { + } else { + r.EncodeInt(int64(x.FavouritesCount)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, string("favourites_count")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + yym25 := z.EncBinary() + _ = yym25 + if false { + } else { + r.EncodeInt(int64(x.FavouritesCount)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + if x.FollowRequestSent == nil { + r.EncodeNil() + } else { + yy27 := *x.FollowRequestSent + yym28 := z.EncBinary() + _ = yym28 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(yy27)) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + 
r.EncodeString(codecSelferC_UTF89225, string("follow_request_sent")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + if x.FollowRequestSent == nil { + r.EncodeNil() + } else { + yy29 := *x.FollowRequestSent + yym30 := z.EncBinary() + _ = yym30 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(yy29)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + yym32 := z.EncBinary() + _ = yym32 + if false { + } else { + r.EncodeInt(int64(x.FollowersCount)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, string("followers_count")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + yym33 := z.EncBinary() + _ = yym33 + if false { + } else { + r.EncodeInt(int64(x.FollowersCount)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + if x.Following == nil { + r.EncodeNil() + } else { + yy35 := *x.Following + yym36 := z.EncBinary() + _ = yym36 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(yy35)) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, string("following")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + if x.Following == nil { + r.EncodeNil() + } else { + yy37 := *x.Following + yym38 := z.EncBinary() + _ = yym38 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(yy37)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + yym40 := z.EncBinary() + _ = yym40 + if false { + } else { + r.EncodeInt(int64(x.FriendsCount)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, string("friends_count")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + yym41 := z.EncBinary() + _ = yym41 + if false { + } else { + 
r.EncodeInt(int64(x.FriendsCount)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + yym43 := z.EncBinary() + _ = yym43 + if false { + } else { + r.EncodeBool(bool(x.GeoEnabled)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, string("geo_enabled")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + yym44 := z.EncBinary() + _ = yym44 + if false { + } else { + r.EncodeBool(bool(x.GeoEnabled)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + yym46 := z.EncBinary() + _ = yym46 + if false { + } else { + r.EncodeInt(int64(x.ID)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, string("id")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + yym47 := z.EncBinary() + _ = yym47 + if false { + } else { + r.EncodeInt(int64(x.ID)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + yym49 := z.EncBinary() + _ = yym49 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(x.IDStr)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, string("id_str")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + yym50 := z.EncBinary() + _ = yym50 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(x.IDStr)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + yym52 := z.EncBinary() + _ = yym52 + if false { + } else { + r.EncodeBool(bool(x.IsTranslator)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, string("is_translator")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + yym53 := z.EncBinary() + _ = yym53 + if false { + } else { + r.EncodeBool(bool(x.IsTranslator)) + } + } + if 
yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + yym55 := z.EncBinary() + _ = yym55 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(x.Lang)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, string("lang")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + yym56 := z.EncBinary() + _ = yym56 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(x.Lang)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + yym58 := z.EncBinary() + _ = yym58 + if false { + } else { + r.EncodeInt(int64(x.ListedCount)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, string("listed_count")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + yym59 := z.EncBinary() + _ = yym59 + if false { + } else { + r.EncodeInt(int64(x.ListedCount)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + yym61 := z.EncBinary() + _ = yym61 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(x.Location)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, string("location")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + yym62 := z.EncBinary() + _ = yym62 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(x.Location)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + yym64 := z.EncBinary() + _ = yym64 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(x.Name)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + yym65 := z.EncBinary() + _ = yym65 + if false { + } else { + 
r.EncodeString(codecSelferC_UTF89225, string(x.Name)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + if x.Notifications == nil { + r.EncodeNil() + } else { + yy67 := *x.Notifications + yym68 := z.EncBinary() + _ = yym68 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(yy67)) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, string("notifications")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + if x.Notifications == nil { + r.EncodeNil() + } else { + yy69 := *x.Notifications + yym70 := z.EncBinary() + _ = yym70 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(yy69)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + yym72 := z.EncBinary() + _ = yym72 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(x.ProfileBackgroundColor)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, string("profile_background_color")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + yym73 := z.EncBinary() + _ = yym73 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(x.ProfileBackgroundColor)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + yym75 := z.EncBinary() + _ = yym75 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(x.ProfileBackgroundImageURL)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, string("profile_background_image_url")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + yym76 := z.EncBinary() + _ = yym76 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(x.ProfileBackgroundImageURL)) + } + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayElem9225) + yym78 := z.EncBinary() + _ = yym78 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(x.ProfileBackgroundImageURLHTTPS)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, string("profile_background_image_url_https")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + yym79 := z.EncBinary() + _ = yym79 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(x.ProfileBackgroundImageURLHTTPS)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + yym81 := z.EncBinary() + _ = yym81 + if false { + } else { + r.EncodeBool(bool(x.ProfileBackgroundTile)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, string("profile_background_tile")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + yym82 := z.EncBinary() + _ = yym82 + if false { + } else { + r.EncodeBool(bool(x.ProfileBackgroundTile)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + yym84 := z.EncBinary() + _ = yym84 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(x.ProfileImageURL)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, string("profile_image_url")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + yym85 := z.EncBinary() + _ = yym85 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(x.ProfileImageURL)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + yym87 := z.EncBinary() + _ = yym87 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(x.ProfileImageURLHTTPS)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, 
string("profile_image_url_https")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + yym88 := z.EncBinary() + _ = yym88 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(x.ProfileImageURLHTTPS)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + yym90 := z.EncBinary() + _ = yym90 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(x.ProfileLinkColor)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, string("profile_link_color")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + yym91 := z.EncBinary() + _ = yym91 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(x.ProfileLinkColor)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + yym93 := z.EncBinary() + _ = yym93 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(x.ProfileSidebarBorderColor)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, string("profile_sidebar_border_color")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + yym94 := z.EncBinary() + _ = yym94 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(x.ProfileSidebarBorderColor)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + yym96 := z.EncBinary() + _ = yym96 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(x.ProfileSidebarFillColor)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, string("profile_sidebar_fill_color")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + yym97 := z.EncBinary() + _ = yym97 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(x.ProfileSidebarFillColor)) + } + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayElem9225) + yym99 := z.EncBinary() + _ = yym99 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(x.ProfileTextColor)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, string("profile_text_color")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + yym100 := z.EncBinary() + _ = yym100 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(x.ProfileTextColor)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + yym102 := z.EncBinary() + _ = yym102 + if false { + } else { + r.EncodeBool(bool(x.ProfileUseBackgroundImage)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, string("profile_use_background_image")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + yym103 := z.EncBinary() + _ = yym103 + if false { + } else { + r.EncodeBool(bool(x.ProfileUseBackgroundImage)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + yym105 := z.EncBinary() + _ = yym105 + if false { + } else { + r.EncodeBool(bool(x.Protected)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, string("protected")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + yym106 := z.EncBinary() + _ = yym106 + if false { + } else { + r.EncodeBool(bool(x.Protected)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + yym108 := z.EncBinary() + _ = yym108 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(x.ScreenName)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, string("screen_name")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + yym109 := z.EncBinary() + _ = yym109 + 
if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(x.ScreenName)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + yym111 := z.EncBinary() + _ = yym111 + if false { + } else { + r.EncodeBool(bool(x.ShowAllInlineMedia)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, string("show_all_inline_media")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + yym112 := z.EncBinary() + _ = yym112 + if false { + } else { + r.EncodeBool(bool(x.ShowAllInlineMedia)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + yym114 := z.EncBinary() + _ = yym114 + if false { + } else { + r.EncodeInt(int64(x.StatusesCount)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, string("statuses_count")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + yym115 := z.EncBinary() + _ = yym115 + if false { + } else { + r.EncodeInt(int64(x.StatusesCount)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + yym117 := z.EncBinary() + _ = yym117 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(x.TimeZone)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, string("time_zone")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + yym118 := z.EncBinary() + _ = yym118 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(x.TimeZone)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + if x.URL == nil { + r.EncodeNil() + } else { + yy120 := *x.URL + yym121 := z.EncBinary() + _ = yym121 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(yy120)) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + 
r.EncodeString(codecSelferC_UTF89225, string("url")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + if x.URL == nil { + r.EncodeNil() + } else { + yy122 := *x.URL + yym123 := z.EncBinary() + _ = yym123 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(yy122)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + yym125 := z.EncBinary() + _ = yym125 + if false { + } else { + r.EncodeInt(int64(x.UtcOffset)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, string("utc_offset")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + yym126 := z.EncBinary() + _ = yym126 + if false { + } else { + r.EncodeInt(int64(x.UtcOffset)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + yym128 := z.EncBinary() + _ = yym128 + if false { + } else { + r.EncodeBool(bool(x.Verified)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, string("verified")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + yym129 := z.EncBinary() + _ = yym129 + if false { + } else { + r.EncodeBool(bool(x.Verified)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd9225) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd9225) + } + } + } +} + +func (x *User) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer9225 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap9225 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd9225) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray9225 { + yyl2 := r.ReadArrayStart() 
+ if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr9225) + } + } +} + +func (x *User) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer9225 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey9225) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3SlcHdr := codecSelferUnsafeString9225{uintptr(unsafe.Pointer(&yys3Slc[0])), len(yys3Slc)} + yys3 := *(*string)(unsafe.Pointer(&yys3SlcHdr)) + z.DecSendContainerState(codecSelfer_containerMapValue9225) + switch yys3 { + case "contributors_enabled": + if r.TryDecodeAsNil() { + x.ContributorsEnabled = false + } else { + yyv4 := &x.ContributorsEnabled + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*bool)(yyv4)) = r.DecodeBool() + } + } + case "created_at": + if r.TryDecodeAsNil() { + x.CreatedAt = "" + } else { + yyv6 := &x.CreatedAt + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "default_profile": + if r.TryDecodeAsNil() { + x.DefaultProfile = false + } else { + yyv8 := &x.DefaultProfile + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*bool)(yyv8)) = r.DecodeBool() + } + } + case "default_profile_image": + if r.TryDecodeAsNil() { + x.DefaultProfileImage = false + } else { + yyv10 := &x.DefaultProfileImage + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*bool)(yyv10)) = r.DecodeBool() + } + } + case "description": + if r.TryDecodeAsNil() { + x.Description = "" + } else { + yyv12 := &x.Description + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + 
*((*string)(yyv12)) = r.DecodeString() + } + } + case "entities": + if r.TryDecodeAsNil() { + x.Entities = UserEntities{} + } else { + yyv14 := &x.Entities + yyv14.CodecDecodeSelf(d) + } + case "favourites_count": + if r.TryDecodeAsNil() { + x.FavouritesCount = 0 + } else { + yyv15 := &x.FavouritesCount + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*int)(yyv15)) = int(r.DecodeInt(codecSelferBitsize9225)) + } + } + case "follow_request_sent": + if r.TryDecodeAsNil() { + if x.FollowRequestSent != nil { + x.FollowRequestSent = nil + } + } else { + if x.FollowRequestSent == nil { + x.FollowRequestSent = new(string) + } + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(x.FollowRequestSent)) = r.DecodeString() + } + } + case "followers_count": + if r.TryDecodeAsNil() { + x.FollowersCount = 0 + } else { + yyv19 := &x.FollowersCount + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*int)(yyv19)) = int(r.DecodeInt(codecSelferBitsize9225)) + } + } + case "following": + if r.TryDecodeAsNil() { + if x.Following != nil { + x.Following = nil + } + } else { + if x.Following == nil { + x.Following = new(string) + } + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(x.Following)) = r.DecodeString() + } + } + case "friends_count": + if r.TryDecodeAsNil() { + x.FriendsCount = 0 + } else { + yyv23 := &x.FriendsCount + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*int)(yyv23)) = int(r.DecodeInt(codecSelferBitsize9225)) + } + } + case "geo_enabled": + if r.TryDecodeAsNil() { + x.GeoEnabled = false + } else { + yyv25 := &x.GeoEnabled + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*bool)(yyv25)) = r.DecodeBool() + } + } + case "id": + if r.TryDecodeAsNil() { + x.ID = 0 + } else { + yyv27 := &x.ID + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*int)(yyv27)) = int(r.DecodeInt(codecSelferBitsize9225)) + } + } + case "id_str": + if r.TryDecodeAsNil() { 
+ x.IDStr = "" + } else { + yyv29 := &x.IDStr + yym30 := z.DecBinary() + _ = yym30 + if false { + } else { + *((*string)(yyv29)) = r.DecodeString() + } + } + case "is_translator": + if r.TryDecodeAsNil() { + x.IsTranslator = false + } else { + yyv31 := &x.IsTranslator + yym32 := z.DecBinary() + _ = yym32 + if false { + } else { + *((*bool)(yyv31)) = r.DecodeBool() + } + } + case "lang": + if r.TryDecodeAsNil() { + x.Lang = "" + } else { + yyv33 := &x.Lang + yym34 := z.DecBinary() + _ = yym34 + if false { + } else { + *((*string)(yyv33)) = r.DecodeString() + } + } + case "listed_count": + if r.TryDecodeAsNil() { + x.ListedCount = 0 + } else { + yyv35 := &x.ListedCount + yym36 := z.DecBinary() + _ = yym36 + if false { + } else { + *((*int)(yyv35)) = int(r.DecodeInt(codecSelferBitsize9225)) + } + } + case "location": + if r.TryDecodeAsNil() { + x.Location = "" + } else { + yyv37 := &x.Location + yym38 := z.DecBinary() + _ = yym38 + if false { + } else { + *((*string)(yyv37)) = r.DecodeString() + } + } + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv39 := &x.Name + yym40 := z.DecBinary() + _ = yym40 + if false { + } else { + *((*string)(yyv39)) = r.DecodeString() + } + } + case "notifications": + if r.TryDecodeAsNil() { + if x.Notifications != nil { + x.Notifications = nil + } + } else { + if x.Notifications == nil { + x.Notifications = new(string) + } + yym42 := z.DecBinary() + _ = yym42 + if false { + } else { + *((*string)(x.Notifications)) = r.DecodeString() + } + } + case "profile_background_color": + if r.TryDecodeAsNil() { + x.ProfileBackgroundColor = "" + } else { + yyv43 := &x.ProfileBackgroundColor + yym44 := z.DecBinary() + _ = yym44 + if false { + } else { + *((*string)(yyv43)) = r.DecodeString() + } + } + case "profile_background_image_url": + if r.TryDecodeAsNil() { + x.ProfileBackgroundImageURL = "" + } else { + yyv45 := &x.ProfileBackgroundImageURL + yym46 := z.DecBinary() + _ = yym46 + if false { + } else { + *((*string)(yyv45)) 
= r.DecodeString() + } + } + case "profile_background_image_url_https": + if r.TryDecodeAsNil() { + x.ProfileBackgroundImageURLHTTPS = "" + } else { + yyv47 := &x.ProfileBackgroundImageURLHTTPS + yym48 := z.DecBinary() + _ = yym48 + if false { + } else { + *((*string)(yyv47)) = r.DecodeString() + } + } + case "profile_background_tile": + if r.TryDecodeAsNil() { + x.ProfileBackgroundTile = false + } else { + yyv49 := &x.ProfileBackgroundTile + yym50 := z.DecBinary() + _ = yym50 + if false { + } else { + *((*bool)(yyv49)) = r.DecodeBool() + } + } + case "profile_image_url": + if r.TryDecodeAsNil() { + x.ProfileImageURL = "" + } else { + yyv51 := &x.ProfileImageURL + yym52 := z.DecBinary() + _ = yym52 + if false { + } else { + *((*string)(yyv51)) = r.DecodeString() + } + } + case "profile_image_url_https": + if r.TryDecodeAsNil() { + x.ProfileImageURLHTTPS = "" + } else { + yyv53 := &x.ProfileImageURLHTTPS + yym54 := z.DecBinary() + _ = yym54 + if false { + } else { + *((*string)(yyv53)) = r.DecodeString() + } + } + case "profile_link_color": + if r.TryDecodeAsNil() { + x.ProfileLinkColor = "" + } else { + yyv55 := &x.ProfileLinkColor + yym56 := z.DecBinary() + _ = yym56 + if false { + } else { + *((*string)(yyv55)) = r.DecodeString() + } + } + case "profile_sidebar_border_color": + if r.TryDecodeAsNil() { + x.ProfileSidebarBorderColor = "" + } else { + yyv57 := &x.ProfileSidebarBorderColor + yym58 := z.DecBinary() + _ = yym58 + if false { + } else { + *((*string)(yyv57)) = r.DecodeString() + } + } + case "profile_sidebar_fill_color": + if r.TryDecodeAsNil() { + x.ProfileSidebarFillColor = "" + } else { + yyv59 := &x.ProfileSidebarFillColor + yym60 := z.DecBinary() + _ = yym60 + if false { + } else { + *((*string)(yyv59)) = r.DecodeString() + } + } + case "profile_text_color": + if r.TryDecodeAsNil() { + x.ProfileTextColor = "" + } else { + yyv61 := &x.ProfileTextColor + yym62 := z.DecBinary() + _ = yym62 + if false { + } else { + *((*string)(yyv61)) = 
r.DecodeString() + } + } + case "profile_use_background_image": + if r.TryDecodeAsNil() { + x.ProfileUseBackgroundImage = false + } else { + yyv63 := &x.ProfileUseBackgroundImage + yym64 := z.DecBinary() + _ = yym64 + if false { + } else { + *((*bool)(yyv63)) = r.DecodeBool() + } + } + case "protected": + if r.TryDecodeAsNil() { + x.Protected = false + } else { + yyv65 := &x.Protected + yym66 := z.DecBinary() + _ = yym66 + if false { + } else { + *((*bool)(yyv65)) = r.DecodeBool() + } + } + case "screen_name": + if r.TryDecodeAsNil() { + x.ScreenName = "" + } else { + yyv67 := &x.ScreenName + yym68 := z.DecBinary() + _ = yym68 + if false { + } else { + *((*string)(yyv67)) = r.DecodeString() + } + } + case "show_all_inline_media": + if r.TryDecodeAsNil() { + x.ShowAllInlineMedia = false + } else { + yyv69 := &x.ShowAllInlineMedia + yym70 := z.DecBinary() + _ = yym70 + if false { + } else { + *((*bool)(yyv69)) = r.DecodeBool() + } + } + case "statuses_count": + if r.TryDecodeAsNil() { + x.StatusesCount = 0 + } else { + yyv71 := &x.StatusesCount + yym72 := z.DecBinary() + _ = yym72 + if false { + } else { + *((*int)(yyv71)) = int(r.DecodeInt(codecSelferBitsize9225)) + } + } + case "time_zone": + if r.TryDecodeAsNil() { + x.TimeZone = "" + } else { + yyv73 := &x.TimeZone + yym74 := z.DecBinary() + _ = yym74 + if false { + } else { + *((*string)(yyv73)) = r.DecodeString() + } + } + case "url": + if r.TryDecodeAsNil() { + if x.URL != nil { + x.URL = nil + } + } else { + if x.URL == nil { + x.URL = new(string) + } + yym76 := z.DecBinary() + _ = yym76 + if false { + } else { + *((*string)(x.URL)) = r.DecodeString() + } + } + case "utc_offset": + if r.TryDecodeAsNil() { + x.UtcOffset = 0 + } else { + yyv77 := &x.UtcOffset + yym78 := z.DecBinary() + _ = yym78 + if false { + } else { + *((*int)(yyv77)) = int(r.DecodeInt(codecSelferBitsize9225)) + } + } + case "verified": + if r.TryDecodeAsNil() { + x.Verified = false + } else { + yyv79 := &x.Verified + yym80 := z.DecBinary() 
+ _ = yym80 + if false { + } else { + *((*bool)(yyv79)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd9225) +} + +func (x *User) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer9225 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj81 int + var yyb81 bool + var yyhl81 bool = l >= 0 + yyj81++ + if yyhl81 { + yyb81 = yyj81 > l + } else { + yyb81 = r.CheckBreak() + } + if yyb81 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + x.ContributorsEnabled = false + } else { + yyv82 := &x.ContributorsEnabled + yym83 := z.DecBinary() + _ = yym83 + if false { + } else { + *((*bool)(yyv82)) = r.DecodeBool() + } + } + yyj81++ + if yyhl81 { + yyb81 = yyj81 > l + } else { + yyb81 = r.CheckBreak() + } + if yyb81 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + x.CreatedAt = "" + } else { + yyv84 := &x.CreatedAt + yym85 := z.DecBinary() + _ = yym85 + if false { + } else { + *((*string)(yyv84)) = r.DecodeString() + } + } + yyj81++ + if yyhl81 { + yyb81 = yyj81 > l + } else { + yyb81 = r.CheckBreak() + } + if yyb81 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + x.DefaultProfile = false + } else { + yyv86 := &x.DefaultProfile + yym87 := z.DecBinary() + _ = yym87 + if false { + } else { + *((*bool)(yyv86)) = r.DecodeBool() + } + } + yyj81++ + if yyhl81 { + yyb81 = yyj81 > l + } else { + yyb81 = r.CheckBreak() + } + if yyb81 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if 
r.TryDecodeAsNil() { + x.DefaultProfileImage = false + } else { + yyv88 := &x.DefaultProfileImage + yym89 := z.DecBinary() + _ = yym89 + if false { + } else { + *((*bool)(yyv88)) = r.DecodeBool() + } + } + yyj81++ + if yyhl81 { + yyb81 = yyj81 > l + } else { + yyb81 = r.CheckBreak() + } + if yyb81 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + x.Description = "" + } else { + yyv90 := &x.Description + yym91 := z.DecBinary() + _ = yym91 + if false { + } else { + *((*string)(yyv90)) = r.DecodeString() + } + } + yyj81++ + if yyhl81 { + yyb81 = yyj81 > l + } else { + yyb81 = r.CheckBreak() + } + if yyb81 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + x.Entities = UserEntities{} + } else { + yyv92 := &x.Entities + yyv92.CodecDecodeSelf(d) + } + yyj81++ + if yyhl81 { + yyb81 = yyj81 > l + } else { + yyb81 = r.CheckBreak() + } + if yyb81 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + x.FavouritesCount = 0 + } else { + yyv93 := &x.FavouritesCount + yym94 := z.DecBinary() + _ = yym94 + if false { + } else { + *((*int)(yyv93)) = int(r.DecodeInt(codecSelferBitsize9225)) + } + } + yyj81++ + if yyhl81 { + yyb81 = yyj81 > l + } else { + yyb81 = r.CheckBreak() + } + if yyb81 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + if x.FollowRequestSent != nil { + x.FollowRequestSent = nil + } + } else { + if x.FollowRequestSent == nil { + x.FollowRequestSent = new(string) + } + yym96 := z.DecBinary() + _ = yym96 + if false { + } else { + *((*string)(x.FollowRequestSent)) = r.DecodeString() + } + } + yyj81++ + if yyhl81 { + 
yyb81 = yyj81 > l + } else { + yyb81 = r.CheckBreak() + } + if yyb81 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + x.FollowersCount = 0 + } else { + yyv97 := &x.FollowersCount + yym98 := z.DecBinary() + _ = yym98 + if false { + } else { + *((*int)(yyv97)) = int(r.DecodeInt(codecSelferBitsize9225)) + } + } + yyj81++ + if yyhl81 { + yyb81 = yyj81 > l + } else { + yyb81 = r.CheckBreak() + } + if yyb81 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + if x.Following != nil { + x.Following = nil + } + } else { + if x.Following == nil { + x.Following = new(string) + } + yym100 := z.DecBinary() + _ = yym100 + if false { + } else { + *((*string)(x.Following)) = r.DecodeString() + } + } + yyj81++ + if yyhl81 { + yyb81 = yyj81 > l + } else { + yyb81 = r.CheckBreak() + } + if yyb81 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + x.FriendsCount = 0 + } else { + yyv101 := &x.FriendsCount + yym102 := z.DecBinary() + _ = yym102 + if false { + } else { + *((*int)(yyv101)) = int(r.DecodeInt(codecSelferBitsize9225)) + } + } + yyj81++ + if yyhl81 { + yyb81 = yyj81 > l + } else { + yyb81 = r.CheckBreak() + } + if yyb81 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + x.GeoEnabled = false + } else { + yyv103 := &x.GeoEnabled + yym104 := z.DecBinary() + _ = yym104 + if false { + } else { + *((*bool)(yyv103)) = r.DecodeBool() + } + } + yyj81++ + if yyhl81 { + yyb81 = yyj81 > l + } else { + yyb81 = r.CheckBreak() + } + if yyb81 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + x.ID = 0 + } else { + yyv105 := &x.ID + yym106 := z.DecBinary() + _ = yym106 + if false { + } else { + *((*int)(yyv105)) = int(r.DecodeInt(codecSelferBitsize9225)) + } + } + yyj81++ + if yyhl81 { + yyb81 = yyj81 > l + } else { + yyb81 = r.CheckBreak() + } + if yyb81 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + x.IDStr = "" + } else { + yyv107 := &x.IDStr + yym108 := z.DecBinary() + _ = yym108 + if false { + } else { + *((*string)(yyv107)) = r.DecodeString() + } + } + yyj81++ + if yyhl81 { + yyb81 = yyj81 > l + } else { + yyb81 = r.CheckBreak() + } + if yyb81 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + x.IsTranslator = false + } else { + yyv109 := &x.IsTranslator + yym110 := z.DecBinary() + _ = yym110 + if false { + } else { + *((*bool)(yyv109)) = r.DecodeBool() + } + } + yyj81++ + if yyhl81 { + yyb81 = yyj81 > l + } else { + yyb81 = r.CheckBreak() + } + if yyb81 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + x.Lang = "" + } else { + yyv111 := &x.Lang + yym112 := z.DecBinary() + _ = yym112 + if false { + } else { + *((*string)(yyv111)) = r.DecodeString() + } + } + yyj81++ + if yyhl81 { + yyb81 = yyj81 > l + } else { + yyb81 = r.CheckBreak() + } + if yyb81 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + x.ListedCount = 0 + } else { + yyv113 := &x.ListedCount + yym114 := z.DecBinary() + _ = yym114 + if false { + } else { + *((*int)(yyv113)) = int(r.DecodeInt(codecSelferBitsize9225)) + } + } + yyj81++ + if 
yyhl81 { + yyb81 = yyj81 > l + } else { + yyb81 = r.CheckBreak() + } + if yyb81 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + x.Location = "" + } else { + yyv115 := &x.Location + yym116 := z.DecBinary() + _ = yym116 + if false { + } else { + *((*string)(yyv115)) = r.DecodeString() + } + } + yyj81++ + if yyhl81 { + yyb81 = yyj81 > l + } else { + yyb81 = r.CheckBreak() + } + if yyb81 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv117 := &x.Name + yym118 := z.DecBinary() + _ = yym118 + if false { + } else { + *((*string)(yyv117)) = r.DecodeString() + } + } + yyj81++ + if yyhl81 { + yyb81 = yyj81 > l + } else { + yyb81 = r.CheckBreak() + } + if yyb81 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + if x.Notifications != nil { + x.Notifications = nil + } + } else { + if x.Notifications == nil { + x.Notifications = new(string) + } + yym120 := z.DecBinary() + _ = yym120 + if false { + } else { + *((*string)(x.Notifications)) = r.DecodeString() + } + } + yyj81++ + if yyhl81 { + yyb81 = yyj81 > l + } else { + yyb81 = r.CheckBreak() + } + if yyb81 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + x.ProfileBackgroundColor = "" + } else { + yyv121 := &x.ProfileBackgroundColor + yym122 := z.DecBinary() + _ = yym122 + if false { + } else { + *((*string)(yyv121)) = r.DecodeString() + } + } + yyj81++ + if yyhl81 { + yyb81 = yyj81 > l + } else { + yyb81 = r.CheckBreak() + } + if yyb81 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + x.ProfileBackgroundImageURL = "" + } else { + yyv123 := &x.ProfileBackgroundImageURL + yym124 := z.DecBinary() + _ = yym124 + if false { + } else { + *((*string)(yyv123)) = r.DecodeString() + } + } + yyj81++ + if yyhl81 { + yyb81 = yyj81 > l + } else { + yyb81 = r.CheckBreak() + } + if yyb81 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + x.ProfileBackgroundImageURLHTTPS = "" + } else { + yyv125 := &x.ProfileBackgroundImageURLHTTPS + yym126 := z.DecBinary() + _ = yym126 + if false { + } else { + *((*string)(yyv125)) = r.DecodeString() + } + } + yyj81++ + if yyhl81 { + yyb81 = yyj81 > l + } else { + yyb81 = r.CheckBreak() + } + if yyb81 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + x.ProfileBackgroundTile = false + } else { + yyv127 := &x.ProfileBackgroundTile + yym128 := z.DecBinary() + _ = yym128 + if false { + } else { + *((*bool)(yyv127)) = r.DecodeBool() + } + } + yyj81++ + if yyhl81 { + yyb81 = yyj81 > l + } else { + yyb81 = r.CheckBreak() + } + if yyb81 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + x.ProfileImageURL = "" + } else { + yyv129 := &x.ProfileImageURL + yym130 := z.DecBinary() + _ = yym130 + if false { + } else { + *((*string)(yyv129)) = r.DecodeString() + } + } + yyj81++ + if yyhl81 { + yyb81 = yyj81 > l + } else { + yyb81 = r.CheckBreak() + } + if yyb81 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + x.ProfileImageURLHTTPS = "" + } else { + yyv131 := &x.ProfileImageURLHTTPS + yym132 := 
z.DecBinary() + _ = yym132 + if false { + } else { + *((*string)(yyv131)) = r.DecodeString() + } + } + yyj81++ + if yyhl81 { + yyb81 = yyj81 > l + } else { + yyb81 = r.CheckBreak() + } + if yyb81 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + x.ProfileLinkColor = "" + } else { + yyv133 := &x.ProfileLinkColor + yym134 := z.DecBinary() + _ = yym134 + if false { + } else { + *((*string)(yyv133)) = r.DecodeString() + } + } + yyj81++ + if yyhl81 { + yyb81 = yyj81 > l + } else { + yyb81 = r.CheckBreak() + } + if yyb81 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + x.ProfileSidebarBorderColor = "" + } else { + yyv135 := &x.ProfileSidebarBorderColor + yym136 := z.DecBinary() + _ = yym136 + if false { + } else { + *((*string)(yyv135)) = r.DecodeString() + } + } + yyj81++ + if yyhl81 { + yyb81 = yyj81 > l + } else { + yyb81 = r.CheckBreak() + } + if yyb81 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + x.ProfileSidebarFillColor = "" + } else { + yyv137 := &x.ProfileSidebarFillColor + yym138 := z.DecBinary() + _ = yym138 + if false { + } else { + *((*string)(yyv137)) = r.DecodeString() + } + } + yyj81++ + if yyhl81 { + yyb81 = yyj81 > l + } else { + yyb81 = r.CheckBreak() + } + if yyb81 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + x.ProfileTextColor = "" + } else { + yyv139 := &x.ProfileTextColor + yym140 := z.DecBinary() + _ = yym140 + if false { + } else { + *((*string)(yyv139)) = r.DecodeString() + } + } + yyj81++ + if yyhl81 { + yyb81 = yyj81 > l + } else { + yyb81 = r.CheckBreak() + } + if yyb81 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + x.ProfileUseBackgroundImage = false + } else { + yyv141 := &x.ProfileUseBackgroundImage + yym142 := z.DecBinary() + _ = yym142 + if false { + } else { + *((*bool)(yyv141)) = r.DecodeBool() + } + } + yyj81++ + if yyhl81 { + yyb81 = yyj81 > l + } else { + yyb81 = r.CheckBreak() + } + if yyb81 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + x.Protected = false + } else { + yyv143 := &x.Protected + yym144 := z.DecBinary() + _ = yym144 + if false { + } else { + *((*bool)(yyv143)) = r.DecodeBool() + } + } + yyj81++ + if yyhl81 { + yyb81 = yyj81 > l + } else { + yyb81 = r.CheckBreak() + } + if yyb81 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + x.ScreenName = "" + } else { + yyv145 := &x.ScreenName + yym146 := z.DecBinary() + _ = yym146 + if false { + } else { + *((*string)(yyv145)) = r.DecodeString() + } + } + yyj81++ + if yyhl81 { + yyb81 = yyj81 > l + } else { + yyb81 = r.CheckBreak() + } + if yyb81 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + x.ShowAllInlineMedia = false + } else { + yyv147 := &x.ShowAllInlineMedia + yym148 := z.DecBinary() + _ = yym148 + if false { + } else { + *((*bool)(yyv147)) = r.DecodeBool() + } + } + yyj81++ + if yyhl81 { + yyb81 = yyj81 > l + } else { + yyb81 = r.CheckBreak() + } + if yyb81 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + x.StatusesCount = 0 + } else { + yyv149 := &x.StatusesCount + yym150 := 
z.DecBinary() + _ = yym150 + if false { + } else { + *((*int)(yyv149)) = int(r.DecodeInt(codecSelferBitsize9225)) + } + } + yyj81++ + if yyhl81 { + yyb81 = yyj81 > l + } else { + yyb81 = r.CheckBreak() + } + if yyb81 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + x.TimeZone = "" + } else { + yyv151 := &x.TimeZone + yym152 := z.DecBinary() + _ = yym152 + if false { + } else { + *((*string)(yyv151)) = r.DecodeString() + } + } + yyj81++ + if yyhl81 { + yyb81 = yyj81 > l + } else { + yyb81 = r.CheckBreak() + } + if yyb81 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + if x.URL != nil { + x.URL = nil + } + } else { + if x.URL == nil { + x.URL = new(string) + } + yym154 := z.DecBinary() + _ = yym154 + if false { + } else { + *((*string)(x.URL)) = r.DecodeString() + } + } + yyj81++ + if yyhl81 { + yyb81 = yyj81 > l + } else { + yyb81 = r.CheckBreak() + } + if yyb81 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + x.UtcOffset = 0 + } else { + yyv155 := &x.UtcOffset + yym156 := z.DecBinary() + _ = yym156 + if false { + } else { + *((*int)(yyv155)) = int(r.DecodeInt(codecSelferBitsize9225)) + } + } + yyj81++ + if yyhl81 { + yyb81 = yyj81 > l + } else { + yyb81 = r.CheckBreak() + } + if yyb81 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + x.Verified = false + } else { + yyv157 := &x.Verified + yym158 := z.DecBinary() + _ = yym158 + if false { + } else { + *((*bool)(yyv157)) = r.DecodeBool() + } + } + for { + yyj81++ + if yyhl81 { + yyb81 = yyj81 > l + } else { + yyb81 = r.CheckBreak() + } + if yyb81 { + break + 
} + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + z.DecStructFieldNotFound(yyj81-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) +} + +func (x *StatusMetadata) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer9225 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(x.IsoLanguageCode)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, string("iso_language_code")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(x.IsoLanguageCode)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(x.ResultType)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, string("result_type")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(x.ResultType)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd9225) + } else { + 
z.EncSendContainerState(codecSelfer_containerMapEnd9225) + } + } + } +} + +func (x *StatusMetadata) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer9225 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap9225 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd9225) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray9225 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr9225) + } + } +} + +func (x *StatusMetadata) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer9225 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey9225) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3SlcHdr := codecSelferUnsafeString9225{uintptr(unsafe.Pointer(&yys3Slc[0])), len(yys3Slc)} + yys3 := *(*string)(unsafe.Pointer(&yys3SlcHdr)) + z.DecSendContainerState(codecSelfer_containerMapValue9225) + switch yys3 { + case "iso_language_code": + if r.TryDecodeAsNil() { + x.IsoLanguageCode = "" + } else { + yyv4 := &x.IsoLanguageCode + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "result_type": + if r.TryDecodeAsNil() { + x.ResultType = "" + } else { + yyv6 := &x.ResultType + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() 
+ } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd9225) +} + +func (x *StatusMetadata) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer9225 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + x.IsoLanguageCode = "" + } else { + yyv9 := &x.IsoLanguageCode + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + x.ResultType = "" + } else { + yyv11 := &x.ResultType + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) +} + +func (x *Status) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer9225 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [21]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + 
r.EncodeArrayStart(21) + } else { + yynn2 = 21 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + if x.Contributors == nil { + r.EncodeNil() + } else { + yy4 := *x.Contributors + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(yy4)) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, string("contributors")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + if x.Contributors == nil { + r.EncodeNil() + } else { + yy6 := *x.Contributors + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(yy6)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + if x.Coordinates == nil { + r.EncodeNil() + } else { + yy9 := *x.Coordinates + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(yy9)) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, string("coordinates")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + if x.Coordinates == nil { + r.EncodeNil() + } else { + yy11 := *x.Coordinates + yym12 := z.EncBinary() + _ = yym12 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(yy11)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(x.CreatedAt)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, string("created_at")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + 
r.EncodeString(codecSelferC_UTF89225, string(x.CreatedAt)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + yy17 := &x.Entities + yy17.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, string("entities")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + yy19 := &x.Entities + yy19.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + r.EncodeBool(bool(x.Favorited)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, string("favorited")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeBool(bool(x.Favorited)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + if x.Geo == nil { + r.EncodeNil() + } else { + yy25 := *x.Geo + yym26 := z.EncBinary() + _ = yym26 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(yy25)) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, string("geo")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + if x.Geo == nil { + r.EncodeNil() + } else { + yy27 := *x.Geo + yym28 := z.EncBinary() + _ = yym28 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(yy27)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + yym30 := z.EncBinary() + _ = yym30 + if false { + } else { + r.EncodeInt(int64(x.ID)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, string("id")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + yym31 := z.EncBinary() + _ = yym31 + if false { + } else { + 
r.EncodeInt(int64(x.ID)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + yym33 := z.EncBinary() + _ = yym33 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(x.IDStr)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, string("id_str")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + yym34 := z.EncBinary() + _ = yym34 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(x.IDStr)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + if x.InReplyToScreenName == nil { + r.EncodeNil() + } else { + yy36 := *x.InReplyToScreenName + yym37 := z.EncBinary() + _ = yym37 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(yy36)) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, string("in_reply_to_screen_name")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + if x.InReplyToScreenName == nil { + r.EncodeNil() + } else { + yy38 := *x.InReplyToScreenName + yym39 := z.EncBinary() + _ = yym39 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(yy38)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + if x.InReplyToStatusID == nil { + r.EncodeNil() + } else { + yy41 := *x.InReplyToStatusID + yym42 := z.EncBinary() + _ = yym42 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(yy41)) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, string("in_reply_to_status_id")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + if x.InReplyToStatusID == nil { + r.EncodeNil() + } else { + yy43 := *x.InReplyToStatusID + yym44 := z.EncBinary() + _ = yym44 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(yy43)) 
+ } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + if x.InReplyToStatusIDStr == nil { + r.EncodeNil() + } else { + yy46 := *x.InReplyToStatusIDStr + yym47 := z.EncBinary() + _ = yym47 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(yy46)) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, string("in_reply_to_status_id_str")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + if x.InReplyToStatusIDStr == nil { + r.EncodeNil() + } else { + yy48 := *x.InReplyToStatusIDStr + yym49 := z.EncBinary() + _ = yym49 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(yy48)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + if x.InReplyToUserID == nil { + r.EncodeNil() + } else { + yy51 := *x.InReplyToUserID + yym52 := z.EncBinary() + _ = yym52 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(yy51)) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, string("in_reply_to_user_id")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + if x.InReplyToUserID == nil { + r.EncodeNil() + } else { + yy53 := *x.InReplyToUserID + yym54 := z.EncBinary() + _ = yym54 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(yy53)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + if x.InReplyToUserIDStr == nil { + r.EncodeNil() + } else { + yy56 := *x.InReplyToUserIDStr + yym57 := z.EncBinary() + _ = yym57 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(yy56)) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, string("in_reply_to_user_id_str")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + if x.InReplyToUserIDStr 
== nil { + r.EncodeNil() + } else { + yy58 := *x.InReplyToUserIDStr + yym59 := z.EncBinary() + _ = yym59 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(yy58)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + yy61 := &x.Metadata + yy61.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + yy63 := &x.Metadata + yy63.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + if x.Place == nil { + r.EncodeNil() + } else { + yy66 := *x.Place + yym67 := z.EncBinary() + _ = yym67 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(yy66)) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, string("place")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + if x.Place == nil { + r.EncodeNil() + } else { + yy68 := *x.Place + yym69 := z.EncBinary() + _ = yym69 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(yy68)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + yym71 := z.EncBinary() + _ = yym71 + if false { + } else { + r.EncodeInt(int64(x.RetweetCount)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, string("retweet_count")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + yym72 := z.EncBinary() + _ = yym72 + if false { + } else { + r.EncodeInt(int64(x.RetweetCount)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + yym74 := z.EncBinary() + _ = yym74 + if false { + } else { + r.EncodeBool(bool(x.Retweeted)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + 
r.EncodeString(codecSelferC_UTF89225, string("retweeted")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + yym75 := z.EncBinary() + _ = yym75 + if false { + } else { + r.EncodeBool(bool(x.Retweeted)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + yym77 := z.EncBinary() + _ = yym77 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(x.Source)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, string("source")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + yym78 := z.EncBinary() + _ = yym78 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(x.Source)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + yym80 := z.EncBinary() + _ = yym80 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(x.Text)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, string("text")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + yym81 := z.EncBinary() + _ = yym81 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(x.Text)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + yym83 := z.EncBinary() + _ = yym83 + if false { + } else { + r.EncodeBool(bool(x.Truncated)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, string("truncated")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + yym84 := z.EncBinary() + _ = yym84 + if false { + } else { + r.EncodeBool(bool(x.Truncated)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + yy86 := &x.User + yy86.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, string("user")) 
+ z.EncSendContainerState(codecSelfer_containerMapValue9225) + yy88 := &x.User + yy88.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd9225) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd9225) + } + } + } +} + +func (x *Status) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer9225 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap9225 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd9225) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray9225 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr9225) + } + } +} + +func (x *Status) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer9225 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey9225) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3SlcHdr := codecSelferUnsafeString9225{uintptr(unsafe.Pointer(&yys3Slc[0])), len(yys3Slc)} + yys3 := *(*string)(unsafe.Pointer(&yys3SlcHdr)) + z.DecSendContainerState(codecSelfer_containerMapValue9225) + switch yys3 { + case "contributors": + if r.TryDecodeAsNil() { + if x.Contributors != nil { + x.Contributors = nil + } + } else { + if x.Contributors == nil { + x.Contributors = new(string) + } + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { 
+ *((*string)(x.Contributors)) = r.DecodeString() + } + } + case "coordinates": + if r.TryDecodeAsNil() { + if x.Coordinates != nil { + x.Coordinates = nil + } + } else { + if x.Coordinates == nil { + x.Coordinates = new(string) + } + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(x.Coordinates)) = r.DecodeString() + } + } + case "created_at": + if r.TryDecodeAsNil() { + x.CreatedAt = "" + } else { + yyv8 := &x.CreatedAt + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "entities": + if r.TryDecodeAsNil() { + x.Entities = Entities{} + } else { + yyv10 := &x.Entities + yyv10.CodecDecodeSelf(d) + } + case "favorited": + if r.TryDecodeAsNil() { + x.Favorited = false + } else { + yyv11 := &x.Favorited + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*bool)(yyv11)) = r.DecodeBool() + } + } + case "geo": + if r.TryDecodeAsNil() { + if x.Geo != nil { + x.Geo = nil + } + } else { + if x.Geo == nil { + x.Geo = new(string) + } + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(x.Geo)) = r.DecodeString() + } + } + case "id": + if r.TryDecodeAsNil() { + x.ID = 0 + } else { + yyv15 := &x.ID + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*int64)(yyv15)) = int64(r.DecodeInt(64)) + } + } + case "id_str": + if r.TryDecodeAsNil() { + x.IDStr = "" + } else { + yyv17 := &x.IDStr + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = r.DecodeString() + } + } + case "in_reply_to_screen_name": + if r.TryDecodeAsNil() { + if x.InReplyToScreenName != nil { + x.InReplyToScreenName = nil + } + } else { + if x.InReplyToScreenName == nil { + x.InReplyToScreenName = new(string) + } + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*string)(x.InReplyToScreenName)) = r.DecodeString() + } + } + case "in_reply_to_status_id": + if r.TryDecodeAsNil() { + if x.InReplyToStatusID != nil { + x.InReplyToStatusID = 
nil + } + } else { + if x.InReplyToStatusID == nil { + x.InReplyToStatusID = new(string) + } + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(x.InReplyToStatusID)) = r.DecodeString() + } + } + case "in_reply_to_status_id_str": + if r.TryDecodeAsNil() { + if x.InReplyToStatusIDStr != nil { + x.InReplyToStatusIDStr = nil + } + } else { + if x.InReplyToStatusIDStr == nil { + x.InReplyToStatusIDStr = new(string) + } + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*string)(x.InReplyToStatusIDStr)) = r.DecodeString() + } + } + case "in_reply_to_user_id": + if r.TryDecodeAsNil() { + if x.InReplyToUserID != nil { + x.InReplyToUserID = nil + } + } else { + if x.InReplyToUserID == nil { + x.InReplyToUserID = new(string) + } + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*string)(x.InReplyToUserID)) = r.DecodeString() + } + } + case "in_reply_to_user_id_str": + if r.TryDecodeAsNil() { + if x.InReplyToUserIDStr != nil { + x.InReplyToUserIDStr = nil + } + } else { + if x.InReplyToUserIDStr == nil { + x.InReplyToUserIDStr = new(string) + } + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*string)(x.InReplyToUserIDStr)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.Metadata = StatusMetadata{} + } else { + yyv29 := &x.Metadata + yyv29.CodecDecodeSelf(d) + } + case "place": + if r.TryDecodeAsNil() { + if x.Place != nil { + x.Place = nil + } + } else { + if x.Place == nil { + x.Place = new(string) + } + yym31 := z.DecBinary() + _ = yym31 + if false { + } else { + *((*string)(x.Place)) = r.DecodeString() + } + } + case "retweet_count": + if r.TryDecodeAsNil() { + x.RetweetCount = 0 + } else { + yyv32 := &x.RetweetCount + yym33 := z.DecBinary() + _ = yym33 + if false { + } else { + *((*int)(yyv32)) = int(r.DecodeInt(codecSelferBitsize9225)) + } + } + case "retweeted": + if r.TryDecodeAsNil() { + x.Retweeted = false + } else { + yyv34 := &x.Retweeted + yym35 := z.DecBinary() + 
_ = yym35 + if false { + } else { + *((*bool)(yyv34)) = r.DecodeBool() + } + } + case "source": + if r.TryDecodeAsNil() { + x.Source = "" + } else { + yyv36 := &x.Source + yym37 := z.DecBinary() + _ = yym37 + if false { + } else { + *((*string)(yyv36)) = r.DecodeString() + } + } + case "text": + if r.TryDecodeAsNil() { + x.Text = "" + } else { + yyv38 := &x.Text + yym39 := z.DecBinary() + _ = yym39 + if false { + } else { + *((*string)(yyv38)) = r.DecodeString() + } + } + case "truncated": + if r.TryDecodeAsNil() { + x.Truncated = false + } else { + yyv40 := &x.Truncated + yym41 := z.DecBinary() + _ = yym41 + if false { + } else { + *((*bool)(yyv40)) = r.DecodeBool() + } + } + case "user": + if r.TryDecodeAsNil() { + x.User = User{} + } else { + yyv42 := &x.User + yyv42.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd9225) +} + +func (x *Status) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer9225 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj43 int + var yyb43 bool + var yyhl43 bool = l >= 0 + yyj43++ + if yyhl43 { + yyb43 = yyj43 > l + } else { + yyb43 = r.CheckBreak() + } + if yyb43 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + if x.Contributors != nil { + x.Contributors = nil + } + } else { + if x.Contributors == nil { + x.Contributors = new(string) + } + yym45 := z.DecBinary() + _ = yym45 + if false { + } else { + *((*string)(x.Contributors)) = r.DecodeString() + } + } + yyj43++ + if yyhl43 { + yyb43 = yyj43 > l + } else { + yyb43 = r.CheckBreak() + } + if yyb43 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + if x.Coordinates != nil { + x.Coordinates = nil + } 
+ } else { + if x.Coordinates == nil { + x.Coordinates = new(string) + } + yym47 := z.DecBinary() + _ = yym47 + if false { + } else { + *((*string)(x.Coordinates)) = r.DecodeString() + } + } + yyj43++ + if yyhl43 { + yyb43 = yyj43 > l + } else { + yyb43 = r.CheckBreak() + } + if yyb43 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + x.CreatedAt = "" + } else { + yyv48 := &x.CreatedAt + yym49 := z.DecBinary() + _ = yym49 + if false { + } else { + *((*string)(yyv48)) = r.DecodeString() + } + } + yyj43++ + if yyhl43 { + yyb43 = yyj43 > l + } else { + yyb43 = r.CheckBreak() + } + if yyb43 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + x.Entities = Entities{} + } else { + yyv50 := &x.Entities + yyv50.CodecDecodeSelf(d) + } + yyj43++ + if yyhl43 { + yyb43 = yyj43 > l + } else { + yyb43 = r.CheckBreak() + } + if yyb43 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + x.Favorited = false + } else { + yyv51 := &x.Favorited + yym52 := z.DecBinary() + _ = yym52 + if false { + } else { + *((*bool)(yyv51)) = r.DecodeBool() + } + } + yyj43++ + if yyhl43 { + yyb43 = yyj43 > l + } else { + yyb43 = r.CheckBreak() + } + if yyb43 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + if x.Geo != nil { + x.Geo = nil + } + } else { + if x.Geo == nil { + x.Geo = new(string) + } + yym54 := z.DecBinary() + _ = yym54 + if false { + } else { + *((*string)(x.Geo)) = r.DecodeString() + } + } + yyj43++ + if yyhl43 { + yyb43 = yyj43 > l + } else { + yyb43 = r.CheckBreak() + } + if yyb43 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + x.ID = 0 + } else { + yyv55 := &x.ID + yym56 := z.DecBinary() + _ = yym56 + if false { + } else { + *((*int64)(yyv55)) = int64(r.DecodeInt(64)) + } + } + yyj43++ + if yyhl43 { + yyb43 = yyj43 > l + } else { + yyb43 = r.CheckBreak() + } + if yyb43 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + x.IDStr = "" + } else { + yyv57 := &x.IDStr + yym58 := z.DecBinary() + _ = yym58 + if false { + } else { + *((*string)(yyv57)) = r.DecodeString() + } + } + yyj43++ + if yyhl43 { + yyb43 = yyj43 > l + } else { + yyb43 = r.CheckBreak() + } + if yyb43 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + if x.InReplyToScreenName != nil { + x.InReplyToScreenName = nil + } + } else { + if x.InReplyToScreenName == nil { + x.InReplyToScreenName = new(string) + } + yym60 := z.DecBinary() + _ = yym60 + if false { + } else { + *((*string)(x.InReplyToScreenName)) = r.DecodeString() + } + } + yyj43++ + if yyhl43 { + yyb43 = yyj43 > l + } else { + yyb43 = r.CheckBreak() + } + if yyb43 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + if x.InReplyToStatusID != nil { + x.InReplyToStatusID = nil + } + } else { + if x.InReplyToStatusID == nil { + x.InReplyToStatusID = new(string) + } + yym62 := z.DecBinary() + _ = yym62 + if false { + } else { + *((*string)(x.InReplyToStatusID)) = r.DecodeString() + } + } + yyj43++ + if yyhl43 { + yyb43 = yyj43 > l + } else { + yyb43 = r.CheckBreak() + } + if yyb43 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + if x.InReplyToStatusIDStr != nil { + x.InReplyToStatusIDStr = nil + } + } else { + if x.InReplyToStatusIDStr == nil { + x.InReplyToStatusIDStr = new(string) + } + yym64 := z.DecBinary() + _ = yym64 + if false { + } else { + *((*string)(x.InReplyToStatusIDStr)) = r.DecodeString() + } + } + yyj43++ + if yyhl43 { + yyb43 = yyj43 > l + } else { + yyb43 = r.CheckBreak() + } + if yyb43 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + if x.InReplyToUserID != nil { + x.InReplyToUserID = nil + } + } else { + if x.InReplyToUserID == nil { + x.InReplyToUserID = new(string) + } + yym66 := z.DecBinary() + _ = yym66 + if false { + } else { + *((*string)(x.InReplyToUserID)) = r.DecodeString() + } + } + yyj43++ + if yyhl43 { + yyb43 = yyj43 > l + } else { + yyb43 = r.CheckBreak() + } + if yyb43 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + if x.InReplyToUserIDStr != nil { + x.InReplyToUserIDStr = nil + } + } else { + if x.InReplyToUserIDStr == nil { + x.InReplyToUserIDStr = new(string) + } + yym68 := z.DecBinary() + _ = yym68 + if false { + } else { + *((*string)(x.InReplyToUserIDStr)) = r.DecodeString() + } + } + yyj43++ + if yyhl43 { + yyb43 = yyj43 > l + } else { + yyb43 = r.CheckBreak() + } + if yyb43 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + x.Metadata = StatusMetadata{} + } else { + yyv69 := &x.Metadata + yyv69.CodecDecodeSelf(d) + } + yyj43++ + if yyhl43 { + yyb43 = yyj43 > l + } else { + yyb43 = r.CheckBreak() + } + if yyb43 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + if x.Place != nil { + x.Place = nil + } + } else { + if x.Place == nil { + x.Place = new(string) + } + yym71 := z.DecBinary() + _ = yym71 + if false { + } else { + *((*string)(x.Place)) = r.DecodeString() + } + } + yyj43++ + if yyhl43 { + yyb43 = yyj43 > l + } else { + yyb43 = r.CheckBreak() + } + if yyb43 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + x.RetweetCount = 0 + } else { + yyv72 := &x.RetweetCount + yym73 := z.DecBinary() + _ = yym73 + if false { + } else { + *((*int)(yyv72)) = int(r.DecodeInt(codecSelferBitsize9225)) + } + } + yyj43++ + if yyhl43 { + yyb43 = yyj43 > l + } else { + yyb43 = r.CheckBreak() + } + if yyb43 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + x.Retweeted = false + } else { + yyv74 := &x.Retweeted + yym75 := z.DecBinary() + _ = yym75 + if false { + } else { + *((*bool)(yyv74)) = r.DecodeBool() + } + } + yyj43++ + if yyhl43 { + yyb43 = yyj43 > l + } else { + yyb43 = r.CheckBreak() + } + if yyb43 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + x.Source = "" + } else { + yyv76 := &x.Source + yym77 := z.DecBinary() + _ = yym77 + if false { + } else { + *((*string)(yyv76)) = r.DecodeString() + } + } + yyj43++ + if yyhl43 { + yyb43 = yyj43 > l + } else { + yyb43 = r.CheckBreak() + } + if yyb43 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + x.Text = "" + } else { + yyv78 := &x.Text + yym79 := z.DecBinary() + _ = yym79 + if false { + } else { + *((*string)(yyv78)) = r.DecodeString() + } + 
} + yyj43++ + if yyhl43 { + yyb43 = yyj43 > l + } else { + yyb43 = r.CheckBreak() + } + if yyb43 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + x.Truncated = false + } else { + yyv80 := &x.Truncated + yym81 := z.DecBinary() + _ = yym81 + if false { + } else { + *((*bool)(yyv80)) = r.DecodeBool() + } + } + yyj43++ + if yyhl43 { + yyb43 = yyj43 > l + } else { + yyb43 = r.CheckBreak() + } + if yyb43 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + x.User = User{} + } else { + yyv82 := &x.User + yyv82.CodecDecodeSelf(d) + } + for { + yyj43++ + if yyhl43 { + yyb43 = yyj43 > l + } else { + yyb43 = r.CheckBreak() + } + if yyb43 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + z.DecStructFieldNotFound(yyj43-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) +} + +func (x *LargeStruct) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer9225 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + yy4 := &x.SearchMetadata + yy4.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, string("search_metadata")) + 
z.EncSendContainerState(codecSelfer_containerMapValue9225) + yy6 := &x.SearchMetadata + yy6.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + if x.Statuses == nil { + r.EncodeNil() + } else { + yym9 := z.EncBinary() + _ = yym9 + if false { + } else { + h.encSliceStatus(([]Status)(x.Statuses), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, string("statuses")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + if x.Statuses == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + h.encSliceStatus(([]Status)(x.Statuses), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd9225) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd9225) + } + } + } +} + +func (x *LargeStruct) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer9225 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap9225 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd9225) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray9225 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr9225) + } + } +} + +func (x *LargeStruct) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer9225 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { 
+ break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey9225) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3SlcHdr := codecSelferUnsafeString9225{uintptr(unsafe.Pointer(&yys3Slc[0])), len(yys3Slc)} + yys3 := *(*string)(unsafe.Pointer(&yys3SlcHdr)) + z.DecSendContainerState(codecSelfer_containerMapValue9225) + switch yys3 { + case "search_metadata": + if r.TryDecodeAsNil() { + x.SearchMetadata = SearchMetadata{} + } else { + yyv4 := &x.SearchMetadata + yyv4.CodecDecodeSelf(d) + } + case "statuses": + if r.TryDecodeAsNil() { + x.Statuses = nil + } else { + yyv5 := &x.Statuses + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + h.decSliceStatus((*[]Status)(yyv5), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd9225) +} + +func (x *LargeStruct) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer9225 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + x.SearchMetadata = SearchMetadata{} + } else { + yyv8 := &x.SearchMetadata + yyv8.CodecDecodeSelf(d) + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + x.Statuses = nil + } else { + yyv9 := &x.Statuses + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + h.decSliceStatus((*[]Status)(yyv9), d) + } + } + for { + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if 
yyb7 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + z.DecStructFieldNotFound(yyj7-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) +} + +func (x *XLStruct) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer9225 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + if x.Data == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSliceLargeStruct(([]LargeStruct)(x.Data), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey9225) + r.EncodeString(codecSelferC_UTF89225, string("Data")) + z.EncSendContainerState(codecSelfer_containerMapValue9225) + if x.Data == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSliceLargeStruct(([]LargeStruct)(x.Data), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd9225) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd9225) + } + } + } +} + +func (x *XLStruct) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer9225 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap9225 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + 
z.DecSendContainerState(codecSelfer_containerMapEnd9225) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray9225 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr9225) + } + } +} + +func (x *XLStruct) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer9225 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey9225) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3SlcHdr := codecSelferUnsafeString9225{uintptr(unsafe.Pointer(&yys3Slc[0])), len(yys3Slc)} + yys3 := *(*string)(unsafe.Pointer(&yys3SlcHdr)) + z.DecSendContainerState(codecSelfer_containerMapValue9225) + switch yys3 { + case "Data": + if r.TryDecodeAsNil() { + x.Data = nil + } else { + yyv4 := &x.Data + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSliceLargeStruct((*[]LargeStruct)(yyv4), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd9225) +} + +func (x *XLStruct) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer9225 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + if r.TryDecodeAsNil() { + x.Data = nil + } else { + yyv7 := &x.Data + yym8 := 
z.DecBinary() + _ = yym8 + if false { + } else { + h.decSliceLargeStruct((*[]LargeStruct)(yyv7), d) + } + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem9225) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd9225) +} + +func (x codecSelfer9225) encSliceHashtag(v []Hashtag, e *codec1978.Encoder) { + var h codecSelfer9225 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd9225) +} + +func (x codecSelfer9225) decSliceHashtag(v *[]Hashtag, d *codec1978.Decoder) { + var h codecSelfer9225 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []Hashtag{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 40) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]Hashtag, yyrl1) + } + } else { + yyv1 = make([]Hashtag, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Hashtag{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, Hashtag{}) + 
yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Hashtag{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, Hashtag{}) // var yyz1 Hashtag + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = Hashtag{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []Hashtag{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer9225) encSlicePtrtostring(v []*string, e *codec1978.Encoder) { + var h codecSelfer9225 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + if yyv1 == nil { + r.EncodeNil() + } else { + yy2 := *yyv1 + yym3 := z.EncBinary() + _ = yym3 + if false { + } else { + r.EncodeString(codecSelferC_UTF89225, string(yy2)) + } + } + } + z.EncSendContainerState(codecSelfer_containerArrayEnd9225) +} + +func (x codecSelfer9225) decSlicePtrtostring(v *[]*string, d *codec1978.Decoder) { + var h codecSelfer9225 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []*string{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 8) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]*string, yyrl1) + } + } else { 
+ yyv1 = make([]*string, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + if yyv1[yyj1] != nil { + *yyv1[yyj1] = "" + } + } else { + if yyv1[yyj1] == nil { + yyv1[yyj1] = new(string) + } + yyw2 := yyv1[yyj1] + yym3 := z.DecBinary() + _ = yym3 + if false { + } else { + *((*string)(yyw2)) = r.DecodeString() + } + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, nil) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + if yyv1[yyj1] != nil { + *yyv1[yyj1] = "" + } + } else { + if yyv1[yyj1] == nil { + yyv1[yyj1] = new(string) + } + yyw4 := yyv1[yyj1] + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyw4)) = r.DecodeString() + } + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, nil) // var yyz1 *string + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + if yyv1[yyj1] != nil { + *yyv1[yyj1] = "" + } + } else { + if yyv1[yyj1] == nil { + yyv1[yyj1] = new(string) + } + yyw6 := yyv1[yyj1] + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyw6)) = r.DecodeString() + } + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []*string{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer9225) encSliceURL(v []URL, e *codec1978.Encoder) { + var h codecSelfer9225 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd9225) +} + 
+func (x codecSelfer9225) decSliceURL(v *[]URL, d *codec1978.Decoder) { + var h codecSelfer9225 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []URL{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 48) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]URL, yyrl1) + } + } else { + yyv1 = make([]URL, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = URL{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, URL{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = URL{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, URL{}) // var yyz1 URL + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = URL{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []URL{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer9225) encSliceStatus(v []Status, e *codec1978.Encoder) { + var h codecSelfer9225 + z, r := 
codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd9225) +} + +func (x codecSelfer9225) decSliceStatus(v *[]Status, d *codec1978.Decoder) { + var h codecSelfer9225 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []Status{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 752) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]Status, yyrl1) + } + } else { + yyv1 = make([]Status, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Status{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, Status{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Status{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, Status{}) // var yyz1 Status + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = Status{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if 
yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []Status{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer9225) encSliceLargeStruct(v []LargeStruct, e *codec1978.Encoder) { + var h codecSelfer9225 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem9225) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd9225) +} + +func (x codecSelfer9225) decSliceLargeStruct(v *[]LargeStruct, d *codec1978.Decoder) { + var h codecSelfer9225 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []LargeStruct{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 136) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]LargeStruct, yyrl1) + } + } else { + yyv1 = make([]LargeStruct, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = LargeStruct{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, LargeStruct{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = LargeStruct{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + 
for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, LargeStruct{}) // var yyz1 LargeStruct + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = LargeStruct{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []LargeStruct{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} diff --git a/src/vendor/github.com/mailru/easyjson/benchmark/data_ffjson.go b/src/vendor/github.com/mailru/easyjson/benchmark/data_ffjson.go new file mode 100644 index 00000000..9f000d3a --- /dev/null +++ b/src/vendor/github.com/mailru/easyjson/benchmark/data_ffjson.go @@ -0,0 +1,6723 @@ +// +build use_ffjson + +// DO NOT EDIT! +// Code generated by ffjson +// source: .root/src/github.com/mailru/easyjson/benchmark/data.go +// DO NOT EDIT! + +package benchmark + +import ( + "bytes" + "errors" + "fmt" + fflib "github.com/pquerna/ffjson/fflib/v1" +) + +func (mj *Entities) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Entities) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"hashtags":`) + if mj.Hashtags != nil { + buf.WriteString(`[`) + for i, v := range mj.Hashtags { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteString(`,"urls":`) + if mj.Urls != nil { + buf.WriteString(`[`) + for i, v := range mj.Urls { + if i != 0 { + buf.WriteString(`,`) + } + if v != nil { + 
fflib.WriteJsonString(buf, string(*v)) + } else { + buf.WriteString(`null`) + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteString(`,"user_mentions":`) + if mj.UserMentions != nil { + buf.WriteString(`[`) + for i, v := range mj.UserMentions { + if i != 0 { + buf.WriteString(`,`) + } + if v != nil { + fflib.WriteJsonString(buf, string(*v)) + } else { + buf.WriteString(`null`) + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Entitiesbase = iota + ffj_t_Entitiesno_such_key + + ffj_t_Entities_Hashtags + + ffj_t_Entities_Urls + + ffj_t_Entities_UserMentions +) + +var ffj_key_Entities_Hashtags = []byte("hashtags") + +var ffj_key_Entities_Urls = []byte("urls") + +var ffj_key_Entities_UserMentions = []byte("user_mentions") + +func (uj *Entities) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Entities) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Entitiesbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. 
+ if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_Entitiesno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'h': + + if bytes.Equal(ffj_key_Entities_Hashtags, kn) { + currentKey = ffj_t_Entities_Hashtags + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'u': + + if bytes.Equal(ffj_key_Entities_Urls, kn) { + currentKey = ffj_t_Entities_Urls + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_Entities_UserMentions, kn) { + currentKey = ffj_t_Entities_UserMentions + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_Entities_UserMentions, kn) { + currentKey = ffj_t_Entities_UserMentions + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Entities_Urls, kn) { + currentKey = ffj_t_Entities_Urls + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Entities_Hashtags, kn) { + currentKey = ffj_t_Entities_Hashtags + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Entitiesno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Entities_Hashtags: + goto handle_Hashtags + + case ffj_t_Entities_Urls: + goto handle_Urls + + case ffj_t_Entities_UserMentions: + goto handle_UserMentions + + 
case ffj_t_Entitiesno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Hashtags: + + /* handler: uj.Hashtags type=[]benchmark.Hashtag kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.Hashtags = nil + } else { + + uj.Hashtags = make([]Hashtag, 0) + + wantVal := true + + for { + + var tmp_uj__Hashtags Hashtag + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__Hashtags type=benchmark.Hashtag kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__Hashtags.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.Hashtags = append(uj.Hashtags, tmp_uj__Hashtags) + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Urls: + + /* handler: uj.Urls type=[]*string kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.Urls = nil + } else { + + uj.Urls = make([]*string, 0) + + wantVal := true + + for { + + var tmp_uj__Urls *string + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == 
fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__Urls type=*string kind=ptr quoted=false*/ + + { + + if tok == fflib.FFTok_null { + tmp_uj__Urls = nil + } else { + if tmp_uj__Urls == nil { + tmp_uj__Urls = new(string) + } + + /* handler: tmp_uj__Urls type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + tmp_uj__Urls = nil + + } else { + + var tval string + outBuf := fs.Output.Bytes() + + tval = string(string(outBuf)) + tmp_uj__Urls = &tval + + } + } + + } + } + + uj.Urls = append(uj.Urls, tmp_uj__Urls) + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_UserMentions: + + /* handler: uj.UserMentions type=[]*string kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.UserMentions = nil + } else { + + uj.UserMentions = make([]*string, 0) + + wantVal := true + + for { + + var tmp_uj__UserMentions *string + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__UserMentions type=*string kind=ptr quoted=false*/ + + { + + if tok == fflib.FFTok_null { + tmp_uj__UserMentions = nil + } else { + if tmp_uj__UserMentions == nil { + tmp_uj__UserMentions = new(string) + } + + /* handler: tmp_uj__UserMentions type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + tmp_uj__UserMentions = nil + + } else { + + var tval string + outBuf := fs.Output.Bytes() + + tval = string(string(outBuf)) + tmp_uj__UserMentions = &tval + + } + } + + } + } + + uj.UserMentions = append(uj.UserMentions, tmp_uj__UserMentions) + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + return nil +} + +func (mj *Hashtag) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Hashtag) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"indices":`) + if mj.Indices != nil { + buf.WriteString(`[`) + for i, v := range mj.Indices { + if i != 0 { + 
buf.WriteString(`,`) + } + fflib.FormatBits2(buf, uint64(v), 10, v < 0) + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteString(`,"text":`) + fflib.WriteJsonString(buf, string(mj.Text)) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Hashtagbase = iota + ffj_t_Hashtagno_such_key + + ffj_t_Hashtag_Indices + + ffj_t_Hashtag_Text +) + +var ffj_key_Hashtag_Indices = []byte("indices") + +var ffj_key_Hashtag_Text = []byte("text") + +func (uj *Hashtag) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Hashtag) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Hashtagbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffj_t_Hashtagno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'i': + + if bytes.Equal(ffj_key_Hashtag_Indices, kn) { + currentKey = ffj_t_Hashtag_Indices + state = fflib.FFParse_want_colon + goto mainparse + } + + case 't': + + if bytes.Equal(ffj_key_Hashtag_Text, kn) { + currentKey = ffj_t_Hashtag_Text + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_Hashtag_Text, kn) { + currentKey = ffj_t_Hashtag_Text + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Hashtag_Indices, kn) { + currentKey = ffj_t_Hashtag_Indices + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Hashtagno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Hashtag_Indices: + goto handle_Indices + + case ffj_t_Hashtag_Text: + goto handle_Text + + case ffj_t_Hashtagno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Indices: + + /* handler: uj.Indices type=[]int kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.Indices = nil + } else { + + uj.Indices = make([]int, 0) + + wantVal := true + + for { + + var tmp_uj__Indices int + + tok = 
fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__Indices type=int kind=int quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + tmp_uj__Indices = int(tval) + + } + } + + uj.Indices = append(uj.Indices, tmp_uj__Indices) + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Text: + + /* handler: uj.Text type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + uj.Text = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + return nil +} + +func (mj *LargeStruct) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return 
buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *LargeStruct) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"search_metadata":`) + + { + + err = mj.SearchMetadata.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteString(`,"statuses":`) + if mj.Statuses != nil { + buf.WriteString(`[`) + for i, v := range mj.Statuses { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_LargeStructbase = iota + ffj_t_LargeStructno_such_key + + ffj_t_LargeStruct_SearchMetadata + + ffj_t_LargeStruct_Statuses +) + +var ffj_key_LargeStruct_SearchMetadata = []byte("search_metadata") + +var ffj_key_LargeStruct_Statuses = []byte("statuses") + +func (uj *LargeStruct) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *LargeStruct) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_LargeStructbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = 
fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_LargeStructno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 's': + + if bytes.Equal(ffj_key_LargeStruct_SearchMetadata, kn) { + currentKey = ffj_t_LargeStruct_SearchMetadata + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_LargeStruct_Statuses, kn) { + currentKey = ffj_t_LargeStruct_Statuses + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_LargeStruct_Statuses, kn) { + currentKey = ffj_t_LargeStruct_Statuses + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_LargeStruct_SearchMetadata, kn) { + currentKey = ffj_t_LargeStruct_SearchMetadata + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_LargeStructno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_LargeStruct_SearchMetadata: + goto handle_SearchMetadata + + case ffj_t_LargeStruct_Statuses: + goto handle_Statuses + + case ffj_t_LargeStructno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto 
wantedvalue + } + } + } + +handle_SearchMetadata: + + /* handler: uj.SearchMetadata type=benchmark.SearchMetadata kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = uj.SearchMetadata.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Statuses: + + /* handler: uj.Statuses type=[]benchmark.Status kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.Statuses = nil + } else { + + uj.Statuses = make([]Status, 0) + + wantVal := true + + for { + + var tmp_uj__Statuses Status + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__Statuses type=benchmark.Status kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__Statuses.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.Statuses = append(uj.Statuses, tmp_uj__Statuses) + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + return nil +} + +func (mj *SearchMetadata) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *SearchMetadata) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"completed_in":`) + fflib.AppendFloat(buf, float64(mj.CompletedIn), 'g', -1, 64) + buf.WriteString(`,"count":`) + fflib.FormatBits2(buf, uint64(mj.Count), 10, mj.Count < 0) + buf.WriteString(`,"max_id":`) + fflib.FormatBits2(buf, uint64(mj.MaxID), 10, mj.MaxID < 0) + buf.WriteString(`,"max_id_str":`) + fflib.WriteJsonString(buf, string(mj.MaxIDStr)) + buf.WriteString(`,"next_results":`) + fflib.WriteJsonString(buf, string(mj.NextResults)) + 
buf.WriteString(`,"query":`) + fflib.WriteJsonString(buf, string(mj.Query)) + buf.WriteString(`,"refresh_url":`) + fflib.WriteJsonString(buf, string(mj.RefreshURL)) + buf.WriteString(`,"since_id":`) + fflib.FormatBits2(buf, uint64(mj.SinceID), 10, mj.SinceID < 0) + buf.WriteString(`,"since_id_str":`) + fflib.WriteJsonString(buf, string(mj.SinceIDStr)) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_SearchMetadatabase = iota + ffj_t_SearchMetadatano_such_key + + ffj_t_SearchMetadata_CompletedIn + + ffj_t_SearchMetadata_Count + + ffj_t_SearchMetadata_MaxID + + ffj_t_SearchMetadata_MaxIDStr + + ffj_t_SearchMetadata_NextResults + + ffj_t_SearchMetadata_Query + + ffj_t_SearchMetadata_RefreshURL + + ffj_t_SearchMetadata_SinceID + + ffj_t_SearchMetadata_SinceIDStr +) + +var ffj_key_SearchMetadata_CompletedIn = []byte("completed_in") + +var ffj_key_SearchMetadata_Count = []byte("count") + +var ffj_key_SearchMetadata_MaxID = []byte("max_id") + +var ffj_key_SearchMetadata_MaxIDStr = []byte("max_id_str") + +var ffj_key_SearchMetadata_NextResults = []byte("next_results") + +var ffj_key_SearchMetadata_Query = []byte("query") + +var ffj_key_SearchMetadata_RefreshURL = []byte("refresh_url") + +var ffj_key_SearchMetadata_SinceID = []byte("since_id") + +var ffj_key_SearchMetadata_SinceIDStr = []byte("since_id_str") + +func (uj *SearchMetadata) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *SearchMetadata) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_SearchMetadatabase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = 
fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_SearchMetadatano_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'c': + + if bytes.Equal(ffj_key_SearchMetadata_CompletedIn, kn) { + currentKey = ffj_t_SearchMetadata_CompletedIn + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_SearchMetadata_Count, kn) { + currentKey = ffj_t_SearchMetadata_Count + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'm': + + if bytes.Equal(ffj_key_SearchMetadata_MaxID, kn) { + currentKey = ffj_t_SearchMetadata_MaxID + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_SearchMetadata_MaxIDStr, kn) { + currentKey = ffj_t_SearchMetadata_MaxIDStr + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'n': + + if bytes.Equal(ffj_key_SearchMetadata_NextResults, kn) { + currentKey = ffj_t_SearchMetadata_NextResults + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'q': + + if bytes.Equal(ffj_key_SearchMetadata_Query, kn) { + currentKey = ffj_t_SearchMetadata_Query + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'r': + + if bytes.Equal(ffj_key_SearchMetadata_RefreshURL, kn) { + currentKey = ffj_t_SearchMetadata_RefreshURL + state = fflib.FFParse_want_colon + goto mainparse + } + + case 's': + + if bytes.Equal(ffj_key_SearchMetadata_SinceID, kn) { + 
currentKey = ffj_t_SearchMetadata_SinceID + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_SearchMetadata_SinceIDStr, kn) { + currentKey = ffj_t_SearchMetadata_SinceIDStr + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_SearchMetadata_SinceIDStr, kn) { + currentKey = ffj_t_SearchMetadata_SinceIDStr + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_SearchMetadata_SinceID, kn) { + currentKey = ffj_t_SearchMetadata_SinceID + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_SearchMetadata_RefreshURL, kn) { + currentKey = ffj_t_SearchMetadata_RefreshURL + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_SearchMetadata_Query, kn) { + currentKey = ffj_t_SearchMetadata_Query + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_SearchMetadata_NextResults, kn) { + currentKey = ffj_t_SearchMetadata_NextResults + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_SearchMetadata_MaxIDStr, kn) { + currentKey = ffj_t_SearchMetadata_MaxIDStr + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.AsciiEqualFold(ffj_key_SearchMetadata_MaxID, kn) { + currentKey = ffj_t_SearchMetadata_MaxID + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_SearchMetadata_Count, kn) { + currentKey = ffj_t_SearchMetadata_Count + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.AsciiEqualFold(ffj_key_SearchMetadata_CompletedIn, kn) { + currentKey = ffj_t_SearchMetadata_CompletedIn + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_SearchMetadatano_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon 
+ goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_SearchMetadata_CompletedIn: + goto handle_CompletedIn + + case ffj_t_SearchMetadata_Count: + goto handle_Count + + case ffj_t_SearchMetadata_MaxID: + goto handle_MaxID + + case ffj_t_SearchMetadata_MaxIDStr: + goto handle_MaxIDStr + + case ffj_t_SearchMetadata_NextResults: + goto handle_NextResults + + case ffj_t_SearchMetadata_Query: + goto handle_Query + + case ffj_t_SearchMetadata_RefreshURL: + goto handle_RefreshURL + + case ffj_t_SearchMetadata_SinceID: + goto handle_SinceID + + case ffj_t_SearchMetadata_SinceIDStr: + goto handle_SinceIDStr + + case ffj_t_SearchMetadatano_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_CompletedIn: + + /* handler: uj.CompletedIn type=float64 kind=float64 quoted=false*/ + + { + if tok != fflib.FFTok_double && tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for float64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + } else { + + tval, err := fflib.ParseFloat(fs.Output.Bytes(), 64) + + if err != nil { + return fs.WrapErr(err) + } + + uj.CompletedIn = float64(tval) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Count: + + /* handler: uj.Count type=int kind=int quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + } else { + + tval, err := 
fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + uj.Count = int(tval) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_MaxID: + + /* handler: uj.MaxID type=int kind=int quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + uj.MaxID = int(tval) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_MaxIDStr: + + /* handler: uj.MaxIDStr type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + uj.MaxIDStr = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_NextResults: + + /* handler: uj.NextResults type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + uj.NextResults = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Query: + + /* handler: uj.Query type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + uj.Query = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_RefreshURL: + + /* 
handler: uj.RefreshURL type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + uj.RefreshURL = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_SinceID: + + /* handler: uj.SinceID type=int kind=int quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + uj.SinceID = int(tval) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_SinceIDStr: + + /* handler: uj.SinceIDStr type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + uj.SinceIDStr = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + return nil +} + +func (mj *Status) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if 
err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *Status) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + if mj.Contributors != nil { + buf.WriteString(`{"contributors":`) + fflib.WriteJsonString(buf, string(*mj.Contributors)) + } else { + buf.WriteString(`{"contributors":null`) + } + if mj.Coordinates != nil { + buf.WriteString(`,"coordinates":`) + fflib.WriteJsonString(buf, string(*mj.Coordinates)) + } else { + buf.WriteString(`,"coordinates":null`) + } + buf.WriteString(`,"created_at":`) + fflib.WriteJsonString(buf, string(mj.CreatedAt)) + buf.WriteString(`,"entities":`) + + { + + err = mj.Entities.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + if mj.Favorited { + buf.WriteString(`,"favorited":true`) + } else { + buf.WriteString(`,"favorited":false`) + } + if mj.Geo != nil { + buf.WriteString(`,"geo":`) + fflib.WriteJsonString(buf, string(*mj.Geo)) + } else { + buf.WriteString(`,"geo":null`) + } + buf.WriteString(`,"id":`) + fflib.FormatBits2(buf, uint64(mj.ID), 10, mj.ID < 0) + buf.WriteString(`,"id_str":`) + fflib.WriteJsonString(buf, string(mj.IDStr)) + if mj.InReplyToScreenName != nil { + buf.WriteString(`,"in_reply_to_screen_name":`) + fflib.WriteJsonString(buf, string(*mj.InReplyToScreenName)) + } else { + buf.WriteString(`,"in_reply_to_screen_name":null`) + } + if mj.InReplyToStatusID != nil { + buf.WriteString(`,"in_reply_to_status_id":`) + fflib.WriteJsonString(buf, string(*mj.InReplyToStatusID)) + } else { + buf.WriteString(`,"in_reply_to_status_id":null`) + } + if mj.InReplyToStatusIDStr != nil { + buf.WriteString(`,"in_reply_to_status_id_str":`) + fflib.WriteJsonString(buf, string(*mj.InReplyToStatusIDStr)) + } else { + buf.WriteString(`,"in_reply_to_status_id_str":null`) + } + if mj.InReplyToUserID != nil { + buf.WriteString(`,"in_reply_to_user_id":`) + fflib.WriteJsonString(buf, 
string(*mj.InReplyToUserID)) + } else { + buf.WriteString(`,"in_reply_to_user_id":null`) + } + if mj.InReplyToUserIDStr != nil { + buf.WriteString(`,"in_reply_to_user_id_str":`) + fflib.WriteJsonString(buf, string(*mj.InReplyToUserIDStr)) + } else { + buf.WriteString(`,"in_reply_to_user_id_str":null`) + } + buf.WriteString(`,"metadata":`) + + { + + err = mj.Metadata.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + if mj.Place != nil { + buf.WriteString(`,"place":`) + fflib.WriteJsonString(buf, string(*mj.Place)) + } else { + buf.WriteString(`,"place":null`) + } + buf.WriteString(`,"retweet_count":`) + fflib.FormatBits2(buf, uint64(mj.RetweetCount), 10, mj.RetweetCount < 0) + if mj.Retweeted { + buf.WriteString(`,"retweeted":true`) + } else { + buf.WriteString(`,"retweeted":false`) + } + buf.WriteString(`,"source":`) + fflib.WriteJsonString(buf, string(mj.Source)) + buf.WriteString(`,"text":`) + fflib.WriteJsonString(buf, string(mj.Text)) + if mj.Truncated { + buf.WriteString(`,"truncated":true`) + } else { + buf.WriteString(`,"truncated":false`) + } + buf.WriteString(`,"user":`) + + { + + err = mj.User.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Statusbase = iota + ffj_t_Statusno_such_key + + ffj_t_Status_Contributors + + ffj_t_Status_Coordinates + + ffj_t_Status_CreatedAt + + ffj_t_Status_Entities + + ffj_t_Status_Favorited + + ffj_t_Status_Geo + + ffj_t_Status_ID + + ffj_t_Status_IDStr + + ffj_t_Status_InReplyToScreenName + + ffj_t_Status_InReplyToStatusID + + ffj_t_Status_InReplyToStatusIDStr + + ffj_t_Status_InReplyToUserID + + ffj_t_Status_InReplyToUserIDStr + + ffj_t_Status_Metadata + + ffj_t_Status_Place + + ffj_t_Status_RetweetCount + + ffj_t_Status_Retweeted + + ffj_t_Status_Source + + ffj_t_Status_Text + + ffj_t_Status_Truncated + + ffj_t_Status_User +) + +var ffj_key_Status_Contributors = []byte("contributors") + +var ffj_key_Status_Coordinates = []byte("coordinates") 
+ +var ffj_key_Status_CreatedAt = []byte("created_at") + +var ffj_key_Status_Entities = []byte("entities") + +var ffj_key_Status_Favorited = []byte("favorited") + +var ffj_key_Status_Geo = []byte("geo") + +var ffj_key_Status_ID = []byte("id") + +var ffj_key_Status_IDStr = []byte("id_str") + +var ffj_key_Status_InReplyToScreenName = []byte("in_reply_to_screen_name") + +var ffj_key_Status_InReplyToStatusID = []byte("in_reply_to_status_id") + +var ffj_key_Status_InReplyToStatusIDStr = []byte("in_reply_to_status_id_str") + +var ffj_key_Status_InReplyToUserID = []byte("in_reply_to_user_id") + +var ffj_key_Status_InReplyToUserIDStr = []byte("in_reply_to_user_id_str") + +var ffj_key_Status_Metadata = []byte("metadata") + +var ffj_key_Status_Place = []byte("place") + +var ffj_key_Status_RetweetCount = []byte("retweet_count") + +var ffj_key_Status_Retweeted = []byte("retweeted") + +var ffj_key_Status_Source = []byte("source") + +var ffj_key_Status_Text = []byte("text") + +var ffj_key_Status_Truncated = []byte("truncated") + +var ffj_key_Status_User = []byte("user") + +func (uj *Status) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *Status) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Statusbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = 
fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_Statusno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'c': + + if bytes.Equal(ffj_key_Status_Contributors, kn) { + currentKey = ffj_t_Status_Contributors + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_Status_Coordinates, kn) { + currentKey = ffj_t_Status_Coordinates + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_Status_CreatedAt, kn) { + currentKey = ffj_t_Status_CreatedAt + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'e': + + if bytes.Equal(ffj_key_Status_Entities, kn) { + currentKey = ffj_t_Status_Entities + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'f': + + if bytes.Equal(ffj_key_Status_Favorited, kn) { + currentKey = ffj_t_Status_Favorited + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'g': + + if bytes.Equal(ffj_key_Status_Geo, kn) { + currentKey = ffj_t_Status_Geo + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'i': + + if bytes.Equal(ffj_key_Status_ID, kn) { + currentKey = ffj_t_Status_ID + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_Status_IDStr, kn) { + currentKey = ffj_t_Status_IDStr + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_Status_InReplyToScreenName, kn) { + currentKey = ffj_t_Status_InReplyToScreenName + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_Status_InReplyToStatusID, kn) { + currentKey = ffj_t_Status_InReplyToStatusID + state = fflib.FFParse_want_colon + goto mainparse + + } 
else if bytes.Equal(ffj_key_Status_InReplyToStatusIDStr, kn) { + currentKey = ffj_t_Status_InReplyToStatusIDStr + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_Status_InReplyToUserID, kn) { + currentKey = ffj_t_Status_InReplyToUserID + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_Status_InReplyToUserIDStr, kn) { + currentKey = ffj_t_Status_InReplyToUserIDStr + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'm': + + if bytes.Equal(ffj_key_Status_Metadata, kn) { + currentKey = ffj_t_Status_Metadata + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'p': + + if bytes.Equal(ffj_key_Status_Place, kn) { + currentKey = ffj_t_Status_Place + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'r': + + if bytes.Equal(ffj_key_Status_RetweetCount, kn) { + currentKey = ffj_t_Status_RetweetCount + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_Status_Retweeted, kn) { + currentKey = ffj_t_Status_Retweeted + state = fflib.FFParse_want_colon + goto mainparse + } + + case 's': + + if bytes.Equal(ffj_key_Status_Source, kn) { + currentKey = ffj_t_Status_Source + state = fflib.FFParse_want_colon + goto mainparse + } + + case 't': + + if bytes.Equal(ffj_key_Status_Text, kn) { + currentKey = ffj_t_Status_Text + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_Status_Truncated, kn) { + currentKey = ffj_t_Status_Truncated + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'u': + + if bytes.Equal(ffj_key_Status_User, kn) { + currentKey = ffj_t_Status_User + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_Status_User, kn) { + currentKey = ffj_t_Status_User + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_Status_Truncated, kn) { + currentKey = ffj_t_Status_Truncated + state = 
fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_Status_Text, kn) { + currentKey = ffj_t_Status_Text + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Status_Source, kn) { + currentKey = ffj_t_Status_Source + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_Status_Retweeted, kn) { + currentKey = ffj_t_Status_Retweeted + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.AsciiEqualFold(ffj_key_Status_RetweetCount, kn) { + currentKey = ffj_t_Status_RetweetCount + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_Status_Place, kn) { + currentKey = ffj_t_Status_Place + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_Status_Metadata, kn) { + currentKey = ffj_t_Status_Metadata + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Status_InReplyToUserIDStr, kn) { + currentKey = ffj_t_Status_InReplyToUserIDStr + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Status_InReplyToUserID, kn) { + currentKey = ffj_t_Status_InReplyToUserID + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Status_InReplyToStatusIDStr, kn) { + currentKey = ffj_t_Status_InReplyToStatusIDStr + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Status_InReplyToStatusID, kn) { + currentKey = ffj_t_Status_InReplyToStatusID + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Status_InReplyToScreenName, kn) { + currentKey = ffj_t_Status_InReplyToScreenName + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Status_IDStr, kn) { + currentKey = ffj_t_Status_IDStr + state = fflib.FFParse_want_colon + goto mainparse + } + + if 
fflib.SimpleLetterEqualFold(ffj_key_Status_ID, kn) { + currentKey = ffj_t_Status_ID + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_Status_Geo, kn) { + currentKey = ffj_t_Status_Geo + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_Status_Favorited, kn) { + currentKey = ffj_t_Status_Favorited + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Status_Entities, kn) { + currentKey = ffj_t_Status_Entities + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.AsciiEqualFold(ffj_key_Status_CreatedAt, kn) { + currentKey = ffj_t_Status_CreatedAt + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Status_Coordinates, kn) { + currentKey = ffj_t_Status_Coordinates + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_Status_Contributors, kn) { + currentKey = ffj_t_Status_Contributors + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Statusno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_Status_Contributors: + goto handle_Contributors + + case ffj_t_Status_Coordinates: + goto handle_Coordinates + + case ffj_t_Status_CreatedAt: + goto handle_CreatedAt + + case ffj_t_Status_Entities: + goto handle_Entities + + case ffj_t_Status_Favorited: + goto handle_Favorited + + case ffj_t_Status_Geo: + goto handle_Geo + + case ffj_t_Status_ID: + goto handle_ID + + 
case ffj_t_Status_IDStr: + goto handle_IDStr + + case ffj_t_Status_InReplyToScreenName: + goto handle_InReplyToScreenName + + case ffj_t_Status_InReplyToStatusID: + goto handle_InReplyToStatusID + + case ffj_t_Status_InReplyToStatusIDStr: + goto handle_InReplyToStatusIDStr + + case ffj_t_Status_InReplyToUserID: + goto handle_InReplyToUserID + + case ffj_t_Status_InReplyToUserIDStr: + goto handle_InReplyToUserIDStr + + case ffj_t_Status_Metadata: + goto handle_Metadata + + case ffj_t_Status_Place: + goto handle_Place + + case ffj_t_Status_RetweetCount: + goto handle_RetweetCount + + case ffj_t_Status_Retweeted: + goto handle_Retweeted + + case ffj_t_Status_Source: + goto handle_Source + + case ffj_t_Status_Text: + goto handle_Text + + case ffj_t_Status_Truncated: + goto handle_Truncated + + case ffj_t_Status_User: + goto handle_User + + case ffj_t_Statusno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Contributors: + + /* handler: uj.Contributors type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + uj.Contributors = nil + + } else { + + var tval string + outBuf := fs.Output.Bytes() + + tval = string(string(outBuf)) + uj.Contributors = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Coordinates: + + /* handler: uj.Coordinates type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + uj.Coordinates = nil + + } else { + + var tval string + outBuf := fs.Output.Bytes() + + tval = string(string(outBuf)) + uj.Coordinates = &tval + + 
} + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_CreatedAt: + + /* handler: uj.CreatedAt type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + uj.CreatedAt = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Entities: + + /* handler: uj.Entities type=benchmark.Entities kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = uj.Entities.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Favorited: + + /* handler: uj.Favorited type=bool kind=bool quoted=false*/ + + { + if tok != fflib.FFTok_bool && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for bool", tok)) + } + } + + { + if tok == fflib.FFTok_null { + + } else { + tmpb := fs.Output.Bytes() + + if bytes.Compare([]byte{'t', 'r', 'u', 'e'}, tmpb) == 0 { + + uj.Favorited = true + + } else if bytes.Compare([]byte{'f', 'a', 'l', 's', 'e'}, tmpb) == 0 { + + uj.Favorited = false + + } else { + err = errors.New("unexpected bytes for true/false value") + return fs.WrapErr(err) + } + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Geo: + + /* handler: uj.Geo type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + uj.Geo = nil + + } else { + + var tval string + outBuf := fs.Output.Bytes() + + tval = string(string(outBuf)) + uj.Geo = &tval + + } + } + + state = 
fflib.FFParse_after_value + goto mainparse + +handle_ID: + + /* handler: uj.ID type=int64 kind=int64 quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + uj.ID = int64(tval) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_IDStr: + + /* handler: uj.IDStr type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + uj.IDStr = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_InReplyToScreenName: + + /* handler: uj.InReplyToScreenName type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + uj.InReplyToScreenName = nil + + } else { + + var tval string + outBuf := fs.Output.Bytes() + + tval = string(string(outBuf)) + uj.InReplyToScreenName = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_InReplyToStatusID: + + /* handler: uj.InReplyToStatusID type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + uj.InReplyToStatusID = nil + + } else { + + var tval string + outBuf := fs.Output.Bytes() + + tval = string(string(outBuf)) + uj.InReplyToStatusID = &tval + + } + } + + state = fflib.FFParse_after_value + 
goto mainparse + +handle_InReplyToStatusIDStr: + + /* handler: uj.InReplyToStatusIDStr type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + uj.InReplyToStatusIDStr = nil + + } else { + + var tval string + outBuf := fs.Output.Bytes() + + tval = string(string(outBuf)) + uj.InReplyToStatusIDStr = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_InReplyToUserID: + + /* handler: uj.InReplyToUserID type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + uj.InReplyToUserID = nil + + } else { + + var tval string + outBuf := fs.Output.Bytes() + + tval = string(string(outBuf)) + uj.InReplyToUserID = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_InReplyToUserIDStr: + + /* handler: uj.InReplyToUserIDStr type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + uj.InReplyToUserIDStr = nil + + } else { + + var tval string + outBuf := fs.Output.Bytes() + + tval = string(string(outBuf)) + uj.InReplyToUserIDStr = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Metadata: + + /* handler: uj.Metadata type=benchmark.StatusMetadata kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = uj.Metadata.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto 
mainparse + +handle_Place: + + /* handler: uj.Place type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + uj.Place = nil + + } else { + + var tval string + outBuf := fs.Output.Bytes() + + tval = string(string(outBuf)) + uj.Place = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_RetweetCount: + + /* handler: uj.RetweetCount type=int kind=int quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + uj.RetweetCount = int(tval) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Retweeted: + + /* handler: uj.Retweeted type=bool kind=bool quoted=false*/ + + { + if tok != fflib.FFTok_bool && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for bool", tok)) + } + } + + { + if tok == fflib.FFTok_null { + + } else { + tmpb := fs.Output.Bytes() + + if bytes.Compare([]byte{'t', 'r', 'u', 'e'}, tmpb) == 0 { + + uj.Retweeted = true + + } else if bytes.Compare([]byte{'f', 'a', 'l', 's', 'e'}, tmpb) == 0 { + + uj.Retweeted = false + + } else { + err = errors.New("unexpected bytes for true/false value") + return fs.WrapErr(err) + } + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Source: + + /* handler: uj.Source type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + 
uj.Source = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Text: + + /* handler: uj.Text type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + uj.Text = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Truncated: + + /* handler: uj.Truncated type=bool kind=bool quoted=false*/ + + { + if tok != fflib.FFTok_bool && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for bool", tok)) + } + } + + { + if tok == fflib.FFTok_null { + + } else { + tmpb := fs.Output.Bytes() + + if bytes.Compare([]byte{'t', 'r', 'u', 'e'}, tmpb) == 0 { + + uj.Truncated = true + + } else if bytes.Compare([]byte{'f', 'a', 'l', 's', 'e'}, tmpb) == 0 { + + uj.Truncated = false + + } else { + err = errors.New("unexpected bytes for true/false value") + return fs.WrapErr(err) + } + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_User: + + /* handler: uj.User type=benchmark.User kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = uj.User.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, 
please report bug.") +done: + return nil +} + +func (mj *StatusMetadata) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *StatusMetadata) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"iso_language_code":`) + fflib.WriteJsonString(buf, string(mj.IsoLanguageCode)) + buf.WriteString(`,"result_type":`) + fflib.WriteJsonString(buf, string(mj.ResultType)) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_StatusMetadatabase = iota + ffj_t_StatusMetadatano_such_key + + ffj_t_StatusMetadata_IsoLanguageCode + + ffj_t_StatusMetadata_ResultType +) + +var ffj_key_StatusMetadata_IsoLanguageCode = []byte("iso_language_code") + +var ffj_key_StatusMetadata_ResultType = []byte("result_type") + +func (uj *StatusMetadata) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *StatusMetadata) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_StatusMetadatabase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto 
wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. + currentKey = ffj_t_StatusMetadatano_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'i': + + if bytes.Equal(ffj_key_StatusMetadata_IsoLanguageCode, kn) { + currentKey = ffj_t_StatusMetadata_IsoLanguageCode + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'r': + + if bytes.Equal(ffj_key_StatusMetadata_ResultType, kn) { + currentKey = ffj_t_StatusMetadata_ResultType + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_StatusMetadata_ResultType, kn) { + currentKey = ffj_t_StatusMetadata_ResultType + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_StatusMetadata_IsoLanguageCode, kn) { + currentKey = ffj_t_StatusMetadata_IsoLanguageCode + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_StatusMetadatano_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_StatusMetadata_IsoLanguageCode: + goto handle_IsoLanguageCode + + case ffj_t_StatusMetadata_ResultType: + goto handle_ResultType + + case ffj_t_StatusMetadatano_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value 
+ goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_IsoLanguageCode: + + /* handler: uj.IsoLanguageCode type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + uj.IsoLanguageCode = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_ResultType: + + /* handler: uj.ResultType type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + uj.ResultType = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + return nil +} + +func (mj *URL) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *URL) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + if mj.ExpandedURL != nil { + buf.WriteString(`{"expanded_url":`) + fflib.WriteJsonString(buf, string(*mj.ExpandedURL)) + } else { + 
buf.WriteString(`{"expanded_url":null`) + } + buf.WriteString(`,"indices":`) + if mj.Indices != nil { + buf.WriteString(`[`) + for i, v := range mj.Indices { + if i != 0 { + buf.WriteString(`,`) + } + fflib.FormatBits2(buf, uint64(v), 10, v < 0) + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteString(`,"url":`) + fflib.WriteJsonString(buf, string(mj.URL)) + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_URLbase = iota + ffj_t_URLno_such_key + + ffj_t_URL_ExpandedURL + + ffj_t_URL_Indices + + ffj_t_URL_URL +) + +var ffj_key_URL_ExpandedURL = []byte("expanded_url") + +var ffj_key_URL_Indices = []byte("indices") + +var ffj_key_URL_URL = []byte("url") + +func (uj *URL) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *URL) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_URLbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffj_t_URLno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'e': + + if bytes.Equal(ffj_key_URL_ExpandedURL, kn) { + currentKey = ffj_t_URL_ExpandedURL + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'i': + + if bytes.Equal(ffj_key_URL_Indices, kn) { + currentKey = ffj_t_URL_Indices + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'u': + + if bytes.Equal(ffj_key_URL_URL, kn) { + currentKey = ffj_t_URL_URL + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_URL_URL, kn) { + currentKey = ffj_t_URL_URL + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_URL_Indices, kn) { + currentKey = ffj_t_URL_Indices + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.AsciiEqualFold(ffj_key_URL_ExpandedURL, kn) { + currentKey = ffj_t_URL_ExpandedURL + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_URLno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_URL_ExpandedURL: + goto handle_ExpandedURL + + case ffj_t_URL_Indices: + goto handle_Indices + + case ffj_t_URL_URL: + goto handle_URL + + case ffj_t_URLno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_ExpandedURL: + + /* handler: uj.ExpandedURL type=string kind=string quoted=false*/ + + { + 
+ { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + uj.ExpandedURL = nil + + } else { + + var tval string + outBuf := fs.Output.Bytes() + + tval = string(string(outBuf)) + uj.ExpandedURL = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Indices: + + /* handler: uj.Indices type=[]int kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.Indices = nil + } else { + + uj.Indices = make([]int, 0) + + wantVal := true + + for { + + var tmp_uj__Indices int + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__Indices type=int kind=int quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + tmp_uj__Indices = int(tval) + + } + } + + uj.Indices = append(uj.Indices, tmp_uj__Indices) + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_URL: + + /* handler: uj.URL type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + uj.URL = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + return nil +} + +func (mj *User) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *User) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ 
= err + if mj.ContributorsEnabled { + buf.WriteString(`{"contributors_enabled":true`) + } else { + buf.WriteString(`{"contributors_enabled":false`) + } + buf.WriteString(`,"created_at":`) + fflib.WriteJsonString(buf, string(mj.CreatedAt)) + if mj.DefaultProfile { + buf.WriteString(`,"default_profile":true`) + } else { + buf.WriteString(`,"default_profile":false`) + } + if mj.DefaultProfileImage { + buf.WriteString(`,"default_profile_image":true`) + } else { + buf.WriteString(`,"default_profile_image":false`) + } + buf.WriteString(`,"description":`) + fflib.WriteJsonString(buf, string(mj.Description)) + buf.WriteString(`,"entities":`) + + { + + err = mj.Entities.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteString(`,"favourites_count":`) + fflib.FormatBits2(buf, uint64(mj.FavouritesCount), 10, mj.FavouritesCount < 0) + if mj.FollowRequestSent != nil { + buf.WriteString(`,"follow_request_sent":`) + fflib.WriteJsonString(buf, string(*mj.FollowRequestSent)) + } else { + buf.WriteString(`,"follow_request_sent":null`) + } + buf.WriteString(`,"followers_count":`) + fflib.FormatBits2(buf, uint64(mj.FollowersCount), 10, mj.FollowersCount < 0) + if mj.Following != nil { + buf.WriteString(`,"following":`) + fflib.WriteJsonString(buf, string(*mj.Following)) + } else { + buf.WriteString(`,"following":null`) + } + buf.WriteString(`,"friends_count":`) + fflib.FormatBits2(buf, uint64(mj.FriendsCount), 10, mj.FriendsCount < 0) + if mj.GeoEnabled { + buf.WriteString(`,"geo_enabled":true`) + } else { + buf.WriteString(`,"geo_enabled":false`) + } + buf.WriteString(`,"id":`) + fflib.FormatBits2(buf, uint64(mj.ID), 10, mj.ID < 0) + buf.WriteString(`,"id_str":`) + fflib.WriteJsonString(buf, string(mj.IDStr)) + if mj.IsTranslator { + buf.WriteString(`,"is_translator":true`) + } else { + buf.WriteString(`,"is_translator":false`) + } + buf.WriteString(`,"lang":`) + fflib.WriteJsonString(buf, string(mj.Lang)) + buf.WriteString(`,"listed_count":`) + 
fflib.FormatBits2(buf, uint64(mj.ListedCount), 10, mj.ListedCount < 0) + buf.WriteString(`,"location":`) + fflib.WriteJsonString(buf, string(mj.Location)) + buf.WriteString(`,"name":`) + fflib.WriteJsonString(buf, string(mj.Name)) + if mj.Notifications != nil { + buf.WriteString(`,"notifications":`) + fflib.WriteJsonString(buf, string(*mj.Notifications)) + } else { + buf.WriteString(`,"notifications":null`) + } + buf.WriteString(`,"profile_background_color":`) + fflib.WriteJsonString(buf, string(mj.ProfileBackgroundColor)) + buf.WriteString(`,"profile_background_image_url":`) + fflib.WriteJsonString(buf, string(mj.ProfileBackgroundImageURL)) + buf.WriteString(`,"profile_background_image_url_https":`) + fflib.WriteJsonString(buf, string(mj.ProfileBackgroundImageURLHTTPS)) + if mj.ProfileBackgroundTile { + buf.WriteString(`,"profile_background_tile":true`) + } else { + buf.WriteString(`,"profile_background_tile":false`) + } + buf.WriteString(`,"profile_image_url":`) + fflib.WriteJsonString(buf, string(mj.ProfileImageURL)) + buf.WriteString(`,"profile_image_url_https":`) + fflib.WriteJsonString(buf, string(mj.ProfileImageURLHTTPS)) + buf.WriteString(`,"profile_link_color":`) + fflib.WriteJsonString(buf, string(mj.ProfileLinkColor)) + buf.WriteString(`,"profile_sidebar_border_color":`) + fflib.WriteJsonString(buf, string(mj.ProfileSidebarBorderColor)) + buf.WriteString(`,"profile_sidebar_fill_color":`) + fflib.WriteJsonString(buf, string(mj.ProfileSidebarFillColor)) + buf.WriteString(`,"profile_text_color":`) + fflib.WriteJsonString(buf, string(mj.ProfileTextColor)) + if mj.ProfileUseBackgroundImage { + buf.WriteString(`,"profile_use_background_image":true`) + } else { + buf.WriteString(`,"profile_use_background_image":false`) + } + if mj.Protected { + buf.WriteString(`,"protected":true`) + } else { + buf.WriteString(`,"protected":false`) + } + buf.WriteString(`,"screen_name":`) + fflib.WriteJsonString(buf, string(mj.ScreenName)) + if mj.ShowAllInlineMedia { + 
buf.WriteString(`,"show_all_inline_media":true`) + } else { + buf.WriteString(`,"show_all_inline_media":false`) + } + buf.WriteString(`,"statuses_count":`) + fflib.FormatBits2(buf, uint64(mj.StatusesCount), 10, mj.StatusesCount < 0) + buf.WriteString(`,"time_zone":`) + fflib.WriteJsonString(buf, string(mj.TimeZone)) + if mj.URL != nil { + buf.WriteString(`,"url":`) + fflib.WriteJsonString(buf, string(*mj.URL)) + } else { + buf.WriteString(`,"url":null`) + } + buf.WriteString(`,"utc_offset":`) + fflib.FormatBits2(buf, uint64(mj.UtcOffset), 10, mj.UtcOffset < 0) + if mj.Verified { + buf.WriteString(`,"verified":true`) + } else { + buf.WriteString(`,"verified":false`) + } + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_Userbase = iota + ffj_t_Userno_such_key + + ffj_t_User_ContributorsEnabled + + ffj_t_User_CreatedAt + + ffj_t_User_DefaultProfile + + ffj_t_User_DefaultProfileImage + + ffj_t_User_Description + + ffj_t_User_Entities + + ffj_t_User_FavouritesCount + + ffj_t_User_FollowRequestSent + + ffj_t_User_FollowersCount + + ffj_t_User_Following + + ffj_t_User_FriendsCount + + ffj_t_User_GeoEnabled + + ffj_t_User_ID + + ffj_t_User_IDStr + + ffj_t_User_IsTranslator + + ffj_t_User_Lang + + ffj_t_User_ListedCount + + ffj_t_User_Location + + ffj_t_User_Name + + ffj_t_User_Notifications + + ffj_t_User_ProfileBackgroundColor + + ffj_t_User_ProfileBackgroundImageURL + + ffj_t_User_ProfileBackgroundImageURLHTTPS + + ffj_t_User_ProfileBackgroundTile + + ffj_t_User_ProfileImageURL + + ffj_t_User_ProfileImageURLHTTPS + + ffj_t_User_ProfileLinkColor + + ffj_t_User_ProfileSidebarBorderColor + + ffj_t_User_ProfileSidebarFillColor + + ffj_t_User_ProfileTextColor + + ffj_t_User_ProfileUseBackgroundImage + + ffj_t_User_Protected + + ffj_t_User_ScreenName + + ffj_t_User_ShowAllInlineMedia + + ffj_t_User_StatusesCount + + ffj_t_User_TimeZone + + ffj_t_User_URL + + ffj_t_User_UtcOffset + + ffj_t_User_Verified +) + +var ffj_key_User_ContributorsEnabled = 
[]byte("contributors_enabled") + +var ffj_key_User_CreatedAt = []byte("created_at") + +var ffj_key_User_DefaultProfile = []byte("default_profile") + +var ffj_key_User_DefaultProfileImage = []byte("default_profile_image") + +var ffj_key_User_Description = []byte("description") + +var ffj_key_User_Entities = []byte("entities") + +var ffj_key_User_FavouritesCount = []byte("favourites_count") + +var ffj_key_User_FollowRequestSent = []byte("follow_request_sent") + +var ffj_key_User_FollowersCount = []byte("followers_count") + +var ffj_key_User_Following = []byte("following") + +var ffj_key_User_FriendsCount = []byte("friends_count") + +var ffj_key_User_GeoEnabled = []byte("geo_enabled") + +var ffj_key_User_ID = []byte("id") + +var ffj_key_User_IDStr = []byte("id_str") + +var ffj_key_User_IsTranslator = []byte("is_translator") + +var ffj_key_User_Lang = []byte("lang") + +var ffj_key_User_ListedCount = []byte("listed_count") + +var ffj_key_User_Location = []byte("location") + +var ffj_key_User_Name = []byte("name") + +var ffj_key_User_Notifications = []byte("notifications") + +var ffj_key_User_ProfileBackgroundColor = []byte("profile_background_color") + +var ffj_key_User_ProfileBackgroundImageURL = []byte("profile_background_image_url") + +var ffj_key_User_ProfileBackgroundImageURLHTTPS = []byte("profile_background_image_url_https") + +var ffj_key_User_ProfileBackgroundTile = []byte("profile_background_tile") + +var ffj_key_User_ProfileImageURL = []byte("profile_image_url") + +var ffj_key_User_ProfileImageURLHTTPS = []byte("profile_image_url_https") + +var ffj_key_User_ProfileLinkColor = []byte("profile_link_color") + +var ffj_key_User_ProfileSidebarBorderColor = []byte("profile_sidebar_border_color") + +var ffj_key_User_ProfileSidebarFillColor = []byte("profile_sidebar_fill_color") + +var ffj_key_User_ProfileTextColor = []byte("profile_text_color") + +var ffj_key_User_ProfileUseBackgroundImage = []byte("profile_use_background_image") + +var ffj_key_User_Protected = 
[]byte("protected") + +var ffj_key_User_ScreenName = []byte("screen_name") + +var ffj_key_User_ShowAllInlineMedia = []byte("show_all_inline_media") + +var ffj_key_User_StatusesCount = []byte("statuses_count") + +var ffj_key_User_TimeZone = []byte("time_zone") + +var ffj_key_User_URL = []byte("url") + +var ffj_key_User_UtcOffset = []byte("utc_offset") + +var ffj_key_User_Verified = []byte("verified") + +func (uj *User) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *User) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_Userbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffj_t_Userno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'c': + + if bytes.Equal(ffj_key_User_ContributorsEnabled, kn) { + currentKey = ffj_t_User_ContributorsEnabled + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_User_CreatedAt, kn) { + currentKey = ffj_t_User_CreatedAt + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'd': + + if bytes.Equal(ffj_key_User_DefaultProfile, kn) { + currentKey = ffj_t_User_DefaultProfile + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_User_DefaultProfileImage, kn) { + currentKey = ffj_t_User_DefaultProfileImage + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_User_Description, kn) { + currentKey = ffj_t_User_Description + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'e': + + if bytes.Equal(ffj_key_User_Entities, kn) { + currentKey = ffj_t_User_Entities + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'f': + + if bytes.Equal(ffj_key_User_FavouritesCount, kn) { + currentKey = ffj_t_User_FavouritesCount + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_User_FollowRequestSent, kn) { + currentKey = ffj_t_User_FollowRequestSent + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_User_FollowersCount, kn) { + currentKey = ffj_t_User_FollowersCount + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_User_Following, kn) { + currentKey = ffj_t_User_Following + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_User_FriendsCount, kn) { + currentKey = ffj_t_User_FriendsCount + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'g': + + if bytes.Equal(ffj_key_User_GeoEnabled, kn) { + currentKey = ffj_t_User_GeoEnabled + state = fflib.FFParse_want_colon + goto mainparse + } + 
+ case 'i': + + if bytes.Equal(ffj_key_User_ID, kn) { + currentKey = ffj_t_User_ID + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_User_IDStr, kn) { + currentKey = ffj_t_User_IDStr + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_User_IsTranslator, kn) { + currentKey = ffj_t_User_IsTranslator + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'l': + + if bytes.Equal(ffj_key_User_Lang, kn) { + currentKey = ffj_t_User_Lang + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_User_ListedCount, kn) { + currentKey = ffj_t_User_ListedCount + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_User_Location, kn) { + currentKey = ffj_t_User_Location + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'n': + + if bytes.Equal(ffj_key_User_Name, kn) { + currentKey = ffj_t_User_Name + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_User_Notifications, kn) { + currentKey = ffj_t_User_Notifications + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'p': + + if bytes.Equal(ffj_key_User_ProfileBackgroundColor, kn) { + currentKey = ffj_t_User_ProfileBackgroundColor + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_User_ProfileBackgroundImageURL, kn) { + currentKey = ffj_t_User_ProfileBackgroundImageURL + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_User_ProfileBackgroundImageURLHTTPS, kn) { + currentKey = ffj_t_User_ProfileBackgroundImageURLHTTPS + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_User_ProfileBackgroundTile, kn) { + currentKey = ffj_t_User_ProfileBackgroundTile + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_User_ProfileImageURL, kn) { + currentKey = ffj_t_User_ProfileImageURL + state = 
fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_User_ProfileImageURLHTTPS, kn) { + currentKey = ffj_t_User_ProfileImageURLHTTPS + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_User_ProfileLinkColor, kn) { + currentKey = ffj_t_User_ProfileLinkColor + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_User_ProfileSidebarBorderColor, kn) { + currentKey = ffj_t_User_ProfileSidebarBorderColor + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_User_ProfileSidebarFillColor, kn) { + currentKey = ffj_t_User_ProfileSidebarFillColor + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_User_ProfileTextColor, kn) { + currentKey = ffj_t_User_ProfileTextColor + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_User_ProfileUseBackgroundImage, kn) { + currentKey = ffj_t_User_ProfileUseBackgroundImage + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_User_Protected, kn) { + currentKey = ffj_t_User_Protected + state = fflib.FFParse_want_colon + goto mainparse + } + + case 's': + + if bytes.Equal(ffj_key_User_ScreenName, kn) { + currentKey = ffj_t_User_ScreenName + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_User_ShowAllInlineMedia, kn) { + currentKey = ffj_t_User_ShowAllInlineMedia + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffj_key_User_StatusesCount, kn) { + currentKey = ffj_t_User_StatusesCount + state = fflib.FFParse_want_colon + goto mainparse + } + + case 't': + + if bytes.Equal(ffj_key_User_TimeZone, kn) { + currentKey = ffj_t_User_TimeZone + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'u': + + if bytes.Equal(ffj_key_User_URL, kn) { + currentKey = ffj_t_User_URL + state = fflib.FFParse_want_colon + goto mainparse + + } else if 
bytes.Equal(ffj_key_User_UtcOffset, kn) { + currentKey = ffj_t_User_UtcOffset + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'v': + + if bytes.Equal(ffj_key_User_Verified, kn) { + currentKey = ffj_t_User_Verified + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_User_Verified, kn) { + currentKey = ffj_t_User_Verified + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_User_UtcOffset, kn) { + currentKey = ffj_t_User_UtcOffset + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_User_URL, kn) { + currentKey = ffj_t_User_URL + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.AsciiEqualFold(ffj_key_User_TimeZone, kn) { + currentKey = ffj_t_User_TimeZone + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_User_StatusesCount, kn) { + currentKey = ffj_t_User_StatusesCount + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_User_ShowAllInlineMedia, kn) { + currentKey = ffj_t_User_ShowAllInlineMedia + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_User_ScreenName, kn) { + currentKey = ffj_t_User_ScreenName + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_User_Protected, kn) { + currentKey = ffj_t_User_Protected + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_User_ProfileUseBackgroundImage, kn) { + currentKey = ffj_t_User_ProfileUseBackgroundImage + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.AsciiEqualFold(ffj_key_User_ProfileTextColor, kn) { + currentKey = ffj_t_User_ProfileTextColor + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_User_ProfileSidebarFillColor, kn) { + currentKey = ffj_t_User_ProfileSidebarFillColor + 
state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_User_ProfileSidebarBorderColor, kn) { + currentKey = ffj_t_User_ProfileSidebarBorderColor + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_User_ProfileLinkColor, kn) { + currentKey = ffj_t_User_ProfileLinkColor + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_User_ProfileImageURLHTTPS, kn) { + currentKey = ffj_t_User_ProfileImageURLHTTPS + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.AsciiEqualFold(ffj_key_User_ProfileImageURL, kn) { + currentKey = ffj_t_User_ProfileImageURL + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_User_ProfileBackgroundTile, kn) { + currentKey = ffj_t_User_ProfileBackgroundTile + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_User_ProfileBackgroundImageURLHTTPS, kn) { + currentKey = ffj_t_User_ProfileBackgroundImageURLHTTPS + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_User_ProfileBackgroundImageURL, kn) { + currentKey = ffj_t_User_ProfileBackgroundImageURL + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_User_ProfileBackgroundColor, kn) { + currentKey = ffj_t_User_ProfileBackgroundColor + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_User_Notifications, kn) { + currentKey = ffj_t_User_Notifications + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_User_Name, kn) { + currentKey = ffj_t_User_Name + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_User_Location, kn) { + currentKey = ffj_t_User_Location + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_User_ListedCount, kn) { + currentKey = 
ffj_t_User_ListedCount + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_User_Lang, kn) { + currentKey = ffj_t_User_Lang + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_User_IsTranslator, kn) { + currentKey = ffj_t_User_IsTranslator + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_User_IDStr, kn) { + currentKey = ffj_t_User_IDStr + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_User_ID, kn) { + currentKey = ffj_t_User_ID + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.AsciiEqualFold(ffj_key_User_GeoEnabled, kn) { + currentKey = ffj_t_User_GeoEnabled + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_User_FriendsCount, kn) { + currentKey = ffj_t_User_FriendsCount + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.SimpleLetterEqualFold(ffj_key_User_Following, kn) { + currentKey = ffj_t_User_Following + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_User_FollowersCount, kn) { + currentKey = ffj_t_User_FollowersCount + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_User_FollowRequestSent, kn) { + currentKey = ffj_t_User_FollowRequestSent + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_User_FavouritesCount, kn) { + currentKey = ffj_t_User_FavouritesCount + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_User_Entities, kn) { + currentKey = ffj_t_User_Entities + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_User_Description, kn) { + currentKey = ffj_t_User_Description + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.AsciiEqualFold(ffj_key_User_DefaultProfileImage, kn) { + 
currentKey = ffj_t_User_DefaultProfileImage + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.AsciiEqualFold(ffj_key_User_DefaultProfile, kn) { + currentKey = ffj_t_User_DefaultProfile + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.AsciiEqualFold(ffj_key_User_CreatedAt, kn) { + currentKey = ffj_t_User_CreatedAt + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_User_ContributorsEnabled, kn) { + currentKey = ffj_t_User_ContributorsEnabled + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_Userno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_User_ContributorsEnabled: + goto handle_ContributorsEnabled + + case ffj_t_User_CreatedAt: + goto handle_CreatedAt + + case ffj_t_User_DefaultProfile: + goto handle_DefaultProfile + + case ffj_t_User_DefaultProfileImage: + goto handle_DefaultProfileImage + + case ffj_t_User_Description: + goto handle_Description + + case ffj_t_User_Entities: + goto handle_Entities + + case ffj_t_User_FavouritesCount: + goto handle_FavouritesCount + + case ffj_t_User_FollowRequestSent: + goto handle_FollowRequestSent + + case ffj_t_User_FollowersCount: + goto handle_FollowersCount + + case ffj_t_User_Following: + goto handle_Following + + case ffj_t_User_FriendsCount: + goto handle_FriendsCount + + case ffj_t_User_GeoEnabled: + goto handle_GeoEnabled + + case ffj_t_User_ID: + goto handle_ID + + case ffj_t_User_IDStr: + goto handle_IDStr + + case ffj_t_User_IsTranslator: + 
goto handle_IsTranslator + + case ffj_t_User_Lang: + goto handle_Lang + + case ffj_t_User_ListedCount: + goto handle_ListedCount + + case ffj_t_User_Location: + goto handle_Location + + case ffj_t_User_Name: + goto handle_Name + + case ffj_t_User_Notifications: + goto handle_Notifications + + case ffj_t_User_ProfileBackgroundColor: + goto handle_ProfileBackgroundColor + + case ffj_t_User_ProfileBackgroundImageURL: + goto handle_ProfileBackgroundImageURL + + case ffj_t_User_ProfileBackgroundImageURLHTTPS: + goto handle_ProfileBackgroundImageURLHTTPS + + case ffj_t_User_ProfileBackgroundTile: + goto handle_ProfileBackgroundTile + + case ffj_t_User_ProfileImageURL: + goto handle_ProfileImageURL + + case ffj_t_User_ProfileImageURLHTTPS: + goto handle_ProfileImageURLHTTPS + + case ffj_t_User_ProfileLinkColor: + goto handle_ProfileLinkColor + + case ffj_t_User_ProfileSidebarBorderColor: + goto handle_ProfileSidebarBorderColor + + case ffj_t_User_ProfileSidebarFillColor: + goto handle_ProfileSidebarFillColor + + case ffj_t_User_ProfileTextColor: + goto handle_ProfileTextColor + + case ffj_t_User_ProfileUseBackgroundImage: + goto handle_ProfileUseBackgroundImage + + case ffj_t_User_Protected: + goto handle_Protected + + case ffj_t_User_ScreenName: + goto handle_ScreenName + + case ffj_t_User_ShowAllInlineMedia: + goto handle_ShowAllInlineMedia + + case ffj_t_User_StatusesCount: + goto handle_StatusesCount + + case ffj_t_User_TimeZone: + goto handle_TimeZone + + case ffj_t_User_URL: + goto handle_URL + + case ffj_t_User_UtcOffset: + goto handle_UtcOffset + + case ffj_t_User_Verified: + goto handle_Verified + + case ffj_t_Userno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_ContributorsEnabled: + + /* handler: uj.ContributorsEnabled type=bool kind=bool quoted=false*/ + + { + if tok != fflib.FFTok_bool && tok != fflib.FFTok_null { + 
return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for bool", tok)) + } + } + + { + if tok == fflib.FFTok_null { + + } else { + tmpb := fs.Output.Bytes() + + if bytes.Compare([]byte{'t', 'r', 'u', 'e'}, tmpb) == 0 { + + uj.ContributorsEnabled = true + + } else if bytes.Compare([]byte{'f', 'a', 'l', 's', 'e'}, tmpb) == 0 { + + uj.ContributorsEnabled = false + + } else { + err = errors.New("unexpected bytes for true/false value") + return fs.WrapErr(err) + } + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_CreatedAt: + + /* handler: uj.CreatedAt type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + uj.CreatedAt = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_DefaultProfile: + + /* handler: uj.DefaultProfile type=bool kind=bool quoted=false*/ + + { + if tok != fflib.FFTok_bool && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for bool", tok)) + } + } + + { + if tok == fflib.FFTok_null { + + } else { + tmpb := fs.Output.Bytes() + + if bytes.Compare([]byte{'t', 'r', 'u', 'e'}, tmpb) == 0 { + + uj.DefaultProfile = true + + } else if bytes.Compare([]byte{'f', 'a', 'l', 's', 'e'}, tmpb) == 0 { + + uj.DefaultProfile = false + + } else { + err = errors.New("unexpected bytes for true/false value") + return fs.WrapErr(err) + } + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_DefaultProfileImage: + + /* handler: uj.DefaultProfileImage type=bool kind=bool quoted=false*/ + + { + if tok != fflib.FFTok_bool && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for bool", tok)) + } + } + + { + if tok == fflib.FFTok_null { + + } else { + tmpb := 
fs.Output.Bytes() + + if bytes.Compare([]byte{'t', 'r', 'u', 'e'}, tmpb) == 0 { + + uj.DefaultProfileImage = true + + } else if bytes.Compare([]byte{'f', 'a', 'l', 's', 'e'}, tmpb) == 0 { + + uj.DefaultProfileImage = false + + } else { + err = errors.New("unexpected bytes for true/false value") + return fs.WrapErr(err) + } + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Description: + + /* handler: uj.Description type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + uj.Description = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Entities: + + /* handler: uj.Entities type=benchmark.UserEntities kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = uj.Entities.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_FavouritesCount: + + /* handler: uj.FavouritesCount type=int kind=int quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + uj.FavouritesCount = int(tval) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_FollowRequestSent: + + /* handler: uj.FollowRequestSent type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for 
string", tok)) + } + } + + if tok == fflib.FFTok_null { + + uj.FollowRequestSent = nil + + } else { + + var tval string + outBuf := fs.Output.Bytes() + + tval = string(string(outBuf)) + uj.FollowRequestSent = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_FollowersCount: + + /* handler: uj.FollowersCount type=int kind=int quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + uj.FollowersCount = int(tval) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Following: + + /* handler: uj.Following type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + uj.Following = nil + + } else { + + var tval string + outBuf := fs.Output.Bytes() + + tval = string(string(outBuf)) + uj.Following = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_FriendsCount: + + /* handler: uj.FriendsCount type=int kind=int quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + uj.FriendsCount = int(tval) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_GeoEnabled: + + /* handler: uj.GeoEnabled type=bool kind=bool quoted=false*/ + + { + if tok != fflib.FFTok_bool && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go 
value for bool", tok)) + } + } + + { + if tok == fflib.FFTok_null { + + } else { + tmpb := fs.Output.Bytes() + + if bytes.Compare([]byte{'t', 'r', 'u', 'e'}, tmpb) == 0 { + + uj.GeoEnabled = true + + } else if bytes.Compare([]byte{'f', 'a', 'l', 's', 'e'}, tmpb) == 0 { + + uj.GeoEnabled = false + + } else { + err = errors.New("unexpected bytes for true/false value") + return fs.WrapErr(err) + } + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_ID: + + /* handler: uj.ID type=int kind=int quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + uj.ID = int(tval) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_IDStr: + + /* handler: uj.IDStr type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + uj.IDStr = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_IsTranslator: + + /* handler: uj.IsTranslator type=bool kind=bool quoted=false*/ + + { + if tok != fflib.FFTok_bool && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for bool", tok)) + } + } + + { + if tok == fflib.FFTok_null { + + } else { + tmpb := fs.Output.Bytes() + + if bytes.Compare([]byte{'t', 'r', 'u', 'e'}, tmpb) == 0 { + + uj.IsTranslator = true + + } else if bytes.Compare([]byte{'f', 'a', 'l', 's', 'e'}, tmpb) == 0 { + + uj.IsTranslator = false + + } else { + err = errors.New("unexpected bytes for true/false value") + return fs.WrapErr(err) + } + + } + } + + state 
= fflib.FFParse_after_value + goto mainparse + +handle_Lang: + + /* handler: uj.Lang type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + uj.Lang = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_ListedCount: + + /* handler: uj.ListedCount type=int kind=int quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + uj.ListedCount = int(tval) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Location: + + /* handler: uj.Location type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + uj.Location = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Name: + + /* handler: uj.Name type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + uj.Name = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Notifications: + + /* handler: uj.Notifications type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + 
return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + uj.Notifications = nil + + } else { + + var tval string + outBuf := fs.Output.Bytes() + + tval = string(string(outBuf)) + uj.Notifications = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_ProfileBackgroundColor: + + /* handler: uj.ProfileBackgroundColor type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + uj.ProfileBackgroundColor = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_ProfileBackgroundImageURL: + + /* handler: uj.ProfileBackgroundImageURL type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + uj.ProfileBackgroundImageURL = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_ProfileBackgroundImageURLHTTPS: + + /* handler: uj.ProfileBackgroundImageURLHTTPS type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + uj.ProfileBackgroundImageURLHTTPS = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_ProfileBackgroundTile: + + /* handler: uj.ProfileBackgroundTile type=bool kind=bool quoted=false*/ + + { + if tok != fflib.FFTok_bool && tok != fflib.FFTok_null { + return 
fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for bool", tok)) + } + } + + { + if tok == fflib.FFTok_null { + + } else { + tmpb := fs.Output.Bytes() + + if bytes.Compare([]byte{'t', 'r', 'u', 'e'}, tmpb) == 0 { + + uj.ProfileBackgroundTile = true + + } else if bytes.Compare([]byte{'f', 'a', 'l', 's', 'e'}, tmpb) == 0 { + + uj.ProfileBackgroundTile = false + + } else { + err = errors.New("unexpected bytes for true/false value") + return fs.WrapErr(err) + } + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_ProfileImageURL: + + /* handler: uj.ProfileImageURL type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + uj.ProfileImageURL = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_ProfileImageURLHTTPS: + + /* handler: uj.ProfileImageURLHTTPS type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + uj.ProfileImageURLHTTPS = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_ProfileLinkColor: + + /* handler: uj.ProfileLinkColor type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + uj.ProfileLinkColor = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_ProfileSidebarBorderColor: + + /* handler: 
uj.ProfileSidebarBorderColor type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + uj.ProfileSidebarBorderColor = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_ProfileSidebarFillColor: + + /* handler: uj.ProfileSidebarFillColor type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + uj.ProfileSidebarFillColor = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_ProfileTextColor: + + /* handler: uj.ProfileTextColor type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + uj.ProfileTextColor = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_ProfileUseBackgroundImage: + + /* handler: uj.ProfileUseBackgroundImage type=bool kind=bool quoted=false*/ + + { + if tok != fflib.FFTok_bool && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for bool", tok)) + } + } + + { + if tok == fflib.FFTok_null { + + } else { + tmpb := fs.Output.Bytes() + + if bytes.Compare([]byte{'t', 'r', 'u', 'e'}, tmpb) == 0 { + + uj.ProfileUseBackgroundImage = true + + } else if bytes.Compare([]byte{'f', 'a', 'l', 's', 'e'}, tmpb) == 0 { + + uj.ProfileUseBackgroundImage = false + + } else { + err = errors.New("unexpected bytes for 
true/false value") + return fs.WrapErr(err) + } + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Protected: + + /* handler: uj.Protected type=bool kind=bool quoted=false*/ + + { + if tok != fflib.FFTok_bool && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for bool", tok)) + } + } + + { + if tok == fflib.FFTok_null { + + } else { + tmpb := fs.Output.Bytes() + + if bytes.Compare([]byte{'t', 'r', 'u', 'e'}, tmpb) == 0 { + + uj.Protected = true + + } else if bytes.Compare([]byte{'f', 'a', 'l', 's', 'e'}, tmpb) == 0 { + + uj.Protected = false + + } else { + err = errors.New("unexpected bytes for true/false value") + return fs.WrapErr(err) + } + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_ScreenName: + + /* handler: uj.ScreenName type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + uj.ScreenName = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_ShowAllInlineMedia: + + /* handler: uj.ShowAllInlineMedia type=bool kind=bool quoted=false*/ + + { + if tok != fflib.FFTok_bool && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for bool", tok)) + } + } + + { + if tok == fflib.FFTok_null { + + } else { + tmpb := fs.Output.Bytes() + + if bytes.Compare([]byte{'t', 'r', 'u', 'e'}, tmpb) == 0 { + + uj.ShowAllInlineMedia = true + + } else if bytes.Compare([]byte{'f', 'a', 'l', 's', 'e'}, tmpb) == 0 { + + uj.ShowAllInlineMedia = false + + } else { + err = errors.New("unexpected bytes for true/false value") + return fs.WrapErr(err) + } + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_StatusesCount: + + /* handler: uj.StatusesCount type=int 
kind=int quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + uj.StatusesCount = int(tval) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_TimeZone: + + /* handler: uj.TimeZone type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + uj.TimeZone = string(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_URL: + + /* handler: uj.URL type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + uj.URL = nil + + } else { + + var tval string + outBuf := fs.Output.Bytes() + + tval = string(string(outBuf)) + uj.URL = &tval + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_UtcOffset: + + /* handler: uj.UtcOffset type=int kind=int quoted=false*/ + + { + if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int", tok)) + } + } + + { + + if tok == fflib.FFTok_null { + + } else { + + tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) + + if err != nil { + return fs.WrapErr(err) + } + + uj.UtcOffset = int(tval) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_Verified: + + /* handler: uj.Verified type=bool kind=bool quoted=false*/ + + { + if tok != fflib.FFTok_bool && tok != fflib.FFTok_null { + return 
fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for bool", tok)) + } + } + + { + if tok == fflib.FFTok_null { + + } else { + tmpb := fs.Output.Bytes() + + if bytes.Compare([]byte{'t', 'r', 'u', 'e'}, tmpb) == 0 { + + uj.Verified = true + + } else if bytes.Compare([]byte{'f', 'a', 'l', 's', 'e'}, tmpb) == 0 { + + uj.Verified = false + + } else { + err = errors.New("unexpected bytes for true/false value") + return fs.WrapErr(err) + } + + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + return nil +} + +func (mj *UserEntities) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *UserEntities) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"description":`) + + { + + err = mj.Description.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteString(`,"url":`) + + { + + err = mj.URL.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_UserEntitiesbase = iota + ffj_t_UserEntitiesno_such_key + + ffj_t_UserEntities_Description + + ffj_t_UserEntities_URL +) + +var ffj_key_UserEntities_Description = []byte("description") + +var ffj_key_UserEntities_URL = []byte("url") + +func (uj *UserEntities) 
UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *UserEntities) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_UserEntitiesbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffj_t_UserEntitiesno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'd': + + if bytes.Equal(ffj_key_UserEntities_Description, kn) { + currentKey = ffj_t_UserEntities_Description + state = fflib.FFParse_want_colon + goto mainparse + } + + case 'u': + + if bytes.Equal(ffj_key_UserEntities_URL, kn) { + currentKey = ffj_t_UserEntities_URL + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_UserEntities_URL, kn) { + currentKey = ffj_t_UserEntities_URL + state = fflib.FFParse_want_colon + goto mainparse + } + + if fflib.EqualFoldRight(ffj_key_UserEntities_Description, kn) { + currentKey = ffj_t_UserEntities_Description + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_UserEntitiesno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_UserEntities_Description: + goto handle_Description + + case ffj_t_UserEntities_URL: + goto handle_URL + + case ffj_t_UserEntitiesno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Description: + + /* handler: uj.Description type=benchmark.UserEntityDescription kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = uj.Description.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + 
state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +handle_URL: + + /* handler: uj.URL type=benchmark.UserEntityURL kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = uj.URL.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + return nil +} + +func (mj *UserEntityDescription) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *UserEntityDescription) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"urls":`) + if mj.Urls != nil { + buf.WriteString(`[`) + for i, v := range mj.Urls { + if i != 0 { + buf.WriteString(`,`) + } + if v != nil { + fflib.WriteJsonString(buf, string(*v)) + } else { + buf.WriteString(`null`) + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_UserEntityDescriptionbase = iota + ffj_t_UserEntityDescriptionno_such_key + + ffj_t_UserEntityDescription_Urls +) + +var ffj_key_UserEntityDescription_Urls = []byte("urls") + +func (uj *UserEntityDescription) 
UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *UserEntityDescription) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_UserEntityDescriptionbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffj_t_UserEntityDescriptionno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'u': + + if bytes.Equal(ffj_key_UserEntityDescription_Urls, kn) { + currentKey = ffj_t_UserEntityDescription_Urls + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_UserEntityDescription_Urls, kn) { + currentKey = ffj_t_UserEntityDescription_Urls + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_UserEntityDescriptionno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_UserEntityDescription_Urls: + goto handle_Urls + + case ffj_t_UserEntityDescriptionno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Urls: + + /* handler: uj.Urls type=[]*string kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.Urls = nil + } else { + + uj.Urls = make([]*string, 0) + + wantVal := true + + for { + + var tmp_uj__Urls *string + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as 
an array value. + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__Urls type=*string kind=ptr quoted=false*/ + + { + + if tok == fflib.FFTok_null { + tmp_uj__Urls = nil + } else { + if tmp_uj__Urls == nil { + tmp_uj__Urls = new(string) + } + + /* handler: tmp_uj__Urls type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + tmp_uj__Urls = nil + + } else { + + var tval string + outBuf := fs.Output.Bytes() + + tval = string(string(outBuf)) + tmp_uj__Urls = &tval + + } + } + + } + } + + uj.Urls = append(uj.Urls, tmp_uj__Urls) + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + return nil +} + +func (mj *UserEntityURL) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *UserEntityURL) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"urls":`) + if mj.Urls != nil { + buf.WriteString(`[`) + for i, v := range mj.Urls { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if 
err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_UserEntityURLbase = iota + ffj_t_UserEntityURLno_such_key + + ffj_t_UserEntityURL_Urls +) + +var ffj_key_UserEntityURL_Urls = []byte("urls") + +func (uj *UserEntityURL) UnmarshalJSON(input []byte) error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *UserEntityURL) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_UserEntityURLbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffj_t_UserEntityURLno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'u': + + if bytes.Equal(ffj_key_UserEntityURL_Urls, kn) { + currentKey = ffj_t_UserEntityURL_Urls + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.EqualFoldRight(ffj_key_UserEntityURL_Urls, kn) { + currentKey = ffj_t_UserEntityURL_Urls + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_UserEntityURLno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_UserEntityURL_Urls: + goto handle_Urls + + case ffj_t_UserEntityURLno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Urls: + + /* handler: uj.Urls type=[]benchmark.URL kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.Urls = nil + } else { + + uj.Urls = make([]URL, 0) + + wantVal := true + + for { + + var tmp_uj__Urls URL + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__Urls type=benchmark.URL kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__Urls.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.Urls = append(uj.Urls, tmp_uj__Urls) + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + return nil +} + +func (mj *XLStruct) MarshalJSON() ([]byte, error) { + var buf fflib.Buffer + if mj == nil { + buf.WriteString("null") + return buf.Bytes(), nil + } + err := mj.MarshalJSONBuf(&buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} +func (mj *XLStruct) MarshalJSONBuf(buf fflib.EncodingBuffer) error { + if mj == nil { + buf.WriteString("null") + return nil + } + var err error + var obj []byte + _ = obj + _ = err + buf.WriteString(`{"Data":`) + if mj.Data != nil { + buf.WriteString(`[`) + for i, v := range mj.Data { + if i != 0 { + buf.WriteString(`,`) + } + + { + + err = v.MarshalJSONBuf(buf) + if err != nil { + return err + } + + } + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte('}') + return nil +} + +const ( + ffj_t_XLStructbase = iota + ffj_t_XLStructno_such_key + + ffj_t_XLStruct_Data +) + +var ffj_key_XLStruct_Data = []byte("Data") + +func (uj *XLStruct) UnmarshalJSON(input []byte) 
error { + fs := fflib.NewFFLexer(input) + return uj.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) +} + +func (uj *XLStruct) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { + var err error = nil + currentKey := ffj_t_XLStructbase + _ = currentKey + tok := fflib.FFTok_init + wantedTok := fflib.FFTok_init + +mainparse: + for { + tok = fs.Scan() + // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) + if tok == fflib.FFTok_error { + goto tokerror + } + + switch state { + + case fflib.FFParse_map_start: + if tok != fflib.FFTok_left_bracket { + wantedTok = fflib.FFTok_left_bracket + goto wrongtokenerror + } + state = fflib.FFParse_want_key + continue + + case fflib.FFParse_after_value: + if tok == fflib.FFTok_comma { + state = fflib.FFParse_want_key + } else if tok == fflib.FFTok_right_bracket { + goto done + } else { + wantedTok = fflib.FFTok_comma + goto wrongtokenerror + } + + case fflib.FFParse_want_key: + // json {} ended. goto exit. woo. + if tok == fflib.FFTok_right_bracket { + goto done + } + if tok != fflib.FFTok_string { + wantedTok = fflib.FFTok_string + goto wrongtokenerror + } + + kn := fs.Output.Bytes() + if len(kn) <= 0 { + // "" case. hrm. 
+ currentKey = ffj_t_XLStructno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } else { + switch kn[0] { + + case 'D': + + if bytes.Equal(ffj_key_XLStruct_Data, kn) { + currentKey = ffj_t_XLStruct_Data + state = fflib.FFParse_want_colon + goto mainparse + } + + } + + if fflib.SimpleLetterEqualFold(ffj_key_XLStruct_Data, kn) { + currentKey = ffj_t_XLStruct_Data + state = fflib.FFParse_want_colon + goto mainparse + } + + currentKey = ffj_t_XLStructno_such_key + state = fflib.FFParse_want_colon + goto mainparse + } + + case fflib.FFParse_want_colon: + if tok != fflib.FFTok_colon { + wantedTok = fflib.FFTok_colon + goto wrongtokenerror + } + state = fflib.FFParse_want_value + continue + case fflib.FFParse_want_value: + + if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { + switch currentKey { + + case ffj_t_XLStruct_Data: + goto handle_Data + + case ffj_t_XLStructno_such_key: + err = fs.SkipField(tok) + if err != nil { + return fs.WrapErr(err) + } + state = fflib.FFParse_after_value + goto mainparse + } + } else { + goto wantedvalue + } + } + } + +handle_Data: + + /* handler: uj.Data type=[]benchmark.LargeStruct kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + uj.Data = nil + } else { + + uj.Data = make([]LargeStruct, 0) + + wantVal := true + + for { + + var tmp_uj__Data LargeStruct + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmp_uj__Data type=benchmark.LargeStruct kind=struct quoted=false*/ + + { + if tok == fflib.FFTok_null { + + state = fflib.FFParse_after_value + goto mainparse + } + + err = tmp_uj__Data.UnmarshalJSONFFLexer(fs, fflib.FFParse_want_key) + if err != nil { + return err + } + state = fflib.FFParse_after_value + } + + uj.Data = append(uj.Data, tmp_uj__Data) + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + +wantedvalue: + return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) +wrongtokenerror: + return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) +tokerror: + if fs.BigError != nil { + return fs.WrapErr(fs.BigError) + } + err = fs.Error.ToError() + if err != nil { + return fs.WrapErr(err) + } + panic("ffjson-generated: unreachable, please report bug.") +done: + return nil +} diff --git a/src/vendor/github.com/mailru/easyjson/benchmark/data_var.go b/src/vendor/github.com/mailru/easyjson/benchmark/data_var.go new file mode 100644 index 00000000..ea4202db --- /dev/null +++ b/src/vendor/github.com/mailru/easyjson/benchmark/data_var.go @@ -0,0 +1,350 @@ +package benchmark + +var largeStructData = LargeStruct{ + SearchMetadata: SearchMetadata{ + CompletedIn: 0.035, + Count: 4, + MaxID: 250126199840518145, + MaxIDStr: "250126199840518145", + NextResults: "?max_id=249279667666817023&q=%23freebandnames&count=4&include_entities=1&result_type=mixed", + Query: "%23freebandnames", + RefreshURL: "?since_id=250126199840518145&q=%23freebandnames&result_type=mixed&include_entities=1", + SinceID: 24012619984051000, + SinceIDStr: "24012619984051000", + }, + Statuses: []Status{ + { + Contributors: nil, + Coordinates: nil, + CreatedAt: "Mon Sep 24 03:35:21 +0000 2012", + Entities: Entities{ + Hashtags: []Hashtag{{ + Indices: 
[]int{20, 34}, + Text: "freebandnames"}, + }, + Urls: []*string{}, + UserMentions: []*string{}, + }, + Favorited: false, + Geo: nil, + ID: 250075927172759552, + IDStr: "250075927172759552", + InReplyToScreenName: nil, + InReplyToStatusID: nil, + InReplyToStatusIDStr: nil, + InReplyToUserID: nil, + InReplyToUserIDStr: nil, + Metadata: StatusMetadata{ + IsoLanguageCode: "en", + ResultType: "recent", + }, + Place: nil, + RetweetCount: 0, + Retweeted: false, + Source: "Twitter for Mac", + Text: "Aggressive Ponytail #freebandnames", + Truncated: false, + User: User{ + ContributorsEnabled: false, + CreatedAt: "Mon Apr 26 06:01:55 +0000 2010", + DefaultProfile: true, + DefaultProfileImage: false, + Description: "Born 330 Live 310", + Entities: UserEntities{ + Description: UserEntityDescription{ + Urls: []*string{}, + }, + URL: UserEntityURL{ + Urls: []URL{{ + ExpandedURL: nil, + Indices: []int{0, 0}, + URL: "", + }}, + }, + }, + FavouritesCount: 0, + FollowRequestSent: nil, + FollowersCount: 70, + Following: nil, + FriendsCount: 110, + GeoEnabled: true, + ID: 137238150, + IDStr: "137238150", + IsTranslator: false, + Lang: "en", + ListedCount: 2, + Location: "LA, CA", + Name: "Sean Cummings", + Notifications: nil, + ProfileBackgroundColor: "C0DEED", + ProfileBackgroundImageURL: "http://a0.twimg.com/images/themes/theme1/bg.png", + ProfileBackgroundImageURLHTTPS: "https://si0.twimg.com/images/themes/theme1/bg.png", + ProfileBackgroundTile: false, + ProfileImageURL: "http://a0.twimg.com/profile_images/2359746665/1v6zfgqo8g0d3mk7ii5s_normal.jpeg", + ProfileImageURLHTTPS: "https://si0.twimg.com/profile_images/2359746665/1v6zfgqo8g0d3mk7ii5s_normal.jpeg", + ProfileLinkColor: "0084B4", + ProfileSidebarBorderColor: "C0DEED", + ProfileSidebarFillColor: "DDEEF6", + ProfileTextColor: "333333", + ProfileUseBackgroundImage: true, + Protected: false, + ScreenName: "sean_cummings", + ShowAllInlineMedia: false, + StatusesCount: 579, + TimeZone: "Pacific Time (US & Canada)", + URL: nil, + 
UtcOffset: -28800, + Verified: false, + }, + }, + { + Contributors: nil, + Coordinates: nil, + CreatedAt: "Fri Sep 21 23:40:54 +0000 2012", + Entities: Entities{ + Hashtags: []Hashtag{{ + Indices: []int{20, 34}, + Text: "FreeBandNames", + }}, + Urls: []*string{}, + UserMentions: []*string{}, + }, + Favorited: false, + Geo: nil, + ID: 249292149810667520, + IDStr: "249292149810667520", + InReplyToScreenName: nil, + InReplyToStatusID: nil, + InReplyToStatusIDStr: nil, + InReplyToUserID: nil, + InReplyToUserIDStr: nil, + Metadata: StatusMetadata{ + IsoLanguageCode: "pl", + ResultType: "recent", + }, + Place: nil, + RetweetCount: 0, + Retweeted: false, + Source: "web", + Text: "Thee Namaste Nerdz. #FreeBandNames", + Truncated: false, + User: User{ + ContributorsEnabled: false, + CreatedAt: "Tue Apr 07 19:05:07 +0000 2009", + DefaultProfile: false, + DefaultProfileImage: false, + Description: "You will come to Durham, North Carolina. I will sell you some records then, here in Durham, North Carolina. 
Fun will happen.", + Entities: UserEntities{ + Description: UserEntityDescription{Urls: []*string{}}, + URL: UserEntityURL{ + Urls: []URL{{ + ExpandedURL: nil, + Indices: []int{0, 32}, + URL: "http://bullcityrecords.com/wnng/"}}, + }, + }, + FavouritesCount: 8, + FollowRequestSent: nil, + FollowersCount: 2052, + Following: nil, + FriendsCount: 348, + GeoEnabled: false, + ID: 29516238, + IDStr: "29516238", + IsTranslator: false, + Lang: "en", + ListedCount: 118, + Location: "Durham, NC", + Name: "Chaz Martenstein", + Notifications: nil, + ProfileBackgroundColor: "9AE4E8", + ProfileBackgroundImageURL: "http://a0.twimg.com/profile_background_images/9423277/background_tile.bmp", + ProfileBackgroundImageURLHTTPS: "https://si0.twimg.com/profile_background_images/9423277/background_tile.bmp", + ProfileBackgroundTile: true, + ProfileImageURL: "http://a0.twimg.com/profile_images/447958234/Lichtenstein_normal.jpg", + ProfileImageURLHTTPS: "https://si0.twimg.com/profile_images/447958234/Lichtenstein_normal.jpg", + ProfileLinkColor: "0084B4", + ProfileSidebarBorderColor: "BDDCAD", + ProfileSidebarFillColor: "DDFFCC", + ProfileTextColor: "333333", + ProfileUseBackgroundImage: true, + Protected: false, + ScreenName: "bullcityrecords", + ShowAllInlineMedia: true, + StatusesCount: 7579, + TimeZone: "Eastern Time (US & Canada)", + URL: nil, + UtcOffset: -18000, + Verified: false, + }, + }, + Status{ + Contributors: nil, + Coordinates: nil, + CreatedAt: "Fri Sep 21 23:30:20 +0000 2012", + Entities: Entities{ + Hashtags: []Hashtag{{ + Indices: []int{29, 43}, + Text: "freebandnames", + }}, + Urls: []*string{}, + UserMentions: []*string{}, + }, + Favorited: false, + Geo: nil, + ID: 249289491129438208, + IDStr: "249289491129438208", + InReplyToScreenName: nil, + InReplyToStatusID: nil, + InReplyToStatusIDStr: nil, + InReplyToUserID: nil, + InReplyToUserIDStr: nil, + Metadata: StatusMetadata{ + IsoLanguageCode: "en", + ResultType: "recent", + }, + Place: nil, + RetweetCount: 0, + 
Retweeted: false, + Source: "web", + Text: "Mexican Heaven, Mexican Hell #freebandnames", + Truncated: false, + User: User{ + ContributorsEnabled: false, + CreatedAt: "Tue Sep 01 21:21:35 +0000 2009", + DefaultProfile: false, + DefaultProfileImage: false, + Description: "Science Fiction Writer, sort of. Likes Superheroes, Mole People, Alt. Timelines.", + Entities: UserEntities{ + Description: UserEntityDescription{ + Urls: nil, + }, + URL: UserEntityURL{ + Urls: []URL{{ + ExpandedURL: nil, + Indices: []int{0, 0}, + URL: "", + }}, + }, + }, + FavouritesCount: 19, + FollowRequestSent: nil, + FollowersCount: 63, + Following: nil, + FriendsCount: 63, + GeoEnabled: false, + ID: 70789458, + IDStr: "70789458", + IsTranslator: false, + Lang: "en", + ListedCount: 1, + Location: "Kingston New York", + Name: "Thomas John Wakeman", + Notifications: nil, + ProfileBackgroundColor: "352726", + ProfileBackgroundImageURL: "http://a0.twimg.com/images/themes/theme5/bg.gif", + ProfileBackgroundImageURLHTTPS: "https://si0.twimg.com/images/themes/theme5/bg.gif", + ProfileBackgroundTile: false, + ProfileImageURL: "http://a0.twimg.com/profile_images/2219333930/Froggystyle_normal.png", + ProfileImageURLHTTPS: "https://si0.twimg.com/profile_images/2219333930/Froggystyle_normal.png", + ProfileLinkColor: "D02B55", + ProfileSidebarBorderColor: "829D5E", + ProfileSidebarFillColor: "99CC33", + ProfileTextColor: "3E4415", + ProfileUseBackgroundImage: true, + Protected: false, + ScreenName: "MonkiesFist", + ShowAllInlineMedia: false, + StatusesCount: 1048, + TimeZone: "Eastern Time (US & Canada)", + URL: nil, + UtcOffset: -18000, + Verified: false, + }, + }, + Status{ + Contributors: nil, + Coordinates: nil, + CreatedAt: "Fri Sep 21 22:51:18 +0000 2012", + Entities: Entities{ + Hashtags: []Hashtag{{ + Indices: []int{20, 34}, + Text: "freebandnames", + }}, + Urls: []*string{}, + UserMentions: []*string{}, + }, + Favorited: false, + Geo: nil, + ID: 249279667666817024, + IDStr: "249279667666817024", 
+ InReplyToScreenName: nil, + InReplyToStatusID: nil, + InReplyToStatusIDStr: nil, + InReplyToUserID: nil, + InReplyToUserIDStr: nil, + Metadata: StatusMetadata{ + IsoLanguageCode: "en", + ResultType: "recent", + }, + Place: nil, + RetweetCount: 0, + Retweeted: false, + Source: "Twitter for iPhone", + Text: "The Foolish Mortals #freebandnames", + Truncated: false, + User: User{ + ContributorsEnabled: false, + CreatedAt: "Mon May 04 00:05:00 +0000 2009", + DefaultProfile: false, + DefaultProfileImage: false, + Description: "Cartoonist, Illustrator, and T-Shirt connoisseur", + Entities: UserEntities{ + Description: UserEntityDescription{ + Urls: []*string{}, + }, + URL: UserEntityURL{ + Urls: []URL{{ + ExpandedURL: nil, + Indices: []int{0, 24}, + URL: "http://www.omnitarian.me", + }}, + }, + }, + FavouritesCount: 647, + FollowRequestSent: nil, + FollowersCount: 608, + Following: nil, + FriendsCount: 249, + GeoEnabled: false, + ID: 37539828, + IDStr: "37539828", + IsTranslator: false, + Lang: "en", + ListedCount: 52, + Location: "Wisconsin, USA", + Name: "Marty Elmer", + Notifications: nil, + ProfileBackgroundColor: "EEE3C4", + ProfileBackgroundImageURL: "http://a0.twimg.com/profile_background_images/106455659/rect6056-9.png", + ProfileBackgroundImageURLHTTPS: "https://si0.twimg.com/profile_background_images/106455659/rect6056-9.png", + ProfileBackgroundTile: true, + ProfileImageURL: "http://a0.twimg.com/profile_images/1629790393/shrinker_2000_trans_normal.png", + ProfileImageURLHTTPS: "https://si0.twimg.com/profile_images/1629790393/shrinker_2000_trans_normal.png", + ProfileLinkColor: "3B2A26", + ProfileSidebarBorderColor: "615A44", + ProfileSidebarFillColor: "BFAC83", + ProfileTextColor: "000000", + ProfileUseBackgroundImage: true, + Protected: false, + ScreenName: "Omnitarian", + ShowAllInlineMedia: true, + StatusesCount: 3575, + TimeZone: "Central Time (US & Canada)", + URL: nil, + UtcOffset: -21600, + Verified: false, + }, + }, + }, +} diff --git 
a/src/vendor/github.com/mailru/easyjson/benchmark/default_test.go b/src/vendor/github.com/mailru/easyjson/benchmark/default_test.go new file mode 100644 index 00000000..b647bef2 --- /dev/null +++ b/src/vendor/github.com/mailru/easyjson/benchmark/default_test.go @@ -0,0 +1,118 @@ +// +build !use_easyjson,!use_ffjson,!use_codec + +package benchmark + +import ( + "encoding/json" + "testing" +) + +func BenchmarkStd_Unmarshal_M(b *testing.B) { + b.SetBytes(int64(len(largeStructText))) + for i := 0; i < b.N; i++ { + var s LargeStruct + err := json.Unmarshal(largeStructText, &s) + if err != nil { + b.Error(err) + } + } +} + +func BenchmarkStd_Unmarshal_S(b *testing.B) { + for i := 0; i < b.N; i++ { + var s Entities + err := json.Unmarshal(smallStructText, &s) + if err != nil { + b.Error(err) + } + } + b.SetBytes(int64(len(smallStructText))) +} + +func BenchmarkStd_Marshal_M(b *testing.B) { + var l int64 + for i := 0; i < b.N; i++ { + data, err := json.Marshal(&largeStructData) + if err != nil { + b.Error(err) + } + l = int64(len(data)) + } + b.SetBytes(l) +} + +func BenchmarkStd_Marshal_L(b *testing.B) { + var l int64 + for i := 0; i < b.N; i++ { + data, err := json.Marshal(&xlStructData) + if err != nil { + b.Error(err) + } + l = int64(len(data)) + } + b.SetBytes(l) +} + +func BenchmarkStd_Marshal_M_Parallel(b *testing.B) { + var l int64 + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + data, err := json.Marshal(&largeStructData) + if err != nil { + b.Error(err) + } + l = int64(len(data)) + } + }) + b.SetBytes(l) +} + +func BenchmarkStd_Marshal_L_Parallel(b *testing.B) { + var l int64 + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + data, err := json.Marshal(&xlStructData) + if err != nil { + b.Error(err) + } + l = int64(len(data)) + } + }) + b.SetBytes(l) +} + +func BenchmarkStd_Marshal_S(b *testing.B) { + var l int64 + for i := 0; i < b.N; i++ { + data, err := json.Marshal(&smallStructData) + if err != nil { + b.Error(err) + } + l = int64(len(data)) 
+ } + b.SetBytes(l) +} + +func BenchmarkStd_Marshal_S_Parallel(b *testing.B) { + var l int64 + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + data, err := json.Marshal(&smallStructData) + if err != nil { + b.Error(err) + } + l = int64(len(data)) + } + }) + b.SetBytes(l) +} + +func BenchmarkStd_Marshal_M_ToWriter(b *testing.B) { + enc := json.NewEncoder(&DummyWriter{}) + for i := 0; i < b.N; i++ { + err := enc.Encode(&largeStructData) + if err != nil { + b.Error(err) + } + } +} diff --git a/src/vendor/github.com/mailru/easyjson/benchmark/dummy_test.go b/src/vendor/github.com/mailru/easyjson/benchmark/dummy_test.go new file mode 100644 index 00000000..3d928ca7 --- /dev/null +++ b/src/vendor/github.com/mailru/easyjson/benchmark/dummy_test.go @@ -0,0 +1,11 @@ +package benchmark + +import ( + "testing" +) + +type DummyWriter struct{} + +func (w DummyWriter) Write(data []byte) (int, error) { return len(data), nil } + +func TestToSuppressNoTestsWarning(t *testing.T) {} diff --git a/src/vendor/github.com/mailru/easyjson/benchmark/easyjson_test.go b/src/vendor/github.com/mailru/easyjson/benchmark/easyjson_test.go new file mode 100644 index 00000000..16b670b2 --- /dev/null +++ b/src/vendor/github.com/mailru/easyjson/benchmark/easyjson_test.go @@ -0,0 +1,184 @@ +// +build use_easyjson + +package benchmark + +import ( + "testing" + + "github.com/mailru/easyjson" + "github.com/mailru/easyjson/jwriter" +) + +func BenchmarkEJ_Unmarshal_M(b *testing.B) { + b.SetBytes(int64(len(largeStructText))) + for i := 0; i < b.N; i++ { + var s LargeStruct + err := s.UnmarshalJSON(largeStructText) + if err != nil { + b.Error(err) + } + } +} + +func BenchmarkEJ_Unmarshal_S(b *testing.B) { + b.SetBytes(int64(len(smallStructText))) + + for i := 0; i < b.N; i++ { + var s Entities + err := s.UnmarshalJSON(smallStructText) + if err != nil { + b.Error(err) + } + } +} + +func BenchmarkEJ_Marshal_M(b *testing.B) { + var l int64 + for i := 0; i < b.N; i++ { + data, err := 
easyjson.Marshal(&largeStructData) + if err != nil { + b.Error(err) + } + l = int64(len(data)) + } + b.SetBytes(l) +} + +func BenchmarkEJ_Marshal_L(b *testing.B) { + var l int64 + for i := 0; i < b.N; i++ { + data, err := easyjson.Marshal(&xlStructData) + if err != nil { + b.Error(err) + } + l = int64(len(data)) + } + b.SetBytes(l) +} + +func BenchmarkEJ_Marshal_L_ToWriter(b *testing.B) { + var l int64 + out := &DummyWriter{} + for i := 0; i < b.N; i++ { + w := jwriter.Writer{} + xlStructData.MarshalEasyJSON(&w) + if w.Error != nil { + b.Error(w.Error) + } + + l = int64(w.Size()) + w.DumpTo(out) + } + b.SetBytes(l) + +} +func BenchmarkEJ_Marshal_M_Parallel(b *testing.B) { + b.SetBytes(int64(len(largeStructText))) + + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + _, err := largeStructData.MarshalJSON() + if err != nil { + b.Error(err) + } + } + }) +} + +func BenchmarkEJ_Marshal_M_ToWriter(b *testing.B) { + var l int64 + out := &DummyWriter{} + for i := 0; i < b.N; i++ { + w := jwriter.Writer{} + largeStructData.MarshalEasyJSON(&w) + if w.Error != nil { + b.Error(w.Error) + } + + l = int64(w.Size()) + w.DumpTo(out) + } + b.SetBytes(l) + +} +func BenchmarkEJ_Marshal_M_ToWriter_Parallel(b *testing.B) { + out := &DummyWriter{} + + b.RunParallel(func(pb *testing.PB) { + var l int64 + for pb.Next() { + w := jwriter.Writer{} + largeStructData.MarshalEasyJSON(&w) + if w.Error != nil { + b.Error(w.Error) + } + + l = int64(w.Size()) + w.DumpTo(out) + } + if l > 0 { + b.SetBytes(l) + } + }) + +} + +func BenchmarkEJ_Marshal_L_Parallel(b *testing.B) { + var l int64 + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + data, err := xlStructData.MarshalJSON() + if err != nil { + b.Error(err) + } + l = int64(len(data)) + } + }) + b.SetBytes(l) +} + +func BenchmarkEJ_Marshal_L_ToWriter_Parallel(b *testing.B) { + out := &DummyWriter{} + b.RunParallel(func(pb *testing.PB) { + var l int64 + for pb.Next() { + w := jwriter.Writer{} + + xlStructData.MarshalEasyJSON(&w) + 
if w.Error != nil { + b.Error(w.Error) + } + l = int64(w.Size()) + w.DumpTo(out) + } + if l > 0 { + b.SetBytes(l) + } + }) +} + +func BenchmarkEJ_Marshal_S(b *testing.B) { + var l int64 + for i := 0; i < b.N; i++ { + data, err := smallStructData.MarshalJSON() + if err != nil { + b.Error(err) + } + l = int64(len(data)) + } + b.SetBytes(l) +} + +func BenchmarkEJ_Marshal_S_Parallel(b *testing.B) { + var l int64 + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + data, err := smallStructData.MarshalJSON() + if err != nil { + b.Error(err) + } + l = int64(len(data)) + } + }) + b.SetBytes(l) +} diff --git a/src/vendor/github.com/mailru/easyjson/benchmark/example.json b/src/vendor/github.com/mailru/easyjson/benchmark/example.json new file mode 100644 index 00000000..2405022c --- /dev/null +++ b/src/vendor/github.com/mailru/easyjson/benchmark/example.json @@ -0,0 +1,415 @@ +{ + "statuses": [ + { + "coordinates": null, + "favorited": false, + "truncated": false, + "created_at": "Mon Sep 24 03:35:21 +0000 2012", + "id_str": "250075927172759552", + "entities": { + "urls": [ + + ], + "hashtags": [ + { + "text": "freebandnames", + "indices": [ + 20, + 34 + ] + } + ], + "user_mentions": [ + + ] + }, + "in_reply_to_user_id_str": null, + "contributors": null, + "text": "Aggressive Ponytail #freebandnames", + "metadata": { + "iso_language_code": "en", + "result_type": "recent" + }, + "retweet_count": 0, + "in_reply_to_status_id_str": null, + "id": 250075927172759552, + "geo": null, + "retweeted": false, + "in_reply_to_user_id": null, + "place": null, + "user": { + "profile_sidebar_fill_color": "DDEEF6", + "profile_sidebar_border_color": "C0DEED", + "profile_background_tile": false, + "name": "Sean Cummings", + "profile_image_url": "http://a0.twimg.com/profile_images/2359746665/1v6zfgqo8g0d3mk7ii5s_normal.jpeg", + "created_at": "Mon Apr 26 06:01:55 +0000 2010", + "location": "LA, CA", + "follow_request_sent": null, + "profile_link_color": "0084B4", + "is_translator": false, + 
"id_str": "137238150", + "entities": { + "url": { + "urls": [ + { + "expanded_url": null, + "url": "", + "indices": [ + 0, + 0 + ] + } + ] + }, + "description": { + "urls": [ + + ] + } + }, + "default_profile": true, + "contributors_enabled": false, + "favourites_count": 0, + "url": null, + "profile_image_url_https": "https://si0.twimg.com/profile_images/2359746665/1v6zfgqo8g0d3mk7ii5s_normal.jpeg", + "utc_offset": -28800, + "id": 137238150, + "profile_use_background_image": true, + "listed_count": 2, + "profile_text_color": "333333", + "lang": "en", + "followers_count": 70, + "protected": false, + "notifications": null, + "profile_background_image_url_https": "https://si0.twimg.com/images/themes/theme1/bg.png", + "profile_background_color": "C0DEED", + "verified": false, + "geo_enabled": true, + "time_zone": "Pacific Time (US & Canada)", + "description": "Born 330 Live 310", + "default_profile_image": false, + "profile_background_image_url": "http://a0.twimg.com/images/themes/theme1/bg.png", + "statuses_count": 579, + "friends_count": 110, + "following": null, + "show_all_inline_media": false, + "screen_name": "sean_cummings" + }, + "in_reply_to_screen_name": null, + "source": "Twitter for Mac", + "in_reply_to_status_id": null + }, + { + "coordinates": null, + "favorited": false, + "truncated": false, + "created_at": "Fri Sep 21 23:40:54 +0000 2012", + "id_str": "249292149810667520", + "entities": { + "urls": [ + + ], + "hashtags": [ + { + "text": "FreeBandNames", + "indices": [ + 20, + 34 + ] + } + ], + "user_mentions": [ + + ] + }, + "in_reply_to_user_id_str": null, + "contributors": null, + "text": "Thee Namaste Nerdz. 
#FreeBandNames", + "metadata": { + "iso_language_code": "pl", + "result_type": "recent" + }, + "retweet_count": 0, + "in_reply_to_status_id_str": null, + "id": 249292149810667520, + "geo": null, + "retweeted": false, + "in_reply_to_user_id": null, + "place": null, + "user": { + "profile_sidebar_fill_color": "DDFFCC", + "profile_sidebar_border_color": "BDDCAD", + "profile_background_tile": true, + "name": "Chaz Martenstein", + "profile_image_url": "http://a0.twimg.com/profile_images/447958234/Lichtenstein_normal.jpg", + "created_at": "Tue Apr 07 19:05:07 +0000 2009", + "location": "Durham, NC", + "follow_request_sent": null, + "profile_link_color": "0084B4", + "is_translator": false, + "id_str": "29516238", + "entities": { + "url": { + "urls": [ + { + "expanded_url": null, + "url": "http://bullcityrecords.com/wnng/", + "indices": [ + 0, + 32 + ] + } + ] + }, + "description": { + "urls": [ + + ] + } + }, + "default_profile": false, + "contributors_enabled": false, + "favourites_count": 8, + "url": "http://bullcityrecords.com/wnng/", + "profile_image_url_https": "https://si0.twimg.com/profile_images/447958234/Lichtenstein_normal.jpg", + "utc_offset": -18000, + "id": 29516238, + "profile_use_background_image": true, + "listed_count": 118, + "profile_text_color": "333333", + "lang": "en", + "followers_count": 2052, + "protected": false, + "notifications": null, + "profile_background_image_url_https": "https://si0.twimg.com/profile_background_images/9423277/background_tile.bmp", + "profile_background_color": "9AE4E8", + "verified": false, + "geo_enabled": false, + "time_zone": "Eastern Time (US & Canada)", + "description": "You will come to Durham, North Carolina. I will sell you some records then, here in Durham, North Carolina. 
Fun will happen.", + "default_profile_image": false, + "profile_background_image_url": "http://a0.twimg.com/profile_background_images/9423277/background_tile.bmp", + "statuses_count": 7579, + "friends_count": 348, + "following": null, + "show_all_inline_media": true, + "screen_name": "bullcityrecords" + }, + "in_reply_to_screen_name": null, + "source": "web", + "in_reply_to_status_id": null + }, + { + "coordinates": null, + "favorited": false, + "truncated": false, + "created_at": "Fri Sep 21 23:30:20 +0000 2012", + "id_str": "249289491129438208", + "entities": { + "urls": [ + + ], + "hashtags": [ + { + "text": "freebandnames", + "indices": [ + 29, + 43 + ] + } + ], + "user_mentions": [ + + ] + }, + "in_reply_to_user_id_str": null, + "contributors": null, + "text": "Mexican Heaven, Mexican Hell #freebandnames", + "metadata": { + "iso_language_code": "en", + "result_type": "recent" + }, + "retweet_count": 0, + "in_reply_to_status_id_str": null, + "id": 249289491129438208, + "geo": null, + "retweeted": false, + "in_reply_to_user_id": null, + "place": null, + "user": { + "profile_sidebar_fill_color": "99CC33", + "profile_sidebar_border_color": "829D5E", + "profile_background_tile": false, + "name": "Thomas John Wakeman", + "profile_image_url": "http://a0.twimg.com/profile_images/2219333930/Froggystyle_normal.png", + "created_at": "Tue Sep 01 21:21:35 +0000 2009", + "location": "Kingston New York", + "follow_request_sent": null, + "profile_link_color": "D02B55", + "is_translator": false, + "id_str": "70789458", + "entities": { + "url": { + "urls": [ + { + "expanded_url": null, + "url": "", + "indices": [ + 0, + 0 + ] + } + ] + }, + "description": { + "urls": [ + + ] + } + }, + "default_profile": false, + "contributors_enabled": false, + "favourites_count": 19, + "url": null, + "profile_image_url_https": "https://si0.twimg.com/profile_images/2219333930/Froggystyle_normal.png", + "utc_offset": -18000, + "id": 70789458, + "profile_use_background_image": true, + 
"listed_count": 1, + "profile_text_color": "3E4415", + "lang": "en", + "followers_count": 63, + "protected": false, + "notifications": null, + "profile_background_image_url_https": "https://si0.twimg.com/images/themes/theme5/bg.gif", + "profile_background_color": "352726", + "verified": false, + "geo_enabled": false, + "time_zone": "Eastern Time (US & Canada)", + "description": "Science Fiction Writer, sort of. Likes Superheroes, Mole People, Alt. Timelines.", + "default_profile_image": false, + "profile_background_image_url": "http://a0.twimg.com/images/themes/theme5/bg.gif", + "statuses_count": 1048, + "friends_count": 63, + "following": null, + "show_all_inline_media": false, + "screen_name": "MonkiesFist" + }, + "in_reply_to_screen_name": null, + "source": "web", + "in_reply_to_status_id": null + }, + { + "coordinates": null, + "favorited": false, + "truncated": false, + "created_at": "Fri Sep 21 22:51:18 +0000 2012", + "id_str": "249279667666817024", + "entities": { + "urls": [ + + ], + "hashtags": [ + { + "text": "freebandnames", + "indices": [ + 20, + 34 + ] + } + ], + "user_mentions": [ + + ] + }, + "in_reply_to_user_id_str": null, + "contributors": null, + "text": "The Foolish Mortals #freebandnames", + "metadata": { + "iso_language_code": "en", + "result_type": "recent" + }, + "retweet_count": 0, + "in_reply_to_status_id_str": null, + "id": 249279667666817024, + "geo": null, + "retweeted": false, + "in_reply_to_user_id": null, + "place": null, + "user": { + "profile_sidebar_fill_color": "BFAC83", + "profile_sidebar_border_color": "615A44", + "profile_background_tile": true, + "name": "Marty Elmer", + "profile_image_url": "http://a0.twimg.com/profile_images/1629790393/shrinker_2000_trans_normal.png", + "created_at": "Mon May 04 00:05:00 +0000 2009", + "location": "Wisconsin, USA", + "follow_request_sent": null, + "profile_link_color": "3B2A26", + "is_translator": false, + "id_str": "37539828", + "entities": { + "url": { + "urls": [ + { + "expanded_url": 
null, + "url": "http://www.omnitarian.me", + "indices": [ + 0, + 24 + ] + } + ] + }, + "description": { + "urls": [ + + ] + } + }, + "default_profile": false, + "contributors_enabled": false, + "favourites_count": 647, + "url": "http://www.omnitarian.me", + "profile_image_url_https": "https://si0.twimg.com/profile_images/1629790393/shrinker_2000_trans_normal.png", + "utc_offset": -21600, + "id": 37539828, + "profile_use_background_image": true, + "listed_count": 52, + "profile_text_color": "000000", + "lang": "en", + "followers_count": 608, + "protected": false, + "notifications": null, + "profile_background_image_url_https": "https://si0.twimg.com/profile_background_images/106455659/rect6056-9.png", + "profile_background_color": "EEE3C4", + "verified": false, + "geo_enabled": false, + "time_zone": "Central Time (US & Canada)", + "description": "Cartoonist, Illustrator, and T-Shirt connoisseur", + "default_profile_image": false, + "profile_background_image_url": "http://a0.twimg.com/profile_background_images/106455659/rect6056-9.png", + "statuses_count": 3575, + "friends_count": 249, + "following": null, + "show_all_inline_media": true, + "screen_name": "Omnitarian" + }, + "in_reply_to_screen_name": null, + "source": "Twitter for iPhone", + "in_reply_to_status_id": null + } + ], + "search_metadata": { + "max_id": 250126199840518145, + "since_id": 24012619984051000, + "refresh_url": "?since_id=250126199840518145&q=%23freebandnames&result_type=mixed&include_entities=1", + "next_results": "?max_id=249279667666817023&q=%23freebandnames&count=4&include_entities=1&result_type=mixed", + "count": 4, + "completed_in": 0.035, + "since_id_str": "24012619984051000", + "query": "%23freebandnames", + "max_id_str": "250126199840518145" + } +} diff --git a/src/vendor/github.com/mailru/easyjson/benchmark/ffjson_test.go b/src/vendor/github.com/mailru/easyjson/benchmark/ffjson_test.go new file mode 100644 index 00000000..03671827 --- /dev/null +++ 
b/src/vendor/github.com/mailru/easyjson/benchmark/ffjson_test.go @@ -0,0 +1,190 @@ +// +build use_ffjson + +package benchmark + +import ( + "testing" + + "github.com/pquerna/ffjson/ffjson" +) + +func BenchmarkFF_Unmarshal_M(b *testing.B) { + b.SetBytes(int64(len(largeStructText))) + for i := 0; i < b.N; i++ { + var s LargeStruct + err := ffjson.UnmarshalFast(largeStructText, &s) + if err != nil { + b.Error(err) + } + } +} + +func BenchmarkFF_Unmarshal_S(b *testing.B) { + for i := 0; i < b.N; i++ { + var s Entities + err := ffjson.UnmarshalFast(smallStructText, &s) + if err != nil { + b.Error(err) + } + } + b.SetBytes(int64(len(smallStructText))) +} + +func BenchmarkFF_Marshal_M(b *testing.B) { + var l int64 + for i := 0; i < b.N; i++ { + data, err := ffjson.MarshalFast(&largeStructData) + if err != nil { + b.Error(err) + } + l = int64(len(data)) + } + b.SetBytes(l) +} + +func BenchmarkFF_Marshal_S(b *testing.B) { + var l int64 + for i := 0; i < b.N; i++ { + data, err := ffjson.MarshalFast(&smallStructData) + if err != nil { + b.Error(err) + } + l = int64(len(data)) + } + b.SetBytes(l) +} + +func BenchmarkFF_Marshal_M_Pool(b *testing.B) { + var l int64 + for i := 0; i < b.N; i++ { + data, err := ffjson.MarshalFast(&largeStructData) + if err != nil { + b.Error(err) + } + l = int64(len(data)) + ffjson.Pool(data) + } + b.SetBytes(l) +} + +func BenchmarkFF_Marshal_L(b *testing.B) { + var l int64 + for i := 0; i < b.N; i++ { + data, err := ffjson.MarshalFast(&xlStructData) + if err != nil { + b.Error(err) + } + l = int64(len(data)) + } + b.SetBytes(l) +} + +func BenchmarkFF_Marshal_L_Pool(b *testing.B) { + var l int64 + for i := 0; i < b.N; i++ { + data, err := ffjson.MarshalFast(&xlStructData) + if err != nil { + b.Error(err) + } + l = int64(len(data)) + ffjson.Pool(data) + } + b.SetBytes(l) +} + +func BenchmarkFF_Marshal_L_Pool_Parallel(b *testing.B) { + var l int64 + for i := 0; i < b.N; i++ { + data, err := ffjson.MarshalFast(&xlStructData) + if err != nil { + 
b.Error(err) + } + l = int64(len(data)) + ffjson.Pool(data) + } + b.SetBytes(l) +} +func BenchmarkFF_Marshal_M_Pool_Parallel(b *testing.B) { + var l int64 + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + data, err := ffjson.MarshalFast(&largeStructData) + if err != nil { + b.Error(err) + } + l = int64(len(data)) + ffjson.Pool(data) + } + }) + b.SetBytes(l) +} + +func BenchmarkFF_Marshal_S_Pool(b *testing.B) { + var l int64 + for i := 0; i < b.N; i++ { + data, err := ffjson.MarshalFast(&smallStructData) + if err != nil { + b.Error(err) + } + l = int64(len(data)) + ffjson.Pool(data) + } + b.SetBytes(l) +} + +func BenchmarkFF_Marshal_S_Pool_Parallel(b *testing.B) { + var l int64 + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + data, err := ffjson.MarshalFast(&smallStructData) + if err != nil { + b.Error(err) + } + l = int64(len(data)) + ffjson.Pool(data) + } + }) + b.SetBytes(l) +} + +func BenchmarkFF_Marshal_S_Parallel(b *testing.B) { + var l int64 + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + data, err := ffjson.MarshalFast(&smallStructData) + if err != nil { + b.Error(err) + } + l = int64(len(data)) + } + }) + b.SetBytes(l) +} + +func BenchmarkFF_Marshal_M_Parallel(b *testing.B) { + var l int64 + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + data, err := ffjson.MarshalFast(&largeStructData) + if err != nil { + b.Error(err) + } + l = int64(len(data)) + } + }) + b.SetBytes(l) +} + +func BenchmarkFF_Marshal_L_Parallel(b *testing.B) { + var l int64 + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + data, err := ffjson.MarshalFast(&xlStructData) + if err != nil { + b.Error(err) + } + l = int64(len(data)) + } + }) + b.SetBytes(l) +} diff --git a/src/vendor/github.com/mailru/easyjson/benchmark/ujson.sh b/src/vendor/github.com/mailru/easyjson/benchmark/ujson.sh new file mode 100755 index 00000000..378e7df4 --- /dev/null +++ b/src/vendor/github.com/mailru/easyjson/benchmark/ujson.sh @@ -0,0 +1,7 @@ +#/bin/bash + +echo -n 
"Python ujson module, DECODE: " +python -m timeit -s "import ujson; data = open('`dirname $0`/example.json', 'r').read()" 'ujson.loads(data)' + +echo -n "Python ujson module, ENCODE: " +python -m timeit -s "import ujson; data = open('`dirname $0`/example.json', 'r').read(); obj = ujson.loads(data)" 'ujson.dumps(obj)' diff --git a/src/vendor/github.com/mailru/easyjson/bootstrap/bootstrap.go b/src/vendor/github.com/mailru/easyjson/bootstrap/bootstrap.go new file mode 100644 index 00000000..03225117 --- /dev/null +++ b/src/vendor/github.com/mailru/easyjson/bootstrap/bootstrap.go @@ -0,0 +1,184 @@ +// Package bootstrap implements the bootstrapping logic: generation of a .go file to +// launch the actual generator and launching the generator itself. +// +// The package may be preferred to a command-line utility if generating the serializers +// from golang code is required. +package bootstrap + +import ( + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "sort" +) + +const genPackage = "github.com/mailru/easyjson/gen" +const pkgWriter = "github.com/mailru/easyjson/jwriter" +const pkgLexer = "github.com/mailru/easyjson/jlexer" + +type Generator struct { + PkgPath, PkgName string + Types []string + + NoStdMarshalers bool + SnakeCase bool + OmitEmpty bool + + OutName string + BuildTags string + + StubsOnly bool + LeaveTemps bool + NoFormat bool +} + +// writeStub outputs an initial stubs for marshalers/unmarshalers so that the package +// using marshalers/unmarshales compiles correctly for boostrapping code. 
+func (g *Generator) writeStub() error { + f, err := os.Create(g.OutName) + if err != nil { + return err + } + defer f.Close() + + if g.BuildTags != "" { + fmt.Fprintln(f, "// +build ", g.BuildTags) + fmt.Fprintln(f) + } + fmt.Fprintln(f, "// TEMPORARY AUTOGENERATED FILE: easyjson stub code to make the package") + fmt.Fprintln(f, "// compilable during generation.") + fmt.Fprintln(f) + fmt.Fprintln(f, "package ", g.PkgName) + + if len(g.Types) > 0 { + fmt.Fprintln(f) + fmt.Fprintln(f, "import (") + fmt.Fprintln(f, ` "`+pkgWriter+`"`) + fmt.Fprintln(f, ` "`+pkgLexer+`"`) + fmt.Fprintln(f, ")") + } + + sort.Strings(g.Types) + for _, t := range g.Types { + fmt.Fprintln(f) + if !g.NoStdMarshalers { + fmt.Fprintln(f, "func (", t, ") MarshalJSON() ([]byte, error) { return nil, nil }") + fmt.Fprintln(f, "func (*", t, ") UnmarshalJSON([]byte) error { return nil }") + } + + fmt.Fprintln(f, "func (", t, ") MarshalEasyJSON(w *jwriter.Writer) {}") + fmt.Fprintln(f, "func (*", t, ") UnmarshalEasyJSON(l *jlexer.Lexer) {}") + fmt.Fprintln(f) + fmt.Fprintln(f, "type EasyJSON_exporter_"+t+" *"+t) + } + return nil +} + +// writeMain creates a .go file that launches the generator if 'go run'. 
+func (g *Generator) writeMain() (path string, err error) { + f, err := ioutil.TempFile(filepath.Dir(g.OutName), "easyjson-bootstrap") + if err != nil { + return "", err + } + + fmt.Fprintln(f, "// +build ignore") + fmt.Fprintln(f) + fmt.Fprintln(f, "// TEMPORARY AUTOGENERATED FILE: easyjson bootstapping code to launch") + fmt.Fprintln(f, "// the actual generator.") + fmt.Fprintln(f) + fmt.Fprintln(f, "package main") + fmt.Fprintln(f) + fmt.Fprintln(f, "import (") + fmt.Fprintln(f, ` "fmt"`) + fmt.Fprintln(f, ` "os"`) + fmt.Fprintln(f) + fmt.Fprintf(f, " %q\n", genPackage) + if len(g.Types) > 0 { + fmt.Fprintln(f) + fmt.Fprintf(f, " pkg %q\n", g.PkgPath) + } + fmt.Fprintln(f, ")") + fmt.Fprintln(f) + fmt.Fprintln(f, "func main() {") + fmt.Fprintf(f, " g := gen.NewGenerator(%q)\n", filepath.Base(g.OutName)) + fmt.Fprintf(f, " g.SetPkg(%q, %q)\n", g.PkgName, g.PkgPath) + if g.BuildTags != "" { + fmt.Fprintf(f, " g.SetBuildTags(%q)\n", g.BuildTags) + } + if g.SnakeCase { + fmt.Fprintln(f, " g.UseSnakeCase()") + } + if g.OmitEmpty { + fmt.Fprintln(f, " g.OmitEmpty()") + } + if g.NoStdMarshalers { + fmt.Fprintln(f, " g.NoStdMarshalers()") + } + + sort.Strings(g.Types) + for _, v := range g.Types { + fmt.Fprintln(f, " g.Add(pkg.EasyJSON_exporter_"+v+"(nil))") + } + + fmt.Fprintln(f, " if err := g.Run(os.Stdout); err != nil {") + fmt.Fprintln(f, " fmt.Fprintln(os.Stderr, err)") + fmt.Fprintln(f, " os.Exit(1)") + fmt.Fprintln(f, " }") + fmt.Fprintln(f, "}") + + src := f.Name() + if err := f.Close(); err != nil { + return src, err + } + + dest := src + ".go" + return dest, os.Rename(src, dest) +} + +func (g *Generator) Run() error { + if err := g.writeStub(); err != nil { + return err + } + if g.StubsOnly { + return nil + } + + path, err := g.writeMain() + if err != nil { + return err + } + if !g.LeaveTemps { + defer os.Remove(path) + } + + f, err := os.Create(g.OutName + ".tmp") + if err != nil { + return err + } + if !g.LeaveTemps { + defer os.Remove(f.Name()) // will not 
remove after rename + } + + cmd := exec.Command("go", "run", "-tags", g.BuildTags, path) + cmd.Stdout = f + cmd.Stderr = os.Stderr + if err = cmd.Run(); err != nil { + return err + } + + f.Close() + + if !g.NoFormat { + cmd = exec.Command("gofmt", "-w", f.Name()) + cmd.Stderr = os.Stderr + cmd.Stdout = os.Stdout + + if err = cmd.Run(); err != nil { + return err + } + } + + return os.Rename(f.Name(), g.OutName) +} diff --git a/src/vendor/github.com/mailru/easyjson/buffer/pool.go b/src/vendor/github.com/mailru/easyjson/buffer/pool.go new file mode 100644 index 00000000..07fb4bc1 --- /dev/null +++ b/src/vendor/github.com/mailru/easyjson/buffer/pool.go @@ -0,0 +1,270 @@ +// Package buffer implements a buffer for serialization, consisting of a chain of []byte-s to +// reduce copying and to allow reuse of individual chunks. +package buffer + +import ( + "io" + "sync" +) + +// PoolConfig contains configuration for the allocation and reuse strategy. +type PoolConfig struct { + StartSize int // Minimum chunk size that is allocated. + PooledSize int // Minimum chunk size that is reused, reusing chunks too small will result in overhead. + MaxSize int // Maximum chunk size that will be allocated. +} + +var config = PoolConfig{ + StartSize: 128, + PooledSize: 512, + MaxSize: 32768, +} + +// Reuse pool: chunk size -> pool. +var buffers = map[int]*sync.Pool{} + +func initBuffers() { + for l := config.PooledSize; l <= config.MaxSize; l *= 2 { + buffers[l] = new(sync.Pool) + } +} + +func init() { + initBuffers() +} + +// Init sets up a non-default pooling and allocation strategy. Should be run before serialization is done. +func Init(cfg PoolConfig) { + config = cfg + initBuffers() +} + +// putBuf puts a chunk to reuse pool if it can be reused. +func putBuf(buf []byte) { + size := cap(buf) + if size < config.PooledSize { + return + } + if c := buffers[size]; c != nil { + c.Put(buf[:0]) + } +} + +// getBuf gets a chunk from reuse pool or creates a new one if reuse failed. 
+func getBuf(size int) []byte { + if size < config.PooledSize { + return make([]byte, 0, size) + } + + if c := buffers[size]; c != nil { + v := c.Get() + if v != nil { + return v.([]byte) + } + } + return make([]byte, 0, size) +} + +// Buffer is a buffer optimized for serialization without extra copying. +type Buffer struct { + + // Buf is the current chunk that can be used for serialization. + Buf []byte + + toPool []byte + bufs [][]byte +} + +// EnsureSpace makes sure that the current chunk contains at least s free bytes, +// possibly creating a new chunk. +func (b *Buffer) EnsureSpace(s int) { + if cap(b.Buf)-len(b.Buf) >= s { + return + } + l := len(b.Buf) + if l > 0 { + if cap(b.toPool) != cap(b.Buf) { + // Chunk was reallocated, toPool can be pooled. + putBuf(b.toPool) + } + if cap(b.bufs) == 0 { + b.bufs = make([][]byte, 0, 8) + } + b.bufs = append(b.bufs, b.Buf) + l = cap(b.toPool) * 2 + } else { + l = config.StartSize + } + + if l > config.MaxSize { + l = config.MaxSize + } + b.Buf = getBuf(l) + b.toPool = b.Buf +} + +// AppendByte appends a single byte to buffer. +func (b *Buffer) AppendByte(data byte) { + if cap(b.Buf) == len(b.Buf) { // EnsureSpace won't be inlined. + b.EnsureSpace(1) + } + b.Buf = append(b.Buf, data) +} + +// AppendBytes appends a byte slice to buffer. +func (b *Buffer) AppendBytes(data []byte) { + for len(data) > 0 { + if cap(b.Buf) == len(b.Buf) { // EnsureSpace won't be inlined. + b.EnsureSpace(1) + } + + sz := cap(b.Buf) - len(b.Buf) + if sz > len(data) { + sz = len(data) + } + + b.Buf = append(b.Buf, data[:sz]...) + data = data[sz:] + } +} + +// AppendBytes appends a string to buffer. +func (b *Buffer) AppendString(data string) { + for len(data) > 0 { + if cap(b.Buf) == len(b.Buf) { // EnsureSpace won't be inlined. + b.EnsureSpace(1) + } + + sz := cap(b.Buf) - len(b.Buf) + if sz > len(data) { + sz = len(data) + } + + b.Buf = append(b.Buf, data[:sz]...) 
+ data = data[sz:] + } +} + +// Size computes the size of a buffer by adding sizes of every chunk. +func (b *Buffer) Size() int { + size := len(b.Buf) + for _, buf := range b.bufs { + size += len(buf) + } + return size +} + +// DumpTo outputs the contents of a buffer to a writer and resets the buffer. +func (b *Buffer) DumpTo(w io.Writer) (written int, err error) { + var n int + for _, buf := range b.bufs { + if err == nil { + n, err = w.Write(buf) + written += n + } + putBuf(buf) + } + + if err == nil { + n, err = w.Write(b.Buf) + written += n + } + putBuf(b.toPool) + + b.bufs = nil + b.Buf = nil + b.toPool = nil + + return +} + +// BuildBytes creates a single byte slice with all the contents of the buffer. Data is +// copied if it does not fit in a single chunk. You can optionally provide one byte +// slice as argument that it will try to reuse. +func (b *Buffer) BuildBytes(reuse ...[]byte) []byte { + if len(b.bufs) == 0 { + ret := b.Buf + b.toPool = nil + b.Buf = nil + return ret + } + + var ret []byte + size := b.Size() + + // If we got a buffer as argument and it is big enought, reuse it. + if len(reuse) == 1 && cap(reuse[0]) >= size { + ret = reuse[0][:0] + } else { + ret = make([]byte, 0, size) + } + for _, buf := range b.bufs { + ret = append(ret, buf...) + putBuf(buf) + } + + ret = append(ret, b.Buf...) + putBuf(b.toPool) + + b.bufs = nil + b.toPool = nil + b.Buf = nil + + return ret +} + +type readCloser struct { + offset int + bufs [][]byte +} + +func (r *readCloser) Read(p []byte) (n int, err error) { + for _, buf := range r.bufs { + // Copy as much as we can. + x := copy(p[n:], buf[r.offset:]) + n += x // Increment how much we filled. + + // Did we empty the whole buffer? + if r.offset+x == len(buf) { + // On to the next buffer. + r.offset = 0 + r.bufs = r.bufs[1:] + + // We can release this buffer. + putBuf(buf) + } else { + r.offset += x + } + + if n == len(p) { + break + } + } + // No buffers left or nothing read? 
+ if len(r.bufs) == 0 { + err = io.EOF + } + return +} + +func (r *readCloser) Close() error { + // Release all remaining buffers. + for _, buf := range r.bufs { + putBuf(buf) + } + // In case Close gets called multiple times. + r.bufs = nil + + return nil +} + +// ReadCloser creates an io.ReadCloser with all the contents of the buffer. +func (b *Buffer) ReadCloser() io.ReadCloser { + ret := &readCloser{0, append(b.bufs, b.Buf)} + + b.bufs = nil + b.toPool = nil + b.Buf = nil + + return ret +} diff --git a/src/vendor/github.com/mailru/easyjson/buffer/pool_test.go b/src/vendor/github.com/mailru/easyjson/buffer/pool_test.go new file mode 100644 index 00000000..680623ac --- /dev/null +++ b/src/vendor/github.com/mailru/easyjson/buffer/pool_test.go @@ -0,0 +1,107 @@ +package buffer + +import ( + "bytes" + "testing" +) + +func TestAppendByte(t *testing.T) { + var b Buffer + var want []byte + + for i := 0; i < 1000; i++ { + b.AppendByte(1) + b.AppendByte(2) + want = append(want, 1, 2) + } + + got := b.BuildBytes() + if !bytes.Equal(got, want) { + t.Errorf("BuildBytes() = %v; want %v", got, want) + } +} + +func TestAppendBytes(t *testing.T) { + var b Buffer + var want []byte + + for i := 0; i < 1000; i++ { + b.AppendBytes([]byte{1, 2}) + want = append(want, 1, 2) + } + + got := b.BuildBytes() + if !bytes.Equal(got, want) { + t.Errorf("BuildBytes() = %v; want %v", got, want) + } +} + +func TestAppendString(t *testing.T) { + var b Buffer + var want []byte + + s := "test" + for i := 0; i < 1000; i++ { + b.AppendBytes([]byte(s)) + want = append(want, s...) + } + + got := b.BuildBytes() + if !bytes.Equal(got, want) { + t.Errorf("BuildBytes() = %v; want %v", got, want) + } +} + +func TestDumpTo(t *testing.T) { + var b Buffer + var want []byte + + s := "test" + for i := 0; i < 1000; i++ { + b.AppendBytes([]byte(s)) + want = append(want, s...) 
+ } + + out := &bytes.Buffer{} + n, err := b.DumpTo(out) + if err != nil { + t.Errorf("DumpTo() error: %v", err) + } + + got := out.Bytes() + if !bytes.Equal(got, want) { + t.Errorf("DumpTo(): got %v; want %v", got, want) + } + + if n != len(want) { + t.Errorf("DumpTo() = %v; want %v", n, len(want)) + } +} + +func TestReadCloser(t *testing.T) { + var b Buffer + var want []byte + + s := "test" + for i := 0; i < 1000; i++ { + b.AppendBytes([]byte(s)) + want = append(want, s...) + } + + out := &bytes.Buffer{} + rc := b.ReadCloser() + n, err := out.ReadFrom(rc) + if err != nil { + t.Errorf("ReadCloser() error: %v", err) + } + rc.Close() // Will always return nil + + got := out.Bytes() + if !bytes.Equal(got, want) { + t.Errorf("DumpTo(): got %v; want %v", got, want) + } + + if n != int64(len(want)) { + t.Errorf("DumpTo() = %v; want %v", n, len(want)) + } +} diff --git a/src/vendor/github.com/mailru/easyjson/easyjson/main.go b/src/vendor/github.com/mailru/easyjson/easyjson/main.go new file mode 100644 index 00000000..62879267 --- /dev/null +++ b/src/vendor/github.com/mailru/easyjson/easyjson/main.go @@ -0,0 +1,99 @@ +package main + +import ( + "errors" + "flag" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/mailru/easyjson/bootstrap" + // Reference the gen package to be friendly to vendoring tools, + // as it is an indirect dependency. + // (The temporary bootstrapping code uses it.) 
+ _ "github.com/mailru/easyjson/gen" + "github.com/mailru/easyjson/parser" +) + +var buildTags = flag.String("build_tags", "", "build tags to add to generated file") +var snakeCase = flag.Bool("snake_case", false, "use snake_case names instead of CamelCase by default") +var noStdMarshalers = flag.Bool("no_std_marshalers", false, "don't generate MarshalJSON/UnmarshalJSON funcs") +var omitEmpty = flag.Bool("omit_empty", false, "omit empty fields by default") +var allStructs = flag.Bool("all", false, "generate marshaler/unmarshalers for all structs in a file") +var leaveTemps = flag.Bool("leave_temps", false, "do not delete temporary files") +var stubs = flag.Bool("stubs", false, "only generate stubs for marshaler/unmarshaler funcs") +var noformat = flag.Bool("noformat", false, "do not run 'gofmt -w' on output file") +var specifiedName = flag.String("output_filename", "", "specify the filename of the output") +var processPkg = flag.Bool("pkg", false, "process the whole package instead of just the given file") + +func generate(fname string) (err error) { + fInfo, err := os.Stat(fname) + if err != nil { + return err + } + + p := parser.Parser{AllStructs: *allStructs} + if err := p.Parse(fname, fInfo.IsDir()); err != nil { + return fmt.Errorf("Error parsing %v: %v", fname, err) + } + + var outName string + if fInfo.IsDir() { + outName = filepath.Join(fname, p.PkgName+"_easyjson.go") + } else { + if s := strings.TrimSuffix(fname, ".go"); s == fname { + return errors.New("Filename must end in '.go'") + } else { + outName = s + "_easyjson.go" + } + } + + if *specifiedName != "" { + outName = *specifiedName + } + + g := bootstrap.Generator{ + BuildTags: *buildTags, + PkgPath: p.PkgPath, + PkgName: p.PkgName, + Types: p.StructNames, + SnakeCase: *snakeCase, + NoStdMarshalers: *noStdMarshalers, + OmitEmpty: *omitEmpty, + LeaveTemps: *leaveTemps, + OutName: outName, + StubsOnly: *stubs, + NoFormat: *noformat, + } + + if err := g.Run(); err != nil { + return 
fmt.Errorf("Bootstrap failed: %v", err) + } + return nil +} + +func main() { + flag.Parse() + + files := flag.Args() + + gofile := os.Getenv("GOFILE") + if *processPkg { + gofile = filepath.Dir(gofile) + } + + if len(files) == 0 && gofile != "" { + files = []string{gofile} + } else if len(files) == 0 { + flag.Usage() + os.Exit(1) + } + + for _, fname := range files { + if err := generate(fname); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + } +} diff --git a/src/vendor/github.com/mailru/easyjson/gen/decoder.go b/src/vendor/github.com/mailru/easyjson/gen/decoder.go new file mode 100644 index 00000000..80f8d2cc --- /dev/null +++ b/src/vendor/github.com/mailru/easyjson/gen/decoder.go @@ -0,0 +1,471 @@ +package gen + +import ( + "encoding" + "encoding/json" + "fmt" + "reflect" + "strings" + "unicode" + + "github.com/mailru/easyjson" +) + +// Target this byte size for initial slice allocation to reduce garbage collection. +const minSliceBytes = 64 + +func (g *Generator) getDecoderName(t reflect.Type) string { + return g.functionName("decode", t) +} + +var primitiveDecoders = map[reflect.Kind]string{ + reflect.String: "in.String()", + reflect.Bool: "in.Bool()", + reflect.Int: "in.Int()", + reflect.Int8: "in.Int8()", + reflect.Int16: "in.Int16()", + reflect.Int32: "in.Int32()", + reflect.Int64: "in.Int64()", + reflect.Uint: "in.Uint()", + reflect.Uint8: "in.Uint8()", + reflect.Uint16: "in.Uint16()", + reflect.Uint32: "in.Uint32()", + reflect.Uint64: "in.Uint64()", + reflect.Float32: "in.Float32()", + reflect.Float64: "in.Float64()", +} + +var primitiveStringDecoders = map[reflect.Kind]string{ + reflect.Int: "in.IntStr()", + reflect.Int8: "in.Int8Str()", + reflect.Int16: "in.Int16Str()", + reflect.Int32: "in.Int32Str()", + reflect.Int64: "in.Int64Str()", + reflect.Uint: "in.UintStr()", + reflect.Uint8: "in.Uint8Str()", + reflect.Uint16: "in.Uint16Str()", + reflect.Uint32: "in.Uint32Str()", + reflect.Uint64: "in.Uint64Str()", +} + +// genTypeDecoder 
generates decoding code for the type t, but uses unmarshaler interface if implemented by t. +func (g *Generator) genTypeDecoder(t reflect.Type, out string, tags fieldTags, indent int) error { + ws := strings.Repeat(" ", indent) + + unmarshalerIface := reflect.TypeOf((*easyjson.Unmarshaler)(nil)).Elem() + if reflect.PtrTo(t).Implements(unmarshalerIface) { + fmt.Fprintln(g.out, ws+"("+out+").UnmarshalEasyJSON(in)") + return nil + } + + unmarshalerIface = reflect.TypeOf((*json.Unmarshaler)(nil)).Elem() + if reflect.PtrTo(t).Implements(unmarshalerIface) { + fmt.Fprintln(g.out, ws+"if data := in.Raw(); in.Ok() {") + fmt.Fprintln(g.out, ws+" in.AddError( ("+out+").UnmarshalJSON(data) )") + fmt.Fprintln(g.out, ws+"}") + return nil + } + + unmarshalerIface = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() + if reflect.PtrTo(t).Implements(unmarshalerIface) { + fmt.Fprintln(g.out, ws+"if data := in.UnsafeBytes(); in.Ok() {") + fmt.Fprintln(g.out, ws+" in.AddError( ("+out+").UnmarshalText(data) )") + fmt.Fprintln(g.out, ws+"}") + return nil + } + + err := g.genTypeDecoderNoCheck(t, out, tags, indent) + return err +} + +// genTypeDecoderNoCheck generates decoding code for the type t. +func (g *Generator) genTypeDecoderNoCheck(t reflect.Type, out string, tags fieldTags, indent int) error { + ws := strings.Repeat(" ", indent) + // Check whether type is primitive, needs to be done after interface check. 
+ if dec := primitiveStringDecoders[t.Kind()]; dec != "" && tags.asString { + fmt.Fprintln(g.out, ws+out+" = "+g.getType(t)+"("+dec+")") + return nil + } else if dec := primitiveDecoders[t.Kind()]; dec != "" { + fmt.Fprintln(g.out, ws+out+" = "+g.getType(t)+"("+dec+")") + return nil + } + + switch t.Kind() { + case reflect.Slice: + tmpVar := g.uniqueVarName() + elem := t.Elem() + + if elem.Kind() == reflect.Uint8 { + fmt.Fprintln(g.out, ws+"if in.IsNull() {") + fmt.Fprintln(g.out, ws+" in.Skip()") + fmt.Fprintln(g.out, ws+" "+out+" = nil") + fmt.Fprintln(g.out, ws+"} else {") + fmt.Fprintln(g.out, ws+" "+out+" = in.Bytes()") + fmt.Fprintln(g.out, ws+"}") + + } else { + + capacity := minSliceBytes / elem.Size() + if capacity == 0 { + capacity = 1 + } + + fmt.Fprintln(g.out, ws+"if in.IsNull() {") + fmt.Fprintln(g.out, ws+" in.Skip()") + fmt.Fprintln(g.out, ws+" "+out+" = nil") + fmt.Fprintln(g.out, ws+"} else {") + fmt.Fprintln(g.out, ws+" in.Delim('[')") + fmt.Fprintln(g.out, ws+" if "+out+" == nil {") + fmt.Fprintln(g.out, ws+" if !in.IsDelim(']') {") + fmt.Fprintln(g.out, ws+" "+out+" = make("+g.getType(t)+", 0, "+fmt.Sprint(capacity)+")") + fmt.Fprintln(g.out, ws+" } else {") + fmt.Fprintln(g.out, ws+" "+out+" = "+g.getType(t)+"{}") + fmt.Fprintln(g.out, ws+" }") + fmt.Fprintln(g.out, ws+" } else { ") + fmt.Fprintln(g.out, ws+" "+out+" = ("+out+")[:0]") + fmt.Fprintln(g.out, ws+" }") + fmt.Fprintln(g.out, ws+" for !in.IsDelim(']') {") + fmt.Fprintln(g.out, ws+" var "+tmpVar+" "+g.getType(elem)) + + g.genTypeDecoder(elem, tmpVar, tags, indent+2) + + fmt.Fprintln(g.out, ws+" "+out+" = append("+out+", "+tmpVar+")") + fmt.Fprintln(g.out, ws+" in.WantComma()") + fmt.Fprintln(g.out, ws+" }") + fmt.Fprintln(g.out, ws+" in.Delim(']')") + fmt.Fprintln(g.out, ws+"}") + } + + case reflect.Array: + iterVar := g.uniqueVarName() + elem := t.Elem() + + if elem.Kind() == reflect.Uint8 { + fmt.Fprintln(g.out, ws+"if in.IsNull() {") + fmt.Fprintln(g.out, ws+" in.Skip()") + 
fmt.Fprintln(g.out, ws+"} else {") + fmt.Fprintln(g.out, ws+" copy("+out+"[:], in.Bytes())") + fmt.Fprintln(g.out, ws+"}") + + } else { + + length := t.Len() + + fmt.Fprintln(g.out, ws+"if in.IsNull() {") + fmt.Fprintln(g.out, ws+" in.Skip()") + fmt.Fprintln(g.out, ws+"} else {") + fmt.Fprintln(g.out, ws+" in.Delim('[')") + fmt.Fprintln(g.out, ws+" "+iterVar+" := 0") + fmt.Fprintln(g.out, ws+" for !in.IsDelim(']') {") + fmt.Fprintln(g.out, ws+" if "+iterVar+" < "+fmt.Sprint(length)+" {") + + g.genTypeDecoder(elem, out+"["+iterVar+"]", tags, indent+3) + + fmt.Fprintln(g.out, ws+" "+iterVar+"++") + fmt.Fprintln(g.out, ws+" } else {") + fmt.Fprintln(g.out, ws+" in.SkipRecursive()") + fmt.Fprintln(g.out, ws+" }") + fmt.Fprintln(g.out, ws+" in.WantComma()") + fmt.Fprintln(g.out, ws+" }") + fmt.Fprintln(g.out, ws+" in.Delim(']')") + fmt.Fprintln(g.out, ws+"}") + } + + case reflect.Struct: + dec := g.getDecoderName(t) + g.addType(t) + + fmt.Fprintln(g.out, ws+dec+"(in, &"+out+")") + + case reflect.Ptr: + fmt.Fprintln(g.out, ws+"if in.IsNull() {") + fmt.Fprintln(g.out, ws+" in.Skip()") + fmt.Fprintln(g.out, ws+" "+out+" = nil") + fmt.Fprintln(g.out, ws+"} else {") + fmt.Fprintln(g.out, ws+" if "+out+" == nil {") + fmt.Fprintln(g.out, ws+" "+out+" = new("+g.getType(t.Elem())+")") + fmt.Fprintln(g.out, ws+" }") + + g.genTypeDecoder(t.Elem(), "*"+out, tags, indent+1) + + fmt.Fprintln(g.out, ws+"}") + + case reflect.Map: + key := t.Key() + if key.Kind() != reflect.String { + return fmt.Errorf("map type %v not supported: only string keys are allowed", key) + } + elem := t.Elem() + tmpVar := g.uniqueVarName() + + fmt.Fprintln(g.out, ws+"if in.IsNull() {") + fmt.Fprintln(g.out, ws+" in.Skip()") + fmt.Fprintln(g.out, ws+"} else {") + fmt.Fprintln(g.out, ws+" in.Delim('{')") + fmt.Fprintln(g.out, ws+" if !in.IsDelim('}') {") + fmt.Fprintln(g.out, ws+" "+out+" = make("+g.getType(t)+")") + fmt.Fprintln(g.out, ws+" } else {") + fmt.Fprintln(g.out, ws+" "+out+" = nil") + 
fmt.Fprintln(g.out, ws+" }") + + fmt.Fprintln(g.out, ws+" for !in.IsDelim('}') {") + fmt.Fprintln(g.out, ws+" key := "+g.getType(t.Key())+"(in.String())") + fmt.Fprintln(g.out, ws+" in.WantColon()") + fmt.Fprintln(g.out, ws+" var "+tmpVar+" "+g.getType(elem)) + + g.genTypeDecoder(elem, tmpVar, tags, indent+2) + + fmt.Fprintln(g.out, ws+" ("+out+")[key] = "+tmpVar) + fmt.Fprintln(g.out, ws+" in.WantComma()") + fmt.Fprintln(g.out, ws+" }") + fmt.Fprintln(g.out, ws+" in.Delim('}')") + fmt.Fprintln(g.out, ws+"}") + + case reflect.Interface: + if t.NumMethod() != 0 { + return fmt.Errorf("interface type %v not supported: only interface{} is allowed", t) + } + fmt.Fprintln(g.out, ws+"if m, ok := "+out+".(easyjson.Unmarshaler); ok {") + fmt.Fprintln(g.out, ws+"m.UnmarshalEasyJSON(in)") + fmt.Fprintln(g.out, ws+"} else if m, ok := "+out+".(json.Unmarshaler); ok {") + fmt.Fprintln(g.out, ws+"m.UnmarshalJSON(in.Raw())") + fmt.Fprintln(g.out, ws+"} else {") + fmt.Fprintln(g.out, ws+" "+out+" = in.Interface()") + fmt.Fprintln(g.out, ws+"}") + default: + return fmt.Errorf("don't know how to decode %v", t) + } + return nil + +} + +func (g *Generator) genStructFieldDecoder(t reflect.Type, f reflect.StructField) error { + jsonName := g.fieldNamer.GetJSONFieldName(t, f) + tags := parseFieldTags(f) + + if tags.omit { + return nil + } + + fmt.Fprintf(g.out, " case %q:\n", jsonName) + if err := g.genTypeDecoder(f.Type, "out."+f.Name, tags, 3); err != nil { + return err + } + + if tags.required { + fmt.Fprintf(g.out, "%sSet = true\n", f.Name) + } + + return nil +} + +func (g *Generator) genRequiredFieldSet(t reflect.Type, f reflect.StructField) { + tags := parseFieldTags(f) + + if !tags.required { + return + } + + fmt.Fprintf(g.out, "var %sSet bool\n", f.Name) +} + +func (g *Generator) genRequiredFieldCheck(t reflect.Type, f reflect.StructField) { + jsonName := g.fieldNamer.GetJSONFieldName(t, f) + tags := parseFieldTags(f) + + if !tags.required { + return + } + + g.imports["fmt"] = 
"fmt" + + fmt.Fprintf(g.out, "if !%sSet {\n", f.Name) + fmt.Fprintf(g.out, " in.AddError(fmt.Errorf(\"key '%s' is required\"))\n", jsonName) + fmt.Fprintf(g.out, "}\n") +} + +func mergeStructFields(fields1, fields2 []reflect.StructField) (fields []reflect.StructField) { + used := map[string]bool{} + for _, f := range fields2 { + used[f.Name] = true + fields = append(fields, f) + } + + for _, f := range fields1 { + if !used[f.Name] { + fields = append(fields, f) + } + } + return +} + +func getStructFields(t reflect.Type) ([]reflect.StructField, error) { + if t.Kind() != reflect.Struct { + return nil, fmt.Errorf("got %v; expected a struct", t) + } + + var efields []reflect.StructField + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if !f.Anonymous { + continue + } + + t1 := f.Type + if t1.Kind() == reflect.Ptr { + t1 = t1.Elem() + } + + fs, err := getStructFields(t1) + if err != nil { + return nil, fmt.Errorf("error processing embedded field: %v", err) + } + efields = mergeStructFields(efields, fs) + } + + var fields []reflect.StructField + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if f.Anonymous { + continue + } + + c := []rune(f.Name)[0] + if unicode.IsUpper(c) { + fields = append(fields, f) + } + } + return mergeStructFields(efields, fields), nil +} + +func (g *Generator) genDecoder(t reflect.Type) error { + switch t.Kind() { + case reflect.Slice, reflect.Array, reflect.Map: + return g.genSliceArrayDecoder(t) + default: + return g.genStructDecoder(t) + } +} + +func (g *Generator) genSliceArrayDecoder(t reflect.Type) error { + switch t.Kind() { + case reflect.Slice, reflect.Array, reflect.Map: + default: + return fmt.Errorf("cannot generate encoder/decoder for %v, not a slice/array/map type", t) + } + + fname := g.getDecoderName(t) + typ := g.getType(t) + + fmt.Fprintln(g.out, "func "+fname+"(in *jlexer.Lexer, out *"+typ+") {") + fmt.Fprintln(g.out, " isTopLevel := in.IsStart()") + err := g.genTypeDecoderNoCheck(t, "*out", fieldTags{}, 1) + 
if err != nil { + return err + } + fmt.Fprintln(g.out, " if isTopLevel {") + fmt.Fprintln(g.out, " in.Consumed()") + fmt.Fprintln(g.out, " }") + fmt.Fprintln(g.out, "}") + + return nil +} + +func (g *Generator) genStructDecoder(t reflect.Type) error { + if t.Kind() != reflect.Struct { + return fmt.Errorf("cannot generate encoder/decoder for %v, not a struct type", t) + } + + fname := g.getDecoderName(t) + typ := g.getType(t) + + fmt.Fprintln(g.out, "func "+fname+"(in *jlexer.Lexer, out *"+typ+") {") + fmt.Fprintln(g.out, " isTopLevel := in.IsStart()") + fmt.Fprintln(g.out, " if in.IsNull() {") + fmt.Fprintln(g.out, " if isTopLevel {") + fmt.Fprintln(g.out, " in.Consumed()") + fmt.Fprintln(g.out, " }") + fmt.Fprintln(g.out, " in.Skip()") + fmt.Fprintln(g.out, " return") + fmt.Fprintln(g.out, " }") + + // Init embedded pointer fields. + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if !f.Anonymous || f.Type.Kind() != reflect.Ptr { + continue + } + fmt.Fprintln(g.out, " out."+f.Name+" = new("+g.getType(f.Type.Elem())+")") + } + + fs, err := getStructFields(t) + if err != nil { + return fmt.Errorf("cannot generate decoder for %v: %v", t, err) + } + + for _, f := range fs { + g.genRequiredFieldSet(t, f) + } + + fmt.Fprintln(g.out, " in.Delim('{')") + fmt.Fprintln(g.out, " for !in.IsDelim('}') {") + fmt.Fprintln(g.out, " key := in.UnsafeString()") + fmt.Fprintln(g.out, " in.WantColon()") + fmt.Fprintln(g.out, " if in.IsNull() {") + fmt.Fprintln(g.out, " in.Skip()") + fmt.Fprintln(g.out, " in.WantComma()") + fmt.Fprintln(g.out, " continue") + fmt.Fprintln(g.out, " }") + + fmt.Fprintln(g.out, " switch key {") + for _, f := range fs { + if err := g.genStructFieldDecoder(t, f); err != nil { + return err + } + } + + fmt.Fprintln(g.out, " default:") + fmt.Fprintln(g.out, " in.SkipRecursive()") + fmt.Fprintln(g.out, " }") + fmt.Fprintln(g.out, " in.WantComma()") + fmt.Fprintln(g.out, " }") + fmt.Fprintln(g.out, " in.Delim('}')") + fmt.Fprintln(g.out, " if isTopLevel 
{") + fmt.Fprintln(g.out, " in.Consumed()") + fmt.Fprintln(g.out, " }") + + for _, f := range fs { + g.genRequiredFieldCheck(t, f) + } + + fmt.Fprintln(g.out, "}") + + return nil +} + +func (g *Generator) genStructUnmarshaler(t reflect.Type) error { + switch t.Kind() { + case reflect.Slice, reflect.Array, reflect.Map, reflect.Struct: + default: + return fmt.Errorf("cannot generate encoder/decoder for %v, not a struct/slice/array/map type", t) + } + + fname := g.getDecoderName(t) + typ := g.getType(t) + + if !g.noStdMarshalers { + fmt.Fprintln(g.out, "// UnmarshalJSON supports json.Unmarshaler interface") + fmt.Fprintln(g.out, "func (v *"+typ+") UnmarshalJSON(data []byte) error {") + fmt.Fprintln(g.out, " r := jlexer.Lexer{Data: data}") + fmt.Fprintln(g.out, " "+fname+"(&r, v)") + fmt.Fprintln(g.out, " return r.Error()") + fmt.Fprintln(g.out, "}") + } + + fmt.Fprintln(g.out, "// UnmarshalEasyJSON supports easyjson.Unmarshaler interface") + fmt.Fprintln(g.out, "func (v *"+typ+") UnmarshalEasyJSON(l *jlexer.Lexer) {") + fmt.Fprintln(g.out, " "+fname+"(l, v)") + fmt.Fprintln(g.out, "}") + + return nil +} diff --git a/src/vendor/github.com/mailru/easyjson/gen/encoder.go b/src/vendor/github.com/mailru/easyjson/gen/encoder.go new file mode 100644 index 00000000..a54f6e24 --- /dev/null +++ b/src/vendor/github.com/mailru/easyjson/gen/encoder.go @@ -0,0 +1,358 @@ +package gen + +import ( + "encoding" + "encoding/json" + "fmt" + "reflect" + "strconv" + "strings" + + "github.com/mailru/easyjson" +) + +func (g *Generator) getEncoderName(t reflect.Type) string { + return g.functionName("encode", t) +} + +var primitiveEncoders = map[reflect.Kind]string{ + reflect.String: "out.String(string(%v))", + reflect.Bool: "out.Bool(bool(%v))", + reflect.Int: "out.Int(int(%v))", + reflect.Int8: "out.Int8(int8(%v))", + reflect.Int16: "out.Int16(int16(%v))", + reflect.Int32: "out.Int32(int32(%v))", + reflect.Int64: "out.Int64(int64(%v))", + reflect.Uint: "out.Uint(uint(%v))", + reflect.Uint8: 
"out.Uint8(uint8(%v))", + reflect.Uint16: "out.Uint16(uint16(%v))", + reflect.Uint32: "out.Uint32(uint32(%v))", + reflect.Uint64: "out.Uint64(uint64(%v))", + reflect.Float32: "out.Float32(float32(%v))", + reflect.Float64: "out.Float64(float64(%v))", +} + +var primitiveStringEncoders = map[reflect.Kind]string{ + reflect.Int: "out.IntStr(int(%v))", + reflect.Int8: "out.Int8Str(int8(%v))", + reflect.Int16: "out.Int16Str(int16(%v))", + reflect.Int32: "out.Int32Str(int32(%v))", + reflect.Int64: "out.Int64Str(int64(%v))", + reflect.Uint: "out.UintStr(uint(%v))", + reflect.Uint8: "out.Uint8Str(uint8(%v))", + reflect.Uint16: "out.Uint16Str(uint16(%v))", + reflect.Uint32: "out.Uint32Str(uint32(%v))", + reflect.Uint64: "out.Uint64Str(uint64(%v))", +} + +// fieldTags contains parsed version of json struct field tags. +type fieldTags struct { + name string + + omit bool + omitEmpty bool + noOmitEmpty bool + asString bool + required bool +} + +// parseFieldTags parses the json field tag into a structure. +func parseFieldTags(f reflect.StructField) fieldTags { + var ret fieldTags + + for i, s := range strings.Split(f.Tag.Get("json"), ",") { + switch { + case i == 0 && s == "-": + ret.omit = true + case i == 0: + ret.name = s + case s == "omitempty": + ret.omitEmpty = true + case s == "!omitempty": + ret.noOmitEmpty = true + case s == "string": + ret.asString = true + case s == "required": + ret.required = true + } + } + + return ret +} + +// genTypeEncoder generates code that encodes in of type t into the writer, but uses marshaler interface if implemented by t. 
+func (g *Generator) genTypeEncoder(t reflect.Type, in string, tags fieldTags, indent int) error { + ws := strings.Repeat(" ", indent) + + marshalerIface := reflect.TypeOf((*easyjson.Marshaler)(nil)).Elem() + if reflect.PtrTo(t).Implements(marshalerIface) { + fmt.Fprintln(g.out, ws+"("+in+").MarshalEasyJSON(out)") + return nil + } + + marshalerIface = reflect.TypeOf((*json.Marshaler)(nil)).Elem() + if reflect.PtrTo(t).Implements(marshalerIface) { + fmt.Fprintln(g.out, ws+"out.Raw( ("+in+").MarshalJSON() )") + return nil + } + + marshalerIface = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() + if reflect.PtrTo(t).Implements(marshalerIface) { + fmt.Fprintln(g.out, ws+"out.RawText( ("+in+").MarshalText() )") + return nil + } + + err := g.genTypeEncoderNoCheck(t, in, tags, indent) + return err +} + +// genTypeEncoderNoCheck generates code that encodes in of type t into the writer. +func (g *Generator) genTypeEncoderNoCheck(t reflect.Type, in string, tags fieldTags, indent int) error { + ws := strings.Repeat(" ", indent) + + // Check whether type is primitive, needs to be done after interface check. 
+ if enc := primitiveStringEncoders[t.Kind()]; enc != "" && tags.asString { + fmt.Fprintf(g.out, ws+enc+"\n", in) + return nil + } else if enc := primitiveEncoders[t.Kind()]; enc != "" { + fmt.Fprintf(g.out, ws+enc+"\n", in) + return nil + } + + switch t.Kind() { + case reflect.Slice: + elem := t.Elem() + iVar := g.uniqueVarName() + vVar := g.uniqueVarName() + + if t.Elem().Kind() == reflect.Uint8 { + fmt.Fprintln(g.out, ws+"out.Base64Bytes("+in+")") + } else { + fmt.Fprintln(g.out, ws+"if "+in+" == nil && (out.Flags & jwriter.NilSliceAsEmpty) == 0 {") + fmt.Fprintln(g.out, ws+` out.RawString("null")`) + fmt.Fprintln(g.out, ws+"} else {") + fmt.Fprintln(g.out, ws+" out.RawByte('[')") + fmt.Fprintln(g.out, ws+" for "+iVar+", "+vVar+" := range "+in+" {") + fmt.Fprintln(g.out, ws+" if "+iVar+" > 0 {") + fmt.Fprintln(g.out, ws+" out.RawByte(',')") + fmt.Fprintln(g.out, ws+" }") + + g.genTypeEncoder(elem, vVar, tags, indent+2) + + fmt.Fprintln(g.out, ws+" }") + fmt.Fprintln(g.out, ws+" out.RawByte(']')") + fmt.Fprintln(g.out, ws+"}") + } + + case reflect.Array: + elem := t.Elem() + iVar := g.uniqueVarName() + + if t.Elem().Kind() == reflect.Uint8 { + fmt.Fprintln(g.out, ws+"out.Base64Bytes("+in+"[:])") + } else { + fmt.Fprintln(g.out, ws+"out.RawByte('[')") + fmt.Fprintln(g.out, ws+"for "+iVar+" := range "+in+" {") + fmt.Fprintln(g.out, ws+" if "+iVar+" > 0 {") + fmt.Fprintln(g.out, ws+" out.RawByte(',')") + fmt.Fprintln(g.out, ws+" }") + + g.genTypeEncoder(elem, in+"["+iVar+"]", tags, indent+1) + + fmt.Fprintln(g.out, ws+"}") + fmt.Fprintln(g.out, ws+"out.RawByte(']')") + } + + case reflect.Struct: + enc := g.getEncoderName(t) + g.addType(t) + + fmt.Fprintln(g.out, ws+enc+"(out, "+in+")") + + case reflect.Ptr: + fmt.Fprintln(g.out, ws+"if "+in+" == nil {") + fmt.Fprintln(g.out, ws+` out.RawString("null")`) + fmt.Fprintln(g.out, ws+"} else {") + + g.genTypeEncoder(t.Elem(), "*"+in, tags, indent+1) + + fmt.Fprintln(g.out, ws+"}") + + case reflect.Map: + key := t.Key() + 
if key.Kind() != reflect.String { + return fmt.Errorf("map type %v not supported: only string keys are allowed", key) + } + tmpVar := g.uniqueVarName() + + fmt.Fprintln(g.out, ws+"if "+in+" == nil && (out.Flags & jwriter.NilMapAsEmpty) == 0 {") + fmt.Fprintln(g.out, ws+" out.RawString(`null`)") + fmt.Fprintln(g.out, ws+"} else {") + fmt.Fprintln(g.out, ws+" out.RawByte('{')") + fmt.Fprintln(g.out, ws+" "+tmpVar+"First := true") + fmt.Fprintln(g.out, ws+" for "+tmpVar+"Name, "+tmpVar+"Value := range "+in+" {") + fmt.Fprintln(g.out, ws+" if !"+tmpVar+"First { out.RawByte(',') }") + fmt.Fprintln(g.out, ws+" "+tmpVar+"First = false") + fmt.Fprintln(g.out, ws+" out.String(string("+tmpVar+"Name))") + fmt.Fprintln(g.out, ws+" out.RawByte(':')") + + g.genTypeEncoder(t.Elem(), tmpVar+"Value", tags, indent+2) + + fmt.Fprintln(g.out, ws+" }") + fmt.Fprintln(g.out, ws+" out.RawByte('}')") + fmt.Fprintln(g.out, ws+"}") + + case reflect.Interface: + if t.NumMethod() != 0 { + return fmt.Errorf("interface type %v not supported: only interface{} is allowed", t) + } + fmt.Fprintln(g.out, ws+"if m, ok := "+in+".(easyjson.Marshaler); ok {") + fmt.Fprintln(g.out, ws+" m.MarshalEasyJSON(out)") + fmt.Fprintln(g.out, ws+"} else if m, ok := "+in+".(json.Marshaler); ok {") + fmt.Fprintln(g.out, ws+" out.Raw(m.MarshalJSON())") + fmt.Fprintln(g.out, ws+"} else {") + fmt.Fprintln(g.out, ws+" out.Raw(json.Marshal("+in+"))") + fmt.Fprintln(g.out, ws+"}") + + default: + return fmt.Errorf("don't know how to encode %v", t) + } + return nil +} + +func (g *Generator) notEmptyCheck(t reflect.Type, v string) string { + optionalIface := reflect.TypeOf((*easyjson.Optional)(nil)).Elem() + if reflect.PtrTo(t).Implements(optionalIface) { + return "(" + v + ").IsDefined()" + } + + switch t.Kind() { + case reflect.Slice, reflect.Map: + return "len(" + v + ") != 0" + case reflect.Interface, reflect.Ptr: + return v + " != nil" + case reflect.Bool: + return v + case reflect.String: + return v + ` != ""` + case 
reflect.Float32, reflect.Float64, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + + return v + " != 0" + + default: + // note: Array types don't have a useful empty value + return "true" + } +} + +func (g *Generator) genStructFieldEncoder(t reflect.Type, f reflect.StructField) error { + jsonName := g.fieldNamer.GetJSONFieldName(t, f) + tags := parseFieldTags(f) + + if tags.omit { + return nil + } + if !tags.omitEmpty && !g.omitEmpty || tags.noOmitEmpty { + fmt.Fprintln(g.out, " if !first { out.RawByte(',') }") + fmt.Fprintln(g.out, " first = false") + fmt.Fprintf(g.out, " out.RawString(%q)\n", strconv.Quote(jsonName)+":") + return g.genTypeEncoder(f.Type, "in."+f.Name, tags, 1) + } + + fmt.Fprintln(g.out, " if", g.notEmptyCheck(f.Type, "in."+f.Name), "{") + fmt.Fprintln(g.out, " if !first { out.RawByte(',') }") + fmt.Fprintln(g.out, " first = false") + + fmt.Fprintf(g.out, " out.RawString(%q)\n", strconv.Quote(jsonName)+":") + if err := g.genTypeEncoder(f.Type, "in."+f.Name, tags, 2); err != nil { + return err + } + fmt.Fprintln(g.out, " }") + return nil +} + +func (g *Generator) genEncoder(t reflect.Type) error { + switch t.Kind() { + case reflect.Slice, reflect.Array, reflect.Map: + return g.genSliceArrayMapEncoder(t) + default: + return g.genStructEncoder(t) + } +} + +func (g *Generator) genSliceArrayMapEncoder(t reflect.Type) error { + switch t.Kind() { + case reflect.Slice, reflect.Array, reflect.Map: + default: + return fmt.Errorf("cannot generate encoder/decoder for %v, not a slice/array/map type", t) + } + + fname := g.getEncoderName(t) + typ := g.getType(t) + + fmt.Fprintln(g.out, "func "+fname+"(out *jwriter.Writer, in "+typ+") {") + err := g.genTypeEncoderNoCheck(t, "in", fieldTags{}, 1) + if err != nil { + return err + } + fmt.Fprintln(g.out, "}") + return nil +} + +func (g *Generator) genStructEncoder(t reflect.Type) error { + if t.Kind() != 
reflect.Struct { + return fmt.Errorf("cannot generate encoder/decoder for %v, not a struct type", t) + } + + fname := g.getEncoderName(t) + typ := g.getType(t) + + fmt.Fprintln(g.out, "func "+fname+"(out *jwriter.Writer, in "+typ+") {") + fmt.Fprintln(g.out, " out.RawByte('{')") + fmt.Fprintln(g.out, " first := true") + fmt.Fprintln(g.out, " _ = first") + + fs, err := getStructFields(t) + if err != nil { + return fmt.Errorf("cannot generate encoder for %v: %v", t, err) + } + for _, f := range fs { + if err := g.genStructFieldEncoder(t, f); err != nil { + return err + } + } + + fmt.Fprintln(g.out, " out.RawByte('}')") + fmt.Fprintln(g.out, "}") + + return nil +} + +func (g *Generator) genStructMarshaler(t reflect.Type) error { + switch t.Kind() { + case reflect.Slice, reflect.Array, reflect.Map, reflect.Struct: + default: + return fmt.Errorf("cannot generate encoder/decoder for %v, not a struct/slice/array/map type", t) + } + + fname := g.getEncoderName(t) + typ := g.getType(t) + + if !g.noStdMarshalers { + fmt.Fprintln(g.out, "// MarshalJSON supports json.Marshaler interface") + fmt.Fprintln(g.out, "func (v "+typ+") MarshalJSON() ([]byte, error) {") + fmt.Fprintln(g.out, " w := jwriter.Writer{}") + fmt.Fprintln(g.out, " "+fname+"(&w, v)") + fmt.Fprintln(g.out, " return w.Buffer.BuildBytes(), w.Error") + fmt.Fprintln(g.out, "}") + } + + fmt.Fprintln(g.out, "// MarshalEasyJSON supports easyjson.Marshaler interface") + fmt.Fprintln(g.out, "func (v "+typ+") MarshalEasyJSON(w *jwriter.Writer) {") + fmt.Fprintln(g.out, " "+fname+"(w, v)") + fmt.Fprintln(g.out, "}") + + return nil +} diff --git a/src/vendor/github.com/mailru/easyjson/gen/generator.go b/src/vendor/github.com/mailru/easyjson/gen/generator.go new file mode 100644 index 00000000..988a3a5f --- /dev/null +++ b/src/vendor/github.com/mailru/easyjson/gen/generator.go @@ -0,0 +1,449 @@ +package gen + +import ( + "bytes" + "fmt" + "hash/fnv" + "io" + "path" + "reflect" + "sort" + "strconv" + "strings" + "unicode" +) 
+ +const pkgWriter = "github.com/mailru/easyjson/jwriter" +const pkgLexer = "github.com/mailru/easyjson/jlexer" +const pkgEasyJSON = "github.com/mailru/easyjson" + +// FieldNamer defines a policy for generating names for struct fields. +type FieldNamer interface { + GetJSONFieldName(t reflect.Type, f reflect.StructField) string +} + +// Generator generates the requested marshaler/unmarshalers. +type Generator struct { + out *bytes.Buffer + + pkgName string + pkgPath string + buildTags string + hashString string + + varCounter int + + noStdMarshalers bool + omitEmpty bool + fieldNamer FieldNamer + + // package path to local alias map for tracking imports + imports map[string]string + + // types that marshalers were requested for by user + marshalers map[reflect.Type]bool + + // types that encoders were already generated for + typesSeen map[reflect.Type]bool + + // types that encoders were requested for (e.g. by encoders of other types) + typesUnseen []reflect.Type + + // function name to relevant type maps to track names of de-/encoders in + // case of a name clash or unnamed structs + functionNames map[string]reflect.Type +} + +// NewGenerator initializes and returns a Generator. +func NewGenerator(filename string) *Generator { + ret := &Generator{ + imports: map[string]string{ + pkgWriter: "jwriter", + pkgLexer: "jlexer", + pkgEasyJSON: "easyjson", + "encoding/json": "json", + }, + fieldNamer: DefaultFieldNamer{}, + marshalers: make(map[reflect.Type]bool), + typesSeen: make(map[reflect.Type]bool), + functionNames: make(map[string]reflect.Type), + } + + // Use a file-unique prefix on all auxiliary funcs to avoid + // name clashes. + hash := fnv.New32() + hash.Write([]byte(filename)) + ret.hashString = fmt.Sprintf("%x", hash.Sum32()) + + return ret +} + +// SetPkg sets the name and path of output package. +func (g *Generator) SetPkg(name, path string) { + g.pkgName = name + g.pkgPath = path +} + +// SetBuildTags sets build tags for the output file. 
+func (g *Generator) SetBuildTags(tags string) { + g.buildTags = tags +} + +// SetFieldNamer sets field naming strategy. +func (g *Generator) SetFieldNamer(n FieldNamer) { + g.fieldNamer = n +} + +// UseSnakeCase sets snake_case field naming strategy. +func (g *Generator) UseSnakeCase() { + g.fieldNamer = SnakeCaseFieldNamer{} +} + +// NoStdMarshalers instructs not to generate standard MarshalJSON/UnmarshalJSON +// methods (only the custom interface). +func (g *Generator) NoStdMarshalers() { + g.noStdMarshalers = true +} + +// OmitEmpty triggers `json=",omitempty"` behaviour by default. +func (g *Generator) OmitEmpty() { + g.omitEmpty = true +} + +// addTypes requests to generate encoding/decoding funcs for the given type. +func (g *Generator) addType(t reflect.Type) { + if g.typesSeen[t] { + return + } + for _, t1 := range g.typesUnseen { + if t1 == t { + return + } + } + g.typesUnseen = append(g.typesUnseen, t) +} + +// Add requests to generate marshaler/unmarshalers and encoding/decoding +// funcs for the type of given object. +func (g *Generator) Add(obj interface{}) { + t := reflect.TypeOf(obj) + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + g.addType(t) + g.marshalers[t] = true +} + +// printHeader prints package declaration and imports. +func (g *Generator) printHeader() { + if g.buildTags != "" { + fmt.Println("// +build ", g.buildTags) + fmt.Println() + } + fmt.Println("// Code generated by easyjson for marshaling/unmarshaling. 
DO NOT EDIT.") + fmt.Println() + fmt.Println("package ", g.pkgName) + fmt.Println() + + byAlias := map[string]string{} + var aliases []string + for path, alias := range g.imports { + aliases = append(aliases, alias) + byAlias[alias] = path + } + + sort.Strings(aliases) + fmt.Println("import (") + for _, alias := range aliases { + fmt.Printf(" %s %q\n", alias, byAlias[alias]) + } + + fmt.Println(")") + fmt.Println("") + fmt.Println("// suppress unused package warning") + fmt.Println("var (") + fmt.Println(" _ *json.RawMessage") + fmt.Println(" _ *jlexer.Lexer") + fmt.Println(" _ *jwriter.Writer") + fmt.Println(" _ easyjson.Marshaler") + fmt.Println(")") + + fmt.Println() +} + +// Run runs the generator and outputs generated code to out. +func (g *Generator) Run(out io.Writer) error { + g.out = &bytes.Buffer{} + + for len(g.typesUnseen) > 0 { + t := g.typesUnseen[len(g.typesUnseen)-1] + g.typesUnseen = g.typesUnseen[:len(g.typesUnseen)-1] + g.typesSeen[t] = true + + if err := g.genDecoder(t); err != nil { + return err + } + if err := g.genEncoder(t); err != nil { + return err + } + + if !g.marshalers[t] { + continue + } + + if err := g.genStructMarshaler(t); err != nil { + return err + } + if err := g.genStructUnmarshaler(t); err != nil { + return err + } + } + g.printHeader() + _, err := out.Write(g.out.Bytes()) + return err +} + +// fixes vendored paths +func fixPkgPathVendoring(pkgPath string) string { + const vendor = "/vendor/" + if i := strings.LastIndex(pkgPath, vendor); i != -1 { + return pkgPath[i+len(vendor):] + } + return pkgPath +} + +func fixAliasName(alias string) string { + alias = strings.Replace( + strings.Replace(alias, ".", "_", -1), + "-", + "_", + -1, + ) + return alias +} + +// pkgAlias creates and returns and import alias for a given package. 
+func (g *Generator) pkgAlias(pkgPath string) string { + pkgPath = fixPkgPathVendoring(pkgPath) + if alias := g.imports[pkgPath]; alias != "" { + return alias + } + + for i := 0; ; i++ { + alias := fixAliasName(path.Base(pkgPath)) + if i > 0 { + alias += fmt.Sprint(i) + } + + exists := false + for _, v := range g.imports { + if v == alias { + exists = true + break + } + } + + if !exists { + g.imports[pkgPath] = alias + return alias + } + } +} + +// getType return the textual type name of given type that can be used in generated code. +func (g *Generator) getType(t reflect.Type) string { + if t.Name() == "" { + switch t.Kind() { + case reflect.Ptr: + return "*" + g.getType(t.Elem()) + case reflect.Slice: + return "[]" + g.getType(t.Elem()) + case reflect.Array: + return "[" + strconv.Itoa(t.Len()) + "]" + g.getType(t.Elem()) + case reflect.Map: + return "map[" + g.getType(t.Key()) + "]" + g.getType(t.Elem()) + } + } + + if t.Name() == "" || t.PkgPath() == "" { + if t.Kind() == reflect.Struct { + // the fields of an anonymous struct can have named types, + // and t.String() will not be sufficient because it does not + // remove the package name when it matches g.pkgPath. + // so we convert by hand + nf := t.NumField() + lines := make([]string, 0, nf) + for i := 0; i < nf; i++ { + f := t.Field(i) + line := f.Name + " " + g.getType(f.Type) + t := f.Tag + if t != "" { + line += " " + escapeTag(t) + } + lines = append(lines, line) + } + return strings.Join([]string{"struct { ", strings.Join(lines, "; "), " }"}, "") + } + return t.String() + } else if t.PkgPath() == g.pkgPath { + return t.Name() + } + return g.pkgAlias(t.PkgPath()) + "." 
+ t.Name() +} + +// escape a struct field tag string back to source code +func escapeTag(tag reflect.StructTag) string { + t := string(tag) + if strings.ContainsRune(t, '`') { + // there are ` in the string; we can't use ` to enclose the string + return strconv.Quote(t) + } + return "`" + t + "`" +} + +// uniqueVarName returns a file-unique name that can be used for generated variables. +func (g *Generator) uniqueVarName() string { + g.varCounter++ + return fmt.Sprint("v", g.varCounter) +} + +// safeName escapes unsafe characters in pkg/type name and returns a string that can be used +// in encoder/decoder names for the type. +func (g *Generator) safeName(t reflect.Type) string { + name := t.PkgPath() + if t.Name() == "" { + name += "anonymous" + } else { + name += "." + t.Name() + } + + parts := []string{} + part := []rune{} + for _, c := range name { + if unicode.IsLetter(c) || unicode.IsDigit(c) { + part = append(part, c) + } else if len(part) > 0 { + parts = append(parts, string(part)) + part = []rune{} + } + } + return joinFunctionNameParts(false, parts...) +} + +// functionName returns a function name for a given type with a given prefix. If a function +// with this prefix already exists for a type, it is returned. +// +// Method is used to track encoder/decoder names for the type. +func (g *Generator) functionName(prefix string, t reflect.Type) string { + prefix = joinFunctionNameParts(true, "easyjson", g.hashString, prefix) + name := joinFunctionNameParts(true, prefix, g.safeName(t)) + + // Most of the names will be unique, try a shortcut first. + if e, ok := g.functionNames[name]; !ok || e == t { + g.functionNames[name] = t + return name + } + + // Search if the function already exists. + for name1, t1 := range g.functionNames { + if t1 == t && strings.HasPrefix(name1, prefix) { + return name1 + } + } + + // Create a new name in the case of a clash. 
+	for i := 1; ; i++ {
+		nm := fmt.Sprint(name, i)
+		if _, ok := g.functionNames[nm]; ok {
+			continue
+		}
+		g.functionNames[nm] = t
+		return nm
+	}
+}
+
+// DefaultFieldNamer implements trivial naming policy equivalent to encoding/json.
+type DefaultFieldNamer struct{}
+
+func (DefaultFieldNamer) GetJSONFieldName(t reflect.Type, f reflect.StructField) string {
+	jsonName := strings.Split(f.Tag.Get("json"), ",")[0]
+	if jsonName != "" {
+		return jsonName
+	} else {
+		return f.Name
+	}
+}
+
+// SnakeCaseFieldNamer implements CamelCase to snake_case conversion for field names.
+type SnakeCaseFieldNamer struct{}
+
+func camelToSnake(name string) string {
+	var ret bytes.Buffer
+
+	multipleUpper := false
+	var lastUpper rune
+	var beforeUpper rune
+
+	for _, c := range name {
+		// Non-lowercase character after uppercase is considered to be uppercase too.
+		isUpper := (unicode.IsUpper(c) || (lastUpper != 0 && !unicode.IsLower(c)))
+
+		if lastUpper != 0 {
+			// Output a delimiter if last character was either the first uppercase character
+			// in a row, or the last one in a row (e.g. 'S' in "HTTPServer").
+			// Do not output a delimiter at the beginning of the name.
+
+			firstInRow := !multipleUpper
+			lastInRow := !isUpper
+
+			if ret.Len() > 0 && (firstInRow || lastInRow) && beforeUpper != '_' {
+				ret.WriteByte('_')
+			}
+			ret.WriteRune(unicode.ToLower(lastUpper))
+		}
+
+		// Buffer uppercase char, do not output it yet as a delimiter may be required if the
+		// next character is lowercase. 
+ if isUpper { + multipleUpper = (lastUpper != 0) + lastUpper = c + continue + } + + ret.WriteRune(c) + lastUpper = 0 + beforeUpper = c + multipleUpper = false + } + + if lastUpper != 0 { + ret.WriteRune(unicode.ToLower(lastUpper)) + } + return string(ret.Bytes()) +} + +func (SnakeCaseFieldNamer) GetJSONFieldName(t reflect.Type, f reflect.StructField) string { + jsonName := strings.Split(f.Tag.Get("json"), ",")[0] + if jsonName != "" { + return jsonName + } + + return camelToSnake(f.Name) +} + +func joinFunctionNameParts(keepFirst bool, parts ...string) string { + buf := bytes.NewBufferString("") + for i, part := range parts { + if i == 0 && keepFirst { + buf.WriteString(part) + } else { + if len(part) > 0 { + buf.WriteString(strings.ToUpper(string(part[0]))) + } + if len(part) > 1 { + buf.WriteString(part[1:]) + } + } + } + return buf.String() +} diff --git a/src/vendor/github.com/mailru/easyjson/gen/generator_test.go b/src/vendor/github.com/mailru/easyjson/gen/generator_test.go new file mode 100644 index 00000000..62c03f08 --- /dev/null +++ b/src/vendor/github.com/mailru/easyjson/gen/generator_test.go @@ -0,0 +1,65 @@ +package gen + +import ( + "testing" +) + +func TestCamelToSnake(t *testing.T) { + for i, test := range []struct { + In, Out string + }{ + {"", ""}, + {"A", "a"}, + {"SimpleExample", "simple_example"}, + {"internalField", "internal_field"}, + + {"SomeHTTPStuff", "some_http_stuff"}, + {"WriteJSON", "write_json"}, + {"HTTP2Server", "http2_server"}, + {"Some_Mixed_Case", "some_mixed_case"}, + {"do_nothing", "do_nothing"}, + + {"JSONHTTPRPCServer", "jsonhttprpc_server"}, // nothing can be done here without a dictionary + } { + got := camelToSnake(test.In) + if got != test.Out { + t.Errorf("[%d] camelToSnake(%s) = %s; want %s", i, test.In, got, test.Out) + } + } +} + +func TestJoinFunctionNameParts(t *testing.T) { + for i, test := range []struct { + keepFirst bool + parts []string + out string + }{ + {false, []string{}, ""}, + {false, []string{"a"}, 
"A"},
+		{false, []string{"simple", "example"}, "SimpleExample"},
+		{true, []string{"first", "example"}, "firstExample"},
+		{false, []string{"some", "UPPER", "case"}, "SomeUPPERCase"},
+		{false, []string{"number", "123"}, "Number123"},
+	} {
+		got := joinFunctionNameParts(test.keepFirst, test.parts...)
+		if got != test.out {
+			t.Errorf("[%d] joinFunctionNameParts(%v) = %s; want %s", i, test.parts, got, test.out)
+		}
+	}
+}
+
+func TestFixVendorPath(t *testing.T) {
+	for i, test := range []struct {
+		In, Out string
+	}{
+		{"", ""},
+		{"time", "time"},
+		{"project/vendor/subpackage", "subpackage"},
+	} {
+		got := fixPkgPathVendoring(test.In)
+		if got != test.Out {
+			t.Errorf("[%d] fixPkgPathVendoring(%s) = %s; want %s", i, test.In, got, test.Out)
+		}
+	}
+
+}
diff --git a/src/vendor/github.com/mailru/easyjson/helpers.go b/src/vendor/github.com/mailru/easyjson/helpers.go
new file mode 100644
index 00000000..b86b87d2
--- /dev/null
+++ b/src/vendor/github.com/mailru/easyjson/helpers.go
@@ -0,0 +1,78 @@
+// Package easyjson contains marshaler/unmarshaler interfaces and helper functions.
+package easyjson
+
+import (
+	"io"
+	"io/ioutil"
+	"net/http"
+	"strconv"
+
+	"github.com/mailru/easyjson/jlexer"
+	"github.com/mailru/easyjson/jwriter"
+)
+
+// Marshaler is an easyjson-compatible marshaler interface.
+type Marshaler interface {
+	MarshalEasyJSON(w *jwriter.Writer)
+}
+
+// Unmarshaler is an easyjson-compatible unmarshaler interface.
+type Unmarshaler interface {
+	UnmarshalEasyJSON(w *jlexer.Lexer)
+}
+
+// Optional defines an undefined-test method for a type to integrate with 'omitempty' logic.
+type Optional interface {
+	IsDefined() bool
+}
+
+// Marshal returns data as a single byte slice. Method is suboptimal as the data is likely to be copied
+// from a chain of smaller chunks.
+func Marshal(v Marshaler) ([]byte, error) {
+	w := jwriter.Writer{}
+	v.MarshalEasyJSON(&w)
+	return w.BuildBytes()
+}
+
+// MarshalToWriter marshals the data to an io.Writer. 
+func MarshalToWriter(v Marshaler, w io.Writer) (written int, err error) { + jw := jwriter.Writer{} + v.MarshalEasyJSON(&jw) + return jw.DumpTo(w) +} + +// MarshalToHTTPResponseWriter sets Content-Length and Content-Type headers for the +// http.ResponseWriter, and send the data to the writer. started will be equal to +// false if an error occurred before any http.ResponseWriter methods were actually +// invoked (in this case a 500 reply is possible). +func MarshalToHTTPResponseWriter(v Marshaler, w http.ResponseWriter) (started bool, written int, err error) { + jw := jwriter.Writer{} + v.MarshalEasyJSON(&jw) + if jw.Error != nil { + return false, 0, jw.Error + } + w.Header().Set("Content-Type", "application/json") + w.Header().Set("Content-Length", strconv.Itoa(jw.Size())) + + started = true + written, err = jw.DumpTo(w) + return +} + +// Unmarshal decodes the JSON in data into the object. +func Unmarshal(data []byte, v Unmarshaler) error { + l := jlexer.Lexer{Data: data} + v.UnmarshalEasyJSON(&l) + return l.Error() +} + +// UnmarshalFromReader reads all the data in the reader and decodes as JSON into the object. +func UnmarshalFromReader(r io.Reader, v Unmarshaler) error { + data, err := ioutil.ReadAll(r) + if err != nil { + return err + } + l := jlexer.Lexer{Data: data} + v.UnmarshalEasyJSON(&l) + return l.Error() +} diff --git a/src/vendor/github.com/mailru/easyjson/jlexer/bytestostr.go b/src/vendor/github.com/mailru/easyjson/jlexer/bytestostr.go new file mode 100644 index 00000000..ff7b27c5 --- /dev/null +++ b/src/vendor/github.com/mailru/easyjson/jlexer/bytestostr.go @@ -0,0 +1,24 @@ +// This file will only be included to the build if neither +// easyjson_nounsafe nor appengine build tag is set. See README notes +// for more details. + +//+build !easyjson_nounsafe +//+build !appengine + +package jlexer + +import ( + "reflect" + "unsafe" +) + +// bytesToStr creates a string pointing at the slice to avoid copying. 
+// +// Warning: the string returned by the function should be used with care, as the whole input data +// chunk may be either blocked from being freed by GC because of a single string or the buffer.Data +// may be garbage-collected even when the string exists. +func bytesToStr(data []byte) string { + h := (*reflect.SliceHeader)(unsafe.Pointer(&data)) + shdr := reflect.StringHeader{Data: h.Data, Len: h.Len} + return *(*string)(unsafe.Pointer(&shdr)) +} diff --git a/src/vendor/github.com/mailru/easyjson/jlexer/bytestostr_nounsafe.go b/src/vendor/github.com/mailru/easyjson/jlexer/bytestostr_nounsafe.go new file mode 100644 index 00000000..864d1be6 --- /dev/null +++ b/src/vendor/github.com/mailru/easyjson/jlexer/bytestostr_nounsafe.go @@ -0,0 +1,13 @@ +// This file is included to the build if any of the buildtags below +// are defined. Refer to README notes for more details. + +//+build easyjson_nounsafe appengine + +package jlexer + +// bytesToStr creates a string normally from []byte +// +// Note that this method is roughly 1.5x slower than using the 'unsafe' method. +func bytesToStr(data []byte) string { + return string(data) +} diff --git a/src/vendor/github.com/mailru/easyjson/jlexer/error.go b/src/vendor/github.com/mailru/easyjson/jlexer/error.go new file mode 100644 index 00000000..e90ec40d --- /dev/null +++ b/src/vendor/github.com/mailru/easyjson/jlexer/error.go @@ -0,0 +1,15 @@ +package jlexer + +import "fmt" + +// LexerError implements the error interface and represents all possible errors that can be +// generated during parsing the JSON data. 
+type LexerError struct { + Reason string + Offset int + Data string +} + +func (l *LexerError) Error() string { + return fmt.Sprintf("parse error: %s near offset %d of '%s'", l.Reason, l.Offset, l.Data) +} diff --git a/src/vendor/github.com/mailru/easyjson/jlexer/lexer.go b/src/vendor/github.com/mailru/easyjson/jlexer/lexer.go new file mode 100644 index 00000000..16e320db --- /dev/null +++ b/src/vendor/github.com/mailru/easyjson/jlexer/lexer.go @@ -0,0 +1,1114 @@ +// Package jlexer contains a JSON lexer implementation. +// +// It is expected that it is mostly used with generated parser code, so the interface is tuned +// for a parser that knows what kind of data is expected. +package jlexer + +import ( + "encoding/base64" + "errors" + "fmt" + "io" + "strconv" + "unicode" + "unicode/utf16" + "unicode/utf8" +) + +// tokenKind determines type of a token. +type tokenKind byte + +const ( + tokenUndef tokenKind = iota // No token. + tokenDelim // Delimiter: one of '{', '}', '[' or ']'. + tokenString // A string literal, e.g. "abc\u1234" + tokenNumber // Number literal, e.g. 1.5e5 + tokenBool // Boolean literal: true or false. + tokenNull // null keyword. +) + +// token describes a single token: type, position in the input and value. +type token struct { + kind tokenKind // Type of a token. + + boolValue bool // Value if a boolean literal token. + byteValue []byte // Raw value of a token. + delimValue byte +} + +// Lexer is a JSON lexer: it iterates over JSON tokens in a byte slice. +type Lexer struct { + Data []byte // Input data given to the lexer. + + start int // Start of the current token. + pos int // Current unscanned position in the input stream. + token token // Last scanned token, if token.kind != tokenUndef. + + firstElement bool // Whether current element is the first in array or an object. + wantSep byte // A comma or a colon character, which need to occur before a token. + + UseMultipleErrors bool // If we want to use multiple errors. 
+ fatalError error // Fatal error occurred during lexing. It is usually a syntax error. + multipleErrors []*LexerError // Semantic errors occurred during lexing. Marshalling will be continued after finding this errors. +} + +// FetchToken scans the input for the next token. +func (r *Lexer) FetchToken() { + r.token.kind = tokenUndef + r.start = r.pos + + // Check if r.Data has r.pos element + // If it doesn't, it mean corrupted input data + if len(r.Data) < r.pos { + r.errParse("Unexpected end of data") + return + } + // Determine the type of a token by skipping whitespace and reading the + // first character. + for _, c := range r.Data[r.pos:] { + switch c { + case ':', ',': + if r.wantSep == c { + r.pos++ + r.start++ + r.wantSep = 0 + } else { + r.errSyntax() + } + + case ' ', '\t', '\r', '\n': + r.pos++ + r.start++ + + case '"': + if r.wantSep != 0 { + r.errSyntax() + } + + r.token.kind = tokenString + r.fetchString() + return + + case '{', '[': + if r.wantSep != 0 { + r.errSyntax() + } + r.firstElement = true + r.token.kind = tokenDelim + r.token.delimValue = r.Data[r.pos] + r.pos++ + return + + case '}', ']': + if !r.firstElement && (r.wantSep != ',') { + r.errSyntax() + } + r.wantSep = 0 + r.token.kind = tokenDelim + r.token.delimValue = r.Data[r.pos] + r.pos++ + return + + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '-': + if r.wantSep != 0 { + r.errSyntax() + } + r.token.kind = tokenNumber + r.fetchNumber() + return + + case 'n': + if r.wantSep != 0 { + r.errSyntax() + } + + r.token.kind = tokenNull + r.fetchNull() + return + + case 't': + if r.wantSep != 0 { + r.errSyntax() + } + + r.token.kind = tokenBool + r.token.boolValue = true + r.fetchTrue() + return + + case 'f': + if r.wantSep != 0 { + r.errSyntax() + } + + r.token.kind = tokenBool + r.token.boolValue = false + r.fetchFalse() + return + + default: + r.errSyntax() + return + } + } + r.fatalError = io.EOF + return +} + +// isTokenEnd returns true if the char can follow a non-delimiter 
token +func isTokenEnd(c byte) bool { + return c == ' ' || c == '\t' || c == '\r' || c == '\n' || c == '[' || c == ']' || c == '{' || c == '}' || c == ',' || c == ':' +} + +// fetchNull fetches and checks remaining bytes of null keyword. +func (r *Lexer) fetchNull() { + r.pos += 4 + if r.pos > len(r.Data) || + r.Data[r.pos-3] != 'u' || + r.Data[r.pos-2] != 'l' || + r.Data[r.pos-1] != 'l' || + (r.pos != len(r.Data) && !isTokenEnd(r.Data[r.pos])) { + + r.pos -= 4 + r.errSyntax() + } +} + +// fetchTrue fetches and checks remaining bytes of true keyword. +func (r *Lexer) fetchTrue() { + r.pos += 4 + if r.pos > len(r.Data) || + r.Data[r.pos-3] != 'r' || + r.Data[r.pos-2] != 'u' || + r.Data[r.pos-1] != 'e' || + (r.pos != len(r.Data) && !isTokenEnd(r.Data[r.pos])) { + + r.pos -= 4 + r.errSyntax() + } +} + +// fetchFalse fetches and checks remaining bytes of false keyword. +func (r *Lexer) fetchFalse() { + r.pos += 5 + if r.pos > len(r.Data) || + r.Data[r.pos-4] != 'a' || + r.Data[r.pos-3] != 'l' || + r.Data[r.pos-2] != 's' || + r.Data[r.pos-1] != 'e' || + (r.pos != len(r.Data) && !isTokenEnd(r.Data[r.pos])) { + + r.pos -= 5 + r.errSyntax() + } +} + +// fetchNumber scans a number literal token. +func (r *Lexer) fetchNumber() { + hasE := false + afterE := false + hasDot := false + + r.pos++ + for i, c := range r.Data[r.pos:] { + switch { + case c >= '0' && c <= '9': + afterE = false + case c == '.' && !hasDot: + hasDot = true + case (c == 'e' || c == 'E') && !hasE: + hasE = true + hasDot = true + afterE = true + case (c == '+' || c == '-') && afterE: + afterE = false + default: + r.pos += i + if !isTokenEnd(c) { + r.errSyntax() + } else { + r.token.byteValue = r.Data[r.start:r.pos] + } + return + } + } + + r.pos = len(r.Data) + r.token.byteValue = r.Data[r.start:] +} + +// findStringLen tries to scan into the string literal for ending quote char to determine required size. +// The size will be exact if no escapes are present and may be inexact if there are escaped chars. 
+func findStringLen(data []byte) (hasEscapes bool, length int) { + delta := 0 + + for i := 0; i < len(data); i++ { + switch data[i] { + case '\\': + i++ + delta++ + if i < len(data) && data[i] == 'u' { + delta++ + } + case '"': + return (delta > 0), (i - delta) + } + } + + return false, len(data) +} + +// getu4 decodes \uXXXX from the beginning of s, returning the hex value, +// or it returns -1. +func getu4(s []byte) rune { + if len(s) < 6 || s[0] != '\\' || s[1] != 'u' { + return -1 + } + var val rune + for i := 2; i < len(s) && i < 6; i++ { + var v byte + c := s[i] + switch c { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + v = c - '0' + case 'a', 'b', 'c', 'd', 'e', 'f': + v = c - 'a' + 10 + case 'A', 'B', 'C', 'D', 'E', 'F': + v = c - 'A' + 10 + default: + return -1 + } + + val <<= 4 + val |= rune(v) + } + return val +} + +// processEscape processes a single escape sequence and returns number of bytes processed. +func (r *Lexer) processEscape(data []byte) (int, error) { + if len(data) < 2 { + return 0, fmt.Errorf("syntax error at %v", string(data)) + } + + c := data[1] + switch c { + case '"', '/', '\\': + r.token.byteValue = append(r.token.byteValue, c) + return 2, nil + case 'b': + r.token.byteValue = append(r.token.byteValue, '\b') + return 2, nil + case 'f': + r.token.byteValue = append(r.token.byteValue, '\f') + return 2, nil + case 'n': + r.token.byteValue = append(r.token.byteValue, '\n') + return 2, nil + case 'r': + r.token.byteValue = append(r.token.byteValue, '\r') + return 2, nil + case 't': + r.token.byteValue = append(r.token.byteValue, '\t') + return 2, nil + case 'u': + rr := getu4(data) + if rr < 0 { + return 0, errors.New("syntax error") + } + + read := 6 + if utf16.IsSurrogate(rr) { + rr1 := getu4(data[read:]) + if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar { + read += 6 + rr = dec + } else { + rr = unicode.ReplacementChar + } + } + var d [4]byte + s := utf8.EncodeRune(d[:], rr) + r.token.byteValue = 
append(r.token.byteValue, d[:s]...) + return read, nil + } + + return 0, errors.New("syntax error") +} + +// fetchString scans a string literal token. +func (r *Lexer) fetchString() { + r.pos++ + data := r.Data[r.pos:] + + hasEscapes, length := findStringLen(data) + if !hasEscapes { + r.token.byteValue = data[:length] + r.pos += length + 1 + return + } + + r.token.byteValue = make([]byte, 0, length) + p := 0 + for i := 0; i < len(data); { + switch data[i] { + case '"': + r.pos += i + 1 + r.token.byteValue = append(r.token.byteValue, data[p:i]...) + i++ + return + + case '\\': + r.token.byteValue = append(r.token.byteValue, data[p:i]...) + off, err := r.processEscape(data[i:]) + if err != nil { + r.errParse(err.Error()) + return + } + i += off + p = i + + default: + i++ + } + } + r.errParse("unterminated string literal") +} + +// scanToken scans the next token if no token is currently available in the lexer. +func (r *Lexer) scanToken() { + if r.token.kind != tokenUndef || r.fatalError != nil { + return + } + + r.FetchToken() +} + +// consume resets the current token to allow scanning the next one. +func (r *Lexer) consume() { + r.token.kind = tokenUndef + r.token.delimValue = 0 +} + +// Ok returns true if no error (including io.EOF) was encountered during scanning. +func (r *Lexer) Ok() bool { + return r.fatalError == nil +} + +const maxErrorContextLen = 13 + +func (r *Lexer) errParse(what string) { + if r.fatalError == nil { + var str string + if len(r.Data)-r.pos <= maxErrorContextLen { + str = string(r.Data) + } else { + str = string(r.Data[r.pos:r.pos+maxErrorContextLen-3]) + "..." 
+ } + r.fatalError = &LexerError{ + Reason: what, + Offset: r.pos, + Data: str, + } + } +} + +func (r *Lexer) errSyntax() { + r.errParse("syntax error") +} + +func (r *Lexer) errInvalidToken(expected string) { + if r.fatalError != nil { + return + } + if r.UseMultipleErrors { + r.pos = r.start + r.consume() + r.SkipRecursive() + switch expected { + case "[": + r.token.delimValue = ']' + r.token.kind = tokenDelim + case "{": + r.token.delimValue = '}' + r.token.kind = tokenDelim + } + r.addNonfatalError(&LexerError{ + Reason: fmt.Sprintf("expected %s", expected), + Offset: r.start, + Data: string(r.Data[r.start:r.pos]), + }) + return + } + + var str string + if len(r.token.byteValue) <= maxErrorContextLen { + str = string(r.token.byteValue) + } else { + str = string(r.token.byteValue[:maxErrorContextLen-3]) + "..." + } + r.fatalError = &LexerError{ + Reason: fmt.Sprintf("expected %s", expected), + Offset: r.pos, + Data: str, + } +} + +func (r *Lexer) GetPos() int { + return r.pos +} + +// Delim consumes a token and verifies that it is the given delimiter. +func (r *Lexer) Delim(c byte) { + if r.token.kind == tokenUndef && r.Ok() { + r.FetchToken() + } + + if !r.Ok() || r.token.delimValue != c { + r.consume() // errInvalidToken can change token if UseMultipleErrors is enabled. + r.errInvalidToken(string([]byte{c})) + } else { + r.consume() + } +} + +// IsDelim returns true if there was no scanning error and next token is the given delimiter. +func (r *Lexer) IsDelim(c byte) bool { + if r.token.kind == tokenUndef && r.Ok() { + r.FetchToken() + } + return !r.Ok() || r.token.delimValue == c +} + +// Null verifies that the next token is null and consumes it. +func (r *Lexer) Null() { + if r.token.kind == tokenUndef && r.Ok() { + r.FetchToken() + } + if !r.Ok() || r.token.kind != tokenNull { + r.errInvalidToken("null") + } + r.consume() +} + +// IsNull returns true if the next token is a null keyword. 
+func (r *Lexer) IsNull() bool { + if r.token.kind == tokenUndef && r.Ok() { + r.FetchToken() + } + return r.Ok() && r.token.kind == tokenNull +} + +// Skip skips a single token. +func (r *Lexer) Skip() { + if r.token.kind == tokenUndef && r.Ok() { + r.FetchToken() + } + r.consume() +} + +// SkipRecursive skips next array or object completely, or just skips a single token if not +// an array/object. +// +// Note: no syntax validation is performed on the skipped data. +func (r *Lexer) SkipRecursive() { + r.scanToken() + var start, end byte + + if r.token.delimValue == '{' { + start, end = '{', '}' + } else if r.token.delimValue == '[' { + start, end = '[', ']' + } else { + r.consume() + return + } + + r.consume() + + level := 1 + inQuotes := false + wasEscape := false + + for i, c := range r.Data[r.pos:] { + switch { + case c == start && !inQuotes: + level++ + case c == end && !inQuotes: + level-- + if level == 0 { + r.pos += i + 1 + return + } + case c == '\\' && inQuotes: + wasEscape = !wasEscape + continue + case c == '"' && inQuotes: + inQuotes = wasEscape + case c == '"': + inQuotes = true + } + wasEscape = false + } + r.pos = len(r.Data) + r.fatalError = &LexerError{ + Reason: "EOF reached while skipping array/object or token", + Offset: r.pos, + Data: string(r.Data[r.pos:]), + } +} + +// Raw fetches the next item recursively as a data slice +func (r *Lexer) Raw() []byte { + r.SkipRecursive() + if !r.Ok() { + return nil + } + return r.Data[r.start:r.pos] +} + +// IsStart returns whether the lexer is positioned at the start +// of an input string. +func (r *Lexer) IsStart() bool { + return r.pos == 0 +} + +// Consumed reads all remaining bytes from the input, publishing an error if +// there is anything but whitespace remaining. 
+func (r *Lexer) Consumed() { + if r.pos > len(r.Data) || !r.Ok() { + return + } + + for _, c := range r.Data[r.pos:] { + if c != ' ' && c != '\t' && c != '\r' && c != '\n' { + r.fatalError = &LexerError{ + Reason: "invalid character '" + string(c) + "' after top-level value", + Offset: r.pos, + Data: string(r.Data[r.pos:]), + } + return + } + + r.pos++ + r.start++ + } +} + +func (r *Lexer) unsafeString() (string, []byte) { + if r.token.kind == tokenUndef && r.Ok() { + r.FetchToken() + } + if !r.Ok() || r.token.kind != tokenString { + r.errInvalidToken("string") + return "", nil + } + bytes := r.token.byteValue + ret := bytesToStr(r.token.byteValue) + r.consume() + return ret, bytes +} + +// UnsafeString returns the string value if the token is a string literal. +// +// Warning: returned string may point to the input buffer, so the string should not outlive +// the input buffer. Intended pattern of usage is as an argument to a switch statement. +func (r *Lexer) UnsafeString() string { + ret, _ := r.unsafeString() + return ret +} + +// UnsafeBytes returns the byte slice if the token is a string literal. +func (r *Lexer) UnsafeBytes() []byte { + _, ret := r.unsafeString() + return ret +} + +// String reads a string literal. +func (r *Lexer) String() string { + if r.token.kind == tokenUndef && r.Ok() { + r.FetchToken() + } + if !r.Ok() || r.token.kind != tokenString { + r.errInvalidToken("string") + return "" + } + ret := string(r.token.byteValue) + r.consume() + return ret +} + +// Bytes reads a string literal and base64 decodes it into a byte slice. 
+func (r *Lexer) Bytes() []byte { + if r.token.kind == tokenUndef && r.Ok() { + r.FetchToken() + } + if !r.Ok() || r.token.kind != tokenString { + r.errInvalidToken("string") + return nil + } + ret := make([]byte, base64.StdEncoding.DecodedLen(len(r.token.byteValue))) + len, err := base64.StdEncoding.Decode(ret, r.token.byteValue) + if err != nil { + r.fatalError = &LexerError{ + Reason: err.Error(), + } + return nil + } + + r.consume() + return ret[:len] +} + +// Bool reads a true or false boolean keyword. +func (r *Lexer) Bool() bool { + if r.token.kind == tokenUndef && r.Ok() { + r.FetchToken() + } + if !r.Ok() || r.token.kind != tokenBool { + r.errInvalidToken("bool") + return false + } + ret := r.token.boolValue + r.consume() + return ret +} + +func (r *Lexer) number() string { + if r.token.kind == tokenUndef && r.Ok() { + r.FetchToken() + } + if !r.Ok() || r.token.kind != tokenNumber { + r.errInvalidToken("number") + return "" + } + ret := bytesToStr(r.token.byteValue) + r.consume() + return ret +} + +func (r *Lexer) Uint8() uint8 { + s := r.number() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseUint(s, 10, 8) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: s, + }) + } + return uint8(n) +} + +func (r *Lexer) Uint16() uint16 { + s := r.number() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseUint(s, 10, 16) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: s, + }) + } + return uint16(n) +} + +func (r *Lexer) Uint32() uint32 { + s := r.number() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseUint(s, 10, 32) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: s, + }) + } + return uint32(n) +} + +func (r *Lexer) Uint64() uint64 { + s := r.number() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseUint(s, 10, 64) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: 
r.start, + Reason: err.Error(), + Data: s, + }) + } + return n +} + +func (r *Lexer) Uint() uint { + return uint(r.Uint64()) +} + +func (r *Lexer) Int8() int8 { + s := r.number() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseInt(s, 10, 8) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: s, + }) + } + return int8(n) +} + +func (r *Lexer) Int16() int16 { + s := r.number() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseInt(s, 10, 16) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: s, + }) + } + return int16(n) +} + +func (r *Lexer) Int32() int32 { + s := r.number() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseInt(s, 10, 32) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: s, + }) + } + return int32(n) +} + +func (r *Lexer) Int64() int64 { + s := r.number() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseInt(s, 10, 64) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: s, + }) + } + return n +} + +func (r *Lexer) Int() int { + return int(r.Int64()) +} + +func (r *Lexer) Uint8Str() uint8 { + s, b := r.unsafeString() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseUint(s, 10, 8) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: string(b), + }) + } + return uint8(n) +} + +func (r *Lexer) Uint16Str() uint16 { + s, b := r.unsafeString() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseUint(s, 10, 16) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: string(b), + }) + } + return uint16(n) +} + +func (r *Lexer) Uint32Str() uint32 { + s, b := r.unsafeString() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseUint(s, 10, 32) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: 
err.Error(), + Data: string(b), + }) + } + return uint32(n) +} + +func (r *Lexer) Uint64Str() uint64 { + s, b := r.unsafeString() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseUint(s, 10, 64) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: string(b), + }) + } + return n +} + +func (r *Lexer) UintStr() uint { + return uint(r.Uint64Str()) +} + +func (r *Lexer) Int8Str() int8 { + s, b := r.unsafeString() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseInt(s, 10, 8) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: string(b), + }) + } + return int8(n) +} + +func (r *Lexer) Int16Str() int16 { + s, b := r.unsafeString() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseInt(s, 10, 16) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: string(b), + }) + } + return int16(n) +} + +func (r *Lexer) Int32Str() int32 { + s, b := r.unsafeString() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseInt(s, 10, 32) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: string(b), + }) + } + return int32(n) +} + +func (r *Lexer) Int64Str() int64 { + s, b := r.unsafeString() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseInt(s, 10, 64) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: string(b), + }) + } + return n +} + +func (r *Lexer) IntStr() int { + return int(r.Int64Str()) +} + +func (r *Lexer) Float32() float32 { + s := r.number() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseFloat(s, 32) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: s, + }) + } + return float32(n) +} + +func (r *Lexer) Float64() float64 { + s := r.number() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseFloat(s, 64) + if err != nil { + 
r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: s, + }) + } + return n +} + +func (r *Lexer) Error() error { + return r.fatalError +} + +func (r *Lexer) AddError(e error) { + if r.fatalError == nil { + r.fatalError = e + } +} + +func (r *Lexer) AddNonFatalError(e error) { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Data: string(r.Data[r.start:r.pos]), + Reason: e.Error(), + }) +} + +func (r *Lexer) addNonfatalError(err *LexerError) { + if r.UseMultipleErrors { + // We don't want to add errors with the same offset. + if len(r.multipleErrors) != 0 && r.multipleErrors[len(r.multipleErrors)-1].Offset == err.Offset { + return + } + r.multipleErrors = append(r.multipleErrors, err) + return + } + r.fatalError = err +} + +func (r *Lexer) GetNonFatalErrors() []*LexerError { + return r.multipleErrors +} + +// Interface fetches an interface{} analogous to the 'encoding/json' package. +func (r *Lexer) Interface() interface{} { + if r.token.kind == tokenUndef && r.Ok() { + r.FetchToken() + } + + if !r.Ok() { + return nil + } + switch r.token.kind { + case tokenString: + return r.String() + case tokenNumber: + return r.Float64() + case tokenBool: + return r.Bool() + case tokenNull: + r.Null() + return nil + } + + if r.token.delimValue == '{' { + r.consume() + + ret := map[string]interface{}{} + for !r.IsDelim('}') { + key := r.String() + r.WantColon() + ret[key] = r.Interface() + r.WantComma() + } + r.Delim('}') + + if r.Ok() { + return ret + } else { + return nil + } + } else if r.token.delimValue == '[' { + r.consume() + + var ret []interface{} + for !r.IsDelim(']') { + ret = append(ret, r.Interface()) + r.WantComma() + } + r.Delim(']') + + if r.Ok() { + return ret + } else { + return nil + } + } + r.errSyntax() + return nil +} + +// WantComma requires a comma to be present before fetching next token. 
+func (r *Lexer) WantComma() { + r.wantSep = ',' + r.firstElement = false +} + +// WantColon requires a colon to be present before fetching next token. +func (r *Lexer) WantColon() { + r.wantSep = ':' + r.firstElement = false +} diff --git a/src/vendor/github.com/mailru/easyjson/jlexer/lexer_test.go b/src/vendor/github.com/mailru/easyjson/jlexer/lexer_test.go new file mode 100644 index 00000000..1c97063e --- /dev/null +++ b/src/vendor/github.com/mailru/easyjson/jlexer/lexer_test.go @@ -0,0 +1,251 @@ +package jlexer + +import ( + "bytes" + "reflect" + "testing" +) + +func TestString(t *testing.T) { + for i, test := range []struct { + toParse string + want string + wantError bool + }{ + {toParse: `"simple string"`, want: "simple string"}, + {toParse: " \r\r\n\t " + `"test"`, want: "test"}, + {toParse: `"\n\t\"\/\\\f\r"`, want: "\n\t\"/\\\f\r"}, + {toParse: `"\u0020"`, want: " "}, + {toParse: `"\u0020-\t"`, want: " -\t"}, + {toParse: `"\ufffd\uFFFD"`, want: "\ufffd\ufffd"}, + {toParse: `"\ud83d\ude00"`, want: "😀"}, + {toParse: `"\ud83d\ude08"`, want: "😈"}, + {toParse: `"\ud8"`, wantError: true}, + + {toParse: `"test"junk`, want: "test"}, + + {toParse: `5`, wantError: true}, // not a string + {toParse: `"\x"`, wantError: true}, // invalid escape + {toParse: `"\ud800"`, want: "�"}, // invalid utf-8 char; return replacement char + } { + l := Lexer{Data: []byte(test.toParse)} + + got := l.String() + if got != test.want { + t.Errorf("[%d, %q] String() = %v; want %v", i, test.toParse, got, test.want) + } + err := l.Error() + if err != nil && !test.wantError { + t.Errorf("[%d, %q] String() error: %v", i, test.toParse, err) + } else if err == nil && test.wantError { + t.Errorf("[%d, %q] String() ok; want error", i, test.toParse) + } + } +} + +func TestBytes(t *testing.T) { + for i, test := range []struct { + toParse string + want string + wantError bool + }{ + {toParse: `"c2ltcGxlIHN0cmluZw=="`, want: "simple string"}, + {toParse: " \r\r\n\t " + `"dGVzdA=="`, want: "test"}, + 
+ {toParse: `5`, wantError: true}, // not a JSON string + {toParse: `"foobar"`, wantError: true}, // not base64 encoded + {toParse: `"c2ltcGxlIHN0cmluZw="`, wantError: true}, // invalid base64 padding + } { + l := Lexer{Data: []byte(test.toParse)} + + got := l.Bytes() + if bytes.Compare(got, []byte(test.want)) != 0 { + t.Errorf("[%d, %q] Bytes() = %v; want: %v", i, test.toParse, got, []byte(test.want)) + } + err := l.Error() + if err != nil && !test.wantError { + t.Errorf("[%d, %q] Bytes() error: %v", i, test.toParse, err) + } else if err == nil && test.wantError { + t.Errorf("[%d, %q] Bytes() ok; want error", i, test.toParse) + } + } +} + +func TestNumber(t *testing.T) { + for i, test := range []struct { + toParse string + want string + wantError bool + }{ + {toParse: "123", want: "123"}, + {toParse: "-123", want: "-123"}, + {toParse: "\r\n12.35", want: "12.35"}, + {toParse: "12.35e+1", want: "12.35e+1"}, + {toParse: "12.35e-15", want: "12.35e-15"}, + {toParse: "12.35E-15", want: "12.35E-15"}, + {toParse: "12.35E15", want: "12.35E15"}, + + {toParse: `"a"`, wantError: true}, + {toParse: "123junk", wantError: true}, + {toParse: "1.2.3", wantError: true}, + {toParse: "1e2e3", wantError: true}, + {toParse: "1e2.3", wantError: true}, + } { + l := Lexer{Data: []byte(test.toParse)} + + got := l.number() + if got != test.want { + t.Errorf("[%d, %q] number() = %v; want %v", i, test.toParse, got, test.want) + } + err := l.Error() + if err != nil && !test.wantError { + t.Errorf("[%d, %q] number() error: %v", i, test.toParse, err) + } else if err == nil && test.wantError { + t.Errorf("[%d, %q] number() ok; want error", i, test.toParse) + } + } +} + +func TestBool(t *testing.T) { + for i, test := range []struct { + toParse string + want bool + wantError bool + }{ + {toParse: "true", want: true}, + {toParse: "false", want: false}, + + {toParse: "1", wantError: true}, + {toParse: "truejunk", wantError: true}, + {toParse: `false"junk"`, wantError: true}, + {toParse: "True", 
wantError: true}, + {toParse: "False", wantError: true}, + } { + l := Lexer{Data: []byte(test.toParse)} + + got := l.Bool() + if got != test.want { + t.Errorf("[%d, %q] Bool() = %v; want %v", i, test.toParse, got, test.want) + } + err := l.Error() + if err != nil && !test.wantError { + t.Errorf("[%d, %q] Bool() error: %v", i, test.toParse, err) + } else if err == nil && test.wantError { + t.Errorf("[%d, %q] Bool() ok; want error", i, test.toParse) + } + } +} + +func TestSkipRecursive(t *testing.T) { + for i, test := range []struct { + toParse string + left string + wantError bool + }{ + {toParse: "5, 4", left: ", 4"}, + {toParse: "[5, 6], 4", left: ", 4"}, + {toParse: "[5, [7,8]]: 4", left: ": 4"}, + + {toParse: `{"a":1}, 4`, left: ", 4"}, + {toParse: `{"a":1, "b":{"c": 5}, "e":[12,15]}, 4`, left: ", 4"}, + + // array start/end chars in a string + {toParse: `[5, "]"], 4`, left: ", 4"}, + {toParse: `[5, "\"]"], 4`, left: ", 4"}, + {toParse: `[5, "["], 4`, left: ", 4"}, + {toParse: `[5, "\"["], 4`, left: ", 4"}, + + // object start/end chars in a string + {toParse: `{"a}":1}, 4`, left: ", 4"}, + {toParse: `{"a\"}":1}, 4`, left: ", 4"}, + {toParse: `{"a{":1}, 4`, left: ", 4"}, + {toParse: `{"a\"{":1}, 4`, left: ", 4"}, + + // object with double slashes at the end of string + {toParse: `{"a":"hey\\"}, 4`, left: ", 4"}, + } { + l := Lexer{Data: []byte(test.toParse)} + + l.SkipRecursive() + + got := string(l.Data[l.pos:]) + if got != test.left { + t.Errorf("[%d, %q] SkipRecursive() left = %v; want %v", i, test.toParse, got, test.left) + } + err := l.Error() + if err != nil && !test.wantError { + t.Errorf("[%d, %q] SkipRecursive() error: %v", i, test.toParse, err) + } else if err == nil && test.wantError { + t.Errorf("[%d, %q] SkipRecursive() ok; want error", i, test.toParse) + } + } +} + +func TestInterface(t *testing.T) { + for i, test := range []struct { + toParse string + want interface{} + wantError bool + }{ + {toParse: "null", want: nil}, + {toParse: "true", want: 
true}, + {toParse: `"a"`, want: "a"}, + {toParse: "5", want: float64(5)}, + + {toParse: `{}`, want: map[string]interface{}{}}, + {toParse: `[]`, want: []interface{}(nil)}, + + {toParse: `{"a": "b"}`, want: map[string]interface{}{"a": "b"}}, + {toParse: `[5]`, want: []interface{}{float64(5)}}, + + {toParse: `{"a":5 , "b" : "string"}`, want: map[string]interface{}{"a": float64(5), "b": "string"}}, + {toParse: `["a", 5 , null, true]`, want: []interface{}{"a", float64(5), nil, true}}, + + {toParse: `{"a" "b"}`, wantError: true}, + {toParse: `{"a": "b",}`, wantError: true}, + {toParse: `{"a":"b","c" "b"}`, wantError: true}, + {toParse: `{"a": "b","c":"d",}`, wantError: true}, + {toParse: `{,}`, wantError: true}, + + {toParse: `[1, 2,]`, wantError: true}, + {toParse: `[1 2]`, wantError: true}, + {toParse: `[,]`, wantError: true}, + } { + l := Lexer{Data: []byte(test.toParse)} + + got := l.Interface() + if !reflect.DeepEqual(got, test.want) { + t.Errorf("[%d, %q] Interface() = %v; want %v", i, test.toParse, got, test.want) + } + err := l.Error() + if err != nil && !test.wantError { + t.Errorf("[%d, %q] Interface() error: %v", i, test.toParse, err) + } else if err == nil && test.wantError { + t.Errorf("[%d, %q] Interface() ok; want error", i, test.toParse) + } + } +} + +func TestConsumed(t *testing.T) { + for i, test := range []struct { + toParse string + wantError bool + }{ + {toParse: "", wantError: false}, + {toParse: " ", wantError: false}, + {toParse: "\r\n", wantError: false}, + {toParse: "\t\t", wantError: false}, + + {toParse: "{", wantError: true}, + } { + l := Lexer{Data: []byte(test.toParse)} + l.Consumed() + + err := l.Error() + if err != nil && !test.wantError { + t.Errorf("[%d, %q] Consumed() error: %v", i, test.toParse, err) + } else if err == nil && test.wantError { + t.Errorf("[%d, %q] Consumed() ok; want error", i, test.toParse) + } + } +} diff --git a/src/vendor/github.com/mailru/easyjson/jwriter/writer.go 
b/src/vendor/github.com/mailru/easyjson/jwriter/writer.go new file mode 100644 index 00000000..7b55293a --- /dev/null +++ b/src/vendor/github.com/mailru/easyjson/jwriter/writer.go @@ -0,0 +1,328 @@ +// Package jwriter contains a JSON writer. +package jwriter + +import ( + "encoding/base64" + "io" + "strconv" + "unicode/utf8" + + "github.com/mailru/easyjson/buffer" +) + +// Flags describe various encoding options. The behavior may be actually implemented in the encoder, but +// Flags field in Writer is used to set and pass them around. +type Flags int + +const ( + NilMapAsEmpty Flags = 1 << iota // Encode nil map as '{}' rather than 'null'. + NilSliceAsEmpty // Encode nil slice as '[]' rather than 'null'. +) + +// Writer is a JSON writer. +type Writer struct { + Flags Flags + + Error error + Buffer buffer.Buffer + NoEscapeHTML bool +} + +// Size returns the size of the data that was written out. +func (w *Writer) Size() int { + return w.Buffer.Size() +} + +// DumpTo outputs the data to given io.Writer, resetting the buffer. +func (w *Writer) DumpTo(out io.Writer) (written int, err error) { + return w.Buffer.DumpTo(out) +} + +// BuildBytes returns writer data as a single byte slice. You can optionally provide one byte slice +// as argument that it will try to reuse. +func (w *Writer) BuildBytes(reuse ...[]byte) ([]byte, error) { + if w.Error != nil { + return nil, w.Error + } + + return w.Buffer.BuildBytes(reuse...), nil +} + +// ReadCloser returns an io.ReadCloser that can be used to read the data. +// ReadCloser also resets the buffer. +func (w *Writer) ReadCloser() (io.ReadCloser, error) { + if w.Error != nil { + return nil, w.Error + } + + return w.Buffer.ReadCloser(), nil +} + +// RawByte appends raw binary data to the buffer. +func (w *Writer) RawByte(c byte) { + w.Buffer.AppendByte(c) +} + +// RawByte appends raw binary data to the buffer. 
+func (w *Writer) RawString(s string) { + w.Buffer.AppendString(s) +} + +// Raw appends raw binary data to the buffer or sets the error if it is given. Useful for +// calling with results of MarshalJSON-like functions. +func (w *Writer) Raw(data []byte, err error) { + switch { + case w.Error != nil: + return + case err != nil: + w.Error = err + case len(data) > 0: + w.Buffer.AppendBytes(data) + default: + w.RawString("null") + } +} + +// RawText encloses raw binary data in quotes and appends in to the buffer. +// Useful for calling with results of MarshalText-like functions. +func (w *Writer) RawText(data []byte, err error) { + switch { + case w.Error != nil: + return + case err != nil: + w.Error = err + case len(data) > 0: + w.String(string(data)) + default: + w.RawString("null") + } +} + +// Base64Bytes appends data to the buffer after base64 encoding it +func (w *Writer) Base64Bytes(data []byte) { + if data == nil { + w.Buffer.AppendString("null") + return + } + w.Buffer.AppendByte('"') + dst := make([]byte, base64.StdEncoding.EncodedLen(len(data))) + base64.StdEncoding.Encode(dst, data) + w.Buffer.AppendBytes(dst) + w.Buffer.AppendByte('"') +} + +func (w *Writer) Uint8(n uint8) { + w.Buffer.EnsureSpace(3) + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) +} + +func (w *Writer) Uint16(n uint16) { + w.Buffer.EnsureSpace(5) + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) +} + +func (w *Writer) Uint32(n uint32) { + w.Buffer.EnsureSpace(10) + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) +} + +func (w *Writer) Uint(n uint) { + w.Buffer.EnsureSpace(20) + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) +} + +func (w *Writer) Uint64(n uint64) { + w.Buffer.EnsureSpace(20) + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, n, 10) +} + +func (w *Writer) Int8(n int8) { + w.Buffer.EnsureSpace(4) + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) +} + +func (w *Writer) Int16(n int16) { + 
w.Buffer.EnsureSpace(6) + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) +} + +func (w *Writer) Int32(n int32) { + w.Buffer.EnsureSpace(11) + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) +} + +func (w *Writer) Int(n int) { + w.Buffer.EnsureSpace(21) + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) +} + +func (w *Writer) Int64(n int64) { + w.Buffer.EnsureSpace(21) + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, n, 10) +} + +func (w *Writer) Uint8Str(n uint8) { + w.Buffer.EnsureSpace(3) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) Uint16Str(n uint16) { + w.Buffer.EnsureSpace(5) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) Uint32Str(n uint32) { + w.Buffer.EnsureSpace(10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) UintStr(n uint) { + w.Buffer.EnsureSpace(20) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) Uint64Str(n uint64) { + w.Buffer.EnsureSpace(20) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, n, 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) Int8Str(n int8) { + w.Buffer.EnsureSpace(4) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) Int16Str(n int16) { + w.Buffer.EnsureSpace(6) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') 
+} + +func (w *Writer) Int32Str(n int32) { + w.Buffer.EnsureSpace(11) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) IntStr(n int) { + w.Buffer.EnsureSpace(21) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) Int64Str(n int64) { + w.Buffer.EnsureSpace(21) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, n, 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) Float32(n float32) { + w.Buffer.EnsureSpace(20) + w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, float64(n), 'g', -1, 32) +} + +func (w *Writer) Float64(n float64) { + w.Buffer.EnsureSpace(20) + w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, n, 'g', -1, 64) +} + +func (w *Writer) Bool(v bool) { + w.Buffer.EnsureSpace(5) + if v { + w.Buffer.Buf = append(w.Buffer.Buf, "true"...) + } else { + w.Buffer.Buf = append(w.Buffer.Buf, "false"...) + } +} + +const chars = "0123456789abcdef" + +func isNotEscapedSingleChar(c byte, escapeHTML bool) bool { + // Note: might make sense to use a table if there are more chars to escape. With 4 chars + // it benchmarks the same. + if escapeHTML { + return c != '<' && c != '>' && c != '&' && c != '\\' && c != '"' && c >= 0x20 && c < utf8.RuneSelf + } else { + return c != '\\' && c != '"' && c >= 0x20 && c < utf8.RuneSelf + } +} + +func (w *Writer) String(s string) { + w.Buffer.AppendByte('"') + + // Portions of the string that contain no escapes are appended as + // byte slices. 
+ + p := 0 // last non-escape symbol + + for i := 0; i < len(s); { + c := s[i] + + if isNotEscapedSingleChar(c, !w.NoEscapeHTML) { + // single-width character, no escaping is required + i++ + continue + } else if c < utf8.RuneSelf { + // single-with character, need to escape + w.Buffer.AppendString(s[p:i]) + switch c { + case '\t': + w.Buffer.AppendString(`\t`) + case '\r': + w.Buffer.AppendString(`\r`) + case '\n': + w.Buffer.AppendString(`\n`) + case '\\': + w.Buffer.AppendString(`\\`) + case '"': + w.Buffer.AppendString(`\"`) + default: + w.Buffer.AppendString(`\u00`) + w.Buffer.AppendByte(chars[c>>4]) + w.Buffer.AppendByte(chars[c&0xf]) + } + + i++ + p = i + continue + } + + // broken utf + runeValue, runeWidth := utf8.DecodeRuneInString(s[i:]) + if runeValue == utf8.RuneError && runeWidth == 1 { + w.Buffer.AppendString(s[p:i]) + w.Buffer.AppendString(`\ufffd`) + i++ + p = i + continue + } + + // jsonp stuff - tab separator and line separator + if runeValue == '\u2028' || runeValue == '\u2029' { + w.Buffer.AppendString(s[p:i]) + w.Buffer.AppendString(`\u202`) + w.Buffer.AppendByte(chars[runeValue&0xf]) + i += runeWidth + p = i + continue + } + i += runeWidth + } + w.Buffer.AppendString(s[p:]) + w.Buffer.AppendByte('"') +} diff --git a/src/vendor/github.com/mailru/easyjson/opt/gotemplate_Bool.go b/src/vendor/github.com/mailru/easyjson/opt/gotemplate_Bool.go new file mode 100644 index 00000000..89e61326 --- /dev/null +++ b/src/vendor/github.com/mailru/easyjson/opt/gotemplate_Bool.go @@ -0,0 +1,79 @@ +// generated by gotemplate + +package opt + +import ( + "fmt" + + "github.com/mailru/easyjson/jlexer" + "github.com/mailru/easyjson/jwriter" +) + +// template type Optional(A) + +// A 'gotemplate'-based type for providing optional semantics without using pointers. +type Bool struct { + V bool + Defined bool +} + +// Creates an optional type with a given value. 
+func OBool(v bool) Bool { + return Bool{V: v, Defined: true} +} + +// Get returns the value or given default in the case the value is undefined. +func (v Bool) Get(deflt bool) bool { + if !v.Defined { + return deflt + } + return v.V +} + +// MarshalEasyJSON does JSON marshaling using easyjson interface. +func (v Bool) MarshalEasyJSON(w *jwriter.Writer) { + if v.Defined { + w.Bool(v.V) + } else { + w.RawString("null") + } +} + +// UnmarshalEasyJSON does JSON unmarshaling using easyjson interface. +func (v *Bool) UnmarshalEasyJSON(l *jlexer.Lexer) { + if l.IsNull() { + l.Skip() + *v = Bool{} + } else { + v.V = l.Bool() + v.Defined = true + } +} + +// MarshalJSON implements a standard json marshaler interface. +func (v *Bool) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + v.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} + +// MarshalJSON implements a standard json marshaler interface. +func (v *Bool) UnmarshalJSON(data []byte) error { + l := jlexer.Lexer{} + v.UnmarshalEasyJSON(&l) + return l.Error() +} + +// IsDefined returns whether the value is defined, a function is required so that it can +// be used in an interface. +func (v Bool) IsDefined() bool { + return v.Defined +} + +// String implements a stringer interface using fmt.Sprint for the value. +func (v Bool) String() string { + if !v.Defined { + return "" + } + return fmt.Sprint(v.V) +} diff --git a/src/vendor/github.com/mailru/easyjson/opt/gotemplate_Float32.go b/src/vendor/github.com/mailru/easyjson/opt/gotemplate_Float32.go new file mode 100644 index 00000000..93ade1c7 --- /dev/null +++ b/src/vendor/github.com/mailru/easyjson/opt/gotemplate_Float32.go @@ -0,0 +1,79 @@ +// generated by gotemplate + +package opt + +import ( + "fmt" + + "github.com/mailru/easyjson/jlexer" + "github.com/mailru/easyjson/jwriter" +) + +// template type Optional(A) + +// A 'gotemplate'-based type for providing optional semantics without using pointers. 
+type Float32 struct { + V float32 + Defined bool +} + +// Creates an optional type with a given value. +func OFloat32(v float32) Float32 { + return Float32{V: v, Defined: true} +} + +// Get returns the value or given default in the case the value is undefined. +func (v Float32) Get(deflt float32) float32 { + if !v.Defined { + return deflt + } + return v.V +} + +// MarshalEasyJSON does JSON marshaling using easyjson interface. +func (v Float32) MarshalEasyJSON(w *jwriter.Writer) { + if v.Defined { + w.Float32(v.V) + } else { + w.RawString("null") + } +} + +// UnmarshalEasyJSON does JSON unmarshaling using easyjson interface. +func (v *Float32) UnmarshalEasyJSON(l *jlexer.Lexer) { + if l.IsNull() { + l.Skip() + *v = Float32{} + } else { + v.V = l.Float32() + v.Defined = true + } +} + +// MarshalJSON implements a standard json marshaler interface. +func (v *Float32) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + v.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} + +// MarshalJSON implements a standard json marshaler interface. +func (v *Float32) UnmarshalJSON(data []byte) error { + l := jlexer.Lexer{} + v.UnmarshalEasyJSON(&l) + return l.Error() +} + +// IsDefined returns whether the value is defined, a function is required so that it can +// be used in an interface. +func (v Float32) IsDefined() bool { + return v.Defined +} + +// String implements a stringer interface using fmt.Sprint for the value. 
+func (v Float32) String() string { + if !v.Defined { + return "" + } + return fmt.Sprint(v.V) +} diff --git a/src/vendor/github.com/mailru/easyjson/opt/gotemplate_Float64.go b/src/vendor/github.com/mailru/easyjson/opt/gotemplate_Float64.go new file mode 100644 index 00000000..90af91b0 --- /dev/null +++ b/src/vendor/github.com/mailru/easyjson/opt/gotemplate_Float64.go @@ -0,0 +1,79 @@ +// generated by gotemplate + +package opt + +import ( + "fmt" + + "github.com/mailru/easyjson/jlexer" + "github.com/mailru/easyjson/jwriter" +) + +// template type Optional(A) + +// A 'gotemplate'-based type for providing optional semantics without using pointers. +type Float64 struct { + V float64 + Defined bool +} + +// Creates an optional type with a given value. +func OFloat64(v float64) Float64 { + return Float64{V: v, Defined: true} +} + +// Get returns the value or given default in the case the value is undefined. +func (v Float64) Get(deflt float64) float64 { + if !v.Defined { + return deflt + } + return v.V +} + +// MarshalEasyJSON does JSON marshaling using easyjson interface. +func (v Float64) MarshalEasyJSON(w *jwriter.Writer) { + if v.Defined { + w.Float64(v.V) + } else { + w.RawString("null") + } +} + +// UnmarshalEasyJSON does JSON unmarshaling using easyjson interface. +func (v *Float64) UnmarshalEasyJSON(l *jlexer.Lexer) { + if l.IsNull() { + l.Skip() + *v = Float64{} + } else { + v.V = l.Float64() + v.Defined = true + } +} + +// MarshalJSON implements a standard json marshaler interface. +func (v *Float64) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + v.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} + +// MarshalJSON implements a standard json marshaler interface. +func (v *Float64) UnmarshalJSON(data []byte) error { + l := jlexer.Lexer{} + v.UnmarshalEasyJSON(&l) + return l.Error() +} + +// IsDefined returns whether the value is defined, a function is required so that it can +// be used in an interface. 
+func (v Float64) IsDefined() bool { + return v.Defined +} + +// String implements a stringer interface using fmt.Sprint for the value. +func (v Float64) String() string { + if !v.Defined { + return "" + } + return fmt.Sprint(v.V) +} diff --git a/src/vendor/github.com/mailru/easyjson/opt/gotemplate_Int.go b/src/vendor/github.com/mailru/easyjson/opt/gotemplate_Int.go new file mode 100644 index 00000000..71e74e83 --- /dev/null +++ b/src/vendor/github.com/mailru/easyjson/opt/gotemplate_Int.go @@ -0,0 +1,79 @@ +// generated by gotemplate + +package opt + +import ( + "fmt" + + "github.com/mailru/easyjson/jlexer" + "github.com/mailru/easyjson/jwriter" +) + +// template type Optional(A) + +// A 'gotemplate'-based type for providing optional semantics without using pointers. +type Int struct { + V int + Defined bool +} + +// Creates an optional type with a given value. +func OInt(v int) Int { + return Int{V: v, Defined: true} +} + +// Get returns the value or given default in the case the value is undefined. +func (v Int) Get(deflt int) int { + if !v.Defined { + return deflt + } + return v.V +} + +// MarshalEasyJSON does JSON marshaling using easyjson interface. +func (v Int) MarshalEasyJSON(w *jwriter.Writer) { + if v.Defined { + w.Int(v.V) + } else { + w.RawString("null") + } +} + +// UnmarshalEasyJSON does JSON unmarshaling using easyjson interface. +func (v *Int) UnmarshalEasyJSON(l *jlexer.Lexer) { + if l.IsNull() { + l.Skip() + *v = Int{} + } else { + v.V = l.Int() + v.Defined = true + } +} + +// MarshalJSON implements a standard json marshaler interface. +func (v *Int) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + v.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} + +// MarshalJSON implements a standard json marshaler interface. 
+func (v *Int) UnmarshalJSON(data []byte) error { + l := jlexer.Lexer{} + v.UnmarshalEasyJSON(&l) + return l.Error() +} + +// IsDefined returns whether the value is defined, a function is required so that it can +// be used in an interface. +func (v Int) IsDefined() bool { + return v.Defined +} + +// String implements a stringer interface using fmt.Sprint for the value. +func (v Int) String() string { + if !v.Defined { + return "" + } + return fmt.Sprint(v.V) +} diff --git a/src/vendor/github.com/mailru/easyjson/opt/gotemplate_Int16.go b/src/vendor/github.com/mailru/easyjson/opt/gotemplate_Int16.go new file mode 100644 index 00000000..987e3df6 --- /dev/null +++ b/src/vendor/github.com/mailru/easyjson/opt/gotemplate_Int16.go @@ -0,0 +1,79 @@ +// generated by gotemplate + +package opt + +import ( + "fmt" + + "github.com/mailru/easyjson/jlexer" + "github.com/mailru/easyjson/jwriter" +) + +// template type Optional(A) + +// A 'gotemplate'-based type for providing optional semantics without using pointers. +type Int16 struct { + V int16 + Defined bool +} + +// Creates an optional type with a given value. +func OInt16(v int16) Int16 { + return Int16{V: v, Defined: true} +} + +// Get returns the value or given default in the case the value is undefined. +func (v Int16) Get(deflt int16) int16 { + if !v.Defined { + return deflt + } + return v.V +} + +// MarshalEasyJSON does JSON marshaling using easyjson interface. +func (v Int16) MarshalEasyJSON(w *jwriter.Writer) { + if v.Defined { + w.Int16(v.V) + } else { + w.RawString("null") + } +} + +// UnmarshalEasyJSON does JSON unmarshaling using easyjson interface. +func (v *Int16) UnmarshalEasyJSON(l *jlexer.Lexer) { + if l.IsNull() { + l.Skip() + *v = Int16{} + } else { + v.V = l.Int16() + v.Defined = true + } +} + +// MarshalJSON implements a standard json marshaler interface. 
+func (v *Int16) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + v.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} + +// MarshalJSON implements a standard json marshaler interface. +func (v *Int16) UnmarshalJSON(data []byte) error { + l := jlexer.Lexer{} + v.UnmarshalEasyJSON(&l) + return l.Error() +} + +// IsDefined returns whether the value is defined, a function is required so that it can +// be used in an interface. +func (v Int16) IsDefined() bool { + return v.Defined +} + +// String implements a stringer interface using fmt.Sprint for the value. +func (v Int16) String() string { + if !v.Defined { + return "" + } + return fmt.Sprint(v.V) +} diff --git a/src/vendor/github.com/mailru/easyjson/opt/gotemplate_Int32.go b/src/vendor/github.com/mailru/easyjson/opt/gotemplate_Int32.go new file mode 100644 index 00000000..e5f30d8c --- /dev/null +++ b/src/vendor/github.com/mailru/easyjson/opt/gotemplate_Int32.go @@ -0,0 +1,79 @@ +// generated by gotemplate + +package opt + +import ( + "fmt" + + "github.com/mailru/easyjson/jlexer" + "github.com/mailru/easyjson/jwriter" +) + +// template type Optional(A) + +// A 'gotemplate'-based type for providing optional semantics without using pointers. +type Int32 struct { + V int32 + Defined bool +} + +// Creates an optional type with a given value. +func OInt32(v int32) Int32 { + return Int32{V: v, Defined: true} +} + +// Get returns the value or given default in the case the value is undefined. +func (v Int32) Get(deflt int32) int32 { + if !v.Defined { + return deflt + } + return v.V +} + +// MarshalEasyJSON does JSON marshaling using easyjson interface. +func (v Int32) MarshalEasyJSON(w *jwriter.Writer) { + if v.Defined { + w.Int32(v.V) + } else { + w.RawString("null") + } +} + +// UnmarshalEasyJSON does JSON unmarshaling using easyjson interface. 
+func (v *Int32) UnmarshalEasyJSON(l *jlexer.Lexer) { + if l.IsNull() { + l.Skip() + *v = Int32{} + } else { + v.V = l.Int32() + v.Defined = true + } +} + +// MarshalJSON implements a standard json marshaler interface. +func (v *Int32) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + v.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} + +// MarshalJSON implements a standard json marshaler interface. +func (v *Int32) UnmarshalJSON(data []byte) error { + l := jlexer.Lexer{} + v.UnmarshalEasyJSON(&l) + return l.Error() +} + +// IsDefined returns whether the value is defined, a function is required so that it can +// be used in an interface. +func (v Int32) IsDefined() bool { + return v.Defined +} + +// String implements a stringer interface using fmt.Sprint for the value. +func (v Int32) String() string { + if !v.Defined { + return "" + } + return fmt.Sprint(v.V) +} diff --git a/src/vendor/github.com/mailru/easyjson/opt/gotemplate_Int64.go b/src/vendor/github.com/mailru/easyjson/opt/gotemplate_Int64.go new file mode 100644 index 00000000..ff67a335 --- /dev/null +++ b/src/vendor/github.com/mailru/easyjson/opt/gotemplate_Int64.go @@ -0,0 +1,79 @@ +// generated by gotemplate + +package opt + +import ( + "fmt" + + "github.com/mailru/easyjson/jlexer" + "github.com/mailru/easyjson/jwriter" +) + +// template type Optional(A) + +// A 'gotemplate'-based type for providing optional semantics without using pointers. +type Int64 struct { + V int64 + Defined bool +} + +// Creates an optional type with a given value. +func OInt64(v int64) Int64 { + return Int64{V: v, Defined: true} +} + +// Get returns the value or given default in the case the value is undefined. +func (v Int64) Get(deflt int64) int64 { + if !v.Defined { + return deflt + } + return v.V +} + +// MarshalEasyJSON does JSON marshaling using easyjson interface. 
+func (v Int64) MarshalEasyJSON(w *jwriter.Writer) { + if v.Defined { + w.Int64(v.V) + } else { + w.RawString("null") + } +} + +// UnmarshalEasyJSON does JSON unmarshaling using easyjson interface. +func (v *Int64) UnmarshalEasyJSON(l *jlexer.Lexer) { + if l.IsNull() { + l.Skip() + *v = Int64{} + } else { + v.V = l.Int64() + v.Defined = true + } +} + +// MarshalJSON implements a standard json marshaler interface. +func (v *Int64) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + v.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} + +// MarshalJSON implements a standard json marshaler interface. +func (v *Int64) UnmarshalJSON(data []byte) error { + l := jlexer.Lexer{} + v.UnmarshalEasyJSON(&l) + return l.Error() +} + +// IsDefined returns whether the value is defined, a function is required so that it can +// be used in an interface. +func (v Int64) IsDefined() bool { + return v.Defined +} + +// String implements a stringer interface using fmt.Sprint for the value. +func (v Int64) String() string { + if !v.Defined { + return "" + } + return fmt.Sprint(v.V) +} diff --git a/src/vendor/github.com/mailru/easyjson/opt/gotemplate_Int8.go b/src/vendor/github.com/mailru/easyjson/opt/gotemplate_Int8.go new file mode 100644 index 00000000..41312d17 --- /dev/null +++ b/src/vendor/github.com/mailru/easyjson/opt/gotemplate_Int8.go @@ -0,0 +1,79 @@ +// generated by gotemplate + +package opt + +import ( + "fmt" + + "github.com/mailru/easyjson/jlexer" + "github.com/mailru/easyjson/jwriter" +) + +// template type Optional(A) + +// A 'gotemplate'-based type for providing optional semantics without using pointers. +type Int8 struct { + V int8 + Defined bool +} + +// Creates an optional type with a given value. +func OInt8(v int8) Int8 { + return Int8{V: v, Defined: true} +} + +// Get returns the value or given default in the case the value is undefined. 
+func (v Int8) Get(deflt int8) int8 { + if !v.Defined { + return deflt + } + return v.V +} + +// MarshalEasyJSON does JSON marshaling using easyjson interface. +func (v Int8) MarshalEasyJSON(w *jwriter.Writer) { + if v.Defined { + w.Int8(v.V) + } else { + w.RawString("null") + } +} + +// UnmarshalEasyJSON does JSON unmarshaling using easyjson interface. +func (v *Int8) UnmarshalEasyJSON(l *jlexer.Lexer) { + if l.IsNull() { + l.Skip() + *v = Int8{} + } else { + v.V = l.Int8() + v.Defined = true + } +} + +// MarshalJSON implements a standard json marshaler interface. +func (v *Int8) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + v.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} + +// MarshalJSON implements a standard json marshaler interface. +func (v *Int8) UnmarshalJSON(data []byte) error { + l := jlexer.Lexer{} + v.UnmarshalEasyJSON(&l) + return l.Error() +} + +// IsDefined returns whether the value is defined, a function is required so that it can +// be used in an interface. +func (v Int8) IsDefined() bool { + return v.Defined +} + +// String implements a stringer interface using fmt.Sprint for the value. +func (v Int8) String() string { + if !v.Defined { + return "" + } + return fmt.Sprint(v.V) +} diff --git a/src/vendor/github.com/mailru/easyjson/opt/gotemplate_String.go b/src/vendor/github.com/mailru/easyjson/opt/gotemplate_String.go new file mode 100644 index 00000000..3d818fa3 --- /dev/null +++ b/src/vendor/github.com/mailru/easyjson/opt/gotemplate_String.go @@ -0,0 +1,79 @@ +// generated by gotemplate + +package opt + +import ( + "fmt" + + "github.com/mailru/easyjson/jlexer" + "github.com/mailru/easyjson/jwriter" +) + +// template type Optional(A) + +// A 'gotemplate'-based type for providing optional semantics without using pointers. +type String struct { + V string + Defined bool +} + +// Creates an optional type with a given value. 
+func OString(v string) String { + return String{V: v, Defined: true} +} + +// Get returns the value or given default in the case the value is undefined. +func (v String) Get(deflt string) string { + if !v.Defined { + return deflt + } + return v.V +} + +// MarshalEasyJSON does JSON marshaling using easyjson interface. +func (v String) MarshalEasyJSON(w *jwriter.Writer) { + if v.Defined { + w.String(v.V) + } else { + w.RawString("null") + } +} + +// UnmarshalEasyJSON does JSON unmarshaling using easyjson interface. +func (v *String) UnmarshalEasyJSON(l *jlexer.Lexer) { + if l.IsNull() { + l.Skip() + *v = String{} + } else { + v.V = l.String() + v.Defined = true + } +} + +// MarshalJSON implements a standard json marshaler interface. +func (v *String) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + v.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} + +// MarshalJSON implements a standard json marshaler interface. +func (v *String) UnmarshalJSON(data []byte) error { + l := jlexer.Lexer{} + v.UnmarshalEasyJSON(&l) + return l.Error() +} + +// IsDefined returns whether the value is defined, a function is required so that it can +// be used in an interface. +func (v String) IsDefined() bool { + return v.Defined +} + +// String implements a stringer interface using fmt.Sprint for the value. +func (v String) String() string { + if !v.Defined { + return "" + } + return fmt.Sprint(v.V) +} diff --git a/src/vendor/github.com/mailru/easyjson/opt/gotemplate_Uint.go b/src/vendor/github.com/mailru/easyjson/opt/gotemplate_Uint.go new file mode 100644 index 00000000..367db675 --- /dev/null +++ b/src/vendor/github.com/mailru/easyjson/opt/gotemplate_Uint.go @@ -0,0 +1,79 @@ +// generated by gotemplate + +package opt + +import ( + "fmt" + + "github.com/mailru/easyjson/jlexer" + "github.com/mailru/easyjson/jwriter" +) + +// template type Optional(A) + +// A 'gotemplate'-based type for providing optional semantics without using pointers. 
+type Uint struct { + V uint + Defined bool +} + +// Creates an optional type with a given value. +func OUint(v uint) Uint { + return Uint{V: v, Defined: true} +} + +// Get returns the value or given default in the case the value is undefined. +func (v Uint) Get(deflt uint) uint { + if !v.Defined { + return deflt + } + return v.V +} + +// MarshalEasyJSON does JSON marshaling using easyjson interface. +func (v Uint) MarshalEasyJSON(w *jwriter.Writer) { + if v.Defined { + w.Uint(v.V) + } else { + w.RawString("null") + } +} + +// UnmarshalEasyJSON does JSON unmarshaling using easyjson interface. +func (v *Uint) UnmarshalEasyJSON(l *jlexer.Lexer) { + if l.IsNull() { + l.Skip() + *v = Uint{} + } else { + v.V = l.Uint() + v.Defined = true + } +} + +// MarshalJSON implements a standard json marshaler interface. +func (v *Uint) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + v.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} + +// MarshalJSON implements a standard json marshaler interface. +func (v *Uint) UnmarshalJSON(data []byte) error { + l := jlexer.Lexer{} + v.UnmarshalEasyJSON(&l) + return l.Error() +} + +// IsDefined returns whether the value is defined, a function is required so that it can +// be used in an interface. +func (v Uint) IsDefined() bool { + return v.Defined +} + +// String implements a stringer interface using fmt.Sprint for the value. 
+func (v Uint) String() string { + if !v.Defined { + return "" + } + return fmt.Sprint(v.V) +} diff --git a/src/vendor/github.com/mailru/easyjson/opt/gotemplate_Uint16.go b/src/vendor/github.com/mailru/easyjson/opt/gotemplate_Uint16.go new file mode 100644 index 00000000..6abc71dd --- /dev/null +++ b/src/vendor/github.com/mailru/easyjson/opt/gotemplate_Uint16.go @@ -0,0 +1,79 @@ +// generated by gotemplate + +package opt + +import ( + "fmt" + + "github.com/mailru/easyjson/jlexer" + "github.com/mailru/easyjson/jwriter" +) + +// template type Optional(A) + +// A 'gotemplate'-based type for providing optional semantics without using pointers. +type Uint16 struct { + V uint16 + Defined bool +} + +// Creates an optional type with a given value. +func OUint16(v uint16) Uint16 { + return Uint16{V: v, Defined: true} +} + +// Get returns the value or given default in the case the value is undefined. +func (v Uint16) Get(deflt uint16) uint16 { + if !v.Defined { + return deflt + } + return v.V +} + +// MarshalEasyJSON does JSON marshaling using easyjson interface. +func (v Uint16) MarshalEasyJSON(w *jwriter.Writer) { + if v.Defined { + w.Uint16(v.V) + } else { + w.RawString("null") + } +} + +// UnmarshalEasyJSON does JSON unmarshaling using easyjson interface. +func (v *Uint16) UnmarshalEasyJSON(l *jlexer.Lexer) { + if l.IsNull() { + l.Skip() + *v = Uint16{} + } else { + v.V = l.Uint16() + v.Defined = true + } +} + +// MarshalJSON implements a standard json marshaler interface. +func (v *Uint16) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + v.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} + +// MarshalJSON implements a standard json marshaler interface. +func (v *Uint16) UnmarshalJSON(data []byte) error { + l := jlexer.Lexer{} + v.UnmarshalEasyJSON(&l) + return l.Error() +} + +// IsDefined returns whether the value is defined, a function is required so that it can +// be used in an interface. 
+func (v Uint16) IsDefined() bool { + return v.Defined +} + +// String implements a stringer interface using fmt.Sprint for the value. +func (v Uint16) String() string { + if !v.Defined { + return "" + } + return fmt.Sprint(v.V) +} diff --git a/src/vendor/github.com/mailru/easyjson/opt/gotemplate_Uint32.go b/src/vendor/github.com/mailru/easyjson/opt/gotemplate_Uint32.go new file mode 100644 index 00000000..490945c2 --- /dev/null +++ b/src/vendor/github.com/mailru/easyjson/opt/gotemplate_Uint32.go @@ -0,0 +1,79 @@ +// generated by gotemplate + +package opt + +import ( + "fmt" + + "github.com/mailru/easyjson/jlexer" + "github.com/mailru/easyjson/jwriter" +) + +// template type Optional(A) + +// A 'gotemplate'-based type for providing optional semantics without using pointers. +type Uint32 struct { + V uint32 + Defined bool +} + +// Creates an optional type with a given value. +func OUint32(v uint32) Uint32 { + return Uint32{V: v, Defined: true} +} + +// Get returns the value or given default in the case the value is undefined. +func (v Uint32) Get(deflt uint32) uint32 { + if !v.Defined { + return deflt + } + return v.V +} + +// MarshalEasyJSON does JSON marshaling using easyjson interface. +func (v Uint32) MarshalEasyJSON(w *jwriter.Writer) { + if v.Defined { + w.Uint32(v.V) + } else { + w.RawString("null") + } +} + +// UnmarshalEasyJSON does JSON unmarshaling using easyjson interface. +func (v *Uint32) UnmarshalEasyJSON(l *jlexer.Lexer) { + if l.IsNull() { + l.Skip() + *v = Uint32{} + } else { + v.V = l.Uint32() + v.Defined = true + } +} + +// MarshalJSON implements a standard json marshaler interface. +func (v *Uint32) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + v.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} + +// MarshalJSON implements a standard json marshaler interface. 
+func (v *Uint32) UnmarshalJSON(data []byte) error { + l := jlexer.Lexer{} + v.UnmarshalEasyJSON(&l) + return l.Error() +} + +// IsDefined returns whether the value is defined, a function is required so that it can +// be used in an interface. +func (v Uint32) IsDefined() bool { + return v.Defined +} + +// String implements a stringer interface using fmt.Sprint for the value. +func (v Uint32) String() string { + if !v.Defined { + return "" + } + return fmt.Sprint(v.V) +} diff --git a/src/vendor/github.com/mailru/easyjson/opt/gotemplate_Uint64.go b/src/vendor/github.com/mailru/easyjson/opt/gotemplate_Uint64.go new file mode 100644 index 00000000..37d2d418 --- /dev/null +++ b/src/vendor/github.com/mailru/easyjson/opt/gotemplate_Uint64.go @@ -0,0 +1,79 @@ +// generated by gotemplate + +package opt + +import ( + "fmt" + + "github.com/mailru/easyjson/jlexer" + "github.com/mailru/easyjson/jwriter" +) + +// template type Optional(A) + +// A 'gotemplate'-based type for providing optional semantics without using pointers. +type Uint64 struct { + V uint64 + Defined bool +} + +// Creates an optional type with a given value. +func OUint64(v uint64) Uint64 { + return Uint64{V: v, Defined: true} +} + +// Get returns the value or given default in the case the value is undefined. +func (v Uint64) Get(deflt uint64) uint64 { + if !v.Defined { + return deflt + } + return v.V +} + +// MarshalEasyJSON does JSON marshaling using easyjson interface. +func (v Uint64) MarshalEasyJSON(w *jwriter.Writer) { + if v.Defined { + w.Uint64(v.V) + } else { + w.RawString("null") + } +} + +// UnmarshalEasyJSON does JSON unmarshaling using easyjson interface. +func (v *Uint64) UnmarshalEasyJSON(l *jlexer.Lexer) { + if l.IsNull() { + l.Skip() + *v = Uint64{} + } else { + v.V = l.Uint64() + v.Defined = true + } +} + +// MarshalJSON implements a standard json marshaler interface. 
+func (v *Uint64) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + v.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} + +// MarshalJSON implements a standard json marshaler interface. +func (v *Uint64) UnmarshalJSON(data []byte) error { + l := jlexer.Lexer{} + v.UnmarshalEasyJSON(&l) + return l.Error() +} + +// IsDefined returns whether the value is defined, a function is required so that it can +// be used in an interface. +func (v Uint64) IsDefined() bool { + return v.Defined +} + +// String implements a stringer interface using fmt.Sprint for the value. +func (v Uint64) String() string { + if !v.Defined { + return "" + } + return fmt.Sprint(v.V) +} diff --git a/src/vendor/github.com/mailru/easyjson/opt/gotemplate_Uint8.go b/src/vendor/github.com/mailru/easyjson/opt/gotemplate_Uint8.go new file mode 100644 index 00000000..55c4cdba --- /dev/null +++ b/src/vendor/github.com/mailru/easyjson/opt/gotemplate_Uint8.go @@ -0,0 +1,79 @@ +// generated by gotemplate + +package opt + +import ( + "fmt" + + "github.com/mailru/easyjson/jlexer" + "github.com/mailru/easyjson/jwriter" +) + +// template type Optional(A) + +// A 'gotemplate'-based type for providing optional semantics without using pointers. +type Uint8 struct { + V uint8 + Defined bool +} + +// Creates an optional type with a given value. +func OUint8(v uint8) Uint8 { + return Uint8{V: v, Defined: true} +} + +// Get returns the value or given default in the case the value is undefined. +func (v Uint8) Get(deflt uint8) uint8 { + if !v.Defined { + return deflt + } + return v.V +} + +// MarshalEasyJSON does JSON marshaling using easyjson interface. +func (v Uint8) MarshalEasyJSON(w *jwriter.Writer) { + if v.Defined { + w.Uint8(v.V) + } else { + w.RawString("null") + } +} + +// UnmarshalEasyJSON does JSON unmarshaling using easyjson interface. 
+func (v *Uint8) UnmarshalEasyJSON(l *jlexer.Lexer) { + if l.IsNull() { + l.Skip() + *v = Uint8{} + } else { + v.V = l.Uint8() + v.Defined = true + } +} + +// MarshalJSON implements a standard json marshaler interface. +func (v *Uint8) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + v.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} + +// MarshalJSON implements a standard json marshaler interface. +func (v *Uint8) UnmarshalJSON(data []byte) error { + l := jlexer.Lexer{} + v.UnmarshalEasyJSON(&l) + return l.Error() +} + +// IsDefined returns whether the value is defined, a function is required so that it can +// be used in an interface. +func (v Uint8) IsDefined() bool { + return v.Defined +} + +// String implements a stringer interface using fmt.Sprint for the value. +func (v Uint8) String() string { + if !v.Defined { + return "" + } + return fmt.Sprint(v.V) +} diff --git a/src/vendor/github.com/mailru/easyjson/opt/optional/opt.go b/src/vendor/github.com/mailru/easyjson/opt/optional/opt.go new file mode 100644 index 00000000..6d3a0798 --- /dev/null +++ b/src/vendor/github.com/mailru/easyjson/opt/optional/opt.go @@ -0,0 +1,80 @@ +// +build none + +package optional + +import ( + "fmt" + + "github.com/mailru/easyjson/jlexer" + "github.com/mailru/easyjson/jwriter" +) + +// template type Optional(A) +type A int + +// A 'gotemplate'-based type for providing optional semantics without using pointers. +type Optional struct { + V A + Defined bool +} + +// Creates an optional type with a given value. +func OOptional(v A) Optional { + return Optional{V: v, Defined: true} +} + +// Get returns the value or given default in the case the value is undefined. +func (v Optional) Get(deflt A) A { + if !v.Defined { + return deflt + } + return v.V +} + +// MarshalEasyJSON does JSON marshaling using easyjson interface. 
+func (v Optional) MarshalEasyJSON(w *jwriter.Writer) { + if v.Defined { + w.Optional(v.V) + } else { + w.RawString("null") + } +} + +// UnmarshalEasyJSON does JSON unmarshaling using easyjson interface. +func (v *Optional) UnmarshalEasyJSON(l *jlexer.Lexer) { + if l.IsNull() { + l.Skip() + *v = Optional{} + } else { + v.V = l.Optional() + v.Defined = true + } +} + +// MarshalJSON implements a standard json marshaler interface. +func (v *Optional) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + v.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} + +// MarshalJSON implements a standard json marshaler interface. +func (v *Optional) UnmarshalJSON(data []byte) error { + l := jlexer.Lexer{} + v.UnmarshalEasyJSON(&l) + return l.Error() +} + +// IsDefined returns whether the value is defined, a function is required so that it can +// be used in an interface. +func (v Optional) IsDefined() bool { + return v.Defined +} + +// String implements a stringer interface using fmt.Sprint for the value. 
+func (v Optional) String() string { + if !v.Defined { + return "" + } + return fmt.Sprint(v.V) +} diff --git a/src/vendor/github.com/mailru/easyjson/opt/opts.go b/src/vendor/github.com/mailru/easyjson/opt/opts.go new file mode 100644 index 00000000..3617f7f9 --- /dev/null +++ b/src/vendor/github.com/mailru/easyjson/opt/opts.go @@ -0,0 +1,22 @@ +package opt + +//go:generate sed -i "s/\\+build none/generated by gotemplate/" optional/opt.go +//go:generate gotemplate "github.com/mailru/easyjson/opt/optional" Int(int) +//go:generate gotemplate "github.com/mailru/easyjson/opt/optional" Uint(uint) + +//go:generate gotemplate "github.com/mailru/easyjson/opt/optional" Int8(int8) +//go:generate gotemplate "github.com/mailru/easyjson/opt/optional" Int16(int16) +//go:generate gotemplate "github.com/mailru/easyjson/opt/optional" Int32(int32) +//go:generate gotemplate "github.com/mailru/easyjson/opt/optional" Int64(int64) + +//go:generate gotemplate "github.com/mailru/easyjson/opt/optional" Uint8(uint8) +//go:generate gotemplate "github.com/mailru/easyjson/opt/optional" Uint16(uint16) +//go:generate gotemplate "github.com/mailru/easyjson/opt/optional" Uint32(uint32) +//go:generate gotemplate "github.com/mailru/easyjson/opt/optional" Uint64(uint64) + +//go:generate gotemplate "github.com/mailru/easyjson/opt/optional" Float32(float32) +//go:generate gotemplate "github.com/mailru/easyjson/opt/optional" Float64(float64) + +//go:generate gotemplate "github.com/mailru/easyjson/opt/optional" Bool(bool) +//go:generate gotemplate "github.com/mailru/easyjson/opt/optional" String(string) +//go:generate sed -i "s/generated by gotemplate/+build none/" optional/opt.go diff --git a/src/vendor/github.com/mailru/easyjson/parser/parser.go b/src/vendor/github.com/mailru/easyjson/parser/parser.go new file mode 100644 index 00000000..1c0b94ca --- /dev/null +++ b/src/vendor/github.com/mailru/easyjson/parser/parser.go @@ -0,0 +1,91 @@ +package parser + +import ( + "go/ast" + "go/parser" + "go/token" 
+ "strings" +) + +const structComment = "easyjson:json" + +type Parser struct { + PkgPath string + PkgName string + StructNames []string + AllStructs bool +} + +type visitor struct { + *Parser + + name string + explicit bool +} + +func (p *Parser) needType(comments string) bool { + for _, v := range strings.Split(comments, "\n") { + if strings.HasPrefix(v, structComment) { + return true + } + } + return false +} + +func (v *visitor) Visit(n ast.Node) (w ast.Visitor) { + switch n := n.(type) { + case *ast.Package: + return v + case *ast.File: + v.PkgName = n.Name.String() + return v + + case *ast.GenDecl: + v.explicit = v.needType(n.Doc.Text()) + + if !v.explicit && !v.AllStructs { + return nil + } + return v + case *ast.TypeSpec: + v.name = n.Name.String() + + // Allow to specify non-structs explicitly independent of '-all' flag. + if v.explicit { + v.StructNames = append(v.StructNames, v.name) + return nil + } + return v + case *ast.StructType: + v.StructNames = append(v.StructNames, v.name) + return nil + } + return nil +} + +func (p *Parser) Parse(fname string, isDir bool) error { + var err error + if p.PkgPath, err = getPkgPath(fname, isDir); err != nil { + return err + } + + fset := token.NewFileSet() + if isDir { + packages, err := parser.ParseDir(fset, fname, nil, parser.ParseComments) + if err != nil { + return err + } + + for _, pckg := range packages { + ast.Walk(&visitor{Parser: p}, pckg) + } + } else { + f, err := parser.ParseFile(fset, fname, nil, parser.ParseComments) + if err != nil { + return err + } + + ast.Walk(&visitor{Parser: p}, f) + } + return nil +} diff --git a/src/vendor/github.com/mailru/easyjson/parser/parser_unix.go b/src/vendor/github.com/mailru/easyjson/parser/parser_unix.go new file mode 100644 index 00000000..a1b9d84d --- /dev/null +++ b/src/vendor/github.com/mailru/easyjson/parser/parser_unix.go @@ -0,0 +1,33 @@ +// +build !windows + +package parser + +import ( + "fmt" + "os" + "path" + "strings" +) + +func getPkgPath(fname string, 
isDir bool) (string, error) { + if !path.IsAbs(fname) { + pwd, err := os.Getwd() + if err != nil { + return "", err + } + fname = path.Join(pwd, fname) + } + + for _, p := range strings.Split(os.Getenv("GOPATH"), ":") { + prefix := path.Join(p, "src") + "/" + if rel := strings.TrimPrefix(fname, prefix); rel != fname { + if !isDir { + return path.Dir(rel), nil + } else { + return path.Clean(rel), nil + } + } + } + + return "", fmt.Errorf("file '%v' is not in GOPATH", fname) +} diff --git a/src/vendor/github.com/mailru/easyjson/parser/parser_windows.go b/src/vendor/github.com/mailru/easyjson/parser/parser_windows.go new file mode 100644 index 00000000..ab520ad6 --- /dev/null +++ b/src/vendor/github.com/mailru/easyjson/parser/parser_windows.go @@ -0,0 +1,37 @@ +package parser + +import ( + "fmt" + "os" + "path" + "strings" +) + +func normalizePath(path string) string { + return strings.Replace(path, "\\", "/", -1) +} + +func getPkgPath(fname string, isDir bool) (string, error) { + if !path.IsAbs(fname) { + pwd, err := os.Getwd() + if err != nil { + return "", err + } + fname = path.Join(pwd, fname) + } + + fname = normalizePath(fname) + + for _, p := range strings.Split(os.Getenv("GOPATH"), ";") { + prefix := path.Join(normalizePath(p), "src") + "/" + if rel := strings.TrimPrefix(fname, prefix); rel != fname { + if !isDir { + return path.Dir(rel), nil + } else { + return path.Clean(rel), nil + } + } + } + + return "", fmt.Errorf("file '%v' is not in GOPATH", fname) +} diff --git a/src/vendor/github.com/mailru/easyjson/raw.go b/src/vendor/github.com/mailru/easyjson/raw.go new file mode 100644 index 00000000..81bd002e --- /dev/null +++ b/src/vendor/github.com/mailru/easyjson/raw.go @@ -0,0 +1,45 @@ +package easyjson + +import ( + "github.com/mailru/easyjson/jlexer" + "github.com/mailru/easyjson/jwriter" +) + +// RawMessage is a raw piece of JSON (number, string, bool, object, array or +// null) that is extracted without parsing and output as is during marshaling. 
+type RawMessage []byte + +// MarshalEasyJSON does JSON marshaling using easyjson interface. +func (v *RawMessage) MarshalEasyJSON(w *jwriter.Writer) { + if len(*v) == 0 { + w.RawString("null") + } else { + w.Raw(*v, nil) + } +} + +// UnmarshalEasyJSON does JSON unmarshaling using easyjson interface. +func (v *RawMessage) UnmarshalEasyJSON(l *jlexer.Lexer) { + *v = RawMessage(l.Raw()) +} + +// UnmarshalJSON implements encoding/json.Unmarshaler interface. +func (v *RawMessage) UnmarshalJSON(data []byte) error { + *v = data + return nil +} + +var nullBytes = []byte("null") + +// MarshalJSON implements encoding/json.Marshaler interface. +func (v RawMessage) MarshalJSON() ([]byte, error) { + if len(v) == 0 { + return nullBytes, nil + } + return v, nil +} + +// IsDefined is required for integration with omitempty easyjson logic. +func (v *RawMessage) IsDefined() bool { + return len(*v) > 0 +} diff --git a/src/vendor/github.com/mailru/easyjson/tests/basic_test.go b/src/vendor/github.com/mailru/easyjson/tests/basic_test.go new file mode 100644 index 00000000..b727c9e2 --- /dev/null +++ b/src/vendor/github.com/mailru/easyjson/tests/basic_test.go @@ -0,0 +1,222 @@ +package tests + +import ( + "reflect" + "testing" + + "encoding/json" + + "github.com/mailru/easyjson" + "github.com/mailru/easyjson/jwriter" +) + +type testType interface { + json.Marshaler + json.Unmarshaler +} + +var testCases = []struct { + Decoded testType + Encoded string +}{ + {&primitiveTypesValue, primitiveTypesString}, + {&namedPrimitiveTypesValue, namedPrimitiveTypesString}, + {&structsValue, structsString}, + {&omitEmptyValue, omitEmptyString}, + {&snakeStructValue, snakeStructString}, + {&omitEmptyDefaultValue, omitEmptyDefaultString}, + {&optsValue, optsString}, + {&rawValue, rawString}, + {&stdMarshalerValue, stdMarshalerString}, + {&userMarshalerValue, userMarshalerString}, + {&unexportedStructValue, unexportedStructString}, + {&excludedFieldValue, excludedFieldString}, + {&sliceValue, 
sliceString}, + {&arrayValue, arrayString}, + {&mapsValue, mapsString}, + {&deepNestValue, deepNestString}, + {&IntsValue, IntsString}, + {&mapStringStringValue, mapStringStringString}, + {&namedTypeValue, namedTypeValueString}, +} + +func TestMarshal(t *testing.T) { + for i, test := range testCases { + data, err := test.Decoded.MarshalJSON() + if err != nil { + t.Errorf("[%d, %T] MarshalJSON() error: %v", i, test.Decoded, err) + } + + got := string(data) + if got != test.Encoded { + t.Errorf("[%d, %T] MarshalJSON(): got \n%v\n\t\t want \n%v", i, test.Decoded, got, test.Encoded) + } + } +} + +func TestUnmarshal(t *testing.T) { + for i, test := range testCases { + v1 := reflect.New(reflect.TypeOf(test.Decoded).Elem()).Interface() + v := v1.(testType) + + err := v.UnmarshalJSON([]byte(test.Encoded)) + if err != nil { + t.Errorf("[%d, %T] UnmarshalJSON() error: %v", i, test.Decoded, err) + } + + if !reflect.DeepEqual(v, test.Decoded) { + t.Errorf("[%d, %T] UnmarshalJSON(): got \n%+v\n\t\t want \n%+v", i, test.Decoded, v, test.Decoded) + } + } +} + +func TestRawMessageSTD(t *testing.T) { + type T struct { + F easyjson.RawMessage + Fnil easyjson.RawMessage + } + + val := T{F: easyjson.RawMessage([]byte(`"test"`))} + str := `{"F":"test","Fnil":null}` + + data, err := json.Marshal(val) + if err != nil { + t.Errorf("json.Marshal() error: %v", err) + } + got := string(data) + if got != str { + t.Errorf("json.Marshal() = %v; want %v", got, str) + } + + wantV := T{F: easyjson.RawMessage([]byte(`"test"`)), Fnil: easyjson.RawMessage([]byte("null"))} + var gotV T + + err = json.Unmarshal([]byte(str), &gotV) + if err != nil { + t.Errorf("json.Unmarshal() error: %v", err) + } + if !reflect.DeepEqual(gotV, wantV) { + t.Errorf("json.Unmarshal() = %v; want %v", gotV, wantV) + } +} + +func TestParseNull(t *testing.T) { + var got, want SubStruct + if err := easyjson.Unmarshal([]byte("null"), &got); err != nil { + t.Errorf("Unmarshal() error: %v", err) + } + + if !reflect.DeepEqual(got, 
want) { + t.Errorf("Unmarshal() = %+v; want %+v", got, want) + } +} + +var testSpecialCases = []struct { + EncodedString string + Value string +}{ + {`"Username \u003cuser@example.com\u003e"`, `Username `}, + {`"Username\ufffd"`, "Username\xc5"}, + {`"тестzтест"`, "тестzтест"}, + {`"тест\ufffdтест"`, "тест\xc5тест"}, + {`"绿茶"`, "绿茶"}, + {`"绿\ufffd茶"`, "绿\xc5茶"}, + {`"тест\u2028"`, "тест\xE2\x80\xA8"}, + {`"\\\r\n\t\""`, "\\\r\n\t\""}, + {`"ü"`, "ü"}, +} + +func TestSpecialCases(t *testing.T) { + for i, test := range testSpecialCases { + w := jwriter.Writer{} + w.String(test.Value) + got := string(w.Buffer.BuildBytes()) + if got != test.EncodedString { + t.Errorf("[%d] Encoded() = %+v; want %+v", i, got, test.EncodedString) + } + } +} + +func TestOverflowArray(t *testing.T) { + var a Arrays + err := easyjson.Unmarshal([]byte(arrayOverflowString), &a) + if err != nil { + t.Error(err) + } + if a != arrayValue { + t.Errorf("Unmarshal(%v) = %+v; want %+v", arrayOverflowString, a, arrayValue) + } +} + +func TestUnderflowArray(t *testing.T) { + var a Arrays + err := easyjson.Unmarshal([]byte(arrayUnderflowString), &a) + if err != nil { + t.Error(err) + } + if a != arrayUnderflowValue { + t.Errorf("Unmarshal(%v) = %+v; want %+v", arrayUnderflowString, a, arrayUnderflowValue) + } +} + +func TestEncodingFlags(t *testing.T) { + for i, test := range []struct { + Flags jwriter.Flags + In easyjson.Marshaler + Want string + }{ + {0, EncodingFlagsTestMap{}, `{"F":null}`}, + {0, EncodingFlagsTestSlice{}, `{"F":null}`}, + {jwriter.NilMapAsEmpty, EncodingFlagsTestMap{}, `{"F":{}}`}, + {jwriter.NilSliceAsEmpty, EncodingFlagsTestSlice{}, `{"F":[]}`}, + } { + w := &jwriter.Writer{Flags: test.Flags} + test.In.MarshalEasyJSON(w) + + data, err := w.BuildBytes() + if err != nil { + t.Errorf("[%v] easyjson.Marshal(%+v) error: %v", i, test.In, err) + } + + v := string(data) + if v != test.Want { + t.Errorf("[%v] easyjson.Marshal(%+v) = %v; want %v", i, test.In, v, test.Want) + } + } + +} + 
+func TestNestedEasyJsonMarshal(t *testing.T) { + n := map[string]*NestedEasyMarshaler{ + "Value": {}, + "Slice1": {}, + "Slice2": {}, + "Map1": {}, + "Map2": {}, + } + + ni := NestedInterfaces{ + Value: n["Value"], + Slice: []interface{}{n["Slice1"], n["Slice2"]}, + Map: map[string]interface{}{"1": n["Map1"], "2": n["Map2"]}, + } + easyjson.Marshal(ni) + + for k, v := range n { + if !v.EasilyMarshaled { + t.Errorf("Nested interface %s wasn't easily marshaled", k) + } + } +} + +func TestUnmarshalStructWithEmbeddedPtrStruct(t *testing.T) { + var s = StructWithInterface{Field2: &EmbeddedStruct{}} + var err error + err = easyjson.Unmarshal([]byte(structWithInterfaceString), &s) + if err != nil { + t.Errorf("easyjson.Unmarshal() error: %v", err) + } + if !reflect.DeepEqual(s, structWithInterfaceValueFilled) { + t.Errorf("easyjson.Unmarshal() = %#v; want %#v", s, structWithInterfaceValueFilled) + } +} diff --git a/src/vendor/github.com/mailru/easyjson/tests/data.go b/src/vendor/github.com/mailru/easyjson/tests/data.go new file mode 100644 index 00000000..ca8676ee --- /dev/null +++ b/src/vendor/github.com/mailru/easyjson/tests/data.go @@ -0,0 +1,694 @@ +package tests + +import ( + "fmt" + "math" + "net" + "time" + + "github.com/mailru/easyjson" + "github.com/mailru/easyjson/opt" +) + +type PrimitiveTypes struct { + String string + Bool bool + + Int int + Int8 int8 + Int16 int16 + Int32 int32 + Int64 int64 + + Uint uint + Uint8 uint8 + Uint16 uint16 + Uint32 uint32 + Uint64 uint64 + + IntString int `json:",string"` + Int8String int8 `json:",string"` + Int16String int16 `json:",string"` + Int32String int32 `json:",string"` + Int64String int64 `json:",string"` + + UintString uint `json:",string"` + Uint8String uint8 `json:",string"` + Uint16String uint16 `json:",string"` + Uint32String uint32 `json:",string"` + Uint64String uint64 `json:",string"` + + Float32 float32 + Float64 float64 + + Ptr *string + PtrNil *string +} + +var str = "bla" + +var primitiveTypesValue = 
PrimitiveTypes{ + String: "test", Bool: true, + + Int: math.MinInt32, + Int8: math.MinInt8, + Int16: math.MinInt16, + Int32: math.MinInt32, + Int64: math.MinInt64, + + Uint: math.MaxUint32, + Uint8: math.MaxUint8, + Uint16: math.MaxUint16, + Uint32: math.MaxUint32, + Uint64: math.MaxUint64, + + IntString: math.MinInt32, + Int8String: math.MinInt8, + Int16String: math.MinInt16, + Int32String: math.MinInt32, + Int64String: math.MinInt64, + + UintString: math.MaxUint32, + Uint8String: math.MaxUint8, + Uint16String: math.MaxUint16, + Uint32String: math.MaxUint32, + Uint64String: math.MaxUint64, + + Float32: 1.5, + Float64: math.MaxFloat64, + + Ptr: &str, +} + +var primitiveTypesString = "{" + + `"String":"test","Bool":true,` + + + `"Int":` + fmt.Sprint(math.MinInt32) + `,` + + `"Int8":` + fmt.Sprint(math.MinInt8) + `,` + + `"Int16":` + fmt.Sprint(math.MinInt16) + `,` + + `"Int32":` + fmt.Sprint(math.MinInt32) + `,` + + `"Int64":` + fmt.Sprint(int64(math.MinInt64)) + `,` + + + `"Uint":` + fmt.Sprint(math.MaxUint32) + `,` + + `"Uint8":` + fmt.Sprint(math.MaxUint8) + `,` + + `"Uint16":` + fmt.Sprint(math.MaxUint16) + `,` + + `"Uint32":` + fmt.Sprint(math.MaxUint32) + `,` + + `"Uint64":` + fmt.Sprint(uint64(math.MaxUint64)) + `,` + + + `"IntString":"` + fmt.Sprint(math.MinInt32) + `",` + + `"Int8String":"` + fmt.Sprint(math.MinInt8) + `",` + + `"Int16String":"` + fmt.Sprint(math.MinInt16) + `",` + + `"Int32String":"` + fmt.Sprint(math.MinInt32) + `",` + + `"Int64String":"` + fmt.Sprint(int64(math.MinInt64)) + `",` + + + `"UintString":"` + fmt.Sprint(math.MaxUint32) + `",` + + `"Uint8String":"` + fmt.Sprint(math.MaxUint8) + `",` + + `"Uint16String":"` + fmt.Sprint(math.MaxUint16) + `",` + + `"Uint32String":"` + fmt.Sprint(math.MaxUint32) + `",` + + `"Uint64String":"` + fmt.Sprint(uint64(math.MaxUint64)) + `",` + + + `"Float32":` + fmt.Sprint(1.5) + `,` + + `"Float64":` + fmt.Sprint(math.MaxFloat64) + `,` + + + `"Ptr":"bla",` + + `"PtrNil":null` + + + "}" + +type ( + 
NamedString string + NamedBool bool + + NamedInt int + NamedInt8 int8 + NamedInt16 int16 + NamedInt32 int32 + NamedInt64 int64 + + NamedUint uint + NamedUint8 uint8 + NamedUint16 uint16 + NamedUint32 uint32 + NamedUint64 uint64 + + NamedFloat32 float32 + NamedFloat64 float64 + + NamedStrPtr *string +) + +type NamedPrimitiveTypes struct { + String NamedString + Bool NamedBool + + Int NamedInt + Int8 NamedInt8 + Int16 NamedInt16 + Int32 NamedInt32 + Int64 NamedInt64 + + Uint NamedUint + Uint8 NamedUint8 + Uint16 NamedUint16 + Uint32 NamedUint32 + Uint64 NamedUint64 + + Float32 NamedFloat32 + Float64 NamedFloat64 + + Ptr NamedStrPtr + PtrNil NamedStrPtr +} + +var namedPrimitiveTypesValue = NamedPrimitiveTypes{ + String: "test", + Bool: true, + + Int: math.MinInt32, + Int8: math.MinInt8, + Int16: math.MinInt16, + Int32: math.MinInt32, + Int64: math.MinInt64, + + Uint: math.MaxUint32, + Uint8: math.MaxUint8, + Uint16: math.MaxUint16, + Uint32: math.MaxUint32, + Uint64: math.MaxUint64, + + Float32: 1.5, + Float64: math.MaxFloat64, + + Ptr: NamedStrPtr(&str), +} + +var namedPrimitiveTypesString = "{" + + `"String":"test",` + + `"Bool":true,` + + + `"Int":` + fmt.Sprint(math.MinInt32) + `,` + + `"Int8":` + fmt.Sprint(math.MinInt8) + `,` + + `"Int16":` + fmt.Sprint(math.MinInt16) + `,` + + `"Int32":` + fmt.Sprint(math.MinInt32) + `,` + + `"Int64":` + fmt.Sprint(int64(math.MinInt64)) + `,` + + + `"Uint":` + fmt.Sprint(math.MaxUint32) + `,` + + `"Uint8":` + fmt.Sprint(math.MaxUint8) + `,` + + `"Uint16":` + fmt.Sprint(math.MaxUint16) + `,` + + `"Uint32":` + fmt.Sprint(math.MaxUint32) + `,` + + `"Uint64":` + fmt.Sprint(uint64(math.MaxUint64)) + `,` + + + `"Float32":` + fmt.Sprint(1.5) + `,` + + `"Float64":` + fmt.Sprint(math.MaxFloat64) + `,` + + + `"Ptr":"bla",` + + `"PtrNil":null` + + "}" + +type SubStruct struct { + Value string + Value2 string + unexpored bool +} + +type SubP struct { + V string +} + +type SubStructAlias SubStruct + +type Structs struct { + SubStruct + 
*SubP + + Value2 int + + Sub1 SubStruct `json:"substruct"` + Sub2 *SubStruct + SubNil *SubStruct + + SubSlice []SubStruct + SubSliceNil []SubStruct + + SubPtrSlice []*SubStruct + SubPtrSliceNil []*SubStruct + + SubA1 SubStructAlias + SubA2 *SubStructAlias + + Anonymous struct { + V string + I int + } + Anonymous1 *struct { + V string + } + + AnonymousSlice []struct{ V int } + AnonymousPtrSlice []*struct{ V int } + + Slice []string + + unexported bool +} + +var structsValue = Structs{ + SubStruct: SubStruct{Value: "test"}, + SubP: &SubP{V: "subp"}, + + Value2: 5, + + Sub1: SubStruct{Value: "test1", Value2: "v"}, + Sub2: &SubStruct{Value: "test2", Value2: "v2"}, + + SubSlice: []SubStruct{ + {Value: "s1"}, + {Value: "s2"}, + }, + + SubPtrSlice: []*SubStruct{ + {Value: "p1"}, + {Value: "p2"}, + }, + + SubA1: SubStructAlias{Value: "test3", Value2: "v3"}, + SubA2: &SubStructAlias{Value: "test4", Value2: "v4"}, + + Anonymous: struct { + V string + I int + }{V: "bla", I: 5}, + + Anonymous1: &struct { + V string + }{V: "bla1"}, + + AnonymousSlice: []struct{ V int }{{1}, {2}}, + AnonymousPtrSlice: []*struct{ V int }{{3}, {4}}, + + Slice: []string{"test5", "test6"}, +} + +var structsString = "{" + + `"Value2":5,` + + + `"substruct":{"Value":"test1","Value2":"v"},` + + `"Sub2":{"Value":"test2","Value2":"v2"},` + + `"SubNil":null,` + + + `"SubSlice":[{"Value":"s1","Value2":""},{"Value":"s2","Value2":""}],` + + `"SubSliceNil":null,` + + + `"SubPtrSlice":[{"Value":"p1","Value2":""},{"Value":"p2","Value2":""}],` + + `"SubPtrSliceNil":null,` + + + `"SubA1":{"Value":"test3","Value2":"v3"},` + + `"SubA2":{"Value":"test4","Value2":"v4"},` + + + `"Anonymous":{"V":"bla","I":5},` + + `"Anonymous1":{"V":"bla1"},` + + + `"AnonymousSlice":[{"V":1},{"V":2}],` + + `"AnonymousPtrSlice":[{"V":3},{"V":4}],` + + + `"Slice":["test5","test6"],` + + + // Embedded fields go last. 
+ `"V":"subp",` + + `"Value":"test"` + + "}" + +type OmitEmpty struct { + // NOTE: first field is empty to test comma printing. + + StrE, StrNE string `json:",omitempty"` + PtrE, PtrNE *string `json:",omitempty"` + + IntNE int `json:"intField,omitempty"` + IntE int `json:",omitempty"` + + // NOTE: omitempty has no effect on non-pointer struct fields. + SubE, SubNE SubStruct `json:",omitempty"` + SubPE, SubPNE *SubStruct `json:",omitempty"` +} + +var omitEmptyValue = OmitEmpty{ + StrNE: "str", + PtrNE: &str, + IntNE: 6, + SubNE: SubStruct{Value: "1", Value2: "2"}, + SubPNE: &SubStruct{Value: "3", Value2: "4"}, +} + +var omitEmptyString = "{" + + `"StrNE":"str",` + + `"PtrNE":"bla",` + + `"intField":6,` + + `"SubE":{"Value":"","Value2":""},` + + `"SubNE":{"Value":"1","Value2":"2"},` + + `"SubPNE":{"Value":"3","Value2":"4"}` + + "}" + +type Opts struct { + StrNull opt.String + StrEmpty opt.String + Str opt.String + StrOmitempty opt.String `json:",omitempty"` + + IntNull opt.Int + IntZero opt.Int + Int opt.Int +} + +var optsValue = Opts{ + StrEmpty: opt.OString(""), + Str: opt.OString("test"), + + IntZero: opt.OInt(0), + Int: opt.OInt(5), +} + +var optsString = `{` + + `"StrNull":null,` + + `"StrEmpty":"",` + + `"Str":"test",` + + `"IntNull":null,` + + `"IntZero":0,` + + `"Int":5` + + `}` + +type Raw struct { + Field easyjson.RawMessage + Field2 string +} + +var rawValue = Raw{ + Field: []byte(`{"a" : "b"}`), + Field2: "test", +} + +var rawString = `{` + + `"Field":{"a" : "b"},` + + `"Field2":"test"` + + `}` + +type StdMarshaler struct { + T time.Time + IP net.IP +} + +var stdMarshalerValue = StdMarshaler{ + T: time.Date(2016, 01, 02, 14, 15, 10, 0, time.UTC), + IP: net.IPv4(192, 168, 0, 1), +} +var stdMarshalerString = `{` + + `"T":"2016-01-02T14:15:10Z",` + + `"IP":"192.168.0.1"` + + `}` + +type UserMarshaler struct { + V vMarshaler + T tMarshaler +} + +type vMarshaler net.IP + +func (v vMarshaler) MarshalJSON() ([]byte, error) { + return []byte(`"0::0"`), nil +} + 
+func (v *vMarshaler) UnmarshalJSON([]byte) error { + *v = vMarshaler(net.IPv6zero) + return nil +} + +type tMarshaler net.IP + +func (v tMarshaler) MarshalText() ([]byte, error) { + return []byte(`[0::0]`), nil +} + +func (v *tMarshaler) UnmarshalText([]byte) error { + *v = tMarshaler(net.IPv6zero) + return nil +} + +var userMarshalerValue = UserMarshaler{ + V: vMarshaler(net.IPv6zero), + T: tMarshaler(net.IPv6zero), +} +var userMarshalerString = `{` + + `"V":"0::0",` + + `"T":"[0::0]"` + + `}` + +type unexportedStruct struct { + Value string +} + +var unexportedStructValue = unexportedStruct{"test"} +var unexportedStructString = `{"Value":"test"}` + +type ExcludedField struct { + Process bool `json:"process"` + DoNotProcess bool `json:"-"` + DoNotProcess1 bool `json:"-"` +} + +var excludedFieldValue = ExcludedField{ + Process: true, + DoNotProcess: false, + DoNotProcess1: false, +} +var excludedFieldString = `{"process":true}` + +type Slices struct { + ByteSlice []byte + EmptyByteSlice []byte + NilByteSlice []byte + IntSlice []int + EmptyIntSlice []int + NilIntSlice []int +} + +var sliceValue = Slices{ + ByteSlice: []byte("abc"), + EmptyByteSlice: []byte{}, + NilByteSlice: []byte(nil), + IntSlice: []int{1, 2, 3, 4, 5}, + EmptyIntSlice: []int{}, + NilIntSlice: []int(nil), +} + +var sliceString = `{` + + `"ByteSlice":"YWJj",` + + `"EmptyByteSlice":"",` + + `"NilByteSlice":null,` + + `"IntSlice":[1,2,3,4,5],` + + `"EmptyIntSlice":[],` + + `"NilIntSlice":null` + + `}` + +type Arrays struct { + ByteArray [3]byte + EmptyByteArray [0]byte + IntArray [5]int + EmptyIntArray [0]int +} + +var arrayValue = Arrays{ + ByteArray: [3]byte{'a', 'b', 'c'}, + EmptyByteArray: [0]byte{}, + IntArray: [5]int{1, 2, 3, 4, 5}, + EmptyIntArray: [0]int{}, +} + +var arrayString = `{` + + `"ByteArray":"YWJj",` + + `"EmptyByteArray":"",` + + `"IntArray":[1,2,3,4,5],` + + `"EmptyIntArray":[]` + + `}` + +var arrayOverflowString = `{` + + `"ByteArray":"YWJjbnNk",` + + `"EmptyByteArray":"YWJj",` + 
+ `"IntArray":[1,2,3,4,5,6],` + + `"EmptyIntArray":[7,8]` + + `}` + +var arrayUnderflowValue = Arrays{ + ByteArray: [3]byte{'x', 0, 0}, + EmptyByteArray: [0]byte{}, + IntArray: [5]int{1, 2, 0, 0, 0}, + EmptyIntArray: [0]int{}, +} + +var arrayUnderflowString = `{` + + `"ByteArray":"eA==",` + + `"IntArray":[1,2]` + + `}` + +type Str string + +type Maps struct { + Map map[string]string + InterfaceMap map[string]interface{} + NilMap map[string]string + + CustomMap map[Str]Str +} + +var mapsValue = Maps{ + Map: map[string]string{"A": "b"}, // only one item since map iteration is randomized + InterfaceMap: map[string]interface{}{"G": float64(1)}, + + CustomMap: map[Str]Str{"c": "d"}, +} + +var mapsString = `{` + + `"Map":{"A":"b"},` + + `"InterfaceMap":{"G":1},` + + `"NilMap":null,` + + `"CustomMap":{"c":"d"}` + + `}` + +type NamedSlice []Str +type NamedMap map[Str]Str + +type DeepNest struct { + SliceMap map[Str][]Str + SliceMap1 map[Str][]Str + SliceMap2 map[Str][]Str + NamedSliceMap map[Str]NamedSlice + NamedMapMap map[Str]NamedMap + MapSlice []map[Str]Str + NamedSliceSlice []NamedSlice + NamedMapSlice []NamedMap + NamedStringSlice []NamedString +} + +var deepNestValue = DeepNest{ + SliceMap: map[Str][]Str{ + "testSliceMap": []Str{ + "0", + "1", + }, + }, + SliceMap1: map[Str][]Str{ + "testSliceMap1": []Str(nil), + }, + SliceMap2: map[Str][]Str{ + "testSliceMap2": []Str{}, + }, + NamedSliceMap: map[Str]NamedSlice{ + "testNamedSliceMap": NamedSlice{ + "2", + "3", + }, + }, + NamedMapMap: map[Str]NamedMap{ + "testNamedMapMap": NamedMap{ + "key1": "value1", + }, + }, + MapSlice: []map[Str]Str{ + map[Str]Str{ + "testMapSlice": "someValue", + }, + }, + NamedSliceSlice: []NamedSlice{ + NamedSlice{ + "someValue1", + "someValue2", + }, + NamedSlice{ + "someValue3", + "someValue4", + }, + }, + NamedMapSlice: []NamedMap{ + NamedMap{ + "key2": "value2", + }, + NamedMap{ + "key3": "value3", + }, + }, + NamedStringSlice: []NamedString{ + "value4", "value5", + }, +} + +var 
deepNestString = `{` + + `"SliceMap":{` + + `"testSliceMap":["0","1"]` + + `},` + + `"SliceMap1":{` + + `"testSliceMap1":null` + + `},` + + `"SliceMap2":{` + + `"testSliceMap2":[]` + + `},` + + `"NamedSliceMap":{` + + `"testNamedSliceMap":["2","3"]` + + `},` + + `"NamedMapMap":{` + + `"testNamedMapMap":{"key1":"value1"}` + + `},` + + `"MapSlice":[` + + `{"testMapSlice":"someValue"}` + + `],` + + `"NamedSliceSlice":[` + + `["someValue1","someValue2"],` + + `["someValue3","someValue4"]` + + `],` + + `"NamedMapSlice":[` + + `{"key2":"value2"},` + + `{"key3":"value3"}` + + `],` + + `"NamedStringSlice":["value4","value5"]` + + `}` + +//easyjson:json +type Ints []int + +var IntsValue = Ints{1, 2, 3, 4, 5} + +var IntsString = `[1,2,3,4,5]` + +//easyjson:json +type MapStringString map[string]string + +var mapStringStringValue = MapStringString{"a": "b"} + +var mapStringStringString = `{"a":"b"}` + +type RequiredOptionalStruct struct { + FirstName string `json:"first_name,required"` + Lastname string `json:"last_name"` +} + +//easyjson:json +type EncodingFlagsTestMap struct { + F map[string]string +} + +//easyjson:json +type EncodingFlagsTestSlice struct { + F []string +} + +type StructWithInterface struct { + Field1 int `json:"f1"` + Field2 interface{} `json:"f2"` + Field3 string `json:"f3"` +} + +type EmbeddedStruct struct { + Field1 int `json:"f1"` + Field2 string `json:"f2"` +} + +var structWithInterfaceString = `{"f1":1,"f2":{"f1":11,"f2":"22"},"f3":"3"}` +var structWithInterfaceValueFilled = StructWithInterface{1, &EmbeddedStruct{11, "22"}, "3"} diff --git a/src/vendor/github.com/mailru/easyjson/tests/errors.go b/src/vendor/github.com/mailru/easyjson/tests/errors.go new file mode 100644 index 00000000..2ec3299a --- /dev/null +++ b/src/vendor/github.com/mailru/easyjson/tests/errors.go @@ -0,0 +1,23 @@ +package tests + +//easyjson:json +type ErrorIntSlice []int + +//easyjson:json +type ErrorBoolSlice []bool + +//easyjson:json +type ErrorUintSlice []uint + 
+//easyjson:json +type ErrorStruct struct { + Int int `json:"int"` + String string `json:"string"` + Slice []int `json:"slice"` + IntSlice []int `json:"int_slice"` +} + +type ErrorNestedStruct struct { + ErrorStruct ErrorStruct `json:"error_struct"` + Int int `json:"int"` +} diff --git a/src/vendor/github.com/mailru/easyjson/tests/errors_test.go b/src/vendor/github.com/mailru/easyjson/tests/errors_test.go new file mode 100644 index 00000000..756f7db6 --- /dev/null +++ b/src/vendor/github.com/mailru/easyjson/tests/errors_test.go @@ -0,0 +1,243 @@ +package tests + +import ( + "testing" + + "github.com/mailru/easyjson/jlexer" +) + +func TestMultipleErrorsInt(t *testing.T) { + for i, test := range []struct { + Data []byte + Offsets []int + }{ + { + Data: []byte(`[1, 2, 3, "4", "5"]`), + Offsets: []int{10, 15}, + }, + { + Data: []byte(`[1, {"2":"3"}, 3, "4"]`), + Offsets: []int{4, 18}, + }, + { + Data: []byte(`[1, "2", "3", "4", "5", "6"]`), + Offsets: []int{4, 9, 14, 19, 24}, + }, + { + Data: []byte(`[1, 2, 3, 4, "5"]`), + Offsets: []int{13}, + }, + { + Data: []byte(`[{"1": "2"}]`), + Offsets: []int{1}, + }, + } { + l := jlexer.Lexer{ + Data: test.Data, + UseMultipleErrors: true, + } + + var v ErrorIntSlice + + v.UnmarshalEasyJSON(&l) + + errors := l.GetNonFatalErrors() + + if len(errors) != len(test.Offsets) { + t.Errorf("[%d] TestMultipleErrorsInt(): errornum: want: %d, got %d", i, len(test.Offsets), len(errors)) + return + } + + for ii, e := range errors { + if e.Offset != test.Offsets[ii] { + t.Errorf("[%d] TestMultipleErrorsInt(): offset[%d]: want %d, got %d", i, ii, test.Offsets[ii], e.Offset) + } + } + } +} + +func TestMultipleErrorsBool(t *testing.T) { + for i, test := range []struct { + Data []byte + Offsets []int + }{ + { + Data: []byte(`[true, false, true, false]`), + }, + { + Data: []byte(`["test", "value", "lol", "1"]`), + Offsets: []int{1, 9, 18, 25}, + }, + { + Data: []byte(`[true, 42, {"a":"b", "c":"d"}, false]`), + Offsets: []int{7, 11}, + }, + } { + l 
:= jlexer.Lexer{ + Data: test.Data, + UseMultipleErrors: true, + } + + var v ErrorBoolSlice + v.UnmarshalEasyJSON(&l) + + errors := l.GetNonFatalErrors() + + if len(errors) != len(test.Offsets) { + t.Errorf("[%d] TestMultipleErrorsBool(): errornum: want: %d, got %d", i, len(test.Offsets), len(errors)) + return + } + for ii, e := range errors { + if e.Offset != test.Offsets[ii] { + t.Errorf("[%d] TestMultipleErrorsBool(): offset[%d]: want %d, got %d", i, ii, test.Offsets[ii], e.Offset) + } + } + } +} + +func TestMultipleErrorsUint(t *testing.T) { + for i, test := range []struct { + Data []byte + Offsets []int + }{ + { + Data: []byte(`[42, 42, 42]`), + }, + { + Data: []byte(`[17, "42", 32]`), + Offsets: []int{5}, + }, + { + Data: []byte(`["zz", "zz"]`), + Offsets: []int{1, 7}, + }, + { + Data: []byte(`[{}, 42]`), + Offsets: []int{1}, + }, + } { + l := jlexer.Lexer{ + Data: test.Data, + UseMultipleErrors: true, + } + + var v ErrorUintSlice + v.UnmarshalEasyJSON(&l) + + errors := l.GetNonFatalErrors() + + if len(errors) != len(test.Offsets) { + t.Errorf("[%d] TestMultipleErrorsUint(): errornum: want: %d, got %d", i, len(test.Offsets), len(errors)) + return + } + for ii, e := range errors { + if e.Offset != test.Offsets[ii] { + t.Errorf("[%d] TestMultipleErrorsUint(): offset[%d]: want %d, got %d", i, ii, test.Offsets[ii], e.Offset) + } + } + } +} + +func TestMultipleErrorsStruct(t *testing.T) { + for i, test := range []struct { + Data []byte + Offsets []int + }{ + { + Data: []byte(`{"string": "test", "slice":[42, 42, 42], "int_slice":[1, 2, 3]}`), + }, + { + Data: []byte(`{"string": {"test": "test"}, "slice":[42, 42, 42], "int_slice":["1", 2, 3]}`), + Offsets: []int{11, 64}, + }, + { + Data: []byte(`{"slice": [42, 42], "string": {"test": "test"}, "int_slice":["1", "2", 3]}`), + Offsets: []int{30, 61, 66}, + }, + { + Data: []byte(`{"string": "test", "slice": {}}`), + Offsets: []int{28}, + }, + { + Data: []byte(`{"slice":5, "string" : "test"}`), + Offsets: []int{9}, + }, 
+ { + Data: []byte(`{"slice" : "test", "string" : "test"}`), + Offsets: []int{11}, + }, + { + Data: []byte(`{"slice": "", "string" : {}, "int":{}}`), + Offsets: []int{10, 25, 35}, + }, + } { + l := jlexer.Lexer{ + Data: test.Data, + UseMultipleErrors: true, + } + var v ErrorStruct + v.UnmarshalEasyJSON(&l) + + errors := l.GetNonFatalErrors() + + if len(errors) != len(test.Offsets) { + t.Errorf("[%d] TestMultipleErrorsStruct(): errornum: want: %d, got %d", i, len(test.Offsets), len(errors)) + return + } + for ii, e := range errors { + if e.Offset != test.Offsets[ii] { + t.Errorf("[%d] TestMultipleErrorsStruct(): offset[%d]: want %d, got %d", i, ii, test.Offsets[ii], e.Offset) + } + } + } +} + +func TestMultipleErrorsNestedStruct(t *testing.T) { + for i, test := range []struct { + Data []byte + Offsets []int + }{ + { + Data: []byte(`{"error_struct":{}}`), + }, + { + Data: []byte(`{"error_struct":5}`), + Offsets: []int{16}, + }, + { + Data: []byte(`{"error_struct":[]}`), + Offsets: []int{16}, + }, + { + Data: []byte(`{"error_struct":{"int":{}}}`), + Offsets: []int{23}, + }, + { + Data: []byte(`{"error_struct":{"int_slice":{}}, "int":4}`), + Offsets: []int{29}, + }, + { + Data: []byte(`{"error_struct":{"int_slice":["1", 2, "3"]}, "int":[]}`), + Offsets: []int{30, 38, 51}, + }, + } { + l := jlexer.Lexer{ + Data: test.Data, + UseMultipleErrors: true, + } + var v ErrorNestedStruct + v.UnmarshalEasyJSON(&l) + + errors := l.GetNonFatalErrors() + + if len(errors) != len(test.Offsets) { + t.Errorf("[%d] TestMultipleErrorsNestedStruct(): errornum: want: %d, got %d", i, len(test.Offsets), len(errors)) + return + } + for ii, e := range errors { + if e.Offset != test.Offsets[ii] { + t.Errorf("[%d] TestMultipleErrorsNestedStruct(): offset[%d]: want %d, got %d", i, ii, test.Offsets[ii], e.Offset) + } + } + } +} diff --git a/src/vendor/github.com/mailru/easyjson/tests/named_type.go b/src/vendor/github.com/mailru/easyjson/tests/named_type.go new file mode 100644 index 
00000000..0ff8dfeb --- /dev/null +++ b/src/vendor/github.com/mailru/easyjson/tests/named_type.go @@ -0,0 +1,22 @@ +package tests + +//easyjson:json +type NamedType struct { + Inner struct { + // easyjson is mistakenly naming the type of this field 'tests.MyString' in the generated output + // something about a named type inside an anonmymous type is triggering this bug + Field MyString `tag:"value"` + Field2 int "tag:\"value with ` in it\"" + } +} + +type MyString string + +var namedTypeValue NamedType + +func init() { + namedTypeValue.Inner.Field = "test" + namedTypeValue.Inner.Field2 = 123 +} + +var namedTypeValueString = `{"Inner":{"Field":"test","Field2":123}}` diff --git a/src/vendor/github.com/mailru/easyjson/tests/nested_easy.go b/src/vendor/github.com/mailru/easyjson/tests/nested_easy.go new file mode 100644 index 00000000..35d77043 --- /dev/null +++ b/src/vendor/github.com/mailru/easyjson/tests/nested_easy.go @@ -0,0 +1,25 @@ +package tests + +import ( + "github.com/mailru/easyjson" + "github.com/mailru/easyjson/jwriter" +) + +//easyjson:json +type NestedInterfaces struct { + Value interface{} + Slice []interface{} + Map map[string]interface{} +} + +type NestedEasyMarshaler struct { + EasilyMarshaled bool +} + +var _ easyjson.Marshaler = &NestedEasyMarshaler{} + +func (i *NestedEasyMarshaler) MarshalEasyJSON(w *jwriter.Writer) { + // We use this method only to indicate that easyjson.Marshaler + // interface was really used while encoding. 
+ i.EasilyMarshaled = true +} diff --git a/src/vendor/github.com/mailru/easyjson/tests/nothing.go b/src/vendor/github.com/mailru/easyjson/tests/nothing.go new file mode 100644 index 00000000..35334f5f --- /dev/null +++ b/src/vendor/github.com/mailru/easyjson/tests/nothing.go @@ -0,0 +1,3 @@ +package tests + +// No structs in this file diff --git a/src/vendor/github.com/mailru/easyjson/tests/omitempty.go b/src/vendor/github.com/mailru/easyjson/tests/omitempty.go new file mode 100644 index 00000000..ede5eb95 --- /dev/null +++ b/src/vendor/github.com/mailru/easyjson/tests/omitempty.go @@ -0,0 +1,12 @@ +package tests + +//easyjson:json +type OmitEmptyDefault struct { + Field string + Str string + Str1 string `json:"s,!omitempty"` + Str2 string `json:",!omitempty"` +} + +var omitEmptyDefaultValue = OmitEmptyDefault{Field: "test"} +var omitEmptyDefaultString = `{"Field":"test","s":"","Str2":""}` diff --git a/src/vendor/github.com/mailru/easyjson/tests/required_test.go b/src/vendor/github.com/mailru/easyjson/tests/required_test.go new file mode 100644 index 00000000..8cc743d8 --- /dev/null +++ b/src/vendor/github.com/mailru/easyjson/tests/required_test.go @@ -0,0 +1,28 @@ +package tests + +import ( + "fmt" + "testing" +) + +func TestRequiredField(t *testing.T) { + cases := []struct{ json, errorMessage string }{ + {`{"first_name":"Foo", "last_name": "Bar"}`, ""}, + {`{"last_name":"Bar"}`, "key 'first_name' is required"}, + {"{}", "key 'first_name' is required"}, + } + + for _, tc := range cases { + var v RequiredOptionalStruct + err := v.UnmarshalJSON([]byte(tc.json)) + if tc.errorMessage == "" { + if err != nil { + t.Errorf("%s. UnmarshalJSON didn`t expect error: %v", tc.json, err) + } + } else { + if fmt.Sprintf("%v", err) != tc.errorMessage { + t.Errorf("%s. UnmarshalJSON expected error: %v. 
got: %v", tc.json, tc.errorMessage, err) + } + } + } +} diff --git a/src/vendor/github.com/mailru/easyjson/tests/snake.go b/src/vendor/github.com/mailru/easyjson/tests/snake.go new file mode 100644 index 00000000..9b64f861 --- /dev/null +++ b/src/vendor/github.com/mailru/easyjson/tests/snake.go @@ -0,0 +1,10 @@ +package tests + +//easyjson:json +type SnakeStruct struct { + WeirdHTTPStuff bool + CustomNamedField string `json:"cUsToM"` +} + +var snakeStructValue SnakeStruct +var snakeStructString = `{"weird_http_stuff":false,"cUsToM":""}` diff --git a/src/vendor/github.com/pierrre/gotestcover/.gitignore b/src/vendor/github.com/pierrre/gotestcover/.gitignore new file mode 100644 index 00000000..1b3cea9a --- /dev/null +++ b/src/vendor/github.com/pierrre/gotestcover/.gitignore @@ -0,0 +1,2 @@ +coverage.txt +coverage.html \ No newline at end of file diff --git a/src/vendor/github.com/pierrre/gotestcover/.travis.yml b/src/vendor/github.com/pierrre/gotestcover/.travis.yml new file mode 100644 index 00000000..a2665a0b --- /dev/null +++ b/src/vendor/github.com/pierrre/gotestcover/.travis.yml @@ -0,0 +1,23 @@ +language: go + +sudo: false + +go: + - 1.6.2 + - tip + +before_install: + # Redo the travis setup but with the pierre/gotestcover path. 
This is needed so the package path is correct + - mkdir -p $HOME/gopath/src/github.com/pierrre/gotestcover + - rsync -az ${TRAVIS_BUILD_DIR}/ $HOME/gopath/src/github.com/pierrre/gotestcover + - export TRAVIS_BUILD_DIR=$HOME/gopath/src/github.com/pierrre/gotestcover + - cd $HOME/gopath/src/github.com/pierrre/gotestcover + +install: make setup + +script: + - make check + - make coverage + +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/src/vendor/github.com/pierrre/gotestcover/LICENSE b/src/vendor/github.com/pierrre/gotestcover/LICENSE new file mode 100644 index 00000000..210d800c --- /dev/null +++ b/src/vendor/github.com/pierrre/gotestcover/LICENSE @@ -0,0 +1,7 @@ +Copyright (C) 2015 Pierre Durand + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
\ No newline at end of file diff --git a/src/vendor/github.com/pierrre/gotestcover/Makefile b/src/vendor/github.com/pierrre/gotestcover/Makefile new file mode 100644 index 00000000..97e3b852 --- /dev/null +++ b/src/vendor/github.com/pierrre/gotestcover/Makefile @@ -0,0 +1,19 @@ +#/bin/bash + +setup: + go get -u -v github.com/golang/lint/golint + go get -v -t ./... + +check: + gofmt -d . + go tool vet . + golint + +coverage: + gotestcover -coverprofile=coverage.txt github.com/pierrre/gotestcover + go tool cover -html=coverage.txt -o=coverage.html + +clean: + -rm coverage.txt + -rm coverage.html + gofmt -w . \ No newline at end of file diff --git a/src/vendor/github.com/pierrre/gotestcover/README.md b/src/vendor/github.com/pierrre/gotestcover/README.md new file mode 100644 index 00000000..d477f35a --- /dev/null +++ b/src/vendor/github.com/pierrre/gotestcover/README.md @@ -0,0 +1,29 @@ +# Go test cover with multiple packages support + +## Deprecated +Just use this script instead: +``` +echo 'mode: atomic' > coverage.txt && go list ./... | xargs -n1 -I{} sh -c 'go test -covermode=atomic -coverprofile=coverage.tmp {} && tail -n +2 coverage.tmp >> coverage.txt' && rm coverage.tmp +``` +It's easier to customize, gives you better control, and doesn't require to download a third-party tool. + +The repository will remain, but I will not update it anymore. +If you want to add new features, create a new fork. + +## Features +- Coverage profile with multiple packages (`go test` doesn't support that) + +## Install +`go get github.com/pierrre/gotestcover` + +## Usage +```sh +gotestcover -coverprofile=cover.out mypackage +go tool cover -html=cover.out -o=cover.html +``` + +Run on multiple package with: +- `package1 package2` +- `package/...` + +Some `go test / build` flags are available. 
diff --git a/src/vendor/github.com/pierrre/gotestcover/gotestcover.go b/src/vendor/github.com/pierrre/gotestcover/gotestcover.go new file mode 100644 index 00000000..8a914b90 --- /dev/null +++ b/src/vendor/github.com/pierrre/gotestcover/gotestcover.go @@ -0,0 +1,282 @@ +// Package gotestcover provides multiple packages support for Go test cover. +package main + +import ( + "bufio" + "bytes" + "flag" + "fmt" + "io/ioutil" + "os" + "os/exec" + "runtime" + "strings" + "sync" +) + +var ( + // go build + flagA bool + flagX bool + flagRace bool + flagTags string + + // go test + flagV bool + flagCount int + flagCPU string + flagParallel string + flagRun string + flagShort bool + flagTimeout string + flagCoverMode string + flagCoverProfile string + + // custom + flagParallelPackages = runtime.GOMAXPROCS(0) + + // GAE/Go + flagGoogleAppEngine bool +) + +func main() { + err := run() + if err != nil { + fmt.Println(err) + os.Exit(1) + } +} + +func run() error { + err := parseFlags() + if err != nil { + return err + } + pkgArgs, flagArgs := parseArgs() + pkgs, err := resolvePackages(pkgArgs) + if err != nil { + return err + } + cov, failed := runAllPackageTests(pkgs, flagArgs, func(out string) { + fmt.Print(out) + }) + err = writeCoverProfile(cov) + if err != nil { + return err + } + if failed { + return fmt.Errorf("test failed") + } + return nil +} + +func parseFlags() error { + flag.BoolVar(&flagA, "a", flagA, "see 'go build' help") + flag.BoolVar(&flagX, "x", flagX, "see 'go build' help") + flag.BoolVar(&flagRace, "race", flagRace, "see 'go build' help") + flag.StringVar(&flagTags, "tags", flagTags, "see 'go build' help") + + flag.BoolVar(&flagV, "v", flagV, "see 'go test' help") + flag.IntVar(&flagCount, "count", flagCount, "see 'go test' help") + flag.StringVar(&flagCPU, "cpu", flagCPU, "see 'go test' help") + flag.StringVar(&flagParallel, "parallel", flagParallel, "see 'go test' help") + flag.StringVar(&flagRun, "run", flagRun, "see 'go test' help") + 
flag.BoolVar(&flagShort, "short", flagShort, "see 'go test' help") + flag.StringVar(&flagTimeout, "timeout", flagTimeout, "see 'go test' help") + flag.StringVar(&flagCoverMode, "covermode", flagCoverMode, "see 'go test' help") + flag.StringVar(&flagCoverProfile, "coverprofile", flagCoverProfile, "see 'go test' help") + + flag.IntVar(&flagParallelPackages, "parallelpackages", flagParallelPackages, "Number of package test run in parallel") + + flag.BoolVar(&flagGoogleAppEngine, "gae", flagGoogleAppEngine, "Bool of Command exec in GAE/Go") + + flag.Parse() + if flagCoverProfile == "" { + return fmt.Errorf("flag coverprofile must be set") + } + if flagParallelPackages < 1 { + return fmt.Errorf("flag parallelpackages must be greater than or equal to 1") + } + return nil +} + +func parseArgs() (pkgArgs, flagArgs []string) { + args := flag.Args() + for i, a := range args { + if strings.HasPrefix(a, "-") { + return args[:i], args[i:] + } + } + return args, nil +} + +func resolvePackages(pkgArgs []string) ([]string, error) { + cmdArgs := []string{"list"} + cmdArgs = append(cmdArgs, pkgArgs...) + cmdOut, err := runGoCommand(cmdArgs...) 
+ if err != nil { + return nil, err + } + var pkgs []string + sc := bufio.NewScanner(bytes.NewReader(cmdOut)) + for sc.Scan() { + pkgs = append(pkgs, sc.Text()) + } + return pkgs, nil +} + +func runAllPackageTests(pkgs []string, flgs []string, pf func(string)) ([]byte, bool) { + pkgch := make(chan string) + type res struct { + out string + cov []byte + err error + } + resch := make(chan res) + wg := new(sync.WaitGroup) + wg.Add(flagParallelPackages) + go func() { + for _, pkg := range pkgs { + pkgch <- pkg + } + close(pkgch) + wg.Wait() + close(resch) + }() + for i := 0; i < flagParallelPackages; i++ { + go func() { + for p := range pkgch { + out, cov, err := runPackageTests(p, flgs) + resch <- res{ + out: out, + cov: cov, + err: err, + } + } + wg.Done() + }() + } + failed := false + var cov []byte + for r := range resch { + if r.err == nil { + pf(r.out) + cov = append(cov, r.cov...) + } else { + pf(r.err.Error()) + failed = true + } + } + return cov, failed +} + +func runPackageTests(pkg string, flgs []string) (out string, cov []byte, err error) { + coverFile, err := ioutil.TempFile("", "gotestcover-") + if err != nil { + return "", nil, err + } + defer os.Remove(coverFile.Name()) + defer coverFile.Close() + var args []string + args = append(args, "test") + + if flagA { + args = append(args, "-a") + } + if flagX { + args = append(args, "-x") + } + if flagRace { + args = append(args, "-race") + } + if flagTags != "" { + args = append(args, "-tags", flagTags) + } + + if flagV { + args = append(args, "-v") + } + if flagCount != 0 { + args = append(args, "-count", fmt.Sprint(flagCount)) + } + if flagCPU != "" { + args = append(args, "-cpu", flagCPU) + } + if flagParallel != "" { + args = append(args, "-parallel", flagParallel) + } + if flagRun != "" { + args = append(args, "-run", flagRun) + } + if flagShort { + args = append(args, "-short") + } + if flagTimeout != "" { + args = append(args, "-timeout", flagTimeout) + } + args = append(args, "-cover") + if 
flagCoverMode != "" { + args = append(args, "-covermode", flagCoverMode) + } + args = append(args, "-coverprofile", coverFile.Name()) + + args = append(args, pkg) + + args = append(args, flgs...) + + cmdOut, err := runGoCommand(args...) + if err != nil { + return "", nil, err + } + cov, err = ioutil.ReadAll(coverFile) + if err != nil { + return "", nil, err + } + cov = removeFirstLine(cov) + return string(cmdOut), cov, nil +} + +func writeCoverProfile(cov []byte) error { + if len(cov) == 0 { + return nil + } + buf := new(bytes.Buffer) + mode := flagCoverMode + if mode == "" { + if flagRace { + mode = "atomic" + } else { + mode = "set" + } + } + fmt.Fprintf(buf, "mode: %s\n", mode) + buf.Write(cov) + return ioutil.WriteFile(flagCoverProfile, buf.Bytes(), os.FileMode(0644)) +} + +func runGoCommand(args ...string) ([]byte, error) { + goCmd := "go" + if flagGoogleAppEngine { + goCmd = "goapp" + } + cmd := exec.Command(goCmd, args...) + out, err := cmd.CombinedOutput() + if err != nil { + return nil, fmt.Errorf("command %s: %s\n%s", cmd.Args, err, out) + } + return out, nil +} + +func removeFirstLine(b []byte) []byte { + out := new(bytes.Buffer) + sc := bufio.NewScanner(bytes.NewReader(b)) + firstLine := true + for sc.Scan() { + if firstLine { + firstLine = false + continue + } + fmt.Fprintf(out, "%s\n", sc.Bytes()) + } + return out.Bytes() +} diff --git a/src/vendor/github.com/pierrre/gotestcover/gotestcover_test.go b/src/vendor/github.com/pierrre/gotestcover/gotestcover_test.go new file mode 100644 index 00000000..3a662d69 --- /dev/null +++ b/src/vendor/github.com/pierrre/gotestcover/gotestcover_test.go @@ -0,0 +1,87 @@ +package main + +import ( + "os" + "testing" +) + +func TestParseFlags(t *testing.T) { + os.Args = []string{"gotestcover", + "-v", + "-a", + "-x", + "-tags=foobar", + "-race", + "-cpu=4", + "-parallel=2", + "-run=abc", + "-short", + "-timeout=15s", + "-covermode=atomic", + "-parallelpackages=2", + "-coverprofile=cover.out", + "-gae", + } + + err := 
parseFlags() + + if err != nil { + t.Fatal(err) + } + + if !flagV { + t.Errorf("flagV should be set to true") + } + + if !flagA { + t.Errorf("flagA should be set to true") + } + + if !flagX { + t.Errorf("flagX should be set to true") + } + + if flagTags != "foobar" { + t.Errorf("flagCPU is not equal to foobar, got %s", flagTags) + } + + if !flagRace { + t.Errorf("flagRace should be set to true") + } + + if flagCPU != "4" { + t.Errorf("flagCPU is not equal to 4, got %s", flagCPU) + } + + if flagParallel != "2" { + t.Errorf("flagParallel is not equal to 2, got %s", flagParallel) + } + + if flagRun != "abc" { + t.Errorf("flagRun is not equal to 'abc', got %s", flagRun) + } + + if !flagShort { + t.Errorf("flagShort should be set to true") + } + + if flagTimeout != "15s" { + t.Errorf("flagTimeout is not equal to '15s', got %s", flagTimeout) + } + + if flagCoverMode != "atomic" { + t.Errorf("flagCoverMode is not equal to 'atomic', got %s", flagCoverMode) + } + + if flagParallelPackages != 2 { + t.Errorf("flagParallelPackages is not equal to '2', got %d", flagParallelPackages) + } + + if flagCoverProfile != "cover.out" { + t.Errorf("flagCoverProfile is not equal to 'cover.out', got %s", flagCoverProfile) + } + + if !flagGoogleAppEngine { + t.Errorf("flagGoogleAppEngine should be set to true") + } +} diff --git a/src/vendor/github.com/pkg/errors/.gitignore b/src/vendor/github.com/pkg/errors/.gitignore new file mode 100644 index 00000000..daf913b1 --- /dev/null +++ b/src/vendor/github.com/pkg/errors/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/src/vendor/github.com/pkg/errors/.travis.yml b/src/vendor/github.com/pkg/errors/.travis.yml new file mode 100644 index 00000000..567ccdbf --- /dev/null 
+++ b/src/vendor/github.com/pkg/errors/.travis.yml @@ -0,0 +1,11 @@ +language: go +go_import_path: github.com/pkg/errors +go: + - 1.4.3 + - 1.5.4 + - 1.6.3 + - 1.7.3 + - tip + +script: + - go test -v ./... diff --git a/src/vendor/github.com/pkg/errors/LICENSE b/src/vendor/github.com/pkg/errors/LICENSE new file mode 100644 index 00000000..835ba3e7 --- /dev/null +++ b/src/vendor/github.com/pkg/errors/LICENSE @@ -0,0 +1,23 @@ +Copyright (c) 2015, Dave Cheney +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/src/vendor/github.com/pkg/errors/README.md b/src/vendor/github.com/pkg/errors/README.md new file mode 100644 index 00000000..273db3c9 --- /dev/null +++ b/src/vendor/github.com/pkg/errors/README.md @@ -0,0 +1,52 @@ +# errors [![Travis-CI](https://travis-ci.org/pkg/errors.svg)](https://travis-ci.org/pkg/errors) [![AppVeyor](https://ci.appveyor.com/api/projects/status/b98mptawhudj53ep/branch/master?svg=true)](https://ci.appveyor.com/project/davecheney/errors/branch/master) [![GoDoc](https://godoc.org/github.com/pkg/errors?status.svg)](http://godoc.org/github.com/pkg/errors) [![Report card](https://goreportcard.com/badge/github.com/pkg/errors)](https://goreportcard.com/report/github.com/pkg/errors) + +Package errors provides simple error handling primitives. + +`go get github.com/pkg/errors` + +The traditional error handling idiom in Go is roughly akin to +```go +if err != nil { + return err +} +``` +which applied recursively up the call stack results in error reports without context or debugging information. The errors package allows programmers to add context to the failure path in their code in a way that does not destroy the original value of the error. + +## Adding context to an error + +The errors.Wrap function returns a new error that adds context to the original error. For example +```go +_, err := ioutil.ReadAll(r) +if err != nil { + return errors.Wrap(err, "read failed") +} +``` +## Retrieving the cause of an error + +Using `errors.Wrap` constructs a stack of errors, adding context to the preceding error. Depending on the nature of the error it may be necessary to reverse the operation of errors.Wrap to retrieve the original error for inspection. Any error value which implements this interface can be inspected by `errors.Cause`. +```go +type causer interface { + Cause() error +} +``` +`errors.Cause` will recursively retrieve the topmost error which does not implement `causer`, which is assumed to be the original cause. 
For example: +```go +switch err := errors.Cause(err).(type) { +case *MyError: + // handle specifically +default: + // unknown error +} +``` + +[Read the package documentation for more information](https://godoc.org/github.com/pkg/errors). + +## Contributing + +We welcome pull requests, bug fixes and issue reports. With that said, the bar for adding new symbols to this package is intentionally set high. + +Before proposing a change, please discuss your change by raising an issue. + +## Licence + +BSD-2-Clause diff --git a/src/vendor/github.com/pkg/errors/appveyor.yml b/src/vendor/github.com/pkg/errors/appveyor.yml new file mode 100644 index 00000000..a932eade --- /dev/null +++ b/src/vendor/github.com/pkg/errors/appveyor.yml @@ -0,0 +1,32 @@ +version: build-{build}.{branch} + +clone_folder: C:\gopath\src\github.com\pkg\errors +shallow_clone: true # for startup speed + +environment: + GOPATH: C:\gopath + +platform: + - x64 + +# http://www.appveyor.com/docs/installed-software +install: + # some helpful output for debugging builds + - go version + - go env + # pre-installed MinGW at C:\MinGW is 32bit only + # but MSYS2 at C:\msys64 has mingw64 + - set PATH=C:\msys64\mingw64\bin;%PATH% + - gcc --version + - g++ --version + +build_script: + - go install -v ./... + +test_script: + - set PATH=C:\gopath\bin;%PATH% + - go test -v ./... 
+ +#artifacts: +# - path: '%GOPATH%\bin\*.exe' +deploy: off diff --git a/src/vendor/github.com/pkg/errors/bench_test.go b/src/vendor/github.com/pkg/errors/bench_test.go new file mode 100644 index 00000000..dbee1c00 --- /dev/null +++ b/src/vendor/github.com/pkg/errors/bench_test.go @@ -0,0 +1,60 @@ +// +build go1.7 + +package errors + +import ( + "fmt" + "testing" + + stderrors "errors" +) + +func noErrors(at, depth int) error { + if at >= depth { + return stderrors.New("no error") + } + return noErrors(at+1, depth) +} + +func yesErrors(at, depth int) error { + if at >= depth { + return New("ye error") + } + return yesErrors(at+1, depth) +} + +func BenchmarkErrors(b *testing.B) { + var toperr error + type run struct { + stack int + std bool + } + runs := []run{ + {10, false}, + {10, true}, + {100, false}, + {100, true}, + {1000, false}, + {1000, true}, + } + for _, r := range runs { + part := "pkg/errors" + if r.std { + part = "errors" + } + name := fmt.Sprintf("%s-stack-%d", part, r.stack) + b.Run(name, func(b *testing.B) { + var err error + f := yesErrors + if r.std { + f = noErrors + } + b.ReportAllocs() + for i := 0; i < b.N; i++ { + err = f(0, r.stack) + } + b.StopTimer() + toperr = err + }) + } +} diff --git a/src/vendor/github.com/pkg/errors/errors.go b/src/vendor/github.com/pkg/errors/errors.go new file mode 100644 index 00000000..842ee804 --- /dev/null +++ b/src/vendor/github.com/pkg/errors/errors.go @@ -0,0 +1,269 @@ +// Package errors provides simple error handling primitives. +// +// The traditional error handling idiom in Go is roughly akin to +// +// if err != nil { +// return err +// } +// +// which applied recursively up the call stack results in error reports +// without context or debugging information. The errors package allows +// programmers to add context to the failure path in their code in a way +// that does not destroy the original value of the error. 
+// +// Adding context to an error +// +// The errors.Wrap function returns a new error that adds context to the +// original error by recording a stack trace at the point Wrap is called, +// and the supplied message. For example +// +// _, err := ioutil.ReadAll(r) +// if err != nil { +// return errors.Wrap(err, "read failed") +// } +// +// If additional control is required the errors.WithStack and errors.WithMessage +// functions destructure errors.Wrap into its component operations of annotating +// an error with a stack trace and an a message, respectively. +// +// Retrieving the cause of an error +// +// Using errors.Wrap constructs a stack of errors, adding context to the +// preceding error. Depending on the nature of the error it may be necessary +// to reverse the operation of errors.Wrap to retrieve the original error +// for inspection. Any error value which implements this interface +// +// type causer interface { +// Cause() error +// } +// +// can be inspected by errors.Cause. errors.Cause will recursively retrieve +// the topmost error which does not implement causer, which is assumed to be +// the original cause. For example: +// +// switch err := errors.Cause(err).(type) { +// case *MyError: +// // handle specifically +// default: +// // unknown error +// } +// +// causer interface is not exported by this package, but is considered a part +// of stable public API. +// +// Formatted printing of errors +// +// All error values returned from this package implement fmt.Formatter and can +// be formatted by the fmt package. The following verbs are supported +// +// %s print the error. If the error has a Cause it will be +// printed recursively +// %v see %s +// %+v extended format. Each Frame of the error's StackTrace will +// be printed in detail. +// +// Retrieving the stack trace of an error or wrapper +// +// New, Errorf, Wrap, and Wrapf record a stack trace at the point they are +// invoked. 
This information can be retrieved with the following interface. +// +// type stackTracer interface { +// StackTrace() errors.StackTrace +// } +// +// Where errors.StackTrace is defined as +// +// type StackTrace []Frame +// +// The Frame type represents a call site in the stack trace. Frame supports +// the fmt.Formatter interface that can be used for printing information about +// the stack trace of this error. For example: +// +// if err, ok := err.(stackTracer); ok { +// for _, f := range err.StackTrace() { +// fmt.Printf("%+s:%d", f) +// } +// } +// +// stackTracer interface is not exported by this package, but is considered a part +// of stable public API. +// +// See the documentation for Frame.Format for more details. +package errors + +import ( + "fmt" + "io" +) + +// New returns an error with the supplied message. +// New also records the stack trace at the point it was called. +func New(message string) error { + return &fundamental{ + msg: message, + stack: callers(), + } +} + +// Errorf formats according to a format specifier and returns the string +// as a value that satisfies error. +// Errorf also records the stack trace at the point it was called. +func Errorf(format string, args ...interface{}) error { + return &fundamental{ + msg: fmt.Sprintf(format, args...), + stack: callers(), + } +} + +// fundamental is an error that has a message and a stack, but no caller. +type fundamental struct { + msg string + *stack +} + +func (f *fundamental) Error() string { return f.msg } + +func (f *fundamental) Format(s fmt.State, verb rune) { + switch verb { + case 'v': + if s.Flag('+') { + io.WriteString(s, f.msg) + f.stack.Format(s, verb) + return + } + fallthrough + case 's': + io.WriteString(s, f.msg) + case 'q': + fmt.Fprintf(s, "%q", f.msg) + } +} + +// WithStack annotates err with a stack trace at the point WithStack was called. +// If err is nil, WithStack returns nil. 
+func WithStack(err error) error { + if err == nil { + return nil + } + return &withStack{ + err, + callers(), + } +} + +type withStack struct { + error + *stack +} + +func (w *withStack) Cause() error { return w.error } + +func (w *withStack) Format(s fmt.State, verb rune) { + switch verb { + case 'v': + if s.Flag('+') { + fmt.Fprintf(s, "%+v", w.Cause()) + w.stack.Format(s, verb) + return + } + fallthrough + case 's': + io.WriteString(s, w.Error()) + case 'q': + fmt.Fprintf(s, "%q", w.Error()) + } +} + +// Wrap returns an error annotating err with a stack trace +// at the point Wrap is called, and the supplied message. +// If err is nil, Wrap returns nil. +func Wrap(err error, message string) error { + if err == nil { + return nil + } + err = &withMessage{ + cause: err, + msg: message, + } + return &withStack{ + err, + callers(), + } +} + +// Wrapf returns an error annotating err with a stack trace +// at the point Wrapf is call, and the format specifier. +// If err is nil, Wrapf returns nil. +func Wrapf(err error, format string, args ...interface{}) error { + if err == nil { + return nil + } + err = &withMessage{ + cause: err, + msg: fmt.Sprintf(format, args...), + } + return &withStack{ + err, + callers(), + } +} + +// WithMessage annotates err with a new message. +// If err is nil, WithMessage returns nil. +func WithMessage(err error, message string) error { + if err == nil { + return nil + } + return &withMessage{ + cause: err, + msg: message, + } +} + +type withMessage struct { + cause error + msg string +} + +func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() } +func (w *withMessage) Cause() error { return w.cause } + +func (w *withMessage) Format(s fmt.State, verb rune) { + switch verb { + case 'v': + if s.Flag('+') { + fmt.Fprintf(s, "%+v\n", w.Cause()) + io.WriteString(s, w.msg) + return + } + fallthrough + case 's', 'q': + io.WriteString(s, w.Error()) + } +} + +// Cause returns the underlying cause of the error, if possible. 
+// An error value has a cause if it implements the following +// interface: +// +// type causer interface { +// Cause() error +// } +// +// If the error does not implement Cause, the original error will +// be returned. If the error is nil, nil will be returned without further +// investigation. +func Cause(err error) error { + type causer interface { + Cause() error + } + + for err != nil { + cause, ok := err.(causer) + if !ok { + break + } + err = cause.Cause() + } + return err +} diff --git a/src/vendor/github.com/pkg/errors/errors_test.go b/src/vendor/github.com/pkg/errors/errors_test.go new file mode 100644 index 00000000..c4e6eef6 --- /dev/null +++ b/src/vendor/github.com/pkg/errors/errors_test.go @@ -0,0 +1,225 @@ +package errors + +import ( + "errors" + "fmt" + "io" + "reflect" + "testing" +) + +func TestNew(t *testing.T) { + tests := []struct { + err string + want error + }{ + {"", fmt.Errorf("")}, + {"foo", fmt.Errorf("foo")}, + {"foo", New("foo")}, + {"string with format specifiers: %v", errors.New("string with format specifiers: %v")}, + } + + for _, tt := range tests { + got := New(tt.err) + if got.Error() != tt.want.Error() { + t.Errorf("New.Error(): got: %q, want %q", got, tt.want) + } + } +} + +func TestWrapNil(t *testing.T) { + got := Wrap(nil, "no error") + if got != nil { + t.Errorf("Wrap(nil, \"no error\"): got %#v, expected nil", got) + } +} + +func TestWrap(t *testing.T) { + tests := []struct { + err error + message string + want string + }{ + {io.EOF, "read error", "read error: EOF"}, + {Wrap(io.EOF, "read error"), "client error", "client error: read error: EOF"}, + } + + for _, tt := range tests { + got := Wrap(tt.err, tt.message).Error() + if got != tt.want { + t.Errorf("Wrap(%v, %q): got: %v, want %v", tt.err, tt.message, got, tt.want) + } + } +} + +type nilError struct{} + +func (nilError) Error() string { return "nil error" } + +func TestCause(t *testing.T) { + x := New("error") + tests := []struct { + err error + want error + }{{ + // 
nil error is nil + err: nil, + want: nil, + }, { + // explicit nil error is nil + err: (error)(nil), + want: nil, + }, { + // typed nil is nil + err: (*nilError)(nil), + want: (*nilError)(nil), + }, { + // uncaused error is unaffected + err: io.EOF, + want: io.EOF, + }, { + // caused error returns cause + err: Wrap(io.EOF, "ignored"), + want: io.EOF, + }, { + err: x, // return from errors.New + want: x, + }, { + WithMessage(nil, "whoops"), + nil, + }, { + WithMessage(io.EOF, "whoops"), + io.EOF, + }, { + WithStack(nil), + nil, + }, { + WithStack(io.EOF), + io.EOF, + }} + + for i, tt := range tests { + got := Cause(tt.err) + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("test %d: got %#v, want %#v", i+1, got, tt.want) + } + } +} + +func TestWrapfNil(t *testing.T) { + got := Wrapf(nil, "no error") + if got != nil { + t.Errorf("Wrapf(nil, \"no error\"): got %#v, expected nil", got) + } +} + +func TestWrapf(t *testing.T) { + tests := []struct { + err error + message string + want string + }{ + {io.EOF, "read error", "read error: EOF"}, + {Wrapf(io.EOF, "read error without format specifiers"), "client error", "client error: read error without format specifiers: EOF"}, + {Wrapf(io.EOF, "read error with %d format specifier", 1), "client error", "client error: read error with 1 format specifier: EOF"}, + } + + for _, tt := range tests { + got := Wrapf(tt.err, tt.message).Error() + if got != tt.want { + t.Errorf("Wrapf(%v, %q): got: %v, want %v", tt.err, tt.message, got, tt.want) + } + } +} + +func TestErrorf(t *testing.T) { + tests := []struct { + err error + want string + }{ + {Errorf("read error without format specifiers"), "read error without format specifiers"}, + {Errorf("read error with %d format specifier", 1), "read error with 1 format specifier"}, + } + + for _, tt := range tests { + got := tt.err.Error() + if got != tt.want { + t.Errorf("Errorf(%v): got: %q, want %q", tt.err, got, tt.want) + } + } +} + +func TestWithStackNil(t *testing.T) { + got := 
WithStack(nil) + if got != nil { + t.Errorf("WithStack(nil): got %#v, expected nil", got) + } +} + +func TestWithStack(t *testing.T) { + tests := []struct { + err error + want string + }{ + {io.EOF, "EOF"}, + {WithStack(io.EOF), "EOF"}, + } + + for _, tt := range tests { + got := WithStack(tt.err).Error() + if got != tt.want { + t.Errorf("WithStack(%v): got: %v, want %v", tt.err, got, tt.want) + } + } +} + +func TestWithMessageNil(t *testing.T) { + got := WithMessage(nil, "no error") + if got != nil { + t.Errorf("WithMessage(nil, \"no error\"): got %#v, expected nil", got) + } +} + +func TestWithMessage(t *testing.T) { + tests := []struct { + err error + message string + want string + }{ + {io.EOF, "read error", "read error: EOF"}, + {WithMessage(io.EOF, "read error"), "client error", "client error: read error: EOF"}, + } + + for _, tt := range tests { + got := WithMessage(tt.err, tt.message).Error() + if got != tt.want { + t.Errorf("WithMessage(%v, %q): got: %q, want %q", tt.err, tt.message, got, tt.want) + } + } +} + +// errors.New, etc values are not expected to be compared by value +// but the change in errors#27 made them incomparable. Assert that +// various kinds of errors have a functional equality operator, even +// if the result of that equality is always false. 
+func TestErrorEquality(t *testing.T) { + vals := []error{ + nil, + io.EOF, + errors.New("EOF"), + New("EOF"), + Errorf("EOF"), + Wrap(io.EOF, "EOF"), + Wrapf(io.EOF, "EOF%d", 2), + WithMessage(nil, "whoops"), + WithMessage(io.EOF, "whoops"), + WithStack(io.EOF), + WithStack(nil), + } + + for i := range vals { + for j := range vals { + _ = vals[i] == vals[j] // mustn't panic + } + } +} diff --git a/src/vendor/github.com/pkg/errors/example_test.go b/src/vendor/github.com/pkg/errors/example_test.go new file mode 100644 index 00000000..c1fc13e3 --- /dev/null +++ b/src/vendor/github.com/pkg/errors/example_test.go @@ -0,0 +1,205 @@ +package errors_test + +import ( + "fmt" + + "github.com/pkg/errors" +) + +func ExampleNew() { + err := errors.New("whoops") + fmt.Println(err) + + // Output: whoops +} + +func ExampleNew_printf() { + err := errors.New("whoops") + fmt.Printf("%+v", err) + + // Example output: + // whoops + // github.com/pkg/errors_test.ExampleNew_printf + // /home/dfc/src/github.com/pkg/errors/example_test.go:17 + // testing.runExample + // /home/dfc/go/src/testing/example.go:114 + // testing.RunExamples + // /home/dfc/go/src/testing/example.go:38 + // testing.(*M).Run + // /home/dfc/go/src/testing/testing.go:744 + // main.main + // /github.com/pkg/errors/_test/_testmain.go:106 + // runtime.main + // /home/dfc/go/src/runtime/proc.go:183 + // runtime.goexit + // /home/dfc/go/src/runtime/asm_amd64.s:2059 +} + +func ExampleWithMessage() { + cause := errors.New("whoops") + err := errors.WithMessage(cause, "oh noes") + fmt.Println(err) + + // Output: oh noes: whoops +} + +func ExampleWithStack() { + cause := errors.New("whoops") + err := errors.WithStack(cause) + fmt.Println(err) + + // Output: whoops +} + +func ExampleWithStack_printf() { + cause := errors.New("whoops") + err := errors.WithStack(cause) + fmt.Printf("%+v", err) + + // Example Output: + // whoops + // github.com/pkg/errors_test.ExampleWithStack_printf + // 
/home/fabstu/go/src/github.com/pkg/errors/example_test.go:55 + // testing.runExample + // /usr/lib/go/src/testing/example.go:114 + // testing.RunExamples + // /usr/lib/go/src/testing/example.go:38 + // testing.(*M).Run + // /usr/lib/go/src/testing/testing.go:744 + // main.main + // github.com/pkg/errors/_test/_testmain.go:106 + // runtime.main + // /usr/lib/go/src/runtime/proc.go:183 + // runtime.goexit + // /usr/lib/go/src/runtime/asm_amd64.s:2086 + // github.com/pkg/errors_test.ExampleWithStack_printf + // /home/fabstu/go/src/github.com/pkg/errors/example_test.go:56 + // testing.runExample + // /usr/lib/go/src/testing/example.go:114 + // testing.RunExamples + // /usr/lib/go/src/testing/example.go:38 + // testing.(*M).Run + // /usr/lib/go/src/testing/testing.go:744 + // main.main + // github.com/pkg/errors/_test/_testmain.go:106 + // runtime.main + // /usr/lib/go/src/runtime/proc.go:183 + // runtime.goexit + // /usr/lib/go/src/runtime/asm_amd64.s:2086 +} + +func ExampleWrap() { + cause := errors.New("whoops") + err := errors.Wrap(cause, "oh noes") + fmt.Println(err) + + // Output: oh noes: whoops +} + +func fn() error { + e1 := errors.New("error") + e2 := errors.Wrap(e1, "inner") + e3 := errors.Wrap(e2, "middle") + return errors.Wrap(e3, "outer") +} + +func ExampleCause() { + err := fn() + fmt.Println(err) + fmt.Println(errors.Cause(err)) + + // Output: outer: middle: inner: error + // error +} + +func ExampleWrap_extended() { + err := fn() + fmt.Printf("%+v\n", err) + + // Example output: + // error + // github.com/pkg/errors_test.fn + // /home/dfc/src/github.com/pkg/errors/example_test.go:47 + // github.com/pkg/errors_test.ExampleCause_printf + // /home/dfc/src/github.com/pkg/errors/example_test.go:63 + // testing.runExample + // /home/dfc/go/src/testing/example.go:114 + // testing.RunExamples + // /home/dfc/go/src/testing/example.go:38 + // testing.(*M).Run + // /home/dfc/go/src/testing/testing.go:744 + // main.main + // 
/github.com/pkg/errors/_test/_testmain.go:104 + // runtime.main + // /home/dfc/go/src/runtime/proc.go:183 + // runtime.goexit + // /home/dfc/go/src/runtime/asm_amd64.s:2059 + // github.com/pkg/errors_test.fn + // /home/dfc/src/github.com/pkg/errors/example_test.go:48: inner + // github.com/pkg/errors_test.fn + // /home/dfc/src/github.com/pkg/errors/example_test.go:49: middle + // github.com/pkg/errors_test.fn + // /home/dfc/src/github.com/pkg/errors/example_test.go:50: outer +} + +func ExampleWrapf() { + cause := errors.New("whoops") + err := errors.Wrapf(cause, "oh noes #%d", 2) + fmt.Println(err) + + // Output: oh noes #2: whoops +} + +func ExampleErrorf_extended() { + err := errors.Errorf("whoops: %s", "foo") + fmt.Printf("%+v", err) + + // Example output: + // whoops: foo + // github.com/pkg/errors_test.ExampleErrorf + // /home/dfc/src/github.com/pkg/errors/example_test.go:101 + // testing.runExample + // /home/dfc/go/src/testing/example.go:114 + // testing.RunExamples + // /home/dfc/go/src/testing/example.go:38 + // testing.(*M).Run + // /home/dfc/go/src/testing/testing.go:744 + // main.main + // /github.com/pkg/errors/_test/_testmain.go:102 + // runtime.main + // /home/dfc/go/src/runtime/proc.go:183 + // runtime.goexit + // /home/dfc/go/src/runtime/asm_amd64.s:2059 +} + +func Example_stackTrace() { + type stackTracer interface { + StackTrace() errors.StackTrace + } + + err, ok := errors.Cause(fn()).(stackTracer) + if !ok { + panic("oops, err does not implement stackTracer") + } + + st := err.StackTrace() + fmt.Printf("%+v", st[0:2]) // top two frames + + // Example output: + // github.com/pkg/errors_test.fn + // /home/dfc/src/github.com/pkg/errors/example_test.go:47 + // github.com/pkg/errors_test.Example_stackTrace + // /home/dfc/src/github.com/pkg/errors/example_test.go:127 +} + +func ExampleCause_printf() { + err := errors.Wrap(func() error { + return func() error { + return errors.Errorf("hello %s", fmt.Sprintf("world")) + }() + }(), "failed") + + 
fmt.Printf("%v", err) + + // Output: failed: hello world +} diff --git a/src/vendor/github.com/pkg/errors/format_test.go b/src/vendor/github.com/pkg/errors/format_test.go new file mode 100644 index 00000000..15fd7d89 --- /dev/null +++ b/src/vendor/github.com/pkg/errors/format_test.go @@ -0,0 +1,535 @@ +package errors + +import ( + "errors" + "fmt" + "io" + "regexp" + "strings" + "testing" +) + +func TestFormatNew(t *testing.T) { + tests := []struct { + error + format string + want string + }{{ + New("error"), + "%s", + "error", + }, { + New("error"), + "%v", + "error", + }, { + New("error"), + "%+v", + "error\n" + + "github.com/pkg/errors.TestFormatNew\n" + + "\t.+/github.com/pkg/errors/format_test.go:26", + }, { + New("error"), + "%q", + `"error"`, + }} + + for i, tt := range tests { + testFormatRegexp(t, i, tt.error, tt.format, tt.want) + } +} + +func TestFormatErrorf(t *testing.T) { + tests := []struct { + error + format string + want string + }{{ + Errorf("%s", "error"), + "%s", + "error", + }, { + Errorf("%s", "error"), + "%v", + "error", + }, { + Errorf("%s", "error"), + "%+v", + "error\n" + + "github.com/pkg/errors.TestFormatErrorf\n" + + "\t.+/github.com/pkg/errors/format_test.go:56", + }} + + for i, tt := range tests { + testFormatRegexp(t, i, tt.error, tt.format, tt.want) + } +} + +func TestFormatWrap(t *testing.T) { + tests := []struct { + error + format string + want string + }{{ + Wrap(New("error"), "error2"), + "%s", + "error2: error", + }, { + Wrap(New("error"), "error2"), + "%v", + "error2: error", + }, { + Wrap(New("error"), "error2"), + "%+v", + "error\n" + + "github.com/pkg/errors.TestFormatWrap\n" + + "\t.+/github.com/pkg/errors/format_test.go:82", + }, { + Wrap(io.EOF, "error"), + "%s", + "error: EOF", + }, { + Wrap(io.EOF, "error"), + "%v", + "error: EOF", + }, { + Wrap(io.EOF, "error"), + "%+v", + "EOF\n" + + "error\n" + + "github.com/pkg/errors.TestFormatWrap\n" + + "\t.+/github.com/pkg/errors/format_test.go:96", + }, { + Wrap(Wrap(io.EOF, 
"error1"), "error2"), + "%+v", + "EOF\n" + + "error1\n" + + "github.com/pkg/errors.TestFormatWrap\n" + + "\t.+/github.com/pkg/errors/format_test.go:103\n", + }, { + Wrap(New("error with space"), "context"), + "%q", + `"context: error with space"`, + }} + + for i, tt := range tests { + testFormatRegexp(t, i, tt.error, tt.format, tt.want) + } +} + +func TestFormatWrapf(t *testing.T) { + tests := []struct { + error + format string + want string + }{{ + Wrapf(io.EOF, "error%d", 2), + "%s", + "error2: EOF", + }, { + Wrapf(io.EOF, "error%d", 2), + "%v", + "error2: EOF", + }, { + Wrapf(io.EOF, "error%d", 2), + "%+v", + "EOF\n" + + "error2\n" + + "github.com/pkg/errors.TestFormatWrapf\n" + + "\t.+/github.com/pkg/errors/format_test.go:134", + }, { + Wrapf(New("error"), "error%d", 2), + "%s", + "error2: error", + }, { + Wrapf(New("error"), "error%d", 2), + "%v", + "error2: error", + }, { + Wrapf(New("error"), "error%d", 2), + "%+v", + "error\n" + + "github.com/pkg/errors.TestFormatWrapf\n" + + "\t.+/github.com/pkg/errors/format_test.go:149", + }} + + for i, tt := range tests { + testFormatRegexp(t, i, tt.error, tt.format, tt.want) + } +} + +func TestFormatWithStack(t *testing.T) { + tests := []struct { + error + format string + want []string + }{{ + WithStack(io.EOF), + "%s", + []string{"EOF"}, + }, { + WithStack(io.EOF), + "%v", + []string{"EOF"}, + }, { + WithStack(io.EOF), + "%+v", + []string{"EOF", + "github.com/pkg/errors.TestFormatWithStack\n" + + "\t.+/github.com/pkg/errors/format_test.go:175"}, + }, { + WithStack(New("error")), + "%s", + []string{"error"}, + }, { + WithStack(New("error")), + "%v", + []string{"error"}, + }, { + WithStack(New("error")), + "%+v", + []string{"error", + "github.com/pkg/errors.TestFormatWithStack\n" + + "\t.+/github.com/pkg/errors/format_test.go:189", + "github.com/pkg/errors.TestFormatWithStack\n" + + "\t.+/github.com/pkg/errors/format_test.go:189"}, + }, { + WithStack(WithStack(io.EOF)), + "%+v", + []string{"EOF", + 
"github.com/pkg/errors.TestFormatWithStack\n" + + "\t.+/github.com/pkg/errors/format_test.go:197", + "github.com/pkg/errors.TestFormatWithStack\n" + + "\t.+/github.com/pkg/errors/format_test.go:197"}, + }, { + WithStack(WithStack(Wrapf(io.EOF, "message"))), + "%+v", + []string{"EOF", + "message", + "github.com/pkg/errors.TestFormatWithStack\n" + + "\t.+/github.com/pkg/errors/format_test.go:205", + "github.com/pkg/errors.TestFormatWithStack\n" + + "\t.+/github.com/pkg/errors/format_test.go:205", + "github.com/pkg/errors.TestFormatWithStack\n" + + "\t.+/github.com/pkg/errors/format_test.go:205"}, + }, { + WithStack(Errorf("error%d", 1)), + "%+v", + []string{"error1", + "github.com/pkg/errors.TestFormatWithStack\n" + + "\t.+/github.com/pkg/errors/format_test.go:216", + "github.com/pkg/errors.TestFormatWithStack\n" + + "\t.+/github.com/pkg/errors/format_test.go:216"}, + }} + + for i, tt := range tests { + testFormatCompleteCompare(t, i, tt.error, tt.format, tt.want, true) + } +} + +func TestFormatWithMessage(t *testing.T) { + tests := []struct { + error + format string + want []string + }{{ + WithMessage(New("error"), "error2"), + "%s", + []string{"error2: error"}, + }, { + WithMessage(New("error"), "error2"), + "%v", + []string{"error2: error"}, + }, { + WithMessage(New("error"), "error2"), + "%+v", + []string{ + "error", + "github.com/pkg/errors.TestFormatWithMessage\n" + + "\t.+/github.com/pkg/errors/format_test.go:244", + "error2"}, + }, { + WithMessage(io.EOF, "addition1"), + "%s", + []string{"addition1: EOF"}, + }, { + WithMessage(io.EOF, "addition1"), + "%v", + []string{"addition1: EOF"}, + }, { + WithMessage(io.EOF, "addition1"), + "%+v", + []string{"EOF", "addition1"}, + }, { + WithMessage(WithMessage(io.EOF, "addition1"), "addition2"), + "%v", + []string{"addition2: addition1: EOF"}, + }, { + WithMessage(WithMessage(io.EOF, "addition1"), "addition2"), + "%+v", + []string{"EOF", "addition1", "addition2"}, + }, { + Wrap(WithMessage(io.EOF, "error1"), "error2"), 
+ "%+v", + []string{"EOF", "error1", "error2", + "github.com/pkg/errors.TestFormatWithMessage\n" + + "\t.+/github.com/pkg/errors/format_test.go:272"}, + }, { + WithMessage(Errorf("error%d", 1), "error2"), + "%+v", + []string{"error1", + "github.com/pkg/errors.TestFormatWithMessage\n" + + "\t.+/github.com/pkg/errors/format_test.go:278", + "error2"}, + }, { + WithMessage(WithStack(io.EOF), "error"), + "%+v", + []string{ + "EOF", + "github.com/pkg/errors.TestFormatWithMessage\n" + + "\t.+/github.com/pkg/errors/format_test.go:285", + "error"}, + }, { + WithMessage(Wrap(WithStack(io.EOF), "inside-error"), "outside-error"), + "%+v", + []string{ + "EOF", + "github.com/pkg/errors.TestFormatWithMessage\n" + + "\t.+/github.com/pkg/errors/format_test.go:293", + "inside-error", + "github.com/pkg/errors.TestFormatWithMessage\n" + + "\t.+/github.com/pkg/errors/format_test.go:293", + "outside-error"}, + }} + + for i, tt := range tests { + testFormatCompleteCompare(t, i, tt.error, tt.format, tt.want, true) + } +} + +func TestFormatGeneric(t *testing.T) { + starts := []struct { + err error + want []string + }{ + {New("new-error"), []string{ + "new-error", + "github.com/pkg/errors.TestFormatGeneric\n" + + "\t.+/github.com/pkg/errors/format_test.go:315"}, + }, {Errorf("errorf-error"), []string{ + "errorf-error", + "github.com/pkg/errors.TestFormatGeneric\n" + + "\t.+/github.com/pkg/errors/format_test.go:319"}, + }, {errors.New("errors-new-error"), []string{ + "errors-new-error"}, + }, + } + + wrappers := []wrapper{ + { + func(err error) error { return WithMessage(err, "with-message") }, + []string{"with-message"}, + }, { + func(err error) error { return WithStack(err) }, + []string{ + "github.com/pkg/errors.(func·002|TestFormatGeneric.func2)\n\t" + + ".+/github.com/pkg/errors/format_test.go:333", + }, + }, { + func(err error) error { return Wrap(err, "wrap-error") }, + []string{ + "wrap-error", + "github.com/pkg/errors.(func·003|TestFormatGeneric.func3)\n\t" + + 
".+/github.com/pkg/errors/format_test.go:339", + }, + }, { + func(err error) error { return Wrapf(err, "wrapf-error%d", 1) }, + []string{ + "wrapf-error1", + "github.com/pkg/errors.(func·004|TestFormatGeneric.func4)\n\t" + + ".+/github.com/pkg/errors/format_test.go:346", + }, + }, + } + + for s := range starts { + err := starts[s].err + want := starts[s].want + testFormatCompleteCompare(t, s, err, "%+v", want, false) + testGenericRecursive(t, err, want, wrappers, 3) + } +} + +func testFormatRegexp(t *testing.T, n int, arg interface{}, format, want string) { + got := fmt.Sprintf(format, arg) + gotLines := strings.SplitN(got, "\n", -1) + wantLines := strings.SplitN(want, "\n", -1) + + if len(wantLines) > len(gotLines) { + t.Errorf("test %d: wantLines(%d) > gotLines(%d):\n got: %q\nwant: %q", n+1, len(wantLines), len(gotLines), got, want) + return + } + + for i, w := range wantLines { + match, err := regexp.MatchString(w, gotLines[i]) + if err != nil { + t.Fatal(err) + } + if !match { + t.Errorf("test %d: line %d: fmt.Sprintf(%q, err):\n got: %q\nwant: %q", n+1, i+1, format, got, want) + } + } +} + +var stackLineR = regexp.MustCompile(`\.`) + +// parseBlocks parses input into a slice, where: +// - incase entry contains a newline, its a stacktrace +// - incase entry contains no newline, its a solo line. +// +// Detecting stack boundaries only works incase the WithStack-calls are +// to be found on the same line, thats why it is optionally here. 
+// +// Example use: +// +// for _, e := range blocks { +// if strings.ContainsAny(e, "\n") { +// // Match as stack +// } else { +// // Match as line +// } +// } +// +func parseBlocks(input string, detectStackboundaries bool) ([]string, error) { + var blocks []string + + stack := "" + wasStack := false + lines := map[string]bool{} // already found lines + + for _, l := range strings.Split(input, "\n") { + isStackLine := stackLineR.MatchString(l) + + switch { + case !isStackLine && wasStack: + blocks = append(blocks, stack, l) + stack = "" + lines = map[string]bool{} + case isStackLine: + if wasStack { + // Detecting two stacks after another, possible cause lines match in + // our tests due to WithStack(WithStack(io.EOF)) on same line. + if detectStackboundaries { + if lines[l] { + if len(stack) == 0 { + return nil, errors.New("len of block must not be zero here") + } + + blocks = append(blocks, stack) + stack = l + lines = map[string]bool{l: true} + continue + } + } + + stack = stack + "\n" + l + } else { + stack = l + } + lines[l] = true + case !isStackLine && !wasStack: + blocks = append(blocks, l) + default: + return nil, errors.New("must not happen") + } + + wasStack = isStackLine + } + + // Use up stack + if stack != "" { + blocks = append(blocks, stack) + } + return blocks, nil +} + +func testFormatCompleteCompare(t *testing.T, n int, arg interface{}, format string, want []string, detectStackBoundaries bool) { + gotStr := fmt.Sprintf(format, arg) + + got, err := parseBlocks(gotStr, detectStackBoundaries) + if err != nil { + t.Fatal(err) + } + + if len(got) != len(want) { + t.Fatalf("test %d: fmt.Sprintf(%s, err) -> wrong number of blocks: got(%d) want(%d)\n got: %s\nwant: %s\ngotStr: %q", + n+1, format, len(got), len(want), prettyBlocks(got), prettyBlocks(want), gotStr) + } + + for i := range got { + if strings.ContainsAny(want[i], "\n") { + // Match as stack + match, err := regexp.MatchString(want[i], got[i]) + if err != nil { + t.Fatal(err) + } + if !match 
{ + t.Fatalf("test %d: block %d: fmt.Sprintf(%q, err):\ngot:\n%q\nwant:\n%q\nall-got:\n%s\nall-want:\n%s\n", + n+1, i+1, format, got[i], want[i], prettyBlocks(got), prettyBlocks(want)) + } + } else { + // Match as message + if got[i] != want[i] { + t.Fatalf("test %d: fmt.Sprintf(%s, err) at block %d got != want:\n got: %q\nwant: %q", n+1, format, i+1, got[i], want[i]) + } + } + } +} + +type wrapper struct { + wrap func(err error) error + want []string +} + +func prettyBlocks(blocks []string, prefix ...string) string { + var out []string + + for _, b := range blocks { + out = append(out, fmt.Sprintf("%v", b)) + } + + return " " + strings.Join(out, "\n ") +} + +func testGenericRecursive(t *testing.T, beforeErr error, beforeWant []string, list []wrapper, maxDepth int) { + if len(beforeWant) == 0 { + panic("beforeWant must not be empty") + } + for _, w := range list { + if len(w.want) == 0 { + panic("want must not be empty") + } + + err := w.wrap(beforeErr) + + // Copy required cause append(beforeWant, ..) modified beforeWant subtly. + beforeCopy := make([]string, len(beforeWant)) + copy(beforeCopy, beforeWant) + + beforeWant := beforeCopy + last := len(beforeWant) - 1 + var want []string + + // Merge two stacks behind each other. + if strings.ContainsAny(beforeWant[last], "\n") && strings.ContainsAny(w.want[0], "\n") { + want = append(beforeWant[:last], append([]string{beforeWant[last] + "((?s).*)" + w.want[0]}, w.want[1:]...)...) + } else { + want = append(beforeWant, w.want...) 
+ } + + testFormatCompleteCompare(t, maxDepth, err, "%+v", want, false) + if maxDepth > 0 { + testGenericRecursive(t, err, want, list, maxDepth-1) + } + } +} diff --git a/src/vendor/github.com/pkg/errors/stack.go b/src/vendor/github.com/pkg/errors/stack.go new file mode 100644 index 00000000..6b1f2891 --- /dev/null +++ b/src/vendor/github.com/pkg/errors/stack.go @@ -0,0 +1,178 @@ +package errors + +import ( + "fmt" + "io" + "path" + "runtime" + "strings" +) + +// Frame represents a program counter inside a stack frame. +type Frame uintptr + +// pc returns the program counter for this frame; +// multiple frames may have the same PC value. +func (f Frame) pc() uintptr { return uintptr(f) - 1 } + +// file returns the full path to the file that contains the +// function for this Frame's pc. +func (f Frame) file() string { + fn := runtime.FuncForPC(f.pc()) + if fn == nil { + return "unknown" + } + file, _ := fn.FileLine(f.pc()) + return file +} + +// line returns the line number of source code of the +// function for this Frame's pc. +func (f Frame) line() int { + fn := runtime.FuncForPC(f.pc()) + if fn == nil { + return 0 + } + _, line := fn.FileLine(f.pc()) + return line +} + +// Format formats the frame according to the fmt.Formatter interface. 
+// +// %s source file +// %d source line +// %n function name +// %v equivalent to %s:%d +// +// Format accepts flags that alter the printing of some verbs, as follows: +// +// %+s path of source file relative to the compile time GOPATH +// %+v equivalent to %+s:%d +func (f Frame) Format(s fmt.State, verb rune) { + switch verb { + case 's': + switch { + case s.Flag('+'): + pc := f.pc() + fn := runtime.FuncForPC(pc) + if fn == nil { + io.WriteString(s, "unknown") + } else { + file, _ := fn.FileLine(pc) + fmt.Fprintf(s, "%s\n\t%s", fn.Name(), file) + } + default: + io.WriteString(s, path.Base(f.file())) + } + case 'd': + fmt.Fprintf(s, "%d", f.line()) + case 'n': + name := runtime.FuncForPC(f.pc()).Name() + io.WriteString(s, funcname(name)) + case 'v': + f.Format(s, 's') + io.WriteString(s, ":") + f.Format(s, 'd') + } +} + +// StackTrace is stack of Frames from innermost (newest) to outermost (oldest). +type StackTrace []Frame + +func (st StackTrace) Format(s fmt.State, verb rune) { + switch verb { + case 'v': + switch { + case s.Flag('+'): + for _, f := range st { + fmt.Fprintf(s, "\n%+v", f) + } + case s.Flag('#'): + fmt.Fprintf(s, "%#v", []Frame(st)) + default: + fmt.Fprintf(s, "%v", []Frame(st)) + } + case 's': + fmt.Fprintf(s, "%s", []Frame(st)) + } +} + +// stack represents a stack of program counters. +type stack []uintptr + +func (s *stack) Format(st fmt.State, verb rune) { + switch verb { + case 'v': + switch { + case st.Flag('+'): + for _, pc := range *s { + f := Frame(pc) + fmt.Fprintf(st, "\n%+v", f) + } + } + } +} + +func (s *stack) StackTrace() StackTrace { + f := make([]Frame, len(*s)) + for i := 0; i < len(f); i++ { + f[i] = Frame((*s)[i]) + } + return f +} + +func callers() *stack { + const depth = 32 + var pcs [depth]uintptr + n := runtime.Callers(3, pcs[:]) + var st stack = pcs[0:n] + return &st +} + +// funcname removes the path prefix component of a function's name reported by func.Name(). 
+func funcname(name string) string { + i := strings.LastIndex(name, "/") + name = name[i+1:] + i = strings.Index(name, ".") + return name[i+1:] +} + +func trimGOPATH(name, file string) string { + // Here we want to get the source file path relative to the compile time + // GOPATH. As of Go 1.6.x there is no direct way to know the compiled + // GOPATH at runtime, but we can infer the number of path segments in the + // GOPATH. We note that fn.Name() returns the function name qualified by + // the import path, which does not include the GOPATH. Thus we can trim + // segments from the beginning of the file path until the number of path + // separators remaining is one more than the number of path separators in + // the function name. For example, given: + // + // GOPATH /home/user + // file /home/user/src/pkg/sub/file.go + // fn.Name() pkg/sub.Type.Method + // + // We want to produce: + // + // pkg/sub/file.go + // + // From this we can easily see that fn.Name() has one less path separator + // than our desired output. We count separators from the end of the file + // path until it finds two more than in the function name and then move + // one character forward to preserve the initial path segment without a + // leading separator. 
+ const sep = "/" + goal := strings.Count(name, sep) + 2 + i := len(file) + for n := 0; n < goal; n++ { + i = strings.LastIndex(file[:i], sep) + if i == -1 { + // not enough separators found, set i so that the slice expression + // below leaves file unmodified + i = -len(sep) + break + } + } + // get back to 0 or trim the leading separator + file = file[i+len(sep):] + return file +} diff --git a/src/vendor/github.com/pkg/errors/stack_test.go b/src/vendor/github.com/pkg/errors/stack_test.go new file mode 100644 index 00000000..510c27a9 --- /dev/null +++ b/src/vendor/github.com/pkg/errors/stack_test.go @@ -0,0 +1,292 @@ +package errors + +import ( + "fmt" + "runtime" + "testing" +) + +var initpc, _, _, _ = runtime.Caller(0) + +func TestFrameLine(t *testing.T) { + var tests = []struct { + Frame + want int + }{{ + Frame(initpc), + 9, + }, { + func() Frame { + var pc, _, _, _ = runtime.Caller(0) + return Frame(pc) + }(), + 20, + }, { + func() Frame { + var pc, _, _, _ = runtime.Caller(1) + return Frame(pc) + }(), + 28, + }, { + Frame(0), // invalid PC + 0, + }} + + for _, tt := range tests { + got := tt.Frame.line() + want := tt.want + if want != got { + t.Errorf("Frame(%v): want: %v, got: %v", uintptr(tt.Frame), want, got) + } + } +} + +type X struct{} + +func (x X) val() Frame { + var pc, _, _, _ = runtime.Caller(0) + return Frame(pc) +} + +func (x *X) ptr() Frame { + var pc, _, _, _ = runtime.Caller(0) + return Frame(pc) +} + +func TestFrameFormat(t *testing.T) { + var tests = []struct { + Frame + format string + want string + }{{ + Frame(initpc), + "%s", + "stack_test.go", + }, { + Frame(initpc), + "%+s", + "github.com/pkg/errors.init\n" + + "\t.+/github.com/pkg/errors/stack_test.go", + }, { + Frame(0), + "%s", + "unknown", + }, { + Frame(0), + "%+s", + "unknown", + }, { + Frame(initpc), + "%d", + "9", + }, { + Frame(0), + "%d", + "0", + }, { + Frame(initpc), + "%n", + "init", + }, { + func() Frame { + var x X + return x.ptr() + }(), + "%n", + `\(\*X\).ptr`, + }, { 
+ func() Frame { + var x X + return x.val() + }(), + "%n", + "X.val", + }, { + Frame(0), + "%n", + "", + }, { + Frame(initpc), + "%v", + "stack_test.go:9", + }, { + Frame(initpc), + "%+v", + "github.com/pkg/errors.init\n" + + "\t.+/github.com/pkg/errors/stack_test.go:9", + }, { + Frame(0), + "%v", + "unknown:0", + }} + + for i, tt := range tests { + testFormatRegexp(t, i, tt.Frame, tt.format, tt.want) + } +} + +func TestFuncname(t *testing.T) { + tests := []struct { + name, want string + }{ + {"", ""}, + {"runtime.main", "main"}, + {"github.com/pkg/errors.funcname", "funcname"}, + {"funcname", "funcname"}, + {"io.copyBuffer", "copyBuffer"}, + {"main.(*R).Write", "(*R).Write"}, + } + + for _, tt := range tests { + got := funcname(tt.name) + want := tt.want + if got != want { + t.Errorf("funcname(%q): want: %q, got %q", tt.name, want, got) + } + } +} + +func TestTrimGOPATH(t *testing.T) { + var tests = []struct { + Frame + want string + }{{ + Frame(initpc), + "github.com/pkg/errors/stack_test.go", + }} + + for i, tt := range tests { + pc := tt.Frame.pc() + fn := runtime.FuncForPC(pc) + file, _ := fn.FileLine(pc) + got := trimGOPATH(fn.Name(), file) + testFormatRegexp(t, i, got, "%s", tt.want) + } +} + +func TestStackTrace(t *testing.T) { + tests := []struct { + err error + want []string + }{{ + New("ooh"), []string{ + "github.com/pkg/errors.TestStackTrace\n" + + "\t.+/github.com/pkg/errors/stack_test.go:172", + }, + }, { + Wrap(New("ooh"), "ahh"), []string{ + "github.com/pkg/errors.TestStackTrace\n" + + "\t.+/github.com/pkg/errors/stack_test.go:177", // this is the stack of Wrap, not New + }, + }, { + Cause(Wrap(New("ooh"), "ahh")), []string{ + "github.com/pkg/errors.TestStackTrace\n" + + "\t.+/github.com/pkg/errors/stack_test.go:182", // this is the stack of New + }, + }, { + func() error { return New("ooh") }(), []string{ + `github.com/pkg/errors.(func·009|TestStackTrace.func1)` + + "\n\t.+/github.com/pkg/errors/stack_test.go:187", // this is the stack of New + 
"github.com/pkg/errors.TestStackTrace\n" + + "\t.+/github.com/pkg/errors/stack_test.go:187", // this is the stack of New's caller + }, + }, { + Cause(func() error { + return func() error { + return Errorf("hello %s", fmt.Sprintf("world")) + }() + }()), []string{ + `github.com/pkg/errors.(func·010|TestStackTrace.func2.1)` + + "\n\t.+/github.com/pkg/errors/stack_test.go:196", // this is the stack of Errorf + `github.com/pkg/errors.(func·011|TestStackTrace.func2)` + + "\n\t.+/github.com/pkg/errors/stack_test.go:197", // this is the stack of Errorf's caller + "github.com/pkg/errors.TestStackTrace\n" + + "\t.+/github.com/pkg/errors/stack_test.go:198", // this is the stack of Errorf's caller's caller + }, + }} + for i, tt := range tests { + x, ok := tt.err.(interface { + StackTrace() StackTrace + }) + if !ok { + t.Errorf("expected %#v to implement StackTrace() StackTrace", tt.err) + continue + } + st := x.StackTrace() + for j, want := range tt.want { + testFormatRegexp(t, i, st[j], "%+v", want) + } + } +} + +func stackTrace() StackTrace { + const depth = 8 + var pcs [depth]uintptr + n := runtime.Callers(1, pcs[:]) + var st stack = pcs[0:n] + return st.StackTrace() +} + +func TestStackTraceFormat(t *testing.T) { + tests := []struct { + StackTrace + format string + want string + }{{ + nil, + "%s", + `\[\]`, + }, { + nil, + "%v", + `\[\]`, + }, { + nil, + "%+v", + "", + }, { + nil, + "%#v", + `\[\]errors.Frame\(nil\)`, + }, { + make(StackTrace, 0), + "%s", + `\[\]`, + }, { + make(StackTrace, 0), + "%v", + `\[\]`, + }, { + make(StackTrace, 0), + "%+v", + "", + }, { + make(StackTrace, 0), + "%#v", + `\[\]errors.Frame{}`, + }, { + stackTrace()[:2], + "%s", + `\[stack_test.go stack_test.go\]`, + }, { + stackTrace()[:2], + "%v", + `\[stack_test.go:225 stack_test.go:272\]`, + }, { + stackTrace()[:2], + "%+v", + "\n" + + "github.com/pkg/errors.stackTrace\n" + + "\t.+/github.com/pkg/errors/stack_test.go:225\n" + + "github.com/pkg/errors.TestStackTraceFormat\n" + + 
"\t.+/github.com/pkg/errors/stack_test.go:276", + }, { + stackTrace()[:2], + "%#v", + `\[\]errors.Frame{stack_test.go:225, stack_test.go:284}`, + }} + + for i, tt := range tests { + testFormatRegexp(t, i, tt.StackTrace, tt.format, tt.want) + } +} diff --git a/src/vendor/github.com/renstrom/go-jump-consistent-hash/.travis.yml b/src/vendor/github.com/renstrom/go-jump-consistent-hash/.travis.yml new file mode 100644 index 00000000..e61f42b8 --- /dev/null +++ b/src/vendor/github.com/renstrom/go-jump-consistent-hash/.travis.yml @@ -0,0 +1,11 @@ +language: go + +go: + - 1.0 + - 1.1 + - 1.2 + - 1.3 + - 1.4 + - 1.5 + +sudo: false diff --git a/src/vendor/github.com/renstrom/go-jump-consistent-hash/LICENSE b/src/vendor/github.com/renstrom/go-jump-consistent-hash/LICENSE new file mode 100644 index 00000000..9cc75337 --- /dev/null +++ b/src/vendor/github.com/renstrom/go-jump-consistent-hash/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 Peter Renström + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/src/vendor/github.com/renstrom/go-jump-consistent-hash/README.md b/src/vendor/github.com/renstrom/go-jump-consistent-hash/README.md new file mode 100644 index 00000000..fc31a2e8 --- /dev/null +++ b/src/vendor/github.com/renstrom/go-jump-consistent-hash/README.md @@ -0,0 +1,61 @@ +# Jump Consistent Hash + +[![Build Status](https://travis-ci.org/renstrom/go-jump-consistent-hash.svg?branch=master)](https://travis-ci.org/renstrom/go-jump-consistent-hash) +[![Godoc](https://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/renstrom/go-jump-consistent-hash) + +Go implementation of the jump consistent hash algorithm[1] by John Lamping and Eric Veach. + +[1] http://arxiv.org/pdf/1406.2294v1.pdf + +## Usage + +```go +import jump "github.com/renstrom/go-jump-consistent-hash" + +func main() { + h := jump.Hash(256, 1024) // h = 520 +} +``` + +Includes a helper function for using a `string` as key instead of an `uint64`. This requires a hasher that computes the string into a format accepted by `Hash()`. Such a hasher that uses [CRC-64 (ECMA)](https://en.wikipedia.org/wiki/Cyclic_redundancy_check) is also included for convenience. + +```go +h := jump.HashString("127.0.0.1", 8, jump.CRC64) // h = 7 +``` + +In reality though you probably want to use a `Hasher` so you won't have to repeat the bucket size and which key hasher used. It also uses more convenient types, like `int` instead of `int32`. + +```go +hasher := jump.New(8, jump.CRC64) +h := hasher.Hash("127.0.0.1") // h = 7 +``` + +If you want to use your own algorithm, you must implement the `KeyHasher` interface, which is a subset of the `hash.Hash64` interface available in the standard library. 
+ +Here's an example of a custom `KeyHasher` that uses Google's [FarmHash](https://github.com/google/farmhash) algorithm (the successor of CityHash) to compute the final key. + +```go +type FarmHash struct { + buf bytes.Buffer +} + +func (f *FarmHash) Write(p []byte) (n int, err error) { + return f.buf.Write(p) +} + +func (f *FarmHash) Reset() { + f.buf.Reset() +} + +func (f *FarmHash) Sum64() uint64 { + // https://github.com/dgryski/go-farm + return farm.Hash64(f.buf.Bytes()) +} + +hasher := jump.New(8, &FarmHash{}) +h := hasher.Hash("127.0.0.1") // h = 5 +``` + +## License + +MIT diff --git a/src/vendor/github.com/renstrom/go-jump-consistent-hash/crc32.go b/src/vendor/github.com/renstrom/go-jump-consistent-hash/crc32.go new file mode 100644 index 00000000..a2b7843d --- /dev/null +++ b/src/vendor/github.com/renstrom/go-jump-consistent-hash/crc32.go @@ -0,0 +1,38 @@ +package jump + +import "hash" + +type crc32Hasher struct { + crc32 hash.Hash32 +} + +func (h *crc32Hasher) Write(p []byte) (n int, err error) { + return h.crc32.Write(p) +} + +func (h *crc32Hasher) Sum(b []byte) []byte { + return h.crc32.Sum(b) +} + +func (h *crc32Hasher) Reset() { + h.crc32.Reset() +} + +func (h *crc32Hasher) Size() int { + return h.crc32.Size() +} + +func (h *crc32Hasher) BlockSize() int { + return h.crc32.BlockSize() +} + +func (h *crc32Hasher) Sum32() uint32 { + return h.crc32.Sum32() +} + +func (h *crc32Hasher) Sum64() uint64 { + return uint64(h.crc32.Sum32()) +} + +var _ hash.Hash32 = (*crc32Hasher)(nil) +var _ hash.Hash64 = (*crc32Hasher)(nil) diff --git a/src/vendor/github.com/renstrom/go-jump-consistent-hash/doc.go b/src/vendor/github.com/renstrom/go-jump-consistent-hash/doc.go new file mode 100644 index 00000000..c05a27ed --- /dev/null +++ b/src/vendor/github.com/renstrom/go-jump-consistent-hash/doc.go @@ -0,0 +1,133 @@ +/* +Example + + h := jump.Hash(256, 1024) // h = 520 + +Reference C++ implementation[1] + + int32_t JumpConsistentHash(uint64_t key, int32_t num_buckets) { + 
int64_t b = -1, j = 0; + while (j < num_buckets) { + b = j; + key = key * 2862933555777941757ULL + 1; + j = (b + 1) * (double(1LL << 31) / double((key >> 33) + 1)); + } + return b; + } + +Explanation of the algorithm + +Jump consistent hash works by computing when its output changes as the +number of buckets increases. Let ch(key, num_buckets) be the consistent hash +for the key when there are num_buckets buckets. Clearly, for any key, k, +ch(k, 1) is 0, since there is only the one bucket. In order for the +consistent hash function to balanced, ch(k, 2) will have to stay at 0 for +half the keys, k, while it will have to jump to 1 for the other half. In +general, ch(k, n+1) has to stay the same as ch(k, n) for n/(n+1) of the +keys, and jump to n for the other 1/(n+1) of the keys. + +Here are examples of the consistent hash values for three keys, k1, k2, and +k3, as num_buckets goes up: + + │ 1 │ 2 │ 3 │ 4 │ 5 │ 6 │ 7 │ 8 │ 9 │ 10 │ 11 │ 12 │ 13 │ 14 + ───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼────┼────┼────┼────┼──── + k1 │ 0 │ 0 │ 2 │ 2 │ 4 │ 4 │ 4 │ 4 │ 4 │ 4 │ 4 │ 4 │ 4 │ 4 + ───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼────┼────┼────┼────┼──── + k2 │ 0 │ 1 │ 1 │ 1 │ 1 │ 1 │ 1 │ 7 │ 7 │ 7 │ 7 │ 7 │ 7 │ 7 + ───┼───┼───┼───┼───┼───┼───┼───┼───┼───┼────┼────┼────┼────┼──── + k3 │ 0 │ 1 │ 1 │ 1 │ 1 │ 5 │ 5 │ 7 │ 7 │ 7 │ 10 │ 10 │ 10 │ 10 + +A linear time algorithm can be defined by using the formula for the +probability of ch(key, j) jumping when j increases. It essentially walks +across a row of this table. Given a key and number of buckets, the algorithm +considers each successive bucket, j, from 1 to num_buckets­1, and uses +ch(key, j) to compute ch(key, j+1). At each bucket, j, it decides whether to +keep ch(k, j+1) the same as ch(k, j), or to jump its value to j. In order to +jump for the right fraction of keys, it uses a pseudo­random number +generator with the key as its seed. 
To jump for 1/(j+1) of keys, it +generates a uniform random number between 0.0 and 1.0, and jumps if the +value is less than 1/(j+1). At the end of the loop, it has computed +ch(k, num_buckets), which is the desired answer. In code: + + int ch(int key, int num_buckets) { + random.seed(key); + int b = 0; // This will track ch(key,j+1). + for (int j = 1; j < num_buckets; j++) { + if (random.next() < 1.0 / (j + 1)) b = j; + } + return b; + } + +We can convert this to a logarithmic time algorithm by exploiting that +ch(key, j+1) is usually unchanged as j increases, only jumping occasionally. +The algorithm will only compute the destinations of jumps ­­ the j’s for +which ch(key, j+1) ≠ ch(key, j). Also notice that for these j’s, ch(key, +j+1) = j. To develop the algorithm, we will treat ch(key, j) as a random +variable, so that we can use the notation for random variables to analyze +the fractions of keys for which various propositions are true. That will +lead us to a closed form expression for a pseudo­random variable whose value +gives the destination of the next jump. + +Suppose that the algorithm is tracking the bucket numbers of the jumps for a +particular key, k. And suppose that b was the destination of the last jump, +that is, ch(k, b) ≠ ch(k, b+1), and ch(k, b+1) = b. Now, we want to find the +next jump, the smallest j such that ch(k, j+1) ≠ ch(k, b+1), or +equivalently, the largest j such that ch(k, j) = ch(k, b+1). We will make a +pseudo­random variable whose value is that j. To get a probabilistic +constraint on j, note that for any bucket number, i, we have j ≥ i if and +only if the consistent hash hasn’t changed by i, that is, if and only if +ch(k, i) = ch(k, b+1). Hence, the distribution of j must satisfy + + P(j ≥ i) = P( ch(k, i) = ch(k, b+1) ) + +Fortunately, it is easy to compute that probability. 
Notice that since P( +ch(k, 10) = ch(k, 11) ) is 10/11, and P( ch(k, 11) = ch(k, 12) ) is 11/12, +then P( ch(k, 10) = ch(k, 12) ) is 10/11 * 11/12 = 10/12. In general, if n ≥ +m, P( ch(k, n) = ch(k, m) ) = m / n. Thus for any i > b, + + P(j ≥ i) = P( ch(k, i) = ch(k, b+1) ) = (b+1) / i . + +Now, we generate a pseudo­random variable, r, (depending on k and j) that is +uniformly distributed between 0 and 1. Since we want P(j ≥ i) = (b+1) / i, +we set P(j ≥ i) iff r ≤ (b+1) / i. Solving the inequality for i yields P(j ≥ +i) iff i ≤ (b+1) / r. Since i is a lower bound on j, j will equal the +largest i for which P(j ≥ i), thus the largest i satisfying i ≤ (b+1) / r. +Thus, by the definition of the floor function, j = floor((b+1) / r). + +Using this formula, jump consistent hash finds ch(key, num_buckets) by +choosing successive jump destinations until it finds a position at or past +num_buckets. It then knows that the previous jump destination is the answer. + + int ch(int key, int num_buckets) { + random.seed(key); + int b = -1; // bucket number before the previous jump + int j = 0; // bucket number before the current jump + while (j < num_buckets) { + b = j; + r = random.next(); + j = floor((b + 1) / r); + } + return = b; + } + +To turn this into the actual code of figure 1, we need to implement random. +We want it to be fast, and yet to also to have well distributed successive +values. We use a 64­bit linear congruential generator; the particular +multiplier we use produces random numbers that are especially well +distributed in higher dimensions (i.e., when successive random values are +used to form tuples). We use the key as the seed. (For keys that don’t fit +into 64 bits, a 64 bit hash of the key should be used.) The congruential +generator updates the seed on each iteration, and the code derives a double +from the current seed. Tests show that this generator has good speed and +distribution. 
+
+It is worth noting that unlike the algorithm of Karger et al., jump
+consistent hash does not require the key to be hashed if it is already an
+integer. This is because jump consistent hash has an embedded pseudorandom
+number generator that essentially rehashes the key on every iteration. The
+hash is not especially good (i.e., linear congruential), but since it is
+applied repeatedly, additional hashing of the input key is not necessary.
+
+[1] http://arxiv.org/pdf/1406.2294v1.pdf
+*/
+package jump
diff --git a/src/vendor/github.com/renstrom/go-jump-consistent-hash/jump.go b/src/vendor/github.com/renstrom/go-jump-consistent-hash/jump.go
new file mode 100644
index 00000000..3ff3b0cb
--- /dev/null
+++ b/src/vendor/github.com/renstrom/go-jump-consistent-hash/jump.go
@@ -0,0 +1,88 @@
+package jump
+
+import (
+	"hash"
+	"hash/crc32"
+	"hash/crc64"
+	"hash/fnv"
+	"io"
+)
+
+// Hash takes a 64 bit key and the number of buckets. It outputs a bucket
+// number in the range [0, buckets).
+// If the number of buckets is less than or equal to 0 then 1 is used.
+func Hash(key uint64, buckets int32) int32 {
+	var b, j int64
+
+	if buckets <= 0 {
+		buckets = 1
+	}
+
+	for j < int64(buckets) {
+		b = j
+		key = key*2862933555777941757 + 1
+		j = int64(float64(b+1) * (float64(int64(1)<<31) / float64((key>>33)+1)))
+	}
+
+	return int32(b)
+}
+
+// HashString takes string as key instead of an int and uses a KeyHasher to
+// generate a key compatible with Hash().
+func HashString(key string, buckets int32, h KeyHasher) int32 {
+	h.Reset()
+	_, err := io.WriteString(h, key)
+	if err != nil {
+		panic(err)
+	}
+	return Hash(h.Sum64(), buckets)
+}
+
+// KeyHasher is a subset of hash.Hash64 in the standard library.
+type KeyHasher interface {
+	// Write (via the embedded io.Writer interface) adds more data to the
+	// running hash.
+	// It never returns an error.
+	io.Writer
+
+	// Reset resets the KeyHasher to its initial state.
+	Reset()
+
+	// Return the result of the added bytes (via io.Writer).
+	Sum64() uint64
+}
+
+// Hasher represents a jump consistent hasher using a string as key.
+type Hasher struct {
+	n int32
+	h KeyHasher
+}
+
+// New returns a new instance of Hasher.
+func New(n int, h KeyHasher) *Hasher {
+	return &Hasher{int32(n), h}
+}
+
+// N returns the number of buckets the hasher can assign to.
+func (h *Hasher) N() int {
+	return int(h.n)
+}
+
+// Hash returns the integer hash for the given key.
+func (h *Hasher) Hash(key string) int {
+	return int(HashString(key, h.n, h.h))
+}
+
+// KeyHashers available in the standard library for use with HashString() and Hasher.
+var (
+	// CRC32 uses the 32-bit Cyclic Redundancy Check (CRC-32) with the IEEE
+	// polynomial.
+	CRC32 hash.Hash64 = &crc32Hasher{crc32.NewIEEE()}
+	// CRC64 uses the 64-bit Cyclic Redundancy Check (CRC-64) with the ECMA
+	// polynomial.
+	CRC64 hash.Hash64 = crc64.New(crc64.MakeTable(crc64.ECMA))
+	// FNV1 uses the non-cryptographic hash function FNV-1.
+	FNV1 hash.Hash64 = fnv.New64()
+	// FNV1a uses the non-cryptographic hash function FNV-1a.
+ FNV1a hash.Hash64 = fnv.New64a() +) diff --git a/src/vendor/github.com/renstrom/go-jump-consistent-hash/jump_test.go b/src/vendor/github.com/renstrom/go-jump-consistent-hash/jump_test.go new file mode 100644 index 00000000..b92a90ba --- /dev/null +++ b/src/vendor/github.com/renstrom/go-jump-consistent-hash/jump_test.go @@ -0,0 +1,111 @@ +package jump + +import ( + "fmt" + "hash" + "strconv" + "testing" +) + +var jumpTestVectors = []struct { + key uint64 + buckets int32 + expected int32 +}{ + {1, 1, 0}, + {42, 57, 43}, + {0xDEAD10CC, 1, 0}, + {0xDEAD10CC, 666, 361}, + {256, 1024, 520}, + // Test negative values + {0, -10, 0}, + {0xDEAD10CC, -666, 0}, +} + +func TestJumpHash(t *testing.T) { + for _, v := range jumpTestVectors { + h := Hash(v.key, v.buckets) + if h != v.expected { + t.Errorf("expected bucket for key=%d to be %d, got %d", + v.key, v.expected, h) + } + } +} + +var jumpStringTestVectors = []struct { + key string + buckets int32 + hasher hash.Hash64 + expected int32 +}{ + {"localhost", 10, CRC32, 9}, + {"ёлка", 10, CRC64, 6}, + {"ветер", 10, FNV1, 3}, + {"中国", 10, FNV1a, 5}, + {"日本", 10, CRC64, 6}, +} + +func TestJumpHashString(t *testing.T) { + for _, v := range jumpStringTestVectors { + h := HashString(v.key, v.buckets, v.hasher) + if h != v.expected { + t.Errorf("expected bucket for key=%s to be %d, got %d", + strconv.Quote(v.key), v.expected, h) + } + } +} + +func TestHasher(t *testing.T) { + for _, v := range jumpStringTestVectors { + hasher := New(int(v.buckets), v.hasher) + h := hasher.Hash(v.key) + if int32(h) != v.expected { + t.Errorf("expected bucket for key=%s to be %d, got %d", + strconv.Quote(v.key), v.expected, h) + } + } +} + +func ExampleHash() { + fmt.Print(Hash(256, 1024)) + // Output: 520 +} + +func ExampleHashString() { + fmt.Print(HashString("127.0.0.1", 8, CRC64)) + // Output: 7 +} + +func BenchmarkHash(b *testing.B) { + for i := 0; i < b.N; i++ { + Hash(uint64(i), int32(i)) + } +} + +func BenchmarkHashStringCRC32(b *testing.B) { 
+ s := "Lorem ipsum dolor sit amet, consectetuer adipiscing elit, sed diam nonummy nibh euismod tincidunt ut laoreet dolore magna aliquam erat volutpat." + for i := 0; i < b.N; i++ { + HashString(s, int32(i), CRC32) + } +} + +func BenchmarkHashStringCRC64(b *testing.B) { + s := "Lorem ipsum dolor sit amet, consectetuer adipiscing elit, sed diam nonummy nibh euismod tincidunt ut laoreet dolore magna aliquam erat volutpat." + for i := 0; i < b.N; i++ { + HashString(s, int32(i), CRC64) + } +} + +func BenchmarkHashStringFNV1(b *testing.B) { + s := "Lorem ipsum dolor sit amet, consectetuer adipiscing elit, sed diam nonummy nibh euismod tincidunt ut laoreet dolore magna aliquam erat volutpat." + for i := 0; i < b.N; i++ { + HashString(s, int32(i), FNV1) + } +} + +func BenchmarkHashStringFNV1a(b *testing.B) { + s := "Lorem ipsum dolor sit amet, consectetuer adipiscing elit, sed diam nonummy nibh euismod tincidunt ut laoreet dolore magna aliquam erat volutpat." + for i := 0; i < b.N; i++ { + HashString(s, int32(i), FNV1a) + } +} diff --git a/src/vendor/github.com/spf13/cobra/.gitignore b/src/vendor/github.com/spf13/cobra/.gitignore new file mode 100644 index 00000000..1b8c7c26 --- /dev/null +++ b/src/vendor/github.com/spf13/cobra/.gitignore @@ -0,0 +1,36 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +# Vim files https://github.com/github/gitignore/blob/master/Global/Vim.gitignore +# swap +[._]*.s[a-w][a-z] +[._]s[a-w][a-z] +# session +Session.vim +# temporary +.netrwhist +*~ +# auto-generated tag files +tags + +*.exe + +cobra.test diff --git a/src/vendor/github.com/spf13/cobra/.mailmap b/src/vendor/github.com/spf13/cobra/.mailmap new file mode 100644 index 00000000..94ec5306 --- /dev/null +++ b/src/vendor/github.com/spf13/cobra/.mailmap @@ -0,0 +1,3 @@ 
+Steve Francia +Bjørn Erik Pedersen +Fabiano Franz diff --git a/src/vendor/github.com/spf13/cobra/.travis.yml b/src/vendor/github.com/spf13/cobra/.travis.yml new file mode 100644 index 00000000..bd72adf6 --- /dev/null +++ b/src/vendor/github.com/spf13/cobra/.travis.yml @@ -0,0 +1,24 @@ +language: go + +matrix: + include: + - go: 1.4.3 + env: NOVET=true # No bundled vet. + - go: 1.5.4 + - go: 1.6.3 + - go: 1.7 + - go: tip + allow_failures: + - go: tip + +before_install: + - mkdir -p bin + - curl -Lso bin/shellcheck https://github.com/caarlos0/shellcheck-docker/releases/download/v0.4.3/shellcheck + - chmod +x bin/shellcheck +script: + - PATH=$PATH:$PWD/bin go test -v ./... + - go build + - diff -u <(echo -n) <(gofmt -d -s .) + - if [ -z $NOVET ]; then + diff -u <(echo -n) <(go tool vet . 2>&1 | grep -vE 'ExampleCommand|bash_completions.*Fprint'); + fi diff --git a/src/vendor/github.com/spf13/cobra/LICENSE.txt b/src/vendor/github.com/spf13/cobra/LICENSE.txt new file mode 100644 index 00000000..298f0e26 --- /dev/null +++ b/src/vendor/github.com/spf13/cobra/LICENSE.txt @@ -0,0 +1,174 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. diff --git a/src/vendor/github.com/spf13/cobra/README.md b/src/vendor/github.com/spf13/cobra/README.md new file mode 100644 index 00000000..5d2504b1 --- /dev/null +++ b/src/vendor/github.com/spf13/cobra/README.md @@ -0,0 +1,898 @@ +![cobra logo](https://cloud.githubusercontent.com/assets/173412/10886352/ad566232-814f-11e5-9cd0-aa101788c117.png) + +Cobra is both a library for creating powerful modern CLI applications as well as a program to generate applications and command files. 
+ +Many of the most widely used Go projects are built using Cobra including: + +* [Kubernetes](http://kubernetes.io/) +* [Hugo](http://gohugo.io) +* [rkt](https://github.com/coreos/rkt) +* [etcd](https://github.com/coreos/etcd) +* [Docker (distribution)](https://github.com/docker/distribution) +* [OpenShift](https://www.openshift.com/) +* [Delve](https://github.com/derekparker/delve) +* [GopherJS](http://www.gopherjs.org/) +* [CockroachDB](http://www.cockroachlabs.com/) +* [Bleve](http://www.blevesearch.com/) +* [ProjectAtomic (enterprise)](http://www.projectatomic.io/) +* [Parse (CLI)](https://parse.com/) +* [GiantSwarm's swarm](https://github.com/giantswarm/cli) +* [Nanobox](https://github.com/nanobox-io/nanobox)/[Nanopack](https://github.com/nanopack) + + +[![Build Status](https://travis-ci.org/spf13/cobra.svg "Travis CI status")](https://travis-ci.org/spf13/cobra) +[![CircleCI status](https://circleci.com/gh/spf13/cobra.png?circle-token=:circle-token "CircleCI status")](https://circleci.com/gh/spf13/cobra) +[![GoDoc](https://godoc.org/github.com/spf13/cobra?status.svg)](https://godoc.org/github.com/spf13/cobra) + +![cobra](https://cloud.githubusercontent.com/assets/173412/10911369/84832a8e-8212-11e5-9f82-cc96660a4794.gif) + +# Overview + +Cobra is a library providing a simple interface to create powerful modern CLI +interfaces similar to git & go tools. + +Cobra is also an application that will generate your application scaffolding to rapidly +develop a Cobra-based application. + +Cobra provides: +* Easy subcommand-based CLIs: `app server`, `app fetch`, etc. +* Fully POSIX-compliant flags (including short & long versions) +* Nested subcommands +* Global, local and cascading flags +* Easy generation of applications & commands with `cobra create appname` & `cobra add cmdname` +* Intelligent suggestions (`app srver`... did you mean `app server`?) 
+* Automatic help generation for commands and flags +* Automatic detailed help for `app help [command]` +* Automatic help flag recognition of `-h`, `--help`, etc. +* Automatically generated bash autocomplete for your application +* Automatically generated man pages for your application +* Command aliases so you can change things without breaking them +* The flexibilty to define your own help, usage, etc. +* Optional tight integration with [viper](http://github.com/spf13/viper) for 12-factor apps + +Cobra has an exceptionally clean interface and simple design without needless +constructors or initialization methods. + +Applications built with Cobra commands are designed to be as user-friendly as +possible. Flags can be placed before or after the command (as long as a +confusing space isn’t provided). Both short and long flags can be used. A +command need not even be fully typed. Help is automatically generated and +available for the application or for a specific command using either the help +command or the `--help` flag. + +# Concepts + +Cobra is built on a structure of commands, arguments & flags. + +**Commands** represent actions, **Args** are things and **Flags** are modifiers for those actions. + +The best applications will read like sentences when used. Users will know how +to use the application because they will natively understand how to use it. + +The pattern to follow is +`APPNAME VERB NOUN --ADJECTIVE.` + or +`APPNAME COMMAND ARG --FLAG` + +A few good real world examples may better illustrate this point. + +In the following example, 'server' is a command, and 'port' is a flag: + + > hugo server --port=1313 + +In this command we are telling Git to clone the url bare. + + > git clone URL --bare + +## Commands + +Command is the central point of the application. Each interaction that +the application supports will be contained in a Command. A command can +have children commands and optionally run an action. + +In the example above, 'server' is the command. 
+ +A Command has the following structure: + +```go +type Command struct { + Use string // The one-line usage message. + Short string // The short description shown in the 'help' output. + Long string // The long message shown in the 'help ' output. + Run func(cmd *Command, args []string) // Run runs the command. +} +``` + +## Flags + +A Flag is a way to modify the behavior of a command. Cobra supports +fully POSIX-compliant flags as well as the Go [flag package](https://golang.org/pkg/flag/). +A Cobra command can define flags that persist through to children commands +and flags that are only available to that command. + +In the example above, 'port' is the flag. + +Flag functionality is provided by the [pflag +library](https://github.com/ogier/pflag), a fork of the flag standard library +which maintains the same interface while adding POSIX compliance. + +## Usage + +Cobra works by creating a set of commands and then organizing them into a tree. +The tree defines the structure of the application. + +Once each command is defined with its corresponding flags, then the +tree is assigned to the commander which is finally executed. + +# Installing +Using Cobra is easy. First, use `go get` to install the latest version +of the library. This command will install the `cobra` generator executible +along with the library: + + > go get -v github.com/spf13/cobra/cobra + +Next, include Cobra in your application: + +```go +import "github.com/spf13/cobra" +``` + +# Getting Started + +While you are welcome to provide your own organization, typically a Cobra based +application will follow the following organizational structure. + +``` + ▾ appName/ + ▾ cmd/ + add.go + your.go + commands.go + here.go + main.go +``` + +In a Cobra app, typically the main.go file is very bare. It serves, one purpose, to initialize Cobra. 
+ +```go +package main + +import "{pathToYourApp}/cmd" + +func main() { + if err := cmd.RootCmd.Execute(); err != nil { + fmt.Println(err) + os.Exit(-1) + } +} +``` + +## Using the Cobra Generator + +Cobra provides its own program that will create your application and add any +commands you want. It's the easiest way to incorporate Cobra into your application. + +In order to use the cobra command, compile it using the following command: + + > go install github.com/spf13/cobra/cobra + +This will create the cobra executable under your go path bin directory! + +### cobra init + +The `cobra init [yourApp]` command will create your initial application code +for you. It is a very powerful application that will populate your program with +the right structure so you can immediately enjoy all the benefits of Cobra. It +will also automatically apply the license you specify to your application. + +Cobra init is pretty smart. You can provide it a full path, or simply a path +similar to what is expected in the import. + +``` +cobra init github.com/spf13/newAppName +``` + +### cobra add + +Once an application is initialized Cobra can create additional commands for you. +Let's say you created an app and you wanted the following commands for it: + +* app serve +* app config +* app config create + +In your project directory (where your main.go file is) you would run the following: + +``` +cobra add serve +cobra add config +cobra add create -p 'configCmd' +``` + +Once you have run these three commands you would have an app structure that would look like: + +``` + ▾ app/ + ▾ cmd/ + serve.go + config.go + create.go + main.go +``` + +at this point you can run `go run main.go` and it would run your app. `go run +main.go serve`, `go run main.go config`, `go run main.go config create` along +with `go run main.go help serve`, etc would all work. + +Obviously you haven't added your own code to these yet, the commands are ready +for you to give them their tasks. Have fun. 
+ +### Configuring the cobra generator + +The cobra generator will be easier to use if you provide a simple configuration +file which will help you eliminate providing a bunch of repeated information in +flags over and over. + +An example ~/.cobra.yaml file: + +```yaml +author: Steve Francia +license: MIT +``` + +You can specify no license by setting `license` to `none` or you can specify +a custom license: + +```yaml +license: + header: This file is part of {{ .appName }}. + text: | + {{ .copyright }} + + This is my license. There are many like it, but this one is mine. + My license is my best friend. It is my life. I must master it as I must + master my life. +``` + +## Manually implementing Cobra + +To manually implement cobra you need to create a bare main.go file and a RootCmd file. +You will optionally provide additional commands as you see fit. + +### Create the root command + +The root command represents your binary itself. + + +#### Manually create rootCmd + +Cobra doesn't require any special constructors. Simply create your commands. + +Ideally you place this in app/cmd/root.go: + +```go +var RootCmd = &cobra.Command{ + Use: "hugo", + Short: "Hugo is a very fast static site generator", + Long: `A Fast and Flexible Static Site Generator built with + love by spf13 and friends in Go. + Complete documentation is available at http://hugo.spf13.com`, + Run: func(cmd *cobra.Command, args []string) { + // Do Stuff Here + }, +} +``` + +You will additionally define flags and handle configuration in your init() function. + +for example cmd/root.go: + +```go +func init() { + cobra.OnInitialize(initConfig) + RootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.cobra.yaml)") + RootCmd.PersistentFlags().StringVarP(&projectBase, "projectbase", "b", "", "base project directory eg. 
github.com/spf13/") + RootCmd.PersistentFlags().StringP("author", "a", "YOUR NAME", "Author name for copyright attribution") + RootCmd.PersistentFlags().StringVarP(&userLicense, "license", "l", "", "Name of license for the project (can provide `licensetext` in config)") + RootCmd.PersistentFlags().Bool("viper", true, "Use Viper for configuration") + viper.BindPFlag("author", RootCmd.PersistentFlags().Lookup("author")) + viper.BindPFlag("projectbase", RootCmd.PersistentFlags().Lookup("projectbase")) + viper.BindPFlag("useViper", RootCmd.PersistentFlags().Lookup("viper")) + viper.SetDefault("author", "NAME HERE ") + viper.SetDefault("license", "apache") +} +``` + +### Create your main.go + +With the root command you need to have your main function execute it. +Execute should be run on the root for clarity, though it can be called on any command. + +In a Cobra app, typically the main.go file is very bare. It serves, one purpose, to initialize Cobra. + +```go +package main + +import "{pathToYourApp}/cmd" + +func main() { + if err := cmd.RootCmd.Execute(); err != nil { + fmt.Println(err) + os.Exit(-1) + } +} +``` + + +### Create additional commands + +Additional commands can be defined and typically are each given their own file +inside of the cmd/ directory. + +If you wanted to create a version command you would create cmd/version.go and +populate it with the following: + +```go +package cmd + +import ( + "github.com/spf13/cobra" +) + +func init() { + RootCmd.AddCommand(versionCmd) +} + +var versionCmd = &cobra.Command{ + Use: "version", + Short: "Print the version number of Hugo", + Long: `All software has versions. This is Hugo's`, + Run: func(cmd *cobra.Command, args []string) { + fmt.Println("Hugo Static Site Generator v0.9 -- HEAD") + }, +} +``` + +### Attach command to its parent + + +If you notice in the above example we attach the command to its parent. In +this case the parent is the rootCmd. 
In this example we are attaching it to the +root, but commands can be attached at any level. + +```go +RootCmd.AddCommand(versionCmd) +``` + +### Remove a command from its parent + +Removing a command is not a common action in simple programs, but it allows 3rd +parties to customize an existing command tree. + +In this example, we remove the existing `VersionCmd` command of an existing +root command, and we replace it with our own version: + +```go +mainlib.RootCmd.RemoveCommand(mainlib.VersionCmd) +mainlib.RootCmd.AddCommand(versionCmd) +``` + +## Working with Flags + +Flags provide modifiers to control how the action command operates. + +### Assign flags to a command + +Since the flags are defined and used in different locations, we need to +define a variable outside with the correct scope to assign the flag to +work with. + +```go +var Verbose bool +var Source string +``` + +There are two different approaches to assign a flag. + +### Persistent Flags + +A flag can be 'persistent' meaning that this flag will be available to the +command it's assigned to as well as every command under that command. For +global flags, assign a flag as a persistent flag on the root. + +```go +RootCmd.PersistentFlags().BoolVarP(&Verbose, "verbose", "v", false, "verbose output") +``` + +### Local Flags + +A flag can also be assigned locally which will only apply to that specific command. + +```go +RootCmd.Flags().StringVarP(&Source, "source", "s", "", "Source directory to read from") +``` + + +## Example + +In the example below, we have defined three commands. Two are at the top level +and one (cmdTimes) is a child of one of the top commands. In this case the root +is not executable meaning that a subcommand is required. This is accomplished +by not providing a 'Run' for the 'rootCmd'. + +We have only defined one flag for a single command. 
+ +More documentation about flags is available at https://github.com/spf13/pflag + +```go +package main + +import ( + "fmt" + "strings" + + "github.com/spf13/cobra" +) + +func main() { + + var echoTimes int + + var cmdPrint = &cobra.Command{ + Use: "print [string to print]", + Short: "Print anything to the screen", + Long: `print is for printing anything back to the screen. + For many years people have printed back to the screen. + `, + Run: func(cmd *cobra.Command, args []string) { + fmt.Println("Print: " + strings.Join(args, " ")) + }, + } + + var cmdEcho = &cobra.Command{ + Use: "echo [string to echo]", + Short: "Echo anything to the screen", + Long: `echo is for echoing anything back. + Echo works a lot like print, except it has a child command. + `, + Run: func(cmd *cobra.Command, args []string) { + fmt.Println("Print: " + strings.Join(args, " ")) + }, + } + + var cmdTimes = &cobra.Command{ + Use: "times [# times] [string to echo]", + Short: "Echo anything to the screen more times", + Long: `echo things multiple times back to the user by providing + a count and a string.`, + Run: func(cmd *cobra.Command, args []string) { + for i := 0; i < echoTimes; i++ { + fmt.Println("Echo: " + strings.Join(args, " ")) + } + }, + } + + cmdTimes.Flags().IntVarP(&echoTimes, "times", "t", 1, "times to echo the input") + + var rootCmd = &cobra.Command{Use: "app"} + rootCmd.AddCommand(cmdPrint, cmdEcho) + cmdEcho.AddCommand(cmdTimes) + rootCmd.Execute() +} +``` + +For a more complete example of a larger application, please checkout [Hugo](http://gohugo.io/). + +## The Help Command + +Cobra automatically adds a help command to your application when you have subcommands. +This will be called when a user runs 'app help'. Additionally, help will also +support all other commands as input. Say, for instance, you have a command called +'create' without any additional configuration; Cobra will work when 'app help +create' is called. 
Every command will automatically have the '--help' flag added. + +### Example + +The following output is automatically generated by Cobra. Nothing beyond the +command and flag definitions are needed. + + > hugo help + + hugo is the main command, used to build your Hugo site. + + Hugo is a Fast and Flexible Static Site Generator + built with love by spf13 and friends in Go. + + Complete documentation is available at http://gohugo.io/. + + Usage: + hugo [flags] + hugo [command] + + Available Commands: + server Hugo runs its own webserver to render the files + version Print the version number of Hugo + config Print the site configuration + check Check content in the source directory + benchmark Benchmark hugo by building a site a number of times. + convert Convert your content to different formats + new Create new content for your site + list Listing out various types of content + undraft Undraft changes the content's draft status from 'True' to 'False' + genautocomplete Generate shell autocompletion script for Hugo + gendoc Generate Markdown documentation for the Hugo CLI. + genman Generate man page for Hugo + import Import your site from others. + + Flags: + -b, --baseURL="": hostname (and path) to the root, e.g. http://spf13.com/ + -D, --buildDrafts[=false]: include content marked as draft + -F, --buildFuture[=false]: include content with publishdate in the future + --cacheDir="": filesystem path to cache directory. 
Defaults: $TMPDIR/hugo_cache/ + --canonifyURLs[=false]: if true, all relative URLs will be canonicalized using baseURL + --config="": config file (default is path/config.yaml|json|toml) + -d, --destination="": filesystem path to write files to + --disableRSS[=false]: Do not build RSS files + --disableSitemap[=false]: Do not build Sitemap file + --editor="": edit new content with this editor, if provided + --ignoreCache[=false]: Ignores the cache directory for reading but still writes to it + --log[=false]: Enable Logging + --logFile="": Log File path (if set, logging enabled automatically) + --noTimes[=false]: Don't sync modification time of files + --pluralizeListTitles[=true]: Pluralize titles in lists using inflect + --preserveTaxonomyNames[=false]: Preserve taxonomy names as written ("Gérard Depardieu" vs "gerard-depardieu") + -s, --source="": filesystem path to read files relative from + --stepAnalysis[=false]: display memory and timing of different steps of the program + -t, --theme="": theme to use (located in /themes/THEMENAME/) + --uglyURLs[=false]: if true, use /filename.html instead of /filename/ + -v, --verbose[=false]: verbose output + --verboseLog[=false]: verbose logging + -w, --watch[=false]: watch filesystem for changes and recreate as needed + + Use "hugo [command] --help" for more information about a command. + + +Help is just a command like any other. There is no special logic or behavior +around it. In fact, you can provide your own if you want. + +### Defining your own help + +You can provide your own Help command or your own template for the default command to use. + +The default help command is + +```go +func (c *Command) initHelp() { + if c.helpCommand == nil { + c.helpCommand = &Command{ + Use: "help [command]", + Short: "Help about any command", + Long: `Help provides help for any command in the application. 
+ Simply type ` + c.Name() + ` help [path to command] for full details.`, + Run: c.HelpFunc(), + } + } + c.AddCommand(c.helpCommand) +} +``` + +You can provide your own command, function or template through the following methods: + +```go +command.SetHelpCommand(cmd *Command) + +command.SetHelpFunc(f func(*Command, []string)) + +command.SetHelpTemplate(s string) +``` + +The latter two will also apply to any children commands. + +## Usage + +When the user provides an invalid flag or invalid command, Cobra responds by +showing the user the 'usage'. + +### Example +You may recognize this from the help above. That's because the default help +embeds the usage as part of its output. + + Usage: + hugo [flags] + hugo [command] + + Available Commands: + server Hugo runs its own webserver to render the files + version Print the version number of Hugo + config Print the site configuration + check Check content in the source directory + benchmark Benchmark hugo by building a site a number of times. + convert Convert your content to different formats + new Create new content for your site + list Listing out various types of content + undraft Undraft changes the content's draft status from 'True' to 'False' + genautocomplete Generate shell autocompletion script for Hugo + gendoc Generate Markdown documentation for the Hugo CLI. + genman Generate man page for Hugo + import Import your site from others. + + Flags: + -b, --baseURL="": hostname (and path) to the root, e.g. http://spf13.com/ + -D, --buildDrafts[=false]: include content marked as draft + -F, --buildFuture[=false]: include content with publishdate in the future + --cacheDir="": filesystem path to cache directory. 
Defaults: $TMPDIR/hugo_cache/ + --canonifyURLs[=false]: if true, all relative URLs will be canonicalized using baseURL + --config="": config file (default is path/config.yaml|json|toml) + -d, --destination="": filesystem path to write files to + --disableRSS[=false]: Do not build RSS files + --disableSitemap[=false]: Do not build Sitemap file + --editor="": edit new content with this editor, if provided + --ignoreCache[=false]: Ignores the cache directory for reading but still writes to it + --log[=false]: Enable Logging + --logFile="": Log File path (if set, logging enabled automatically) + --noTimes[=false]: Don't sync modification time of files + --pluralizeListTitles[=true]: Pluralize titles in lists using inflect + --preserveTaxonomyNames[=false]: Preserve taxonomy names as written ("Gérard Depardieu" vs "gerard-depardieu") + -s, --source="": filesystem path to read files relative from + --stepAnalysis[=false]: display memory and timing of different steps of the program + -t, --theme="": theme to use (located in /themes/THEMENAME/) + --uglyURLs[=false]: if true, use /filename.html instead of /filename/ + -v, --verbose[=false]: verbose output + --verboseLog[=false]: verbose logging + -w, --watch[=false]: watch filesystem for changes and recreate as needed + +### Defining your own usage +You can provide your own usage function or template for Cobra to use. + +The default usage function is: + +```go +return func(c *Command) error { + err := tmpl(c.Out(), c.UsageTemplate(), c) + return err +} +``` + +Like help, the function and template are overridable through public methods: + +```go +command.SetUsageFunc(f func(*Command) error) + +command.SetUsageTemplate(s string) +``` + +## PreRun or PostRun Hooks + +It is possible to run functions before or after the main `Run` function of your command. The `PersistentPreRun` and `PreRun` functions will be executed before `Run`. `PersistentPostRun` and `PostRun` will be executed after `Run`. 
The `Persistent*Run` functions will be inherited by children if they do not declare their own. These functions are run in the following order: + +- `PersistentPreRun` +- `PreRun` +- `Run` +- `PostRun` +- `PersistentPostRun` + +An example of two commands which use all of these features is below. When the subcommand is executed, it will run the root command's `PersistentPreRun` but not the root command's `PersistentPostRun`: + +```go +package main + +import ( + "fmt" + + "github.com/spf13/cobra" +) + +func main() { + + var rootCmd = &cobra.Command{ + Use: "root [sub]", + Short: "My root command", + PersistentPreRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside rootCmd PersistentPreRun with args: %v\n", args) + }, + PreRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside rootCmd PreRun with args: %v\n", args) + }, + Run: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside rootCmd Run with args: %v\n", args) + }, + PostRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside rootCmd PostRun with args: %v\n", args) + }, + PersistentPostRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside rootCmd PersistentPostRun with args: %v\n", args) + }, + } + + var subCmd = &cobra.Command{ + Use: "sub [no options!]", + Short: "My subcommand", + PreRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside subCmd PreRun with args: %v\n", args) + }, + Run: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside subCmd Run with args: %v\n", args) + }, + PostRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside subCmd PostRun with args: %v\n", args) + }, + PersistentPostRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside subCmd PersistentPostRun with args: %v\n", args) + }, + } + + rootCmd.AddCommand(subCmd) + + rootCmd.SetArgs([]string{""}) + _ = rootCmd.Execute() + fmt.Print("\n") + rootCmd.SetArgs([]string{"sub", "arg1", "arg2"}) + _ = rootCmd.Execute() +} +``` + + +## 
Alternative Error Handling
+
+Cobra also has functions where the return signature is an error. This allows for errors to bubble up to the top,
+providing a way to handle the errors in one location. The current list of functions that return an error is:
+
+* PersistentPreRunE
+* PreRunE
+* RunE
+* PostRunE
+* PersistentPostRunE
+
+If you would like to silence the default `error` and `usage` output in favor of your own, you can set `SilenceUsage`
+and `SilenceErrors` to `true` on the command. A child command respects these flags if they are set on the parent
+command.
+
+**Example Usage using RunE:**
+
+```go
+package main
+
+import (
+	"errors"
+	"log"
+
+	"github.com/spf13/cobra"
+)
+
+func main() {
+	var rootCmd = &cobra.Command{
+		Use:   "hugo",
+		Short: "Hugo is a very fast static site generator",
+		Long: `A Fast and Flexible Static Site Generator built with
+	love by spf13 and friends in Go.
+	Complete documentation is available at http://hugo.spf13.com`,
+		RunE: func(cmd *cobra.Command, args []string) error {
+			// Do Stuff Here
+			return errors.New("some random error")
+		},
+	}
+
+	if err := rootCmd.Execute(); err != nil {
+		log.Fatal(err)
+	}
+}
+```
+
+## Suggestions when "unknown command" happens
+
+Cobra will print automatic suggestions when "unknown command" errors happen. This allows Cobra to behave similarly to the `git` command when a typo happens. For example:
+
+```
+$ hugo srever
+Error: unknown command "srever" for "hugo"
+
+Did you mean this?
+        server
+
+Run 'hugo --help' for usage.
+```
+
+Suggestions are automatic based on every subcommand registered and use an implementation of [Levenshtein distance](http://en.wikipedia.org/wiki/Levenshtein_distance). Every registered command that matches a minimum distance of 2 (ignoring case) will be displayed as a suggestion. 
+ +If you need to disable suggestions or tweak the string distance in your command, use: + +```go +command.DisableSuggestions = true +``` + +or + +```go +command.SuggestionsMinimumDistance = 1 +``` + +You can also explicitly set names for which a given command will be suggested using the `SuggestFor` attribute. This allows suggestions for strings that are not close in terms of string distance, but makes sense in your set of commands and for some which you don't want aliases. Example: + +``` +$ kubectl remove +Error: unknown command "remove" for "kubectl" + +Did you mean this? + delete + +Run 'kubectl help' for usage. +``` + +## Generating Markdown-formatted documentation for your command + +Cobra can generate a Markdown-formatted document based on the subcommands, flags, etc. A simple example of how to do this for your command can be found in [Markdown Docs](doc/md_docs.md). + +## Generating man pages for your command + +Cobra can generate a man page based on the subcommands, flags, etc. A simple example of how to do this for your command can be found in [Man Docs](doc/man_docs.md). + +## Generating bash completions for your command + +Cobra can generate a bash-completion file. If you add more information to your command, these completions can be amazingly powerful and flexible. Read more about it in [Bash Completions](bash_completions.md). + +## Debugging + +Cobra provides a ‘DebugFlags’ method on a command which, when called, will print +out everything Cobra knows about the flags for each command. 
+ +### Example + +```go +command.DebugFlags() +``` + +## Release Notes +* **0.9.0** June 17, 2014 + * flags can appears anywhere in the args (provided they are unambiguous) + * --help prints usage screen for app or command + * Prefix matching for commands + * Cleaner looking help and usage output + * Extensive test suite +* **0.8.0** Nov 5, 2013 + * Reworked interface to remove commander completely + * Command now primary structure + * No initialization needed + * Usage & Help templates & functions definable at any level + * Updated Readme +* **0.7.0** Sept 24, 2013 + * Needs more eyes + * Test suite + * Support for automatic error messages + * Support for help command + * Support for printing to any io.Writer instead of os.Stderr + * Support for persistent flags which cascade down tree + * Ready for integration into Hugo +* **0.1.0** Sept 3, 2013 + * Implement first draft + +## Extensions + +Libraries for extending Cobra: + +* [cmdns](https://github.com/gosuri/cmdns): Enables name spacing a command's immediate children. It provides an alternative way to structure subcommands, similar to `heroku apps:create` and `ovrclk clusters:launch`. + +## ToDo +* Launch proper documentation site + +## Contributing + +1. Fork it +2. Create your feature branch (`git checkout -b my-new-feature`) +3. Commit your changes (`git commit -am 'Add some feature'`) +4. Push to the branch (`git push origin my-new-feature`) +5. Create new Pull Request + +## Contributors + +Names in no particular order: + +* [spf13](https://github.com/spf13), +[eparis](https://github.com/eparis), +[bep](https://github.com/bep), and many more! + +## License + +Cobra is released under the Apache 2.0 license. 
See [LICENSE.txt](https://github.com/spf13/cobra/blob/master/LICENSE.txt) + + +[![Bitdeli Badge](https://d2weczhvl823v0.cloudfront.net/spf13/cobra/trend.png)](https://bitdeli.com/free "Bitdeli Badge") diff --git a/src/vendor/github.com/spf13/cobra/bash_completions.go b/src/vendor/github.com/spf13/cobra/bash_completions.go new file mode 100644 index 00000000..7a5bd4d7 --- /dev/null +++ b/src/vendor/github.com/spf13/cobra/bash_completions.go @@ -0,0 +1,641 @@ +package cobra + +import ( + "fmt" + "io" + "os" + "sort" + "strings" + + "github.com/spf13/pflag" +) + +const ( + BashCompFilenameExt = "cobra_annotation_bash_completion_filename_extensions" + BashCompCustom = "cobra_annotation_bash_completion_custom" + BashCompOneRequiredFlag = "cobra_annotation_bash_completion_one_required_flag" + BashCompSubdirsInDir = "cobra_annotation_bash_completion_subdirs_in_dir" +) + +func preamble(out io.Writer, name string) error { + _, err := fmt.Fprintf(out, "# bash completion for %-36s -*- shell-script -*-\n", name) + if err != nil { + return err + } + _, err = fmt.Fprint(out, ` +__debug() +{ + if [[ -n ${BASH_COMP_DEBUG_FILE} ]]; then + echo "$*" >> "${BASH_COMP_DEBUG_FILE}" + fi +} + +# Homebrew on Macs have version 1.3 of bash-completion which doesn't include +# _init_completion. This is a very minimal version of that function. 
+__my_init_completion() +{ + COMPREPLY=() + _get_comp_words_by_ref "$@" cur prev words cword +} + +__index_of_word() +{ + local w word=$1 + shift + index=0 + for w in "$@"; do + [[ $w = "$word" ]] && return + index=$((index+1)) + done + index=-1 +} + +__contains_word() +{ + local w word=$1; shift + for w in "$@"; do + [[ $w = "$word" ]] && return + done + return 1 +} + +__handle_reply() +{ + __debug "${FUNCNAME[0]}" + case $cur in + -*) + if [[ $(type -t compopt) = "builtin" ]]; then + compopt -o nospace + fi + local allflags + if [ ${#must_have_one_flag[@]} -ne 0 ]; then + allflags=("${must_have_one_flag[@]}") + else + allflags=("${flags[*]} ${two_word_flags[*]}") + fi + COMPREPLY=( $(compgen -W "${allflags[*]}" -- "$cur") ) + if [[ $(type -t compopt) = "builtin" ]]; then + [[ "${COMPREPLY[0]}" == *= ]] || compopt +o nospace + fi + + # complete after --flag=abc + if [[ $cur == *=* ]]; then + if [[ $(type -t compopt) = "builtin" ]]; then + compopt +o nospace + fi + + local index flag + flag="${cur%%=*}" + __index_of_word "${flag}" "${flags_with_completion[@]}" + if [[ ${index} -ge 0 ]]; then + COMPREPLY=() + PREFIX="" + cur="${cur#*=}" + ${flags_completion[${index}]} + if [ -n "${ZSH_VERSION}" ]; then + # zfs completion needs --flag= prefix + eval "COMPREPLY=( \"\${COMPREPLY[@]/#/${flag}=}\" )" + fi + fi + fi + return 0; + ;; + esac + + # check if we are handling a flag with special work handling + local index + __index_of_word "${prev}" "${flags_with_completion[@]}" + if [[ ${index} -ge 0 ]]; then + ${flags_completion[${index}]} + return + fi + + # we are parsing a flag and don't have a special handler, no completion + if [[ ${cur} != "${words[cword]}" ]]; then + return + fi + + local completions + completions=("${commands[@]}") + if [[ ${#must_have_one_noun[@]} -ne 0 ]]; then + completions=("${must_have_one_noun[@]}") + fi + if [[ ${#must_have_one_flag[@]} -ne 0 ]]; then + completions+=("${must_have_one_flag[@]}") + fi + COMPREPLY=( $(compgen -W 
"${completions[*]}" -- "$cur") ) + + if [[ ${#COMPREPLY[@]} -eq 0 && ${#noun_aliases[@]} -gt 0 && ${#must_have_one_noun[@]} -ne 0 ]]; then + COMPREPLY=( $(compgen -W "${noun_aliases[*]}" -- "$cur") ) + fi + + if [[ ${#COMPREPLY[@]} -eq 0 ]]; then + declare -F __custom_func >/dev/null && __custom_func + fi + + __ltrim_colon_completions "$cur" +} + +# The arguments should be in the form "ext1|ext2|extn" +__handle_filename_extension_flag() +{ + local ext="$1" + _filedir "@(${ext})" +} + +__handle_subdirs_in_dir_flag() +{ + local dir="$1" + pushd "${dir}" >/dev/null 2>&1 && _filedir -d && popd >/dev/null 2>&1 +} + +__handle_flag() +{ + __debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}" + + # if a command required a flag, and we found it, unset must_have_one_flag() + local flagname=${words[c]} + local flagvalue + # if the word contained an = + if [[ ${words[c]} == *"="* ]]; then + flagvalue=${flagname#*=} # take in as flagvalue after the = + flagname=${flagname%%=*} # strip everything after the = + flagname="${flagname}=" # but put the = back + fi + __debug "${FUNCNAME[0]}: looking for ${flagname}" + if __contains_word "${flagname}" "${must_have_one_flag[@]}"; then + must_have_one_flag=() + fi + + # if you set a flag which only applies to this command, don't show subcommands + if __contains_word "${flagname}" "${local_nonpersistent_flags[@]}"; then + commands=() + fi + + # keep flag value with flagname as flaghash + if [ -n "${flagvalue}" ] ; then + flaghash[${flagname}]=${flagvalue} + elif [ -n "${words[ $((c+1)) ]}" ] ; then + flaghash[${flagname}]=${words[ $((c+1)) ]} + else + flaghash[${flagname}]="true" # pad "true" for bool flag + fi + + # skip the argument to a two word flag + if __contains_word "${words[c]}" "${two_word_flags[@]}"; then + c=$((c+1)) + # if we are looking for a flags value, don't show commands + if [[ $c -eq $cword ]]; then + commands=() + fi + fi + + c=$((c+1)) + +} + +__handle_noun() +{ + __debug "${FUNCNAME[0]}: c is $c words[c] is 
${words[c]}" + + if __contains_word "${words[c]}" "${must_have_one_noun[@]}"; then + must_have_one_noun=() + elif __contains_word "${words[c]}" "${noun_aliases[@]}"; then + must_have_one_noun=() + fi + + nouns+=("${words[c]}") + c=$((c+1)) +} + +__handle_command() +{ + __debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}" + + local next_command + if [[ -n ${last_command} ]]; then + next_command="_${last_command}_${words[c]//:/__}" + else + if [[ $c -eq 0 ]]; then + next_command="_$(basename "${words[c]//:/__}")" + else + next_command="_${words[c]//:/__}" + fi + fi + c=$((c+1)) + __debug "${FUNCNAME[0]}: looking for ${next_command}" + declare -F $next_command >/dev/null && $next_command +} + +__handle_word() +{ + if [[ $c -ge $cword ]]; then + __handle_reply + return + fi + __debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}" + if [[ "${words[c]}" == -* ]]; then + __handle_flag + elif __contains_word "${words[c]}" "${commands[@]}"; then + __handle_command + elif [[ $c -eq 0 ]] && __contains_word "$(basename "${words[c]}")" "${commands[@]}"; then + __handle_command + else + __handle_noun + fi + __handle_word +} + +`) + return err +} + +func postscript(w io.Writer, name string) error { + name = strings.Replace(name, ":", "__", -1) + _, err := fmt.Fprintf(w, "__start_%s()\n", name) + if err != nil { + return err + } + _, err = fmt.Fprintf(w, `{ + local cur prev words cword + declare -A flaghash 2>/dev/null || : + if declare -F _init_completion >/dev/null 2>&1; then + _init_completion -s || return + else + __my_init_completion -n "=" || return + fi + + local c=0 + local flags=() + local two_word_flags=() + local local_nonpersistent_flags=() + local flags_with_completion=() + local flags_completion=() + local commands=("%s") + local must_have_one_flag=() + local must_have_one_noun=() + local last_command + local nouns=() + + __handle_word +} + +`, name) + if err != nil { + return err + } + _, err = fmt.Fprintf(w, `if [[ $(type -t compopt) = "builtin" ]]; then + 
complete -o default -F __start_%s %s +else + complete -o default -o nospace -F __start_%s %s +fi + +`, name, name, name, name) + if err != nil { + return err + } + _, err = fmt.Fprintf(w, "# ex: ts=4 sw=4 et filetype=sh\n") + return err +} + +func writeCommands(cmd *Command, w io.Writer) error { + if _, err := fmt.Fprintf(w, " commands=()\n"); err != nil { + return err + } + for _, c := range cmd.Commands() { + if !c.IsAvailableCommand() || c == cmd.helpCommand { + continue + } + if _, err := fmt.Fprintf(w, " commands+=(%q)\n", c.Name()); err != nil { + return err + } + } + _, err := fmt.Fprintf(w, "\n") + return err +} + +func writeFlagHandler(name string, annotations map[string][]string, w io.Writer) error { + for key, value := range annotations { + switch key { + case BashCompFilenameExt: + _, err := fmt.Fprintf(w, " flags_with_completion+=(%q)\n", name) + if err != nil { + return err + } + + if len(value) > 0 { + ext := "__handle_filename_extension_flag " + strings.Join(value, "|") + _, err = fmt.Fprintf(w, " flags_completion+=(%q)\n", ext) + } else { + ext := "_filedir" + _, err = fmt.Fprintf(w, " flags_completion+=(%q)\n", ext) + } + if err != nil { + return err + } + case BashCompCustom: + _, err := fmt.Fprintf(w, " flags_with_completion+=(%q)\n", name) + if err != nil { + return err + } + if len(value) > 0 { + handlers := strings.Join(value, "; ") + _, err = fmt.Fprintf(w, " flags_completion+=(%q)\n", handlers) + } else { + _, err = fmt.Fprintf(w, " flags_completion+=(:)\n") + } + if err != nil { + return err + } + case BashCompSubdirsInDir: + _, err := fmt.Fprintf(w, " flags_with_completion+=(%q)\n", name) + + if len(value) == 1 { + ext := "__handle_subdirs_in_dir_flag " + value[0] + _, err = fmt.Fprintf(w, " flags_completion+=(%q)\n", ext) + } else { + ext := "_filedir -d" + _, err = fmt.Fprintf(w, " flags_completion+=(%q)\n", ext) + } + if err != nil { + return err + } + } + } + return nil +} + +func writeShortFlag(flag *pflag.Flag, w io.Writer) error { 
+ b := (len(flag.NoOptDefVal) > 0) + name := flag.Shorthand + format := " " + if !b { + format += "two_word_" + } + format += "flags+=(\"-%s\")\n" + if _, err := fmt.Fprintf(w, format, name); err != nil { + return err + } + return writeFlagHandler("-"+name, flag.Annotations, w) +} + +func writeFlag(flag *pflag.Flag, w io.Writer) error { + b := (len(flag.NoOptDefVal) > 0) + name := flag.Name + format := " flags+=(\"--%s" + if !b { + format += "=" + } + format += "\")\n" + if _, err := fmt.Fprintf(w, format, name); err != nil { + return err + } + return writeFlagHandler("--"+name, flag.Annotations, w) +} + +func writeLocalNonPersistentFlag(flag *pflag.Flag, w io.Writer) error { + b := (len(flag.NoOptDefVal) > 0) + name := flag.Name + format := " local_nonpersistent_flags+=(\"--%s" + if !b { + format += "=" + } + format += "\")\n" + _, err := fmt.Fprintf(w, format, name) + return err +} + +func writeFlags(cmd *Command, w io.Writer) error { + _, err := fmt.Fprintf(w, ` flags=() + two_word_flags=() + local_nonpersistent_flags=() + flags_with_completion=() + flags_completion=() + +`) + if err != nil { + return err + } + localNonPersistentFlags := cmd.LocalNonPersistentFlags() + var visitErr error + cmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) { + if nonCompletableFlag(flag) { + return + } + if err := writeFlag(flag, w); err != nil { + visitErr = err + return + } + if len(flag.Shorthand) > 0 { + if err := writeShortFlag(flag, w); err != nil { + visitErr = err + return + } + } + if localNonPersistentFlags.Lookup(flag.Name) != nil { + if err := writeLocalNonPersistentFlag(flag, w); err != nil { + visitErr = err + return + } + } + }) + if visitErr != nil { + return visitErr + } + cmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) { + if nonCompletableFlag(flag) { + return + } + if err := writeFlag(flag, w); err != nil { + visitErr = err + return + } + if len(flag.Shorthand) > 0 { + if err := writeShortFlag(flag, w); err != nil { + visitErr = err + return + } + } 
+ }) + if visitErr != nil { + return visitErr + } + + _, err = fmt.Fprintf(w, "\n") + return err +} + +func writeRequiredFlag(cmd *Command, w io.Writer) error { + if _, err := fmt.Fprintf(w, " must_have_one_flag=()\n"); err != nil { + return err + } + flags := cmd.NonInheritedFlags() + var visitErr error + flags.VisitAll(func(flag *pflag.Flag) { + if nonCompletableFlag(flag) { + return + } + for key := range flag.Annotations { + switch key { + case BashCompOneRequiredFlag: + format := " must_have_one_flag+=(\"--%s" + b := (flag.Value.Type() == "bool") + if !b { + format += "=" + } + format += "\")\n" + if _, err := fmt.Fprintf(w, format, flag.Name); err != nil { + visitErr = err + return + } + + if len(flag.Shorthand) > 0 { + if _, err := fmt.Fprintf(w, " must_have_one_flag+=(\"-%s\")\n", flag.Shorthand); err != nil { + visitErr = err + return + } + } + } + } + }) + return visitErr +} + +func writeRequiredNouns(cmd *Command, w io.Writer) error { + if _, err := fmt.Fprintf(w, " must_have_one_noun=()\n"); err != nil { + return err + } + sort.Sort(sort.StringSlice(cmd.ValidArgs)) + for _, value := range cmd.ValidArgs { + if _, err := fmt.Fprintf(w, " must_have_one_noun+=(%q)\n", value); err != nil { + return err + } + } + return nil +} + +func writeArgAliases(cmd *Command, w io.Writer) error { + if _, err := fmt.Fprintf(w, " noun_aliases=()\n"); err != nil { + return err + } + sort.Sort(sort.StringSlice(cmd.ArgAliases)) + for _, value := range cmd.ArgAliases { + if _, err := fmt.Fprintf(w, " noun_aliases+=(%q)\n", value); err != nil { + return err + } + } + return nil +} + +func gen(cmd *Command, w io.Writer) error { + for _, c := range cmd.Commands() { + if !c.IsAvailableCommand() || c == cmd.helpCommand { + continue + } + if err := gen(c, w); err != nil { + return err + } + } + commandName := cmd.CommandPath() + commandName = strings.Replace(commandName, " ", "_", -1) + commandName = strings.Replace(commandName, ":", "__", -1) + if _, err := fmt.Fprintf(w, 
"_%s()\n{\n", commandName); err != nil { + return err + } + if _, err := fmt.Fprintf(w, " last_command=%q\n", commandName); err != nil { + return err + } + if err := writeCommands(cmd, w); err != nil { + return err + } + if err := writeFlags(cmd, w); err != nil { + return err + } + if err := writeRequiredFlag(cmd, w); err != nil { + return err + } + if err := writeRequiredNouns(cmd, w); err != nil { + return err + } + if err := writeArgAliases(cmd, w); err != nil { + return err + } + if _, err := fmt.Fprintf(w, "}\n\n"); err != nil { + return err + } + return nil +} + +func (cmd *Command) GenBashCompletion(w io.Writer) error { + if err := preamble(w, cmd.Name()); err != nil { + return err + } + if len(cmd.BashCompletionFunction) > 0 { + if _, err := fmt.Fprintf(w, "%s\n", cmd.BashCompletionFunction); err != nil { + return err + } + } + if err := gen(cmd, w); err != nil { + return err + } + return postscript(w, cmd.Name()) +} + +func nonCompletableFlag(flag *pflag.Flag) bool { + return flag.Hidden || len(flag.Deprecated) > 0 +} + +func (cmd *Command) GenBashCompletionFile(filename string) error { + outFile, err := os.Create(filename) + if err != nil { + return err + } + defer outFile.Close() + + return cmd.GenBashCompletion(outFile) +} + +// MarkFlagRequired adds the BashCompOneRequiredFlag annotation to the named flag, if it exists. +func (cmd *Command) MarkFlagRequired(name string) error { + return MarkFlagRequired(cmd.Flags(), name) +} + +// MarkPersistentFlagRequired adds the BashCompOneRequiredFlag annotation to the named persistent flag, if it exists. +func (cmd *Command) MarkPersistentFlagRequired(name string) error { + return MarkFlagRequired(cmd.PersistentFlags(), name) +} + +// MarkFlagRequired adds the BashCompOneRequiredFlag annotation to the named flag in the flag set, if it exists. 
+func MarkFlagRequired(flags *pflag.FlagSet, name string) error { + return flags.SetAnnotation(name, BashCompOneRequiredFlag, []string{"true"}) +} + +// MarkFlagFilename adds the BashCompFilenameExt annotation to the named flag, if it exists. +// Generated bash autocompletion will select filenames for the flag, limiting to named extensions if provided. +func (cmd *Command) MarkFlagFilename(name string, extensions ...string) error { + return MarkFlagFilename(cmd.Flags(), name, extensions...) +} + +// MarkFlagCustom adds the BashCompCustom annotation to the named flag, if it exists. +// Generated bash autocompletion will call the bash function f for the flag. +func (cmd *Command) MarkFlagCustom(name string, f string) error { + return MarkFlagCustom(cmd.Flags(), name, f) +} + +// MarkPersistentFlagFilename adds the BashCompFilenameExt annotation to the named persistent flag, if it exists. +// Generated bash autocompletion will select filenames for the flag, limiting to named extensions if provided. +func (cmd *Command) MarkPersistentFlagFilename(name string, extensions ...string) error { + return MarkFlagFilename(cmd.PersistentFlags(), name, extensions...) +} + +// MarkFlagFilename adds the BashCompFilenameExt annotation to the named flag in the flag set, if it exists. +// Generated bash autocompletion will select filenames for the flag, limiting to named extensions if provided. +func MarkFlagFilename(flags *pflag.FlagSet, name string, extensions ...string) error { + return flags.SetAnnotation(name, BashCompFilenameExt, extensions) +} + +// MarkFlagCustom adds the BashCompCustom annotation to the named flag in the flag set, if it exists. +// Generated bash autocompletion will call the bash function f for the flag. 
+func MarkFlagCustom(flags *pflag.FlagSet, name string, f string) error { + return flags.SetAnnotation(name, BashCompCustom, []string{f}) +} diff --git a/src/vendor/github.com/spf13/cobra/bash_completions.md b/src/vendor/github.com/spf13/cobra/bash_completions.md new file mode 100644 index 00000000..6e3b71f1 --- /dev/null +++ b/src/vendor/github.com/spf13/cobra/bash_completions.md @@ -0,0 +1,206 @@ +# Generating Bash Completions For Your Own cobra.Command + +Generating bash completions from a cobra command is incredibly easy. An actual program which does so for the kubernetes kubectl binary is as follows: + +```go +package main + +import ( + "io/ioutil" + "os" + + "github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd" +) + +func main() { + kubectl := cmd.NewFactory(nil).NewKubectlCommand(os.Stdin, ioutil.Discard, ioutil.Discard) + kubectl.GenBashCompletionFile("out.sh") +} +``` + +That will get you completions of subcommands and flags. If you make additional annotations to your code, you can get even more intelligent and flexible behavior. + +## Creating your own custom functions + +Some more actual code that works in kubernetes: + +```bash +const ( + bash_completion_func = `__kubectl_parse_get() +{ + local kubectl_output out + if kubectl_output=$(kubectl get --no-headers "$1" 2>/dev/null); then + out=($(echo "${kubectl_output}" | awk '{print $1}')) + COMPREPLY=( $( compgen -W "${out[*]}" -- "$cur" ) ) + fi +} + +__kubectl_get_resource() +{ + if [[ ${#nouns[@]} -eq 0 ]]; then + return 1 + fi + __kubectl_parse_get ${nouns[${#nouns[@]} -1]} + if [[ $? 
-eq 0 ]]; then
+        return 0
+    fi
+}
+
+__custom_func() {
+    case ${last_command} in
+        kubectl_get | kubectl_describe | kubectl_delete | kubectl_stop)
+            __kubectl_get_resource
+            return
+            ;;
+        *)
+            ;;
+    esac
+}
+`)
+```
+
+And then I set that in my command definition:
+
+```go
+cmds := &cobra.Command{
+	Use:   "kubectl",
+	Short: "kubectl controls the Kubernetes cluster manager",
+	Long: `kubectl controls the Kubernetes cluster manager.
+
+Find more information at https://github.com/GoogleCloudPlatform/kubernetes.`,
+	Run: runHelp,
+	BashCompletionFunction: bash_completion_func,
+}
+```
+
+The `BashCompletionFunction` option is really only valid/useful on the root command. Doing the above will cause `__custom_func()` to be called when the built in processor was unable to find a solution. In the case of kubernetes a valid command might look something like `kubectl get pod [mypod]`. If you type `kubectl get pod [tab][tab]` the `__custom_func()` will run because the cobra.Command only understood "kubectl" and "get." `__custom_func()` will see that the cobra.Command is "kubectl_get" and will thus call another helper `__kubectl_get_resource()`. `__kubectl_get_resource` will look at the 'nouns' collected. In our example the only noun will be `pod`. So it will call `__kubectl_parse_get pod`. `__kubectl_parse_get` will actually call out to kubernetes and get any pods. It will then set `COMPREPLY` to valid pods!
+
+## Have the completions code complete your 'nouns'
+
+In the above example "pod" was assumed to already be typed. But if you want `kubectl get [tab][tab]` to show a list of valid "nouns" you have to set them. Simplified code from `kubectl get` looks like:
+
+```go
+validArgs []string = { "pod", "node", "service", "replicationcontroller" }
+
+cmd := &cobra.Command{
+	Use:     "get [(-o|--output=)json|yaml|template|...] 
(RESOURCE [NAME] | RESOURCE/NAME ...)",
+	Short:   "Display one or many resources",
+	Long:    get_long,
+	Example: get_example,
+	Run: func(cmd *cobra.Command, args []string) {
+		err := RunGet(f, out, cmd, args)
+		util.CheckErr(err)
+	},
+	ValidArgs: validArgs,
+}
+```
+
+Notice we put the "ValidArgs" on the "get" subcommand. Doing so will give results like
+
+```bash
+# kubectl get [tab][tab]
+node pod replicationcontroller service
+```
+
+## Plural form and shortcuts for nouns
+
+If your nouns have a number of aliases, you can define them alongside `ValidArgs` using `ArgAliases`:
+
+```go
+argAliases []string = { "pods", "nodes", "services", "svc", "replicationcontrollers", "rc" }
+
+cmd := &cobra.Command{
+	...
+	ValidArgs:  validArgs,
+	ArgAliases: argAliases
+}
+```
+
+The aliases are not shown to the user on tab completion, but they are accepted as valid nouns by
+the completion algorithm if entered manually, e.g. in:
+
+```bash
+# kubectl get rc [tab][tab]
+backend frontend database
+```
+
+Note that without declaring `rc` as an alias, the completion algorithm would show the list of nouns
+in this example again instead of the replication controllers.
+
+## Mark flags as required
+
+Most of the time completions will only show subcommands. But if a flag is required to make a subcommand work, you probably want it to show up when the user types [tab][tab]. Marking a flag as 'Required' is incredibly easy.
+
+```go
+cmd.MarkFlagRequired("pod")
+cmd.MarkFlagRequired("container")
+```
+
+and you'll get something like
+
+```bash
+# kubectl exec [tab][tab][tab]
+-c --container= -p --pod=
+```
+
+# Specify valid filename extensions for flags that take a filename
+
+In this example we use --filename= and expect to get a json or yaml file as the argument. To make this easier we annotate the --filename flag with valid filename extensions. 
+
+```go
+	annotations := []string{"json", "yaml", "yml"}
+	annotation := make(map[string][]string)
+	annotation[cobra.BashCompFilenameExt] = annotations
+
+	flag := &pflag.Flag{
+		Name:        "filename",
+		Shorthand:   "f",
+		Usage:       usage,
+		Value:       value,
+		DefValue:    value.String(),
+		Annotations: annotation,
+	}
+	cmd.Flags().AddFlag(flag)
+```
+
+Now when you run a command with this filename flag you'll get something like
+
+```bash
+# kubectl create -f
+test/ example/ rpmbuild/
+hello.yml test.json
+```
+
+So while there are many other files in the CWD it only shows me subdirs and those with valid extensions.
+
+# Specify custom flag completion
+
+Similar to the filename completion and filtering using cobra.BashCompFilenameExt, you can specify
+a custom flag completion function with cobra.BashCompCustom:
+
+```go
+	annotation := make(map[string][]string)
+	annotation[cobra.BashCompCustom] = []string{"__kubectl_get_namespaces"}
+
+	flag := &pflag.Flag{
+		Name:        "namespace",
+		Usage:       usage,
+		Annotations: annotation,
+	}
+	cmd.Flags().AddFlag(flag)
+```
+
+In addition add the `__kubectl_get_namespaces` implementation in the `BashCompletionFunction`
+value, e.g.:
+
+```bash
+__kubectl_get_namespaces()
+{
+    local template
+    template="{{ range .items  }}{{ .metadata.name }} {{ end }}"
+    local kubectl_out
+    if kubectl_out=$(kubectl get -o template --template="${template}" namespace 2>/dev/null); then
+        COMPREPLY=( $( compgen -W "${kubectl_out[*]}" -- "$cur" ) )
+    fi
+}
+```
diff --git a/src/vendor/github.com/spf13/cobra/bash_completions_test.go b/src/vendor/github.com/spf13/cobra/bash_completions_test.go
new file mode 100644
index 00000000..185570b2
--- /dev/null
+++ b/src/vendor/github.com/spf13/cobra/bash_completions_test.go
@@ -0,0 +1,180 @@
+package cobra
+
+import (
+	"bytes"
+	"fmt"
+	"os"
+	"os/exec"
+	"strings"
+	"testing"
+)
+
+var _ = fmt.Println
+var _ = os.Stderr
+
+func checkOmit(t *testing.T, found, unexpected string) {
+	if strings.Contains(found, 
unexpected) { + t.Errorf("Unexpected response.\nGot: %q\nBut should not have!\n", unexpected) + } +} + +func check(t *testing.T, found, expected string) { + if !strings.Contains(found, expected) { + t.Errorf("Unexpected response.\nExpecting to contain: \n %q\nGot:\n %q\n", expected, found) + } +} + +func runShellCheck(s string) error { + excluded := []string{ + "SC2034", // PREFIX appears unused. Verify it or export it. + } + cmd := exec.Command("shellcheck", "-s", "bash", "-", "-e", strings.Join(excluded, ",")) + cmd.Stderr = os.Stderr + cmd.Stdout = os.Stdout + + stdin, err := cmd.StdinPipe() + if err != nil { + return err + } + go func() { + defer stdin.Close() + stdin.Write([]byte(s)) + }() + + return cmd.Run() +} + +// World worst custom function, just keep telling you to enter hello! +const ( + bashCompletionFunc = `__custom_func() { +COMPREPLY=( "hello" ) +} +` +) + +func TestBashCompletions(t *testing.T) { + c := initializeWithRootCmd() + cmdEcho.AddCommand(cmdTimes) + c.AddCommand(cmdEcho, cmdPrint, cmdDeprecated, cmdColon) + + // custom completion function + c.BashCompletionFunction = bashCompletionFunc + + // required flag + c.MarkFlagRequired("introot") + + // valid nouns + validArgs := []string{"pod", "node", "service", "replicationcontroller"} + c.ValidArgs = validArgs + + // noun aliases + argAliases := []string{"pods", "nodes", "services", "replicationcontrollers", "po", "no", "svc", "rc"} + c.ArgAliases = argAliases + + // filename + var flagval string + c.Flags().StringVar(&flagval, "filename", "", "Enter a filename") + c.MarkFlagFilename("filename", "json", "yaml", "yml") + + // persistent filename + var flagvalPersistent string + c.PersistentFlags().StringVar(&flagvalPersistent, "persistent-filename", "", "Enter a filename") + c.MarkPersistentFlagFilename("persistent-filename") + c.MarkPersistentFlagRequired("persistent-filename") + + // filename extensions + var flagvalExt string + c.Flags().StringVar(&flagvalExt, "filename-ext", "", "Enter a 
filename (extension limited)") + c.MarkFlagFilename("filename-ext") + + // filename extensions + var flagvalCustom string + c.Flags().StringVar(&flagvalCustom, "custom", "", "Enter a filename (extension limited)") + c.MarkFlagCustom("custom", "__complete_custom") + + // subdirectories in a given directory + var flagvalTheme string + c.Flags().StringVar(&flagvalTheme, "theme", "", "theme to use (located in /themes/THEMENAME/)") + c.Flags().SetAnnotation("theme", BashCompSubdirsInDir, []string{"themes"}) + + out := new(bytes.Buffer) + c.GenBashCompletion(out) + str := out.String() + + check(t, str, "_cobra-test") + check(t, str, "_cobra-test_echo") + check(t, str, "_cobra-test_echo_times") + check(t, str, "_cobra-test_print") + check(t, str, "_cobra-test_cmd__colon") + + // check for required flags + check(t, str, `must_have_one_flag+=("--introot=")`) + check(t, str, `must_have_one_flag+=("--persistent-filename=")`) + // check for custom completion function + check(t, str, `COMPREPLY=( "hello" )`) + // check for required nouns + check(t, str, `must_have_one_noun+=("pod")`) + // check for noun aliases + check(t, str, `noun_aliases+=("pods")`) + check(t, str, `noun_aliases+=("rc")`) + checkOmit(t, str, `must_have_one_noun+=("pods")`) + // check for filename extension flags + check(t, str, `flags_completion+=("_filedir")`) + // check for filename extension flags + check(t, str, `flags_completion+=("__handle_filename_extension_flag json|yaml|yml")`) + // check for custom flags + check(t, str, `flags_completion+=("__complete_custom")`) + // check for subdirs_in_dir flags + check(t, str, `flags_completion+=("__handle_subdirs_in_dir_flag themes")`) + + checkOmit(t, str, cmdDeprecated.Name()) + + // if available, run shellcheck against the script + if err := exec.Command("which", "shellcheck").Run(); err != nil { + return + } + err := runShellCheck(str) + if err != nil { + t.Fatalf("shellcheck failed: %v", err) + } +} + +func TestBashCompletionHiddenFlag(t *testing.T) { + 
var cmdTrue = &Command{ + Use: "does nothing", + Run: func(cmd *Command, args []string) {}, + } + + const flagName = "hidden-foo-bar-baz" + + var flagValue bool + cmdTrue.Flags().BoolVar(&flagValue, flagName, false, "hidden flag") + cmdTrue.Flags().MarkHidden(flagName) + + out := new(bytes.Buffer) + cmdTrue.GenBashCompletion(out) + bashCompletion := out.String() + if strings.Contains(bashCompletion, flagName) { + t.Errorf("expected completion to not include %q flag: Got %v", flagName, bashCompletion) + } +} + +func TestBashCompletionDeprecatedFlag(t *testing.T) { + var cmdTrue = &Command{ + Use: "does nothing", + Run: func(cmd *Command, args []string) {}, + } + + const flagName = "deprecated-foo-bar-baz" + + var flagValue bool + cmdTrue.Flags().BoolVar(&flagValue, flagName, false, "hidden flag") + cmdTrue.Flags().MarkDeprecated(flagName, "use --does-not-exist instead") + + out := new(bytes.Buffer) + cmdTrue.GenBashCompletion(out) + bashCompletion := out.String() + if strings.Contains(bashCompletion, flagName) { + t.Errorf("expected completion to not include %q flag: Got %v", flagName, bashCompletion) + } +} diff --git a/src/vendor/github.com/spf13/cobra/cobra.go b/src/vendor/github.com/spf13/cobra/cobra.go new file mode 100644 index 00000000..b39c715a --- /dev/null +++ b/src/vendor/github.com/spf13/cobra/cobra.go @@ -0,0 +1,173 @@ +// Copyright © 2013 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Commands similar to git, go tools and other modern CLI tools +// inspired by go, go-Commander, gh and subcommand + +package cobra + +import ( + "fmt" + "io" + "reflect" + "strconv" + "strings" + "text/template" + "unicode" +) + +var templateFuncs = template.FuncMap{ + "trim": strings.TrimSpace, + "trimRightSpace": trimRightSpace, + "appendIfNotPresent": appendIfNotPresent, + "rpad": rpad, + "gt": Gt, + "eq": Eq, +} + +var initializers []func() + +// Automatic prefix matching can be a dangerous thing to automatically enable in CLI tools. +// Set this to true to enable it. +var EnablePrefixMatching = false + +// EnableCommandSorting controls sorting of the slice of commands, which is turned on by default. +// To disable sorting, set it to false. +var EnableCommandSorting = true + +// AddTemplateFunc adds a template function that's available to Usage and Help +// template generation. +func AddTemplateFunc(name string, tmplFunc interface{}) { + templateFuncs[name] = tmplFunc +} + +// AddTemplateFuncs adds multiple template functions availalble to Usage and +// Help template generation. +func AddTemplateFuncs(tmplFuncs template.FuncMap) { + for k, v := range tmplFuncs { + templateFuncs[k] = v + } +} + +// OnInitialize takes a series of func() arguments and appends them to a slice of func(). +func OnInitialize(y ...func()) { + initializers = append(initializers, y...) +} + +// Gt takes two types and checks whether the first type is greater than the second. In case of types Arrays, Chans, +// Maps and Slices, Gt will compare their lengths. Ints are compared directly while strings are first parsed as +// ints and then compared. 
func Gt(a interface{}, b interface{}) bool {
	// size maps an operand to the int64 used for the comparison:
	// collection kinds (array/chan/map/slice) compare by length,
	// integer kinds by value, and strings by their base-10 integer
	// parse (an unparseable string counts as 0, matching ParseInt's
	// zero result on error). Any other kind compares as 0.
	size := func(v interface{}) int64 {
		rv := reflect.ValueOf(v)
		switch rv.Kind() {
		case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice:
			return int64(rv.Len())
		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
			return rv.Int()
		case reflect.String:
			n, _ := strconv.ParseInt(rv.String(), 10, 64)
			return n
		}
		return 0
	}
	return size(a) > size(b)
}

// Eq takes two types and checks whether they are equal. Supported types are
// int and string. Unsupported types will panic.
func Eq(a interface{}, b interface{}) bool {
	left, right := reflect.ValueOf(a), reflect.ValueOf(b)
	switch left.Kind() {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return left.Int() == right.Int()
	case reflect.String:
		return left.String() == right.String()
	case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice:
		panic("Eq called on unsupported type")
	}
	return false
}

// trimRightSpace returns s with all trailing Unicode whitespace removed.
func trimRightSpace(s string) string {
	isSpace := unicode.IsSpace
	return strings.TrimRightFunc(s, isSpace)
}

// appendIfNotPresent will append stringToAppend to the end of s, but only if
// it's not yet present anywhere in s.
func appendIfNotPresent(s, stringToAppend string) string {
	if !strings.Contains(s, stringToAppend) {
		s = s + " " + stringToAppend
	}
	return s
}

// rpad adds padding to the right of a string so it occupies at least
// `padding` columns.
func rpad(s string, padding int) string {
	return fmt.Sprintf(fmt.Sprintf("%%-%ds", padding), s)
}

// tmpl executes the given template text on data, writing the result to w.
+func tmpl(w io.Writer, text string, data interface{}) error { + t := template.New("top") + t.Funcs(templateFuncs) + template.Must(t.Parse(text)) + return t.Execute(w, data) +} + +// ld compares two strings and returns the levenshtein distance between them. +func ld(s, t string, ignoreCase bool) int { + if ignoreCase { + s = strings.ToLower(s) + t = strings.ToLower(t) + } + d := make([][]int, len(s)+1) + for i := range d { + d[i] = make([]int, len(t)+1) + } + for i := range d { + d[i][0] = i + } + for j := range d[0] { + d[0][j] = j + } + for j := 1; j <= len(t); j++ { + for i := 1; i <= len(s); i++ { + if s[i-1] == t[j-1] { + d[i][j] = d[i-1][j-1] + } else { + min := d[i-1][j] + if d[i][j-1] < min { + min = d[i][j-1] + } + if d[i-1][j-1] < min { + min = d[i-1][j-1] + } + d[i][j] = min + 1 + } + } + + } + return d[len(s)][len(t)] +} diff --git a/src/vendor/github.com/spf13/cobra/cobra/cmd/add.go b/src/vendor/github.com/spf13/cobra/cobra/cmd/add.go new file mode 100644 index 00000000..b89d4c47 --- /dev/null +++ b/src/vendor/github.com/spf13/cobra/cobra/cmd/add.go @@ -0,0 +1,128 @@ +// Copyright © 2015 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package cmd + +import ( + "fmt" + "path/filepath" + "strings" + + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +func init() { + RootCmd.AddCommand(addCmd) +} + +var pName string + +// initialize Command +var addCmd = &cobra.Command{ + Use: "add [command name]", + Aliases: []string{"command"}, + Short: "Add a command to a Cobra Application", + Long: `Add (cobra add) will create a new command, with a license and +the appropriate structure for a Cobra-based CLI application, +and register it to its parent (default RootCmd). + +If you want your command to be public, pass in the command name +with an initial uppercase letter. + +Example: cobra add server -> resulting in a new cmd/server.go + `, + + Run: func(cmd *cobra.Command, args []string) { + if len(args) != 1 { + er("add needs a name for the command") + } + guessProjectPath() + createCmdFile(args[0]) + }, +} + +func init() { + addCmd.Flags().StringVarP(&pName, "parent", "p", "RootCmd", "name of parent command for this command") +} + +func parentName() string { + if !strings.HasSuffix(strings.ToLower(pName), "cmd") { + return pName + "Cmd" + } + + return pName +} + +func createCmdFile(cmdName string) { + lic := getLicense() + + template := `{{ comment .copyright }} +{{ comment .license }} + +package cmd + +import ( + "fmt" + + "github.com/spf13/cobra" +) + +// {{.cmdName}}Cmd represents the {{.cmdName}} command +var {{ .cmdName }}Cmd = &cobra.Command{ + Use: "{{ .cmdName }}", + Short: "A brief description of your command", + Long: ` + "`" + `A longer description that spans multiple lines and likely contains examples +and usage of using your command. For example: + +Cobra is a CLI library for Go that empowers applications. 
+This application is a tool to generate the needed files +to quickly create a Cobra application.` + "`" + `, + Run: func(cmd *cobra.Command, args []string) { + // TODO: Work your own magic here + fmt.Println("{{ .cmdName }} called") + }, +} + +func init() { + {{ .parentName }}.AddCommand({{ .cmdName }}Cmd) + + // Here you will define your flags and configuration settings. + + // Cobra supports Persistent Flags which will work for this command + // and all subcommands, e.g.: + // {{.cmdName}}Cmd.PersistentFlags().String("foo", "", "A help for foo") + + // Cobra supports local flags which will only run when this command + // is called directly, e.g.: + // {{.cmdName}}Cmd.Flags().BoolP("toggle", "t", false, "Help message for toggle") + +} +` + + var data map[string]interface{} + data = make(map[string]interface{}) + + data["copyright"] = copyrightLine() + data["license"] = lic.Header + data["appName"] = projectName() + data["viper"] = viper.GetBool("useViper") + data["parentName"] = parentName() + data["cmdName"] = cmdName + + err := writeTemplateToFile(filepath.Join(ProjectPath(), guessCmdDir()), cmdName+".go", template, data) + if err != nil { + er(err) + } + fmt.Println(cmdName, "created at", filepath.Join(ProjectPath(), guessCmdDir(), cmdName+".go")) +} diff --git a/src/vendor/github.com/spf13/cobra/cobra/cmd/helpers.go b/src/vendor/github.com/spf13/cobra/cobra/cmd/helpers.go new file mode 100644 index 00000000..7cd3be18 --- /dev/null +++ b/src/vendor/github.com/spf13/cobra/cobra/cmd/helpers.go @@ -0,0 +1,356 @@ +// Copyright © 2015 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cmd + +import ( + "bytes" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "text/template" + "time" + + "github.com/spf13/viper" +) + +// var BaseDir = "" +// var AppName = "" +// var CommandDir = "" + +var funcMap template.FuncMap +var projectPath = "" +var inputPath = "" +var projectBase = "" + +// for testing only +var testWd = "" + +var cmdDirs = []string{"cmd", "cmds", "command", "commands"} + +func init() { + funcMap = template.FuncMap{ + "comment": commentifyString, + } +} + +func er(msg interface{}) { + fmt.Println("Error:", msg) + os.Exit(-1) +} + +// Check if a file or directory exists. 
+func exists(path string) (bool, error) { + _, err := os.Stat(path) + if err == nil { + return true, nil + } + if os.IsNotExist(err) { + return false, nil + } + return false, err +} + +func ProjectPath() string { + if projectPath == "" { + guessProjectPath() + } + + return projectPath +} + +// wrapper of the os package so we can test better +func getWd() (string, error) { + if testWd == "" { + return os.Getwd() + } + return testWd, nil +} + +func guessCmdDir() string { + guessProjectPath() + if b, _ := isEmpty(projectPath); b { + return "cmd" + } + + files, _ := filepath.Glob(projectPath + string(os.PathSeparator) + "c*") + for _, f := range files { + for _, c := range cmdDirs { + if f == c { + return c + } + } + } + + return "cmd" +} + +func guessImportPath() string { + guessProjectPath() + + if !strings.HasPrefix(projectPath, getSrcPath()) { + er("Cobra only supports project within $GOPATH") + } + + return filepath.ToSlash(filepath.Clean(strings.TrimPrefix(projectPath, getSrcPath()))) +} + +func getSrcPath() string { + return filepath.Join(os.Getenv("GOPATH"), "src") + string(os.PathSeparator) +} + +func projectName() string { + return filepath.Base(ProjectPath()) +} + +func guessProjectPath() { + // if no path is provided... assume CWD. + if inputPath == "" { + x, err := getWd() + if err != nil { + er(err) + } + + // inspect CWD + base := filepath.Base(x) + + // if we are in the cmd directory.. 
back up + for _, c := range cmdDirs { + if base == c { + projectPath = filepath.Dir(x) + return + } + } + + if projectPath == "" { + projectPath = filepath.Clean(x) + return + } + } + + srcPath := getSrcPath() + // if provided, inspect for logical locations + if strings.ContainsRune(inputPath, os.PathSeparator) { + if filepath.IsAbs(inputPath) || filepath.HasPrefix(inputPath, string(os.PathSeparator)) { + // if Absolute, use it + projectPath = filepath.Clean(inputPath) + return + } + // If not absolute but contains slashes, + // assuming it means create it from $GOPATH + count := strings.Count(inputPath, string(os.PathSeparator)) + + switch count { + // If only one directory deep, assume "github.com" + case 1: + projectPath = filepath.Join(srcPath, "github.com", inputPath) + return + case 2: + projectPath = filepath.Join(srcPath, inputPath) + return + default: + er("Unknown directory") + } + } else { + // hardest case.. just a word. + if projectBase == "" { + x, err := getWd() + if err == nil { + projectPath = filepath.Join(x, inputPath) + return + } + er(err) + } else { + projectPath = filepath.Join(srcPath, projectBase, inputPath) + return + } + } +} + +// isEmpty checks if a given path is empty. +func isEmpty(path string) (bool, error) { + if b, _ := exists(path); !b { + return false, fmt.Errorf("%q path does not exist", path) + } + fi, err := os.Stat(path) + if err != nil { + return false, err + } + if fi.IsDir() { + f, err := os.Open(path) + // FIX: Resource leak - f.close() should be called here by defer or is missed + // if the err != nil branch is taken. + defer f.Close() + if err != nil { + return false, err + } + list, _ := f.Readdir(-1) + // f.Close() - see bug fix above + return len(list) == 0, nil + } + return fi.Size() == 0, nil +} + +// isDir checks if a given path is a directory. 
+func isDir(path string) (bool, error) { + fi, err := os.Stat(path) + if err != nil { + return false, err + } + return fi.IsDir(), nil +} + +// dirExists checks if a path exists and is a directory. +func dirExists(path string) (bool, error) { + fi, err := os.Stat(path) + if err == nil && fi.IsDir() { + return true, nil + } + if os.IsNotExist(err) { + return false, nil + } + return false, err +} + +func writeTemplateToFile(path string, file string, template string, data interface{}) error { + filename := filepath.Join(path, file) + + r, err := templateToReader(template, data) + + if err != nil { + return err + } + + err = safeWriteToDisk(filename, r) + + if err != nil { + return err + } + return nil +} + +func writeStringToFile(path, file, text string) error { + filename := filepath.Join(path, file) + + r := strings.NewReader(text) + err := safeWriteToDisk(filename, r) + + if err != nil { + return err + } + return nil +} + +func templateToReader(tpl string, data interface{}) (io.Reader, error) { + tmpl := template.New("") + tmpl.Funcs(funcMap) + tmpl, err := tmpl.Parse(tpl) + + if err != nil { + return nil, err + } + buf := new(bytes.Buffer) + err = tmpl.Execute(buf, data) + + return buf, err +} + +// Same as WriteToDisk but checks to see if file/directory already exists. 
+func safeWriteToDisk(inpath string, r io.Reader) (err error) { + dir, _ := filepath.Split(inpath) + ospath := filepath.FromSlash(dir) + + if ospath != "" { + err = os.MkdirAll(ospath, 0777) // rwx, rw, r + if err != nil { + return + } + } + + ex, err := exists(inpath) + if err != nil { + return + } + if ex { + return fmt.Errorf("%v already exists", inpath) + } + + file, err := os.Create(inpath) + if err != nil { + return + } + defer file.Close() + + _, err = io.Copy(file, r) + return +} + +func getLicense() License { + l := whichLicense() + if l != "" { + if x, ok := Licenses[l]; ok { + return x + } + } + + return Licenses["apache"] +} + +func whichLicense() string { + // if explicitly flagged, use that + if userLicense != "" { + return matchLicense(userLicense) + } + + // if already present in the project, use that + // TODO: Inspect project for existing license + + // default to viper's setting + + if viper.IsSet("license.header") || viper.IsSet("license.text") { + if custom, ok := Licenses["custom"]; ok { + custom.Header = viper.GetString("license.header") + custom.Text = viper.GetString("license.text") + Licenses["custom"] = custom + return "custom" + } + } + + return matchLicense(viper.GetString("license")) +} + +func copyrightLine() string { + author := viper.GetString("author") + year := time.Now().Format("2006") + + return "Copyright © " + year + " " + author +} + +func commentifyString(in string) string { + var newlines []string + lines := strings.Split(in, "\n") + for _, x := range lines { + if !strings.HasPrefix(x, "//") { + if x != "" { + newlines = append(newlines, "// "+x) + } else { + newlines = append(newlines, "//") + } + } else { + newlines = append(newlines, x) + } + } + return strings.Join(newlines, "\n") +} diff --git a/src/vendor/github.com/spf13/cobra/cobra/cmd/helpers_test.go b/src/vendor/github.com/spf13/cobra/cobra/cmd/helpers_test.go new file mode 100644 index 00000000..bd0f7595 --- /dev/null +++ 
b/src/vendor/github.com/spf13/cobra/cobra/cmd/helpers_test.go @@ -0,0 +1,40 @@ +package cmd + +import ( + "fmt" + "os" + "path/filepath" + "testing" +) + +var _ = fmt.Println +var _ = os.Stderr + +func checkGuess(t *testing.T, wd, input, expected string) { + testWd = wd + inputPath = input + guessProjectPath() + + if projectPath != expected { + t.Errorf("Unexpected Project Path. \n Got: %q\nExpected: %q\n", projectPath, expected) + } + + reset() +} + +func reset() { + testWd = "" + inputPath = "" + projectPath = "" +} + +func TestProjectPath(t *testing.T) { + checkGuess(t, "", filepath.Join("github.com", "spf13", "hugo"), filepath.Join(getSrcPath(), "github.com", "spf13", "hugo")) + checkGuess(t, "", filepath.Join("spf13", "hugo"), filepath.Join(getSrcPath(), "github.com", "spf13", "hugo")) + checkGuess(t, "", filepath.Join("/", "bar", "foo"), filepath.Join("/", "bar", "foo")) + checkGuess(t, "/bar/foo", "baz", filepath.Join("/", "bar", "foo", "baz")) + checkGuess(t, "/bar/foo/cmd", "", filepath.Join("/", "bar", "foo")) + checkGuess(t, "/bar/foo/command", "", filepath.Join("/", "bar", "foo")) + checkGuess(t, "/bar/foo/commands", "", filepath.Join("/", "bar", "foo")) + checkGuess(t, "github.com/spf13/hugo/../hugo", "", filepath.Join("github.com", "spf13", "hugo")) +} diff --git a/src/vendor/github.com/spf13/cobra/cobra/cmd/init.go b/src/vendor/github.com/spf13/cobra/cobra/cmd/init.go new file mode 100644 index 00000000..13792f12 --- /dev/null +++ b/src/vendor/github.com/spf13/cobra/cobra/cmd/init.go @@ -0,0 +1,245 @@ +// Copyright © 2015 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cmd + +import ( + "bytes" + "fmt" + "os" + "strings" + + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +func init() { + RootCmd.AddCommand(initCmd) +} + +// initialize Command +var initCmd = &cobra.Command{ + Use: "init [name]", + Aliases: []string{"initialize", "initialise", "create"}, + Short: "Initialize a Cobra Application", + Long: `Initialize (cobra init) will create a new application, with a license +and the appropriate structure for a Cobra-based CLI application. + + * If a name is provided, it will be created in the current directory; + * If no name is provided, the current directory will be assumed; + * If a relative path is provided, it will be created inside $GOPATH + (e.g. github.com/spf13/hugo); + * If an absolute path is provided, it will be created; + * If the directory already exists but is empty, it will be used. 
+ +Init will not use an existing directory with contents.`, + + Run: func(cmd *cobra.Command, args []string) { + switch len(args) { + case 0: + inputPath = "" + + case 1: + inputPath = args[0] + + default: + er("init doesn't support more than 1 parameter") + } + guessProjectPath() + initializePath(projectPath) + }, +} + +func initializePath(path string) { + b, err := exists(path) + if err != nil { + er(err) + } + + if !b { // If path doesn't yet exist, create it + err := os.MkdirAll(path, os.ModePerm) + if err != nil { + er(err) + } + } else { // If path exists and is not empty don't use it + empty, err := exists(path) + if err != nil { + er(err) + } + if !empty { + er("Cobra will not create a new project in a non empty directory") + } + } + // We have a directory and it's empty.. Time to initialize it. + + createLicenseFile() + createMainFile() + createRootCmdFile() +} + +func createLicenseFile() { + lic := getLicense() + + // Don't bother writing a LICENSE file if there is no text. + if lic.Text != "" { + data := make(map[string]interface{}) + + // Try to remove the email address, if any + data["copyright"] = strings.Split(copyrightLine(), " <")[0] + + data["appName"] = projectName() + + // Generate license template from text and data. + r, _ := templateToReader(lic.Text, data) + buf := new(bytes.Buffer) + buf.ReadFrom(r) + + err := writeTemplateToFile(ProjectPath(), "LICENSE", buf.String(), data) + _ = err + // if err != nil { + // er(err) + // } + } +} + +func createMainFile() { + lic := getLicense() + + template := `{{ comment .copyright }} +{{if .license}}{{ comment .license }} +{{end}} +package main + +import "{{ .importpath }}" + +func main() { + cmd.Execute() +} +` + data := make(map[string]interface{}) + + data["copyright"] = copyrightLine() + data["appName"] = projectName() + + // Generate license template from header and data. 
+ r, _ := templateToReader(lic.Header, data) + buf := new(bytes.Buffer) + buf.ReadFrom(r) + data["license"] = buf.String() + + data["importpath"] = guessImportPath() + "/" + guessCmdDir() + + err := writeTemplateToFile(ProjectPath(), "main.go", template, data) + _ = err + // if err != nil { + // er(err) + // } +} + +func createRootCmdFile() { + lic := getLicense() + + template := `{{ comment .copyright }} +{{if .license}}{{ comment .license }} +{{end}} +package cmd + +import ( + "fmt" + "os" + + "github.com/spf13/cobra" +{{ if .viper }} "github.com/spf13/viper" +{{ end }}) +{{if .viper}} +var cfgFile string +{{ end }} +// RootCmd represents the base command when called without any subcommands +var RootCmd = &cobra.Command{ + Use: "{{ .appName }}", + Short: "A brief description of your application", + Long: ` + "`" + `A longer description that spans multiple lines and likely contains +examples and usage of using your application. For example: + +Cobra is a CLI library for Go that empowers applications. +This application is a tool to generate the needed files +to quickly create a Cobra application.` + "`" + `, +// Uncomment the following line if your bare application +// has an action associated with it: +// Run: func(cmd *cobra.Command, args []string) { }, +} + +// Execute adds all child commands to the root command sets flags appropriately. +// This is called by main.main(). It only needs to happen once to the rootCmd. +func Execute() { + if err := RootCmd.Execute(); err != nil { + fmt.Println(err) + os.Exit(-1) + } +} + +func init() { +{{ if .viper }} cobra.OnInitialize(initConfig) + +{{ end }} // Here you will define your flags and configuration settings. + // Cobra supports Persistent Flags, which, if defined here, + // will be global for your application. 
+{{ if .viper }} + RootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.{{ .appName }}.yaml)") +{{ else }} + // RootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.{{ .appName }}.yaml)") +{{ end }} // Cobra also supports local flags, which will only run + // when this action is called directly. + RootCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle") +} +{{ if .viper }} +// initConfig reads in config file and ENV variables if set. +func initConfig() { + if cfgFile != "" { // enable ability to specify config file via flag + viper.SetConfigFile(cfgFile) + } + + viper.SetConfigName(".{{ .appName }}") // name of config file (without extension) + viper.AddConfigPath("$HOME") // adding home directory as first search path + viper.AutomaticEnv() // read in environment variables that match + + // If a config file is found, read it in. + if err := viper.ReadInConfig(); err == nil { + fmt.Println("Using config file:", viper.ConfigFileUsed()) + } +} +{{ end }}` + + data := make(map[string]interface{}) + + data["copyright"] = copyrightLine() + data["appName"] = projectName() + + // Generate license template from header and data. 
+ r, _ := templateToReader(lic.Header, data) + buf := new(bytes.Buffer) + buf.ReadFrom(r) + data["license"] = buf.String() + + data["viper"] = viper.GetBool("useViper") + + err := writeTemplateToFile(ProjectPath()+string(os.PathSeparator)+guessCmdDir(), "root.go", template, data) + if err != nil { + er(err) + } + + fmt.Println("Your Cobra application is ready at") + fmt.Println(ProjectPath()) + fmt.Println("Give it a try by going there and running `go run main.go`") + fmt.Println("Add commands to it by running `cobra add [cmdname]`") +} diff --git a/src/vendor/github.com/spf13/cobra/cobra/cmd/licenses.go b/src/vendor/github.com/spf13/cobra/cobra/cmd/licenses.go new file mode 100644 index 00000000..b4c742c3 --- /dev/null +++ b/src/vendor/github.com/spf13/cobra/cobra/cmd/licenses.go @@ -0,0 +1,1384 @@ +// Copyright © 2015 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Parts inspired by https://github.com/ryanuber/go-license + +package cmd + +import "strings" + +//Licenses contains all possible licenses a user can chose from +var Licenses map[string]License + +//License represents a software license agreement, containing the Name of +// the license, its possible matches (on the command line as given to cobra) +// the header to be used with each file on the file's creating, and the text +// of the license +type License struct { + Name string // The type of license in use + PossibleMatches []string // Similar names to guess + Text string // License text data + Header string // License header for source files +} + +// given a license name (in), try to match the license indicated +func matchLicense(in string) string { + for key, lic := range Licenses { + for _, match := range lic.PossibleMatches { + if strings.EqualFold(in, match) { + return key + } + } + } + return "" +} + +func init() { + Licenses = make(map[string]License) + + // Allows a user to not use a license. + Licenses["none"] = License{"None", []string{"none", "false"}, "", ""} + + // Allows a user to use config for a custom license. + Licenses["custom"] = License{"Custom", []string{}, "", ""} + + Licenses["apache"] = License{ + Name: "Apache 2.0", + PossibleMatches: []string{"apache", "apache20", "apache 2.0", "apache2.0", "apache-2.0"}, + Header: ` +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License.`, + Text: ` + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +`, + } + + Licenses["mit"] = License{ + Name: "Mit", + PossibleMatches: []string{"mit"}, + Header: ` +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE.`, + Text: `The MIT License (MIT) + +{{ .copyright }} + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +`, + } + + Licenses["bsd"] = License{ + Name: "NewBSD", + PossibleMatches: []string{"bsd", "newbsd", "3 clause bsd"}, + Header: ` +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. 
+ +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE.`, + Text: `{{ .copyright }} +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +`, + } + + Licenses["freebsd"] = License{ + Name: "Simplified BSD License", + PossibleMatches: []string{"freebsd", "simpbsd", "simple bsd", "2 clause bsd"}, + Header: ` +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE.`, + Text: `{{ .copyright }} +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+`, + } + + Licenses["gpl2"] = License{ + Name: "GNU General Public License 2.0", + PossibleMatches: []string{"gpl2", "gnu gpl2"}, + Header: `{{ .copyright }} + + {{ .appName }} is free software; you can redistribute it and/or + modify it under the terms of the GNU General Public License + as published by the Free Software Foundation; either version 2 + of the License, or (at your option) any later version. + + {{ .appName }} is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with {{ .appName }}. If not, see .`, + Text: ` GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Lesser General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. 
Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. 
+ + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. 
You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. 
+ +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. 
However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. + +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. 
+You are not responsible for enforcing compliance by third parties to +this License. + + 7. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. 
If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. 
EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + + END OF TERMS AND CONDITIONS +`, + } + Licenses["gpl3"] = License{ + Name: "GNU General Public License 3.0", + PossibleMatches: []string{"gpl3", "gpl", "gnu gpl3", "gnu gpl"}, + Header: ` +This file is part of {{ .appName }}. + +{{ .appName }} is free software: you can redistribute it and/or modify +it under the terms of the GNU Lesser General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +{{ .appName }} is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Lesser General Public License for more details. + +You should have received a copy of the GNU Lesser General Public License +along with {{ .appName }}. If not, see . 
+ `, + Text: ` GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. 
+ + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. 
+ + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. 
+ + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. 
Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. 
+ + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. 
This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS +`, + } + + // Licenses["apache20"] = License{ + // Name: "Apache 2.0", + // PossibleMatches: []string{"apache", "apache20", ""}, + // Header: ` + // `, + // Text: ` + // `, + // } +} diff --git a/src/vendor/github.com/spf13/cobra/cobra/cmd/root.go b/src/vendor/github.com/spf13/cobra/cobra/cmd/root.go new file mode 100644 index 00000000..065c8bf4 --- /dev/null +++ b/src/vendor/github.com/spf13/cobra/cobra/cmd/root.go @@ -0,0 +1,72 @@ +// Copyright © 2015 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cmd + +import ( + "fmt" + "os" + + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +var cfgFile string +var userLicense string + +// RootCmd represents the base command when called without any subcommands +var RootCmd = &cobra.Command{ + Use: "cobra", + Short: "A generator for Cobra based Applications", + Long: `Cobra is a CLI library for Go that empowers applications. +This application is a tool to generate the needed files +to quickly create a Cobra application.`, +} + +//Execute adds all child commands to the root command sets flags appropriately. +func Execute() { + if err := RootCmd.Execute(); err != nil { + fmt.Println(err) + os.Exit(-1) + } +} + +func init() { + cobra.OnInitialize(initConfig) + RootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.cobra.yaml)") + RootCmd.PersistentFlags().StringVarP(&projectBase, "projectbase", "b", "", "base project directory, e.g. 
github.com/spf13/") + RootCmd.PersistentFlags().StringP("author", "a", "YOUR NAME", "Author name for copyright attribution") + RootCmd.PersistentFlags().StringVarP(&userLicense, "license", "l", "", "Name of license for the project (can provide `license` in config)") + RootCmd.PersistentFlags().Bool("viper", true, "Use Viper for configuration") + viper.BindPFlag("author", RootCmd.PersistentFlags().Lookup("author")) + viper.BindPFlag("projectbase", RootCmd.PersistentFlags().Lookup("projectbase")) + viper.BindPFlag("useViper", RootCmd.PersistentFlags().Lookup("viper")) + viper.SetDefault("author", "NAME HERE ") + viper.SetDefault("license", "apache") +} + +// Read in config file and ENV variables if set. +func initConfig() { + if cfgFile != "" { // enable ability to specify config file via flag + viper.SetConfigFile(cfgFile) + } + + viper.SetConfigName(".cobra") // name of config file (without extension) + viper.AddConfigPath("$HOME") // adding home directory as first search path + viper.AutomaticEnv() // read in environment variables that match + + // If a config file is found, read it in. + if err := viper.ReadInConfig(); err == nil { + fmt.Println("Using config file:", viper.ConfigFileUsed()) + } +} diff --git a/src/vendor/github.com/spf13/cobra/cobra/main.go b/src/vendor/github.com/spf13/cobra/cobra/main.go new file mode 100644 index 00000000..c3a9d9cb --- /dev/null +++ b/src/vendor/github.com/spf13/cobra/cobra/main.go @@ -0,0 +1,20 @@ +// Copyright © 2015 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import "github.com/spf13/cobra/cobra/cmd" + +func main() { + cmd.Execute() +} diff --git a/src/vendor/github.com/spf13/cobra/cobra_test.go b/src/vendor/github.com/spf13/cobra/cobra_test.go new file mode 100644 index 00000000..699a2f63 --- /dev/null +++ b/src/vendor/github.com/spf13/cobra/cobra_test.go @@ -0,0 +1,1187 @@ +package cobra + +import ( + "bytes" + "fmt" + "os" + "reflect" + "runtime" + "strings" + "testing" + "text/template" + + "github.com/spf13/pflag" +) + +var _ = fmt.Println +var _ = os.Stderr + +var tp, te, tt, t1, tr []string +var rootPersPre, echoPre, echoPersPre, timesPersPre []string +var flagb1, flagb2, flagb3, flagbr, flagbp bool +var flags1, flags2a, flags2b, flags3, outs string +var flagi1, flagi2, flagi3, flagi4, flagir int +var globalFlag1 bool +var flagEcho, rootcalled bool +var versionUsed int + +const strtwoParentHelp = "help message for parent flag strtwo" +const strtwoChildHelp = "help message for child flag strtwo" + +var cmdHidden = &Command{ + Use: "hide [secret string to print]", + Short: "Print anything to screen (if command is known)", + Long: `an absolutely utterly useless command for testing.`, + Run: func(cmd *Command, args []string) { + outs = "hidden" + }, + Hidden: true, +} + +var cmdPrint = &Command{ + Use: "print [string to print]", + Short: "Print anything to the screen", + Long: `an absolutely utterly useless command for testing.`, + Run: func(cmd *Command, args []string) { + tp = args + }, +} + +var cmdEcho = &Command{ + Use: "echo [string to echo]", + Aliases: []string{"say"}, + Short: "Echo anything to the screen", + Long: `an utterly useless command for testing.`, + Example: "Just run cobra-test echo", + PersistentPreRun: func(cmd *Command, args []string) { + echoPersPre = args + }, + PreRun: func(cmd *Command, args []string) { + echoPre = args + }, + Run: func(cmd *Command, args []string) { 
+ te = args + }, +} + +var cmdEchoSub = &Command{ + Use: "echosub [string to print]", + Short: "second sub command for echo", + Long: `an absolutely utterly useless command for testing gendocs!.`, + Run: func(cmd *Command, args []string) { + }, +} + +var cmdDeprecated = &Command{ + Use: "deprecated [can't do anything here]", + Short: "A command which is deprecated", + Long: `an absolutely utterly useless command for testing deprecation!.`, + Deprecated: "Please use echo instead", + Run: func(cmd *Command, args []string) { + }, +} + +var cmdTimes = &Command{ + Use: "times [# times] [string to echo]", + SuggestFor: []string{"counts"}, + Short: "Echo anything to the screen more times", + Long: `a slightly useless command for testing.`, + PersistentPreRun: func(cmd *Command, args []string) { + timesPersPre = args + }, + Run: func(cmd *Command, args []string) { + tt = args + }, +} + +var cmdRootNoRun = &Command{ + Use: "cobra-test", + Short: "The root can run its own function", + Long: "The root description for help", + PersistentPreRun: func(cmd *Command, args []string) { + rootPersPre = args + }, +} + +var cmdRootSameName = &Command{ + Use: "print", + Short: "Root with the same name as a subcommand", + Long: "The root description for help", +} + +var cmdRootWithRun = &Command{ + Use: "cobra-test", + Short: "The root can run its own function", + Long: "The root description for help", + Run: func(cmd *Command, args []string) { + tr = args + rootcalled = true + }, +} + +var cmdSubNoRun = &Command{ + Use: "subnorun", + Short: "A subcommand without a Run function", + Long: "A long output about a subcommand without a Run function", +} + +var cmdCustomFlags = &Command{ + Use: "customflags [flags] -- REMOTE_COMMAND", + Short: "A command that expects flags in a custom location", + Long: "A long output about a command that expects flags in a custom location", + Run: func(cmd *Command, args []string) { + }, +} + +var cmdVersion1 = &Command{ + Use: "version", + Short: "Print the 
version number", + Long: `First version of the version command`, + Run: func(cmd *Command, args []string) { + versionUsed = 1 + }, +} + +var cmdVersion2 = &Command{ + Use: "version", + Short: "Print the version number", + Long: `Second version of the version command`, + Run: func(cmd *Command, args []string) { + versionUsed = 2 + }, +} + +var cmdColon = &Command{ + Use: "cmd:colon", + Run: func(cmd *Command, args []string) { + }, +} + +func flagInit() { + cmdEcho.ResetFlags() + cmdPrint.ResetFlags() + cmdTimes.ResetFlags() + cmdRootNoRun.ResetFlags() + cmdRootSameName.ResetFlags() + cmdRootWithRun.ResetFlags() + cmdSubNoRun.ResetFlags() + cmdCustomFlags.ResetFlags() + cmdRootNoRun.PersistentFlags().StringVarP(&flags2a, "strtwo", "t", "two", strtwoParentHelp) + cmdEcho.Flags().IntVarP(&flagi1, "intone", "i", 123, "help message for flag intone") + cmdTimes.Flags().IntVarP(&flagi2, "inttwo", "j", 234, "help message for flag inttwo") + cmdPrint.Flags().IntVarP(&flagi3, "intthree", "i", 345, "help message for flag intthree") + cmdCustomFlags.Flags().IntVar(&flagi4, "intfour", 456, "help message for flag intfour") + cmdEcho.PersistentFlags().StringVarP(&flags1, "strone", "s", "one", "help message for flag strone") + cmdEcho.PersistentFlags().BoolVarP(&flagbp, "persistentbool", "p", false, "help message for flag persistentbool") + cmdTimes.PersistentFlags().StringVarP(&flags2b, "strtwo", "t", "2", strtwoChildHelp) + cmdPrint.PersistentFlags().StringVarP(&flags3, "strthree", "s", "three", "help message for flag strthree") + cmdEcho.Flags().BoolVarP(&flagb1, "boolone", "b", true, "help message for flag boolone") + cmdTimes.Flags().BoolVarP(&flagb2, "booltwo", "c", false, "help message for flag booltwo") + cmdPrint.Flags().BoolVarP(&flagb3, "boolthree", "b", true, "help message for flag boolthree") + cmdVersion1.ResetFlags() + cmdVersion2.ResetFlags() +} + +func commandInit() { + cmdEcho.ResetCommands() + cmdPrint.ResetCommands() + cmdTimes.ResetCommands() + 
cmdRootNoRun.ResetCommands() + cmdRootSameName.ResetCommands() + cmdRootWithRun.ResetCommands() + cmdSubNoRun.ResetCommands() + cmdCustomFlags.ResetCommands() +} + +func initialize() *Command { + tt, tp, te = nil, nil, nil + rootPersPre, echoPre, echoPersPre, timesPersPre = nil, nil, nil, nil + + var c = cmdRootNoRun + flagInit() + commandInit() + return c +} + +func initializeWithSameName() *Command { + tt, tp, te = nil, nil, nil + rootPersPre, echoPre, echoPersPre, timesPersPre = nil, nil, nil, nil + var c = cmdRootSameName + flagInit() + commandInit() + return c +} + +func initializeWithRootCmd() *Command { + cmdRootWithRun.ResetCommands() + tt, tp, te, tr, rootcalled = nil, nil, nil, nil, false + flagInit() + cmdRootWithRun.Flags().BoolVarP(&flagbr, "boolroot", "b", false, "help message for flag boolroot") + cmdRootWithRun.Flags().IntVarP(&flagir, "introot", "i", 321, "help message for flag introot") + commandInit() + return cmdRootWithRun +} + +type resulter struct { + Error error + Output string + Command *Command +} + +func fullSetupTest(input string) resulter { + c := initializeWithRootCmd() + + return fullTester(c, input) +} + +func noRRSetupTestSilenced(input string) resulter { + c := initialize() + c.SilenceErrors = true + c.SilenceUsage = true + return fullTester(c, input) +} + +func noRRSetupTest(input string) resulter { + c := initialize() + + return fullTester(c, input) +} + +func rootOnlySetupTest(input string) resulter { + c := initializeWithRootCmd() + + return simpleTester(c, input) +} + +func simpleTester(c *Command, input string) resulter { + buf := new(bytes.Buffer) + // Testing flag with invalid input + c.SetOutput(buf) + c.SetArgs(strings.Split(input, " ")) + + err := c.Execute() + output := buf.String() + + return resulter{err, output, c} +} + +func simpleTesterC(c *Command, input string) resulter { + buf := new(bytes.Buffer) + // Testing flag with invalid input + c.SetOutput(buf) + c.SetArgs(strings.Split(input, " ")) + + cmd, err := 
c.ExecuteC() + output := buf.String() + + return resulter{err, output, cmd} +} + +func fullTester(c *Command, input string) resulter { + buf := new(bytes.Buffer) + // Testing flag with invalid input + c.SetOutput(buf) + cmdEcho.AddCommand(cmdTimes) + c.AddCommand(cmdPrint, cmdEcho, cmdSubNoRun, cmdCustomFlags, cmdDeprecated) + c.SetArgs(strings.Split(input, " ")) + + err := c.Execute() + output := buf.String() + + return resulter{err, output, c} +} + +func logErr(t *testing.T, found, expected string) { + out := new(bytes.Buffer) + + _, _, line, ok := runtime.Caller(2) + if ok { + fmt.Fprintf(out, "Line: %d ", line) + } + fmt.Fprintf(out, "Unexpected response.\nExpecting to contain: \n %q\nGot:\n %q\n", expected, found) + t.Errorf(out.String()) +} + +func checkStringContains(t *testing.T, found, expected string) { + if !strings.Contains(found, expected) { + logErr(t, found, expected) + } +} + +func checkResultContains(t *testing.T, x resulter, check string) { + checkStringContains(t, x.Output, check) +} + +func checkStringOmits(t *testing.T, found, expected string) { + if strings.Contains(found, expected) { + logErr(t, found, expected) + } +} + +func checkResultOmits(t *testing.T, x resulter, check string) { + checkStringOmits(t, x.Output, check) +} + +func checkOutputContains(t *testing.T, c *Command, check string) { + buf := new(bytes.Buffer) + c.SetOutput(buf) + c.Execute() + + if !strings.Contains(buf.String(), check) { + logErr(t, buf.String(), check) + } +} + +func TestSingleCommand(t *testing.T) { + noRRSetupTest("print one two") + + if te != nil || tt != nil { + t.Error("Wrong command called") + } + if tp == nil { + t.Error("Wrong command called") + } + if strings.Join(tp, " ") != "one two" { + t.Error("Command didn't parse correctly") + } +} + +func TestChildCommand(t *testing.T) { + noRRSetupTest("echo times one two") + + if te != nil || tp != nil { + t.Error("Wrong command called") + } + if tt == nil { + t.Error("Wrong command called") + } + if 
strings.Join(tt, " ") != "one two" { + t.Error("Command didn't parse correctly") + } +} + +func TestCommandAlias(t *testing.T) { + noRRSetupTest("say times one two") + + if te != nil || tp != nil { + t.Error("Wrong command called") + } + if tt == nil { + t.Error("Wrong command called") + } + if strings.Join(tt, " ") != "one two" { + t.Error("Command didn't parse correctly") + } +} + +func TestPrefixMatching(t *testing.T) { + EnablePrefixMatching = true + noRRSetupTest("ech times one two") + + if te != nil || tp != nil { + t.Error("Wrong command called") + } + if tt == nil { + t.Error("Wrong command called") + } + if strings.Join(tt, " ") != "one two" { + t.Error("Command didn't parse correctly") + } + + EnablePrefixMatching = false +} + +func TestNoPrefixMatching(t *testing.T) { + EnablePrefixMatching = false + + noRRSetupTest("ech times one two") + + if !(tt == nil && te == nil && tp == nil) { + t.Error("Wrong command called") + } +} + +func TestAliasPrefixMatching(t *testing.T) { + EnablePrefixMatching = true + noRRSetupTest("sa times one two") + + if te != nil || tp != nil { + t.Error("Wrong command called") + } + if tt == nil { + t.Error("Wrong command called") + } + if strings.Join(tt, " ") != "one two" { + t.Error("Command didn't parse correctly") + } + EnablePrefixMatching = false +} + +func TestChildSameName(t *testing.T) { + c := initializeWithSameName() + c.AddCommand(cmdPrint, cmdEcho) + c.SetArgs(strings.Split("print one two", " ")) + c.Execute() + + if te != nil || tt != nil { + t.Error("Wrong command called") + } + if tp == nil { + t.Error("Wrong command called") + } + if strings.Join(tp, " ") != "one two" { + t.Error("Command didn't parse correctly") + } +} + +func TestGrandChildSameName(t *testing.T) { + c := initializeWithSameName() + cmdTimes.AddCommand(cmdPrint) + c.AddCommand(cmdTimes) + c.SetArgs(strings.Split("times print one two", " ")) + c.Execute() + + if te != nil || tt != nil { + t.Error("Wrong command called") + } + if tp == nil { + 
t.Error("Wrong command called") + } + if strings.Join(tp, " ") != "one two" { + t.Error("Command didn't parse correctly") + } +} + +func TestUsage(t *testing.T) { + x := fullSetupTest("help") + checkResultContains(t, x, cmdRootWithRun.Use+" [flags]") + x = fullSetupTest("help customflags") + checkResultContains(t, x, cmdCustomFlags.Use) + checkResultOmits(t, x, cmdCustomFlags.Use+" [flags]") +} + +func TestFlagLong(t *testing.T) { + noRRSetupTest("echo --intone=13 something -- here") + + if cmdEcho.ArgsLenAtDash() != 1 { + t.Errorf("expected argsLenAtDash: %d but got %d", 1, cmdRootNoRun.ArgsLenAtDash()) + } + if strings.Join(te, " ") != "something here" { + t.Errorf("flags didn't leave proper args remaining..%s given", te) + } + if flagi1 != 13 { + t.Errorf("int flag didn't get correct value, had %d", flagi1) + } + if flagi2 != 234 { + t.Errorf("default flag value changed, 234 expected, %d given", flagi2) + } +} + +func TestFlagShort(t *testing.T) { + noRRSetupTest("echo -i13 -- something here") + + if cmdEcho.ArgsLenAtDash() != 0 { + t.Errorf("expected argsLenAtDash: %d but got %d", 0, cmdRootNoRun.ArgsLenAtDash()) + } + if strings.Join(te, " ") != "something here" { + t.Errorf("flags didn't leave proper args remaining..%s given", te) + } + if flagi1 != 13 { + t.Errorf("int flag didn't get correct value, had %d", flagi1) + } + if flagi2 != 234 { + t.Errorf("default flag value changed, 234 expected, %d given", flagi2) + } + + noRRSetupTest("echo -i 13 something here") + + if strings.Join(te, " ") != "something here" { + t.Errorf("flags didn't leave proper args remaining..%s given", te) + } + if flagi1 != 13 { + t.Errorf("int flag didn't get correct value, had %d", flagi1) + } + if flagi2 != 234 { + t.Errorf("default flag value changed, 234 expected, %d given", flagi2) + } + + noRRSetupTest("print -i99 one two") + + if strings.Join(tp, " ") != "one two" { + t.Errorf("flags didn't leave proper args remaining..%s given", tp) + } + if flagi3 != 99 { + t.Errorf("int 
flag didn't get correct value, had %d", flagi3) + } + if flagi1 != 123 { + t.Errorf("default flag value changed on different command with same shortname, 234 expected, %d given", flagi2) + } +} + +func TestChildCommandFlags(t *testing.T) { + noRRSetupTest("echo times -j 99 one two") + + if strings.Join(tt, " ") != "one two" { + t.Errorf("flags didn't leave proper args remaining..%s given", tt) + } + + // Testing with flag that shouldn't be persistent + r := noRRSetupTest("echo times -j 99 -i77 one two") + + if r.Error == nil { + t.Errorf("invalid flag should generate error") + } + + if !strings.Contains(r.Error.Error(), "unknown shorthand") { + t.Errorf("Wrong error message displayed, \n %s", r.Error) + } + + if flagi2 != 99 { + t.Errorf("flag value should be 99, %d given", flagi2) + } + + if flagi1 != 123 { + t.Errorf("unset flag should have default value, expecting 123, given %d", flagi1) + } + + // Testing with flag only existing on child + r = noRRSetupTest("echo -j 99 -i77 one two") + + if r.Error == nil { + t.Errorf("invalid flag should generate error") + } + if !strings.Contains(r.Error.Error(), "unknown shorthand flag") { + t.Errorf("Wrong error message displayed, \n %s", r.Error) + } + + // Testing with persistent flag overwritten by child + noRRSetupTest("echo times --strtwo=child one two") + + if flags2b != "child" { + t.Errorf("flag value should be child, %s given", flags2b) + } + + if flags2a != "two" { + t.Errorf("unset flag should have default value, expecting two, given %s", flags2a) + } + + // Testing flag with invalid input + r = noRRSetupTest("echo -i10E") + + if r.Error == nil { + t.Errorf("invalid input should generate error") + } + if !strings.Contains(r.Error.Error(), "invalid argument \"10E\" for i10E") { + t.Errorf("Wrong error message displayed, \n %s", r.Error) + } +} + +func TestTrailingCommandFlags(t *testing.T) { + x := fullSetupTest("echo two -x") + + if x.Error == nil { + t.Errorf("invalid flag should generate error") + } +} + +func 
TestInvalidSubcommandFlags(t *testing.T) { + cmd := initializeWithRootCmd() + cmd.AddCommand(cmdTimes) + + result := simpleTester(cmd, "times --inttwo=2 --badflag=bar") + // given that we are not checking here result.Error we check for + // stock usage message + checkResultContains(t, result, "cobra-test times [# times]") + if strings.Contains(result.Error.Error(), "unknown flag: --inttwo") { + t.Errorf("invalid --badflag flag shouldn't fail on 'unknown' --inttwo flag") + } + +} + +func TestSubcommandExecuteC(t *testing.T) { + cmd := initializeWithRootCmd() + double := &Command{ + Use: "double message", + Run: func(c *Command, args []string) { + msg := strings.Join(args, " ") + c.Println(msg, msg) + }, + } + + echo := &Command{ + Use: "echo message", + Run: func(c *Command, args []string) { + msg := strings.Join(args, " ") + c.Println(msg, msg) + }, + } + + cmd.AddCommand(double, echo) + + result := simpleTesterC(cmd, "double hello world") + checkResultContains(t, result, "hello world hello world") + + if result.Command.Name() != "double" { + t.Errorf("invalid cmd returned from ExecuteC: should be 'double' but got %s", result.Command.Name()) + } + + result = simpleTesterC(cmd, "echo msg to be echoed") + checkResultContains(t, result, "msg to be echoed") + + if result.Command.Name() != "echo" { + t.Errorf("invalid cmd returned from ExecuteC: should be 'echo' but got %s", result.Command.Name()) + } +} + +func TestSubcommandArgEvaluation(t *testing.T) { + cmd := initializeWithRootCmd() + + first := &Command{ + Use: "first", + Run: func(cmd *Command, args []string) { + }, + } + cmd.AddCommand(first) + + second := &Command{ + Use: "second", + Run: func(cmd *Command, args []string) { + fmt.Fprintf(cmd.OutOrStdout(), "%v", args) + }, + } + first.AddCommand(second) + + result := simpleTester(cmd, "first second first third") + + expectedOutput := fmt.Sprintf("%v", []string{"first third"}) + if result.Output != expectedOutput { + t.Errorf("exptected %v, got %v", 
expectedOutput, result.Output) + } +} + +func TestPersistentFlags(t *testing.T) { + fullSetupTest("echo -s something -p more here") + + // persistentFlag should act like normal flag on its own command + if strings.Join(te, " ") != "more here" { + t.Errorf("flags didn't leave proper args remaining..%s given", te) + } + if flags1 != "something" { + t.Errorf("string flag didn't get correct value, had %v", flags1) + } + if !flagbp { + t.Errorf("persistent bool flag not parsed correctly. Expected true, had %v", flagbp) + } + + // persistentFlag should act like normal flag on its own command + fullSetupTest("echo times -s again -c -p test here") + + if strings.Join(tt, " ") != "test here" { + t.Errorf("flags didn't leave proper args remaining..%s given", tt) + } + + if flags1 != "again" { + t.Errorf("string flag didn't get correct value, had %v", flags1) + } + + if !flagb2 { + t.Errorf("local flag not parsed correctly. Expected true, had %v", flagb2) + } + if !flagbp { + t.Errorf("persistent bool flag not parsed correctly. 
Expected true, had %v", flagbp) + } +} + +func TestHelpCommand(t *testing.T) { + x := fullSetupTest("help") + checkResultContains(t, x, cmdRootWithRun.Long) + + x = fullSetupTest("help echo") + checkResultContains(t, x, cmdEcho.Long) + + x = fullSetupTest("help echo times") + checkResultContains(t, x, cmdTimes.Long) +} + +func TestChildCommandHelp(t *testing.T) { + c := noRRSetupTest("print --help") + checkResultContains(t, c, strtwoParentHelp) + r := noRRSetupTest("echo times --help") + checkResultContains(t, r, strtwoChildHelp) +} + +func TestNonRunChildHelp(t *testing.T) { + x := noRRSetupTest("subnorun") + checkResultContains(t, x, cmdSubNoRun.Long) +} + +func TestRunnableRootCommand(t *testing.T) { + x := fullSetupTest("") + + if rootcalled != true { + t.Errorf("Root Function was not called\n out:%v", x.Error) + } +} + +func TestVisitParents(t *testing.T) { + c := &Command{Use: "app"} + sub := &Command{Use: "sub"} + dsub := &Command{Use: "dsub"} + sub.AddCommand(dsub) + c.AddCommand(sub) + total := 0 + add := func(x *Command) { + total++ + } + sub.VisitParents(add) + if total != 1 { + t.Errorf("Should have visited 1 parent but visited %d", total) + } + + total = 0 + dsub.VisitParents(add) + if total != 2 { + t.Errorf("Should have visited 2 parent but visited %d", total) + } + + total = 0 + c.VisitParents(add) + if total != 0 { + t.Errorf("Should have not visited any parent but visited %d", total) + } +} + +func TestRunnableRootCommandNilInput(t *testing.T) { + var emptyArg []string + c := initializeWithRootCmd() + + buf := new(bytes.Buffer) + // Testing flag with invalid input + c.SetOutput(buf) + cmdEcho.AddCommand(cmdTimes) + c.AddCommand(cmdPrint, cmdEcho) + c.SetArgs(emptyArg) + + err := c.Execute() + if err != nil { + t.Errorf("Execute() failed with %v", err) + } + + if rootcalled != true { + t.Errorf("Root Function was not called") + } +} + +func TestRunnableRootCommandEmptyInput(t *testing.T) { + args := make([]string, 3) + args[0] = "" + args[1] = 
"--introot=12" + args[2] = "" + c := initializeWithRootCmd() + + buf := new(bytes.Buffer) + // Testing flag with invalid input + c.SetOutput(buf) + cmdEcho.AddCommand(cmdTimes) + c.AddCommand(cmdPrint, cmdEcho) + c.SetArgs(args) + + c.Execute() + + if rootcalled != true { + t.Errorf("Root Function was not called.\n\nOutput was:\n\n%s\n", buf) + } +} + +func TestInvalidSubcommandWhenArgsAllowed(t *testing.T) { + fullSetupTest("echo invalid-sub") + + if te[0] != "invalid-sub" { + t.Errorf("Subcommand didn't work...") + } +} + +func TestRootFlags(t *testing.T) { + fullSetupTest("-i 17 -b") + + if flagbr != true { + t.Errorf("flag value should be true, %v given", flagbr) + } + + if flagir != 17 { + t.Errorf("flag value should be 17, %d given", flagir) + } +} + +func TestRootHelp(t *testing.T) { + x := fullSetupTest("--help") + + checkResultContains(t, x, "Available Commands:") + checkResultContains(t, x, "for more information about a command") + + if strings.Contains(x.Output, "unknown flag: --help") { + t.Errorf("--help shouldn't trigger an error, Got: \n %s", x.Output) + } + + if strings.Contains(x.Output, cmdEcho.Use) { + t.Errorf("--help shouldn't display subcommand's usage, Got: \n %s", x.Output) + } + + x = fullSetupTest("echo --help") + + if strings.Contains(x.Output, cmdTimes.Use) { + t.Errorf("--help shouldn't display subsubcommand's usage, Got: \n %s", x.Output) + } + + checkResultContains(t, x, "Available Commands:") + checkResultContains(t, x, "for more information about a command") + + if strings.Contains(x.Output, "unknown flag: --help") { + t.Errorf("--help shouldn't trigger an error, Got: \n %s", x.Output) + } + +} + +func TestFlagAccess(t *testing.T) { + initialize() + + local := cmdTimes.LocalFlags() + inherited := cmdTimes.InheritedFlags() + + for _, f := range []string{"inttwo", "strtwo", "booltwo"} { + if local.Lookup(f) == nil { + t.Errorf("LocalFlags expected to contain %s, Got: nil", f) + } + } + if inherited.Lookup("strone") == nil { + 
t.Errorf("InheritedFlags expected to contain strone, Got: nil") + } + if inherited.Lookup("strtwo") != nil { + t.Errorf("InheritedFlags shouldn not contain overwritten flag strtwo") + + } +} + +func TestNoNRunnableRootCommandNilInput(t *testing.T) { + var args []string + c := initialize() + + buf := new(bytes.Buffer) + // Testing flag with invalid input + c.SetOutput(buf) + cmdEcho.AddCommand(cmdTimes) + c.AddCommand(cmdPrint, cmdEcho) + c.SetArgs(args) + + c.Execute() + + if !strings.Contains(buf.String(), cmdRootNoRun.Long) { + t.Errorf("Expected to get help output, Got: \n %s", buf) + } +} + +func TestRootNoCommandHelp(t *testing.T) { + x := rootOnlySetupTest("--help") + + checkResultOmits(t, x, "Available Commands:") + checkResultOmits(t, x, "for more information about a command") + + if strings.Contains(x.Output, "unknown flag: --help") { + t.Errorf("--help shouldn't trigger an error, Got: \n %s", x.Output) + } + + x = rootOnlySetupTest("echo --help") + + checkResultOmits(t, x, "Available Commands:") + checkResultOmits(t, x, "for more information about a command") + + if strings.Contains(x.Output, "unknown flag: --help") { + t.Errorf("--help shouldn't trigger an error, Got: \n %s", x.Output) + } +} + +func TestRootUnknownCommand(t *testing.T) { + r := noRRSetupTest("bogus") + s := "Error: unknown command \"bogus\" for \"cobra-test\"\nRun 'cobra-test --help' for usage.\n" + + if r.Output != s { + t.Errorf("Unexpected response.\nExpecting to be:\n %q\nGot:\n %q\n", s, r.Output) + } + + r = noRRSetupTest("--strtwo=a bogus") + if r.Output != s { + t.Errorf("Unexpected response.\nExpecting to be:\n %q\nGot:\n %q\n", s, r.Output) + } +} + +func TestRootUnknownCommandSilenced(t *testing.T) { + r := noRRSetupTestSilenced("bogus") + + if r.Output != "" { + t.Errorf("Unexpected response.\nExpecting to be: \n\"\"\n Got:\n %q\n", r.Output) + } + + r = noRRSetupTestSilenced("--strtwo=a bogus") + if r.Output != "" { + t.Errorf("Unexpected response.\nExpecting to 
be:\n\"\"\nGot:\n %q\n", r.Output) + } +} + +func TestRootSuggestions(t *testing.T) { + outputWithSuggestions := "Error: unknown command \"%s\" for \"cobra-test\"\n\nDid you mean this?\n\t%s\n\nRun 'cobra-test --help' for usage.\n" + outputWithoutSuggestions := "Error: unknown command \"%s\" for \"cobra-test\"\nRun 'cobra-test --help' for usage.\n" + + cmd := initializeWithRootCmd() + cmd.AddCommand(cmdTimes) + + tests := map[string]string{ + "time": "times", + "tiems": "times", + "tims": "times", + "timeS": "times", + "rimes": "times", + "ti": "times", + "t": "times", + "timely": "times", + "ri": "", + "timezone": "", + "foo": "", + "counts": "times", + } + + for typo, suggestion := range tests { + for _, suggestionsDisabled := range []bool{false, true} { + cmd.DisableSuggestions = suggestionsDisabled + result := simpleTester(cmd, typo) + expected := "" + if len(suggestion) == 0 || suggestionsDisabled { + expected = fmt.Sprintf(outputWithoutSuggestions, typo) + } else { + expected = fmt.Sprintf(outputWithSuggestions, typo, suggestion) + } + if result.Output != expected { + t.Errorf("Unexpected response.\nExpecting to be:\n %q\nGot:\n %q\n", expected, result.Output) + } + } + } +} + +func TestFlagsBeforeCommand(t *testing.T) { + // short without space + x := fullSetupTest("-i10 echo") + if x.Error != nil { + t.Errorf("Valid Input shouldn't have errors, got:\n %q", x.Error) + } + + // short (int) with equals + // It appears that pflags doesn't support this... 
+ // Commenting out until support can be added + + //x = noRRSetupTest("echo -i=10") + //if x.Error != nil { + //t.Errorf("Valid Input shouldn't have errors, got:\n %s", x.Error) + //} + + // long with equals + x = noRRSetupTest("--intone=123 echo one two") + if x.Error != nil { + t.Errorf("Valid Input shouldn't have errors, got:\n %s", x.Error) + } + + // With parsing error properly reported + x = fullSetupTest("-i10E echo") + if !strings.Contains(x.Error.Error(), "invalid argument \"10E\" for i10E") { + t.Errorf("Wrong error message displayed, \n %s", x.Error) + } + + //With quotes + x = fullSetupTest("-s=\"walking\" echo") + if x.Error != nil { + t.Errorf("Valid Input shouldn't have errors, got:\n %q", x.Error) + } + + //With quotes and space + x = fullSetupTest("-s=\"walking fast\" echo") + if x.Error != nil { + t.Errorf("Valid Input shouldn't have errors, got:\n %q", x.Error) + } + + //With inner quote + x = fullSetupTest("-s=\"walking \\\"Inner Quote\\\" fast\" echo") + if x.Error != nil { + t.Errorf("Valid Input shouldn't have errors, got:\n %q", x.Error) + } + + //With quotes and space + x = fullSetupTest("-s=\"walking \\\"Inner Quote\\\" fast\" echo") + if x.Error != nil { + t.Errorf("Valid Input shouldn't have errors, got:\n %q", x.Error) + } + +} + +func TestRemoveCommand(t *testing.T) { + versionUsed = 0 + c := initializeWithRootCmd() + c.AddCommand(cmdVersion1) + c.RemoveCommand(cmdVersion1) + x := fullTester(c, "version") + if x.Error == nil { + t.Errorf("Removed command should not have been called\n") + return + } +} + +func TestCommandWithoutSubcommands(t *testing.T) { + c := initializeWithRootCmd() + + x := simpleTester(c, "") + if x.Error != nil { + t.Errorf("Calling command without subcommands should not have error: %v", x.Error) + return + } +} + +func TestCommandWithoutSubcommandsWithArg(t *testing.T) { + c := initializeWithRootCmd() + expectedArgs := []string{"arg"} + + x := simpleTester(c, "arg") + if x.Error != nil { + t.Errorf("Calling 
command without subcommands but with arg should not have error: %v", x.Error) + return + } + if !reflect.DeepEqual(expectedArgs, tr) { + t.Errorf("Calling command without subcommands but with arg has wrong args: expected: %v, actual: %v", expectedArgs, tr) + return + } +} + +func TestReplaceCommandWithRemove(t *testing.T) { + versionUsed = 0 + c := initializeWithRootCmd() + c.AddCommand(cmdVersion1) + c.RemoveCommand(cmdVersion1) + c.AddCommand(cmdVersion2) + x := fullTester(c, "version") + if x.Error != nil { + t.Errorf("Valid Input shouldn't have errors, got:\n %q", x.Error) + return + } + if versionUsed == 1 { + t.Errorf("Removed command shouldn't be called\n") + } + if versionUsed != 2 { + t.Errorf("Replacing command should have been called but didn't\n") + } +} + +func TestDeprecatedSub(t *testing.T) { + c := fullSetupTest("deprecated") + + checkResultContains(t, c, cmdDeprecated.Deprecated) +} + +func TestPreRun(t *testing.T) { + noRRSetupTest("echo one two") + if echoPre == nil || echoPersPre == nil { + t.Error("PreRun or PersistentPreRun not called") + } + if rootPersPre != nil || timesPersPre != nil { + t.Error("Wrong *Pre functions called!") + } + + noRRSetupTest("echo times one two") + if timesPersPre == nil { + t.Error("PreRun or PersistentPreRun not called") + } + if echoPre != nil || echoPersPre != nil || rootPersPre != nil { + t.Error("Wrong *Pre functions called!") + } + + noRRSetupTest("print one two") + if rootPersPre == nil { + t.Error("Parent PersistentPreRun not called but should not have been") + } + if echoPre != nil || echoPersPre != nil || timesPersPre != nil { + t.Error("Wrong *Pre functions called!") + } +} + +// Check if cmdEchoSub gets PersistentPreRun from rootCmd even if is added last +func TestPeristentPreRunPropagation(t *testing.T) { + rootCmd := initialize() + + // First add the cmdEchoSub to cmdPrint + cmdPrint.AddCommand(cmdEchoSub) + // Now add cmdPrint to rootCmd + rootCmd.AddCommand(cmdPrint) + + 
rootCmd.SetArgs(strings.Split("print echosub lala", " ")) + rootCmd.Execute() + + if rootPersPre == nil || len(rootPersPre) == 0 || rootPersPre[0] != "lala" { + t.Error("RootCmd PersistentPreRun not called but should have been") + } +} + +func TestGlobalNormFuncPropagation(t *testing.T) { + normFunc := func(f *pflag.FlagSet, name string) pflag.NormalizedName { + return pflag.NormalizedName(name) + } + + rootCmd := initialize() + rootCmd.SetGlobalNormalizationFunc(normFunc) + if reflect.ValueOf(normFunc) != reflect.ValueOf(rootCmd.GlobalNormalizationFunc()) { + t.Error("rootCmd seems to have a wrong normalization function") + } + + // First add the cmdEchoSub to cmdPrint + cmdPrint.AddCommand(cmdEchoSub) + if cmdPrint.GlobalNormalizationFunc() != nil && cmdEchoSub.GlobalNormalizationFunc() != nil { + t.Error("cmdPrint and cmdEchoSub should had no normalization functions") + } + + // Now add cmdPrint to rootCmd + rootCmd.AddCommand(cmdPrint) + if reflect.ValueOf(cmdPrint.GlobalNormalizationFunc()).Pointer() != reflect.ValueOf(rootCmd.GlobalNormalizationFunc()).Pointer() || + reflect.ValueOf(cmdEchoSub.GlobalNormalizationFunc()).Pointer() != reflect.ValueOf(rootCmd.GlobalNormalizationFunc()).Pointer() { + t.Error("cmdPrint and cmdEchoSub should had the normalization function of rootCmd") + } +} + +func TestFlagOnPflagCommandLine(t *testing.T) { + flagName := "flagOnCommandLine" + pflag.CommandLine.String(flagName, "", "about my flag") + r := fullSetupTest("--help") + + checkResultContains(t, r, flagName) +} + +func TestAddTemplateFunctions(t *testing.T) { + AddTemplateFunc("t", func() bool { return true }) + AddTemplateFuncs(template.FuncMap{ + "f": func() bool { return false }, + "h": func() string { return "Hello," }, + "w": func() string { return "world." }}) + + const usage = "Hello, world." 
+ + c := &Command{} + c.SetUsageTemplate(`{{if t}}{{h}}{{end}}{{if f}}{{h}}{{end}} {{w}}`) + + if us := c.UsageString(); us != usage { + t.Errorf("c.UsageString() != \"%s\", is \"%s\"", usage, us) + } +} + +func TestUsageIsNotPrintedTwice(t *testing.T) { + var cmd = &Command{Use: "root"} + var sub = &Command{Use: "sub"} + cmd.AddCommand(sub) + + r := simpleTester(cmd, "") + if strings.Count(r.Output, "Usage:") != 1 { + t.Error("Usage output is not printed exactly once") + } +} diff --git a/src/vendor/github.com/spf13/cobra/command.go b/src/vendor/github.com/spf13/cobra/command.go new file mode 100644 index 00000000..9ae98369 --- /dev/null +++ b/src/vendor/github.com/spf13/cobra/command.go @@ -0,0 +1,1257 @@ +// Copyright © 2013 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//Package cobra is a commander providing a simple interface to create powerful modern CLI interfaces. +//In addition to providing an interface, Cobra simultaneously provides a controller to organize your application code. +package cobra + +import ( + "bytes" + "fmt" + "io" + "os" + "path/filepath" + "sort" + "strings" + + flag "github.com/spf13/pflag" +) + +// Command is just that, a command for your application. +// eg. 'go run' ... 'run' is the command. Cobra requires +// you to define the usage and description as part of your command +// definition to ensure usability. +type Command struct { + // Name is the command name, usually the executable's name. 
+ name string + // The one-line usage message. + Use string + // An array of aliases that can be used instead of the first word in Use. + Aliases []string + // An array of command names for which this command will be suggested - similar to aliases but only suggests. + SuggestFor []string + // The short description shown in the 'help' output. + Short string + // The long message shown in the 'help ' output. + Long string + // Examples of how to use the command + Example string + // List of all valid non-flag arguments that are accepted in bash completions + ValidArgs []string + // List of aliases for ValidArgs. These are not suggested to the user in the bash + // completion, but accepted if entered manually. + ArgAliases []string + // Custom functions used by the bash autocompletion generator + BashCompletionFunction string + // Is this command deprecated and should print this string when used? + Deprecated string + // Is this command hidden and should NOT show up in the list of available commands? + Hidden bool + // Full set of flags + flags *flag.FlagSet + // Set of flags childrens of this command will inherit + pflags *flag.FlagSet + // Flags that are declared specifically by this command (not inherited). + lflags *flag.FlagSet + // SilenceErrors is an option to quiet errors down stream + SilenceErrors bool + // Silence Usage is an option to silence usage when an error occurs. + SilenceUsage bool + // The *Run functions are executed in the following order: + // * PersistentPreRun() + // * PreRun() + // * Run() + // * PostRun() + // * PersistentPostRun() + // All functions get the same args, the arguments after the command name + // PersistentPreRun: children of this command will inherit and execute + PersistentPreRun func(cmd *Command, args []string) + // PersistentPreRunE: PersistentPreRun but returns an error + PersistentPreRunE func(cmd *Command, args []string) error + // PreRun: children of this command will not inherit. 
+ PreRun func(cmd *Command, args []string) + // PreRunE: PreRun but returns an error + PreRunE func(cmd *Command, args []string) error + // Run: Typically the actual work function. Most commands will only implement this + Run func(cmd *Command, args []string) + // RunE: Run but returns an error + RunE func(cmd *Command, args []string) error + // PostRun: run after the Run command. + PostRun func(cmd *Command, args []string) + // PostRunE: PostRun but returns an error + PostRunE func(cmd *Command, args []string) error + // PersistentPostRun: children of this command will inherit and execute after PostRun + PersistentPostRun func(cmd *Command, args []string) + // PersistentPostRunE: PersistentPostRun but returns an error + PersistentPostRunE func(cmd *Command, args []string) error + // DisableAutoGenTag remove + DisableAutoGenTag bool + // Commands is the list of commands supported by this program. + commands []*Command + // Parent Command for this command + parent *Command + // max lengths of commands' string lengths for use in padding + commandsMaxUseLen int + commandsMaxCommandPathLen int + commandsMaxNameLen int + // is commands slice are sorted or not + commandsAreSorted bool + + flagErrorBuf *bytes.Buffer + + args []string // actual args parsed from flags + output *io.Writer // out writer if set in SetOutput(w) + usageFunc func(*Command) error // Usage can be defined by application + usageTemplate string // Can be defined by Application + helpTemplate string // Can be defined by Application + helpFunc func(*Command, []string) // Help can be defined by application + helpCommand *Command // The help command + // The global normalization function that we can use on every pFlag set and children commands + globNormFunc func(f *flag.FlagSet, name string) flag.NormalizedName + + // Disable the suggestions based on Levenshtein distance that go along with 'unknown command' messages + DisableSuggestions bool + // If displaying suggestions, allows to set the minimum 
levenshtein distance to display, must be > 0 + SuggestionsMinimumDistance int + + // Disable the flag parsing. If this is true all flags will be passed to the command as arguments. + DisableFlagParsing bool +} + +// os.Args[1:] by default, if desired, can be overridden +// particularly useful when testing. +func (c *Command) SetArgs(a []string) { + c.args = a +} + +// SetOutput sets the destination for usage and error messages. +// If output is nil, os.Stderr is used. +func (c *Command) SetOutput(output io.Writer) { + c.output = &output +} + +// Usage can be defined by application. +func (c *Command) SetUsageFunc(f func(*Command) error) { + c.usageFunc = f +} + +// Can be defined by Application. +func (c *Command) SetUsageTemplate(s string) { + c.usageTemplate = s +} + +// Can be defined by Application. +func (c *Command) SetHelpFunc(f func(*Command, []string)) { + c.helpFunc = f +} + +func (c *Command) SetHelpCommand(cmd *Command) { + c.helpCommand = cmd +} + +// Can be defined by Application. +func (c *Command) SetHelpTemplate(s string) { + c.helpTemplate = s +} + +// SetGlobalNormalizationFunc sets a normalization function to all flag sets and also to child commands. +// The user should not have a cyclic dependency on commands. 
+func (c *Command) SetGlobalNormalizationFunc(n func(f *flag.FlagSet, name string) flag.NormalizedName) { + c.Flags().SetNormalizeFunc(n) + c.PersistentFlags().SetNormalizeFunc(n) + c.globNormFunc = n + + for _, command := range c.commands { + command.SetGlobalNormalizationFunc(n) + } +} + +func (c *Command) OutOrStdout() io.Writer { + return c.getOut(os.Stdout) +} + +func (c *Command) OutOrStderr() io.Writer { + return c.getOut(os.Stderr) +} + +func (c *Command) getOut(def io.Writer) io.Writer { + if c.output != nil { + return *c.output + } + if c.HasParent() { + return c.parent.getOut(def) + } + return def +} + +// UsageFunc returns either the function set by SetUsageFunc for this command +// or a parent, or it returns a default usage function. +func (c *Command) UsageFunc() (f func(*Command) error) { + if c.usageFunc != nil { + return c.usageFunc + } + + if c.HasParent() { + return c.parent.UsageFunc() + } + return func(c *Command) error { + c.mergePersistentFlags() + err := tmpl(c.OutOrStderr(), c.UsageTemplate(), c) + if err != nil { + c.Println(err) + } + return err + } +} + +// Usage puts out the usage for the command. +// Used when a user provides invalid input. +// Can be defined by user by overriding UsageFunc. +func (c *Command) Usage() error { + return c.UsageFunc()(c) +} + +// HelpFunc returns either the function set by SetHelpFunc for this command +// or a parent, or it returns a function with default help behavior. +func (c *Command) HelpFunc() func(*Command, []string) { + cmd := c + for cmd != nil { + if cmd.helpFunc != nil { + return cmd.helpFunc + } + cmd = cmd.parent + } + return func(*Command, []string) { + c.mergePersistentFlags() + err := tmpl(c.OutOrStdout(), c.HelpTemplate(), c) + if err != nil { + c.Println(err) + } + } +} + +// Help puts out the help for the command. +// Used when a user calls help [command]. +// Can be defined by user by overriding HelpFunc. 
+func (c *Command) Help() error { + c.HelpFunc()(c, []string{}) + return nil +} + +func (c *Command) UsageString() string { + tmpOutput := c.output + bb := new(bytes.Buffer) + c.SetOutput(bb) + c.Usage() + c.output = tmpOutput + return bb.String() +} + +var minUsagePadding = 25 + +func (c *Command) UsagePadding() int { + if c.parent == nil || minUsagePadding > c.parent.commandsMaxUseLen { + return minUsagePadding + } + return c.parent.commandsMaxUseLen +} + +var minCommandPathPadding = 11 + +// +func (c *Command) CommandPathPadding() int { + if c.parent == nil || minCommandPathPadding > c.parent.commandsMaxCommandPathLen { + return minCommandPathPadding + } + return c.parent.commandsMaxCommandPathLen +} + +var minNamePadding = 11 + +func (c *Command) NamePadding() int { + if c.parent == nil || minNamePadding > c.parent.commandsMaxNameLen { + return minNamePadding + } + return c.parent.commandsMaxNameLen +} + +func (c *Command) UsageTemplate() string { + if c.usageTemplate != "" { + return c.usageTemplate + } + + if c.HasParent() { + return c.parent.UsageTemplate() + } + return `Usage:{{if .Runnable}} + {{if .HasAvailableFlags}}{{appendIfNotPresent .UseLine "[flags]"}}{{else}}{{.UseLine}}{{end}}{{end}}{{if .HasAvailableSubCommands}} + {{ .CommandPath}} [command]{{end}}{{if gt .Aliases 0}} + +Aliases: + {{.NameAndAliases}} +{{end}}{{if .HasExample}} + +Examples: +{{ .Example }}{{end}}{{ if .HasAvailableSubCommands}} + +Available Commands:{{range .Commands}}{{if .IsAvailableCommand}} + {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{ if .HasAvailableLocalFlags}} + +Flags: +{{.LocalFlags.FlagUsages | trimRightSpace}}{{end}}{{ if .HasAvailableInheritedFlags}} + +Global Flags: +{{.InheritedFlags.FlagUsages | trimRightSpace}}{{end}}{{if .HasHelpSubCommands}} + +Additional help topics:{{range .Commands}}{{if .IsHelpCommand}} + {{rpad .CommandPath .CommandPathPadding}} {{.Short}}{{end}}{{end}}{{end}}{{ if .HasAvailableSubCommands }} + +Use "{{.CommandPath}} 
[command] --help" for more information about a command.{{end}} +` +} + +func (c *Command) HelpTemplate() string { + if c.helpTemplate != "" { + return c.helpTemplate + } + + if c.HasParent() { + return c.parent.HelpTemplate() + } + return `{{with or .Long .Short }}{{. | trim}} + +{{end}}{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}` +} + +// Really only used when casting a command to a commander. +func (c *Command) resetChildrensParents() { + for _, x := range c.commands { + x.parent = c + } +} + +// Test if the named flag is a boolean flag. +func isBooleanFlag(name string, f *flag.FlagSet) bool { + flag := f.Lookup(name) + if flag == nil { + return false + } + return flag.Value.Type() == "bool" +} + +// Test if the named flag is a boolean flag. +func isBooleanShortFlag(name string, f *flag.FlagSet) bool { + result := false + f.VisitAll(func(f *flag.Flag) { + if f.Shorthand == name && f.Value.Type() == "bool" { + result = true + } + }) + return result +} + +func stripFlags(args []string, c *Command) []string { + if len(args) < 1 { + return args + } + c.mergePersistentFlags() + + commands := []string{} + + inQuote := false + inFlag := false + for _, y := range args { + if !inQuote { + switch { + case strings.HasPrefix(y, "\""): + inQuote = true + case strings.Contains(y, "=\""): + inQuote = true + case strings.HasPrefix(y, "--") && !strings.Contains(y, "="): + // TODO: this isn't quite right, we should really check ahead for 'true' or 'false' + inFlag = !isBooleanFlag(y[2:], c.Flags()) + case strings.HasPrefix(y, "-") && !strings.Contains(y, "=") && len(y) == 2 && !isBooleanShortFlag(y[1:], c.Flags()): + inFlag = true + case inFlag: + inFlag = false + case y == "": + // strip empty commands, as the go tests expect this to be ok.... 
+ case !strings.HasPrefix(y, "-"): + commands = append(commands, y) + inFlag = false + } + } + + if strings.HasSuffix(y, "\"") && !strings.HasSuffix(y, "\\\"") { + inQuote = false + } + } + + return commands +} + +// argsMinusFirstX removes only the first x from args. Otherwise, commands that look like +// openshift admin policy add-role-to-user admin my-user, lose the admin argument (arg[4]). +func argsMinusFirstX(args []string, x string) []string { + for i, y := range args { + if x == y { + ret := []string{} + ret = append(ret, args[:i]...) + ret = append(ret, args[i+1:]...) + return ret + } + } + return args +} + +// find the target command given the args and command tree +// Meant to be run on the highest node. Only searches down. +func (c *Command) Find(args []string) (*Command, []string, error) { + if c == nil { + return nil, nil, fmt.Errorf("Called find() on a nil Command") + } + + var innerfind func(*Command, []string) (*Command, []string) + + innerfind = func(c *Command, innerArgs []string) (*Command, []string) { + argsWOflags := stripFlags(innerArgs, c) + if len(argsWOflags) == 0 { + return c, innerArgs + } + nextSubCmd := argsWOflags[0] + matches := make([]*Command, 0) + for _, cmd := range c.commands { + if cmd.Name() == nextSubCmd || cmd.HasAlias(nextSubCmd) { // exact name or alias match + return innerfind(cmd, argsMinusFirstX(innerArgs, nextSubCmd)) + } + if EnablePrefixMatching { + if strings.HasPrefix(cmd.Name(), nextSubCmd) { // prefix match + matches = append(matches, cmd) + } + for _, x := range cmd.Aliases { + if strings.HasPrefix(x, nextSubCmd) { + matches = append(matches, cmd) + } + } + } + } + + // only accept a single prefix match - multiple matches would be ambiguous + if len(matches) == 1 { + return innerfind(matches[0], argsMinusFirstX(innerArgs, argsWOflags[0])) + } + + return c, innerArgs + } + + commandFound, a := innerfind(c, args) + argsWOflags := stripFlags(a, commandFound) + + // no subcommand, always take args + if 
!commandFound.HasSubCommands() { + return commandFound, a, nil + } + + // root command with subcommands, do subcommand checking + if commandFound == c && len(argsWOflags) > 0 { + suggestionsString := "" + if !c.DisableSuggestions { + if c.SuggestionsMinimumDistance <= 0 { + c.SuggestionsMinimumDistance = 2 + } + if suggestions := c.SuggestionsFor(argsWOflags[0]); len(suggestions) > 0 { + suggestionsString += "\n\nDid you mean this?\n" + for _, s := range suggestions { + suggestionsString += fmt.Sprintf("\t%v\n", s) + } + } + } + return commandFound, a, fmt.Errorf("unknown command %q for %q%s", argsWOflags[0], commandFound.CommandPath(), suggestionsString) + } + + return commandFound, a, nil +} + +func (c *Command) SuggestionsFor(typedName string) []string { + suggestions := []string{} + for _, cmd := range c.commands { + if cmd.IsAvailableCommand() { + levenshteinDistance := ld(typedName, cmd.Name(), true) + suggestByLevenshtein := levenshteinDistance <= c.SuggestionsMinimumDistance + suggestByPrefix := strings.HasPrefix(strings.ToLower(cmd.Name()), strings.ToLower(typedName)) + if suggestByLevenshtein || suggestByPrefix { + suggestions = append(suggestions, cmd.Name()) + } + for _, explicitSuggestion := range cmd.SuggestFor { + if strings.EqualFold(typedName, explicitSuggestion) { + suggestions = append(suggestions, cmd.Name()) + } + } + } + } + return suggestions +} + +func (c *Command) VisitParents(fn func(*Command)) { + var traverse func(*Command) *Command + + traverse = func(x *Command) *Command { + if x != c { + fn(x) + } + if x.HasParent() { + return traverse(x.parent) + } + return x + } + traverse(c) +} + +func (c *Command) Root() *Command { + var findRoot func(*Command) *Command + + findRoot = func(x *Command) *Command { + if x.HasParent() { + return findRoot(x.parent) + } + return x + } + + return findRoot(c) +} + +// ArgsLenAtDash will return the length of f.Args at the moment when a -- was +// found during arg parsing. 
This allows your program to know which args were +// before the -- and which came after. (Description from +// https://godoc.org/github.com/spf13/pflag#FlagSet.ArgsLenAtDash). +func (c *Command) ArgsLenAtDash() int { + return c.Flags().ArgsLenAtDash() +} + +func (c *Command) execute(a []string) (err error) { + if c == nil { + return fmt.Errorf("Called Execute() on a nil Command") + } + + if len(c.Deprecated) > 0 { + c.Printf("Command %q is deprecated, %s\n", c.Name(), c.Deprecated) + } + + // initialize help flag as the last point possible to allow for user + // overriding + c.initHelpFlag() + + err = c.ParseFlags(a) + if err != nil { + return err + } + // If help is called, regardless of other flags, return we want help + // Also say we need help if the command isn't runnable. + helpVal, err := c.Flags().GetBool("help") + if err != nil { + // should be impossible to get here as we always declare a help + // flag in initHelpFlag() + c.Println("\"help\" flag declared as non-bool. Please correct your code") + return err + } + + if helpVal || !c.Runnable() { + return flag.ErrHelp + } + + c.preRun() + + argWoFlags := c.Flags().Args() + if c.DisableFlagParsing { + argWoFlags = a + } + + for p := c; p != nil; p = p.Parent() { + if p.PersistentPreRunE != nil { + if err := p.PersistentPreRunE(c, argWoFlags); err != nil { + return err + } + break + } else if p.PersistentPreRun != nil { + p.PersistentPreRun(c, argWoFlags) + break + } + } + if c.PreRunE != nil { + if err := c.PreRunE(c, argWoFlags); err != nil { + return err + } + } else if c.PreRun != nil { + c.PreRun(c, argWoFlags) + } + + if c.RunE != nil { + if err := c.RunE(c, argWoFlags); err != nil { + return err + } + } else { + c.Run(c, argWoFlags) + } + if c.PostRunE != nil { + if err := c.PostRunE(c, argWoFlags); err != nil { + return err + } + } else if c.PostRun != nil { + c.PostRun(c, argWoFlags) + } + for p := c; p != nil; p = p.Parent() { + if p.PersistentPostRunE != nil { + if err := p.PersistentPostRunE(c, 
argWoFlags); err != nil { + return err + } + break + } else if p.PersistentPostRun != nil { + p.PersistentPostRun(c, argWoFlags) + break + } + } + + return nil +} + +func (c *Command) preRun() { + for _, x := range initializers { + x() + } +} + +func (c *Command) errorMsgFromParse() string { + s := c.flagErrorBuf.String() + + x := strings.Split(s, "\n") + + if len(x) > 0 { + return x[0] + } + return "" +} + +// Call execute to use the args (os.Args[1:] by default) +// and run through the command tree finding appropriate matches +// for commands and then corresponding flags. +func (c *Command) Execute() error { + _, err := c.ExecuteC() + return err +} + +func (c *Command) ExecuteC() (cmd *Command, err error) { + + // Regardless of what command execute is called on, run on Root only + if c.HasParent() { + return c.Root().ExecuteC() + } + + // windows hook + if preExecHookFn != nil { + preExecHookFn(c) + } + + // initialize help as the last point possible to allow for user + // overriding + c.initHelpCmd() + + var args []string + + // Workaround FAIL with "go test -v" or "cobra.test -test.v", see #155 + if c.args == nil && filepath.Base(os.Args[0]) != "cobra.test" { + args = os.Args[1:] + } else { + args = c.args + } + + cmd, flags, err := c.Find(args) + if err != nil { + // If found parse to a subcommand and then failed, talk about the subcommand + if cmd != nil { + c = cmd + } + if !c.SilenceErrors { + c.Println("Error:", err.Error()) + c.Printf("Run '%v --help' for usage.\n", c.CommandPath()) + } + return c, err + } + err = cmd.execute(flags) + if err != nil { + // Always show help if requested, even if SilenceErrors is in + // effect + if err == flag.ErrHelp { + cmd.HelpFunc()(cmd, args) + return cmd, nil + } + + // If root command has SilentErrors flagged, + // all subcommands should respect it + if !cmd.SilenceErrors && !c.SilenceErrors { + c.Println("Error:", err.Error()) + } + + // If root command has SilentUsage flagged, + // all subcommands should respect it 
+ if !cmd.SilenceUsage && !c.SilenceUsage { + c.Println(cmd.UsageString()) + } + return cmd, err + } + return cmd, nil +} + +func (c *Command) initHelpFlag() { + if c.Flags().Lookup("help") == nil { + c.Flags().BoolP("help", "h", false, "help for "+c.Name()) + } +} + +func (c *Command) initHelpCmd() { + if c.helpCommand == nil { + if !c.HasSubCommands() { + return + } + + c.helpCommand = &Command{ + Use: "help [command]", + Short: "Help about any command", + Long: `Help provides help for any command in the application. + Simply type ` + c.Name() + ` help [path to command] for full details.`, + PersistentPreRun: func(cmd *Command, args []string) {}, + PersistentPostRun: func(cmd *Command, args []string) {}, + + Run: func(c *Command, args []string) { + cmd, _, e := c.Root().Find(args) + if cmd == nil || e != nil { + c.Printf("Unknown help topic %#q.", args) + c.Root().Usage() + } else { + cmd.Help() + } + }, + } + } + c.AddCommand(c.helpCommand) +} + +// Used for testing. +func (c *Command) ResetCommands() { + c.commands = nil + c.helpCommand = nil +} + +// Sorts commands by their names. +type commandSorterByName []*Command + +func (c commandSorterByName) Len() int { return len(c) } +func (c commandSorterByName) Swap(i, j int) { c[i], c[j] = c[j], c[i] } +func (c commandSorterByName) Less(i, j int) bool { return c[i].Name() < c[j].Name() } + +// Commands returns a sorted slice of child commands. +func (c *Command) Commands() []*Command { + // do not sort commands if it already sorted or sorting was disabled + if EnableCommandSorting && !c.commandsAreSorted { + sort.Sort(commandSorterByName(c.commands)) + c.commandsAreSorted = true + } + return c.commands +} + +// AddCommand adds one or more commands to this parent command. 
+func (c *Command) AddCommand(cmds ...*Command) { + for i, x := range cmds { + if cmds[i] == c { + panic("Command can't be a child of itself") + } + cmds[i].parent = c + // update max lengths + usageLen := len(x.Use) + if usageLen > c.commandsMaxUseLen { + c.commandsMaxUseLen = usageLen + } + commandPathLen := len(x.CommandPath()) + if commandPathLen > c.commandsMaxCommandPathLen { + c.commandsMaxCommandPathLen = commandPathLen + } + nameLen := len(x.Name()) + if nameLen > c.commandsMaxNameLen { + c.commandsMaxNameLen = nameLen + } + // If global normalization function exists, update all children + if c.globNormFunc != nil { + x.SetGlobalNormalizationFunc(c.globNormFunc) + } + c.commands = append(c.commands, x) + c.commandsAreSorted = false + } +} + +// RemoveCommand removes one or more commands from a parent command. +func (c *Command) RemoveCommand(cmds ...*Command) { + commands := []*Command{} +main: + for _, command := range c.commands { + for _, cmd := range cmds { + if command == cmd { + command.parent = nil + continue main + } + } + commands = append(commands, command) + } + c.commands = commands + // recompute all lengths + c.commandsMaxUseLen = 0 + c.commandsMaxCommandPathLen = 0 + c.commandsMaxNameLen = 0 + for _, command := range c.commands { + usageLen := len(command.Use) + if usageLen > c.commandsMaxUseLen { + c.commandsMaxUseLen = usageLen + } + commandPathLen := len(command.CommandPath()) + if commandPathLen > c.commandsMaxCommandPathLen { + c.commandsMaxCommandPathLen = commandPathLen + } + nameLen := len(command.Name()) + if nameLen > c.commandsMaxNameLen { + c.commandsMaxNameLen = nameLen + } + } +} + +// Print is a convenience method to Print to the defined output, fallback to Stderr if not set. +func (c *Command) Print(i ...interface{}) { + fmt.Fprint(c.OutOrStderr(), i...) +} + +// Println is a convenience method to Println to the defined output, fallback to Stderr if not set. 
+func (c *Command) Println(i ...interface{}) { + str := fmt.Sprintln(i...) + c.Print(str) +} + +// Printf is a convenience method to Printf to the defined output, fallback to Stderr if not set. +func (c *Command) Printf(format string, i ...interface{}) { + str := fmt.Sprintf(format, i...) + c.Print(str) +} + +// CommandPath returns the full path to this command. +func (c *Command) CommandPath() string { + str := c.Name() + x := c + for x.HasParent() { + str = x.parent.Name() + " " + str + x = x.parent + } + return str +} + +// UseLine puts out the full usage for a given command (including parents). +func (c *Command) UseLine() string { + str := "" + if c.HasParent() { + str = c.parent.CommandPath() + " " + } + return str + c.Use +} + +// For use in determining which flags have been assigned to which commands +// and which persist. +func (c *Command) DebugFlags() { + c.Println("DebugFlags called on", c.Name()) + var debugflags func(*Command) + + debugflags = func(x *Command) { + if x.HasFlags() || x.HasPersistentFlags() { + c.Println(x.Name()) + } + if x.HasFlags() { + x.flags.VisitAll(func(f *flag.Flag) { + if x.HasPersistentFlags() { + if x.persistentFlag(f.Name) == nil { + c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [L]") + } else { + c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [LP]") + } + } else { + c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [L]") + } + }) + } + if x.HasPersistentFlags() { + x.pflags.VisitAll(func(f *flag.Flag) { + if x.HasFlags() { + if x.flags.Lookup(f.Name) == nil { + c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [P]") + } + } else { + c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [P]") + } + }) + } + c.Println(x.flagErrorBuf) + if x.HasSubCommands() { + for _, y := range x.commands { + debugflags(y) + } + } + } + + debugflags(c) +} + +// Name returns the command's 
name: the first word in the use line. +func (c *Command) Name() string { + if c.name != "" { + return c.name + } + name := c.Use + i := strings.Index(name, " ") + if i >= 0 { + name = name[:i] + } + return name +} + +// HasAlias determines if a given string is an alias of the command. +func (c *Command) HasAlias(s string) bool { + for _, a := range c.Aliases { + if a == s { + return true + } + } + return false +} + +func (c *Command) NameAndAliases() string { + return strings.Join(append([]string{c.Name()}, c.Aliases...), ", ") +} + +func (c *Command) HasExample() bool { + return len(c.Example) > 0 +} + +// Runnable determines if the command is itself runnable. +func (c *Command) Runnable() bool { + return c.Run != nil || c.RunE != nil +} + +// HasSubCommands determines if the command has children commands. +func (c *Command) HasSubCommands() bool { + return len(c.commands) > 0 +} + +// IsAvailableCommand determines if a command is available as a non-help command +// (this includes all non deprecated/hidden commands). +func (c *Command) IsAvailableCommand() bool { + if len(c.Deprecated) != 0 || c.Hidden { + return false + } + + if c.HasParent() && c.Parent().helpCommand == c { + return false + } + + if c.Runnable() || c.HasAvailableSubCommands() { + return true + } + + return false +} + +// IsHelpCommand determines if a command is a 'help' command; a help command is +// determined by the fact that it is NOT runnable/hidden/deprecated, and has no +// sub commands that are runnable/hidden/deprecated. 
+func (c *Command) IsHelpCommand() bool { + + // if a command is runnable, deprecated, or hidden it is not a 'help' command + if c.Runnable() || len(c.Deprecated) != 0 || c.Hidden { + return false + } + + // if any non-help sub commands are found, the command is not a 'help' command + for _, sub := range c.commands { + if !sub.IsHelpCommand() { + return false + } + } + + // the command either has no sub commands, or no non-help sub commands + return true +} + +// HasHelpSubCommands determines if a command has any available 'help' sub commands +// that need to be shown in the usage/help default template under 'additional help +// topics'. +func (c *Command) HasHelpSubCommands() bool { + + // return true on the first found available 'help' sub command + for _, sub := range c.commands { + if sub.IsHelpCommand() { + return true + } + } + + // the command either has no sub commands, or no available 'help' sub commands + return false +} + +// HasAvailableSubCommands determines if a command has available sub commands that +// need to be shown in the usage/help default template under 'available commands'. +func (c *Command) HasAvailableSubCommands() bool { + + // return true on the first found available (non deprecated/help/hidden) + // sub command + for _, sub := range c.commands { + if sub.IsAvailableCommand() { + return true + } + } + + // the command either has no sub comamnds, or no available (non deprecated/help/hidden) + // sub commands + return false +} + +// HasParent determines if the command is a child command. +func (c *Command) HasParent() bool { + return c.parent != nil +} + +// GlobalNormalizationFunc returns the global normalization function or nil if doesn't exists. +func (c *Command) GlobalNormalizationFunc() func(f *flag.FlagSet, name string) flag.NormalizedName { + return c.globNormFunc +} + +// Flage returns the complete FlagSet that applies +// to this command (local and persistent declared here and by all parents). 
+func (c *Command) Flags() *flag.FlagSet { + if c.flags == nil { + c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + if c.flagErrorBuf == nil { + c.flagErrorBuf = new(bytes.Buffer) + } + c.flags.SetOutput(c.flagErrorBuf) + } + return c.flags +} + +// LocalNonPersistentFlags are flags specific to this command which will NOT persist to subcommands. +func (c *Command) LocalNonPersistentFlags() *flag.FlagSet { + persistentFlags := c.PersistentFlags() + + out := flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.LocalFlags().VisitAll(func(f *flag.Flag) { + if persistentFlags.Lookup(f.Name) == nil { + out.AddFlag(f) + } + }) + return out +} + +// LocalFlags returns the local FlagSet specifically set in the current command. +func (c *Command) LocalFlags() *flag.FlagSet { + c.mergePersistentFlags() + + local := flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.lflags.VisitAll(func(f *flag.Flag) { + local.AddFlag(f) + }) + if !c.HasParent() { + flag.CommandLine.VisitAll(func(f *flag.Flag) { + if local.Lookup(f.Name) == nil { + local.AddFlag(f) + } + }) + } + return local +} + +// InheritedFlags returns all flags which were inherited from parents commands. +func (c *Command) InheritedFlags() *flag.FlagSet { + c.mergePersistentFlags() + + inherited := flag.NewFlagSet(c.Name(), flag.ContinueOnError) + local := c.LocalFlags() + + var rmerge func(x *Command) + + rmerge = func(x *Command) { + if x.HasPersistentFlags() { + x.PersistentFlags().VisitAll(func(f *flag.Flag) { + if inherited.Lookup(f.Name) == nil && local.Lookup(f.Name) == nil { + inherited.AddFlag(f) + } + }) + } + if x.HasParent() { + rmerge(x.parent) + } + } + + if c.HasParent() { + rmerge(c.parent) + } + + return inherited +} + +// NonInheritedFlags returns all flags which were not inherited from parent commands. +func (c *Command) NonInheritedFlags() *flag.FlagSet { + return c.LocalFlags() +} + +// PersistentFlags returns the persistent FlagSet specifically set in the current command. 
+func (c *Command) PersistentFlags() *flag.FlagSet { + if c.pflags == nil { + c.pflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + if c.flagErrorBuf == nil { + c.flagErrorBuf = new(bytes.Buffer) + } + c.pflags.SetOutput(c.flagErrorBuf) + } + return c.pflags +} + +// ResetFlags is used in testing. +func (c *Command) ResetFlags() { + c.flagErrorBuf = new(bytes.Buffer) + c.flagErrorBuf.Reset() + c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.flags.SetOutput(c.flagErrorBuf) + c.pflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.pflags.SetOutput(c.flagErrorBuf) +} + +// Does the command contain any flags (local plus persistent from the entire structure). +func (c *Command) HasFlags() bool { + return c.Flags().HasFlags() +} + +// Does the command contain persistent flags. +func (c *Command) HasPersistentFlags() bool { + return c.PersistentFlags().HasFlags() +} + +// Does the command has flags specifically declared locally. +func (c *Command) HasLocalFlags() bool { + return c.LocalFlags().HasFlags() +} + +// Does the command have flags inherited from its parent command. +func (c *Command) HasInheritedFlags() bool { + return c.InheritedFlags().HasFlags() +} + +// Does the command contain any flags (local plus persistent from the entire +// structure) which are not hidden or deprecated. +func (c *Command) HasAvailableFlags() bool { + return c.Flags().HasAvailableFlags() +} + +// Does the command contain persistent flags which are not hidden or deprecated. +func (c *Command) HasAvailablePersistentFlags() bool { + return c.PersistentFlags().HasAvailableFlags() +} + +// Does the command has flags specifically declared locally which are not hidden +// or deprecated. +func (c *Command) HasAvailableLocalFlags() bool { + return c.LocalFlags().HasAvailableFlags() +} + +// Does the command have flags inherited from its parent command which are +// not hidden or deprecated. 
+func (c *Command) HasAvailableInheritedFlags() bool { + return c.InheritedFlags().HasAvailableFlags() +} + +// Flag climbs up the command tree looking for matching flag. +func (c *Command) Flag(name string) (flag *flag.Flag) { + flag = c.Flags().Lookup(name) + + if flag == nil { + flag = c.persistentFlag(name) + } + + return +} + +// Recursively find matching persistent flag. +func (c *Command) persistentFlag(name string) (flag *flag.Flag) { + if c.HasPersistentFlags() { + flag = c.PersistentFlags().Lookup(name) + } + + if flag == nil && c.HasParent() { + flag = c.parent.persistentFlag(name) + } + return +} + +// ParseFlags parses persistent flag tree and local flags. +func (c *Command) ParseFlags(args []string) (err error) { + if c.DisableFlagParsing { + return nil + } + c.mergePersistentFlags() + err = c.Flags().Parse(args) + return +} + +// Parent returns a commands parent command. +func (c *Command) Parent() *Command { + return c.parent +} + +func (c *Command) mergePersistentFlags() { + var rmerge func(x *Command) + + // Save the set of local flags + if c.lflags == nil { + c.lflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + if c.flagErrorBuf == nil { + c.flagErrorBuf = new(bytes.Buffer) + } + c.lflags.SetOutput(c.flagErrorBuf) + addtolocal := func(f *flag.Flag) { + c.lflags.AddFlag(f) + } + c.Flags().VisitAll(addtolocal) + c.PersistentFlags().VisitAll(addtolocal) + } + rmerge = func(x *Command) { + if !x.HasParent() { + flag.CommandLine.VisitAll(func(f *flag.Flag) { + if x.PersistentFlags().Lookup(f.Name) == nil { + x.PersistentFlags().AddFlag(f) + } + }) + } + if x.HasPersistentFlags() { + x.PersistentFlags().VisitAll(func(f *flag.Flag) { + if c.Flags().Lookup(f.Name) == nil { + c.Flags().AddFlag(f) + } + }) + } + if x.HasParent() { + rmerge(x.parent) + } + } + + rmerge(c) +} diff --git a/src/vendor/github.com/spf13/cobra/command_notwin.go b/src/vendor/github.com/spf13/cobra/command_notwin.go new file mode 100644 index 00000000..6159c1cc --- 
/dev/null +++ b/src/vendor/github.com/spf13/cobra/command_notwin.go @@ -0,0 +1,5 @@ +// +build !windows + +package cobra + +var preExecHookFn func(*Command) diff --git a/src/vendor/github.com/spf13/cobra/command_test.go b/src/vendor/github.com/spf13/cobra/command_test.go new file mode 100644 index 00000000..b0c17e29 --- /dev/null +++ b/src/vendor/github.com/spf13/cobra/command_test.go @@ -0,0 +1,176 @@ +package cobra + +import ( + "os" + "reflect" + "testing" +) + +// test to ensure hidden commands run as intended +func TestHiddenCommandExecutes(t *testing.T) { + + // ensure that outs does not already equal what the command will be setting it + // to, if it did this test would not actually be testing anything... + if outs == "hidden" { + t.Errorf("outs should NOT EQUAL hidden") + } + + cmdHidden.Execute() + + // upon running the command, the value of outs should now be 'hidden' + if outs != "hidden" { + t.Errorf("Hidden command failed to run!") + } +} + +// test to ensure hidden commands do not show up in usage/help text +func TestHiddenCommandIsHidden(t *testing.T) { + if cmdHidden.IsAvailableCommand() { + t.Errorf("Hidden command found!") + } +} + +func TestStripFlags(t *testing.T) { + tests := []struct { + input []string + output []string + }{ + { + []string{"foo", "bar"}, + []string{"foo", "bar"}, + }, + { + []string{"foo", "--bar", "-b"}, + []string{"foo"}, + }, + { + []string{"-b", "foo", "--bar", "bar"}, + []string{}, + }, + { + []string{"-i10", "echo"}, + []string{"echo"}, + }, + { + []string{"-i=10", "echo"}, + []string{"echo"}, + }, + { + []string{"--int=100", "echo"}, + []string{"echo"}, + }, + { + []string{"-ib", "echo", "-bfoo", "baz"}, + []string{"echo", "baz"}, + }, + { + []string{"-i=baz", "bar", "-i", "foo", "blah"}, + []string{"bar", "blah"}, + }, + { + []string{"--int=baz", "-bbar", "-i", "foo", "blah"}, + []string{"blah"}, + }, + { + []string{"--cat", "bar", "-i", "foo", "blah"}, + []string{"bar", "blah"}, + }, + { + []string{"-c", "bar", "-i", 
"foo", "blah"}, + []string{"bar", "blah"}, + }, + { + []string{"--persist", "bar"}, + []string{"bar"}, + }, + { + []string{"-p", "bar"}, + []string{"bar"}, + }, + } + + cmdPrint := &Command{ + Use: "print [string to print]", + Short: "Print anything to the screen", + Long: `an utterly useless command for testing.`, + Run: func(cmd *Command, args []string) { + tp = args + }, + } + + var flagi int + var flagstr string + var flagbool bool + cmdPrint.PersistentFlags().BoolVarP(&flagbool, "persist", "p", false, "help for persistent one") + cmdPrint.Flags().IntVarP(&flagi, "int", "i", 345, "help message for flag int") + cmdPrint.Flags().StringVarP(&flagstr, "bar", "b", "bar", "help message for flag string") + cmdPrint.Flags().BoolVarP(&flagbool, "cat", "c", false, "help message for flag bool") + + for _, test := range tests { + output := stripFlags(test.input, cmdPrint) + if !reflect.DeepEqual(test.output, output) { + t.Errorf("expected: %v, got: %v", test.output, output) + } + } +} + +func Test_DisableFlagParsing(t *testing.T) { + as := []string{"-v", "-race", "-file", "foo.go"} + targs := []string{} + cmdPrint := &Command{ + DisableFlagParsing: true, + Run: func(cmd *Command, args []string) { + targs = args + }, + } + osargs := []string{"cmd"} + os.Args = append(osargs, as...) 
+ err := cmdPrint.Execute() + if err != nil { + t.Error(err) + } + if !reflect.DeepEqual(as, targs) { + t.Errorf("expected: %v, got: %v", as, targs) + } +} + +func TestCommandsAreSorted(t *testing.T) { + EnableCommandSorting = true + + originalNames := []string{"middle", "zlast", "afirst"} + expectedNames := []string{"afirst", "middle", "zlast"} + + var tmpCommand = &Command{Use: "tmp"} + + for _, name := range originalNames { + tmpCommand.AddCommand(&Command{Use: name}) + } + + for i, c := range tmpCommand.Commands() { + if expectedNames[i] != c.Name() { + t.Errorf("expected: %s, got: %s", expectedNames[i], c.Name()) + } + } + + EnableCommandSorting = true +} + +func TestEnableCommandSortingIsDisabled(t *testing.T) { + EnableCommandSorting = false + + originalNames := []string{"middle", "zlast", "afirst"} + + var tmpCommand = &Command{Use: "tmp"} + + for _, name := range originalNames { + tmpCommand.AddCommand(&Command{Use: name}) + } + + for i, c := range tmpCommand.Commands() { + if originalNames[i] != c.Name() { + t.Errorf("expected: %s, got: %s", originalNames[i], c.Name()) + } + } + + EnableCommandSorting = true +} diff --git a/src/vendor/github.com/spf13/cobra/command_win.go b/src/vendor/github.com/spf13/cobra/command_win.go new file mode 100644 index 00000000..4b0eaa1b --- /dev/null +++ b/src/vendor/github.com/spf13/cobra/command_win.go @@ -0,0 +1,26 @@ +// +build windows + +package cobra + +import ( + "os" + "time" + + "github.com/inconshreveable/mousetrap" +) + +var preExecHookFn = preExecHook + +// enables an information splash screen on Windows if the CLI is started from explorer.exe. +var MousetrapHelpText string = `This is a command line tool + +You need to open cmd.exe and run it from there. 
+` + +func preExecHook(c *Command) { + if mousetrap.StartedByExplorer() { + c.Print(MousetrapHelpText) + time.Sleep(5 * time.Second) + os.Exit(1) + } +} diff --git a/src/vendor/github.com/spf13/cobra/doc/cmd_test.go b/src/vendor/github.com/spf13/cobra/doc/cmd_test.go new file mode 100644 index 00000000..a4b5568f --- /dev/null +++ b/src/vendor/github.com/spf13/cobra/doc/cmd_test.go @@ -0,0 +1,145 @@ +package doc + +import ( + "bytes" + "fmt" + "runtime" + "strings" + "testing" + + "github.com/spf13/cobra" +) + +var flagb1, flagb2, flagb3, flagbr, flagbp bool +var flags1, flags2a, flags2b, flags3 string +var flagi1, flagi2, flagi3, flagir int + +const strtwoParentHelp = "help message for parent flag strtwo" +const strtwoChildHelp = "help message for child flag strtwo" + +var cmdEcho = &cobra.Command{ + Use: "echo [string to echo]", + Aliases: []string{"say"}, + Short: "Echo anything to the screen", + Long: `an utterly useless command for testing.`, + Example: "Just run cobra-test echo", +} + +var cmdEchoSub = &cobra.Command{ + Use: "echosub [string to print]", + Short: "second sub command for echo", + Long: `an absolutely utterly useless command for testing gendocs!.`, + Run: func(cmd *cobra.Command, args []string) {}, +} + +var cmdDeprecated = &cobra.Command{ + Use: "deprecated [can't do anything here]", + Short: "A command which is deprecated", + Long: `an absolutely utterly useless command for testing deprecation!.`, + Deprecated: "Please use echo instead", +} + +var cmdTimes = &cobra.Command{ + Use: "times [# times] [string to echo]", + SuggestFor: []string{"counts"}, + Short: "Echo anything to the screen more times", + Long: `a slightly useless command for testing.`, + PersistentPreRun: func(cmd *cobra.Command, args []string) {}, + Run: func(cmd *cobra.Command, args []string) {}, +} + +var cmdPrint = &cobra.Command{ + Use: "print [string to print]", + Short: "Print anything to the screen", + Long: `an absolutely utterly useless command for testing.`, +} + +var 
cmdRootNoRun = &cobra.Command{ + Use: "cobra-test", + Short: "The root can run its own function", + Long: "The root description for help", +} + +var cmdRootSameName = &cobra.Command{ + Use: "print", + Short: "Root with the same name as a subcommand", + Long: "The root description for help", +} + +var cmdRootWithRun = &cobra.Command{ + Use: "cobra-test", + Short: "The root can run its own function", + Long: "The root description for help", +} + +var cmdSubNoRun = &cobra.Command{ + Use: "subnorun", + Short: "A subcommand without a Run function", + Long: "A long output about a subcommand without a Run function", +} + +var cmdVersion1 = &cobra.Command{ + Use: "version", + Short: "Print the version number", + Long: `First version of the version command`, +} + +var cmdVersion2 = &cobra.Command{ + Use: "version", + Short: "Print the version number", + Long: `Second version of the version command`, +} + +func flagInit() { + cmdEcho.ResetFlags() + cmdPrint.ResetFlags() + cmdTimes.ResetFlags() + cmdRootNoRun.ResetFlags() + cmdRootSameName.ResetFlags() + cmdRootWithRun.ResetFlags() + cmdSubNoRun.ResetFlags() + cmdRootNoRun.PersistentFlags().StringVarP(&flags2a, "strtwo", "t", "two", strtwoParentHelp) + cmdEcho.Flags().IntVarP(&flagi1, "intone", "i", 123, "help message for flag intone") + cmdTimes.Flags().IntVarP(&flagi2, "inttwo", "j", 234, "help message for flag inttwo") + cmdPrint.Flags().IntVarP(&flagi3, "intthree", "i", 345, "help message for flag intthree") + cmdEcho.PersistentFlags().StringVarP(&flags1, "strone", "s", "one", "help message for flag strone") + cmdEcho.PersistentFlags().BoolVarP(&flagbp, "persistentbool", "p", false, "help message for flag persistentbool") + cmdTimes.PersistentFlags().StringVarP(&flags2b, "strtwo", "t", "2", strtwoChildHelp) + cmdPrint.PersistentFlags().StringVarP(&flags3, "strthree", "s", "three", "help message for flag strthree") + cmdEcho.Flags().BoolVarP(&flagb1, "boolone", "b", true, "help message for flag boolone") + 
cmdTimes.Flags().BoolVarP(&flagb2, "booltwo", "c", false, "help message for flag booltwo") + cmdPrint.Flags().BoolVarP(&flagb3, "boolthree", "b", true, "help message for flag boolthree") + cmdVersion1.ResetFlags() + cmdVersion2.ResetFlags() +} + +func initializeWithRootCmd() *cobra.Command { + cmdRootWithRun.ResetCommands() + flagInit() + cmdRootWithRun.Flags().BoolVarP(&flagbr, "boolroot", "b", false, "help message for flag boolroot") + cmdRootWithRun.Flags().IntVarP(&flagir, "introot", "i", 321, "help message for flag introot") + return cmdRootWithRun +} + +func checkStringContains(t *testing.T, found, expected string) { + if !strings.Contains(found, expected) { + logErr(t, found, expected) + } +} + +func checkStringOmits(t *testing.T, found, expected string) { + if strings.Contains(found, expected) { + logErr(t, found, expected) + } +} + +func logErr(t *testing.T, found, expected string) { + out := new(bytes.Buffer) + + _, _, line, ok := runtime.Caller(2) + if ok { + fmt.Fprintf(out, "Line: %d ", line) + } + fmt.Fprintf(out, "Unexpected response.\nExpecting to contain: \n %q\nGot:\n %q\n", expected, found) + t.Errorf(out.String()) +} diff --git a/src/vendor/github.com/spf13/cobra/doc/man_docs.go b/src/vendor/github.com/spf13/cobra/doc/man_docs.go new file mode 100644 index 00000000..5798d0fb --- /dev/null +++ b/src/vendor/github.com/spf13/cobra/doc/man_docs.go @@ -0,0 +1,231 @@ +// Copyright 2015 Red Hat Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package doc + +import ( + "bytes" + "fmt" + "io" + "os" + "path/filepath" + "sort" + "strings" + "time" + + mangen "github.com/cpuguy83/go-md2man/md2man" + "github.com/spf13/cobra" + "github.com/spf13/pflag" +) + +// GenManTree will generate a man page for this command and all descendants +// in the directory given. The header may be nil. This function may not work +// correctly if your command names have - in them. If you have `cmd` with two +// subcmds, `sub` and `sub-third`. And `sub` has a subcommand called `third` +// it is undefined which help output will be in the file `cmd-sub-third.1`. +func GenManTree(cmd *cobra.Command, header *GenManHeader, dir string) error { + return GenManTreeFromOpts(cmd, GenManTreeOptions{ + Header: header, + Path: dir, + CommandSeparator: "_", + }) +} + +// GenManTreeFromOpts generates a man page for the command and all descendants. +// The pages are written to the opts.Path directory. 
+func GenManTreeFromOpts(cmd *cobra.Command, opts GenManTreeOptions) error { + header := opts.Header + if header == nil { + header = &GenManHeader{} + } + for _, c := range cmd.Commands() { + if !c.IsAvailableCommand() || c.IsHelpCommand() { + continue + } + if err := GenManTreeFromOpts(c, opts); err != nil { + return err + } + } + section := "1" + if header.Section != "" { + section = header.Section + } + + separator := "_" + if opts.CommandSeparator != "" { + separator = opts.CommandSeparator + } + basename := strings.Replace(cmd.CommandPath(), " ", separator, -1) + filename := filepath.Join(opts.Path, basename+"."+section) + f, err := os.Create(filename) + if err != nil { + return err + } + defer f.Close() + + headerCopy := *header + return GenMan(cmd, &headerCopy, f) +} + +type GenManTreeOptions struct { + Header *GenManHeader + Path string + CommandSeparator string +} + +// GenManHeader is a lot like the .TH header at the start of man pages. These +// include the title, section, date, source, and manual. We will use the +// current time if Date if unset and will use "Auto generated by spf13/cobra" +// if the Source is unset. +type GenManHeader struct { + Title string + Section string + Date *time.Time + date string + Source string + Manual string +} + +// GenMan will generate a man page for the given command and write it to +// w. The header argument may be nil, however obviously w may not. 
+func GenMan(cmd *cobra.Command, header *GenManHeader, w io.Writer) error { + if header == nil { + header = &GenManHeader{} + } + fillHeader(header, cmd.CommandPath()) + + b := genMan(cmd, header) + _, err := w.Write(mangen.Render(b)) + return err +} + +func fillHeader(header *GenManHeader, name string) { + if header.Title == "" { + header.Title = strings.ToUpper(strings.Replace(name, " ", "\\-", -1)) + } + if header.Section == "" { + header.Section = "1" + } + if header.Date == nil { + now := time.Now() + header.Date = &now + } + header.date = (*header.Date).Format("Jan 2006") + if header.Source == "" { + header.Source = "Auto generated by spf13/cobra" + } +} + +func manPreamble(out io.Writer, header *GenManHeader, cmd *cobra.Command, dashedName string) { + description := cmd.Long + if len(description) == 0 { + description = cmd.Short + } + + fmt.Fprintf(out, `%% %s(%s)%s +%% %s +%% %s +# NAME +`, header.Title, header.Section, header.date, header.Source, header.Manual) + fmt.Fprintf(out, "%s \\- %s\n\n", dashedName, cmd.Short) + fmt.Fprintf(out, "# SYNOPSIS\n") + fmt.Fprintf(out, "**%s**\n\n", cmd.UseLine()) + fmt.Fprintf(out, "# DESCRIPTION\n") + fmt.Fprintf(out, "%s\n\n", description) +} + +func manPrintFlags(out io.Writer, flags *pflag.FlagSet) { + flags.VisitAll(func(flag *pflag.Flag) { + if len(flag.Deprecated) > 0 || flag.Hidden { + return + } + format := "" + if len(flag.Shorthand) > 0 && len(flag.ShorthandDeprecated) == 0 { + format = fmt.Sprintf("**-%s**, **--%s**", flag.Shorthand, flag.Name) + } else { + format = fmt.Sprintf("**--%s**", flag.Name) + } + if len(flag.NoOptDefVal) > 0 { + format = format + "[" + } + if flag.Value.Type() == "string" { + // put quotes on the value + format = format + "=%q" + } else { + format = format + "=%s" + } + if len(flag.NoOptDefVal) > 0 { + format = format + "]" + } + format = format + "\n\t%s\n\n" + fmt.Fprintf(out, format, flag.DefValue, flag.Usage) + }) +} + +func manPrintOptions(out io.Writer, command 
*cobra.Command) { + flags := command.NonInheritedFlags() + if flags.HasFlags() { + fmt.Fprintf(out, "# OPTIONS\n") + manPrintFlags(out, flags) + fmt.Fprintf(out, "\n") + } + flags = command.InheritedFlags() + if flags.HasFlags() { + fmt.Fprintf(out, "# OPTIONS INHERITED FROM PARENT COMMANDS\n") + manPrintFlags(out, flags) + fmt.Fprintf(out, "\n") + } +} + +func genMan(cmd *cobra.Command, header *GenManHeader) []byte { + // something like `rootcmd-subcmd1-subcmd2` + dashCommandName := strings.Replace(cmd.CommandPath(), " ", "-", -1) + + buf := new(bytes.Buffer) + + manPreamble(buf, header, cmd, dashCommandName) + manPrintOptions(buf, cmd) + if len(cmd.Example) > 0 { + fmt.Fprintf(buf, "# EXAMPLE\n") + fmt.Fprintf(buf, "```\n%s\n```\n", cmd.Example) + } + if hasSeeAlso(cmd) { + fmt.Fprintf(buf, "# SEE ALSO\n") + seealsos := make([]string, 0) + if cmd.HasParent() { + parentPath := cmd.Parent().CommandPath() + dashParentPath := strings.Replace(parentPath, " ", "-", -1) + seealso := fmt.Sprintf("**%s(%s)**", dashParentPath, header.Section) + seealsos = append(seealsos, seealso) + cmd.VisitParents(func(c *cobra.Command) { + if c.DisableAutoGenTag { + cmd.DisableAutoGenTag = c.DisableAutoGenTag + } + }) + } + children := cmd.Commands() + sort.Sort(byName(children)) + for _, c := range children { + if !c.IsAvailableCommand() || c.IsHelpCommand() { + continue + } + seealso := fmt.Sprintf("**%s-%s(%s)**", dashCommandName, c.Name(), header.Section) + seealsos = append(seealsos, seealso) + } + fmt.Fprintf(buf, "%s\n", strings.Join(seealsos, ", ")) + } + if !cmd.DisableAutoGenTag { + fmt.Fprintf(buf, "# HISTORY\n%s Auto generated by spf13/cobra\n", header.Date.Format("2-Jan-2006")) + } + return buf.Bytes() +} diff --git a/src/vendor/github.com/spf13/cobra/doc/man_docs.md b/src/vendor/github.com/spf13/cobra/doc/man_docs.md new file mode 100644 index 00000000..5fe957a3 --- /dev/null +++ b/src/vendor/github.com/spf13/cobra/doc/man_docs.md @@ -0,0 +1,26 @@ +# Generating Man Pages 
For Your Own cobra.Command + +Generating man pages from a cobra command is incredibly easy. An example is as follows: + +```go +package main + +import ( + "github.com/spf13/cobra" + "github.com/spf13/cobra/doc" +) + +func main() { + cmd := &cobra.Command{ + Use: "test", + Short: "my test program", + } + header := &cobra.GenManHeader{ + Title: "MINE", + Section: "3", + } + doc.GenManTree(cmd, header, "/tmp") +} +``` + +That will get you a man page `/tmp/test.1` diff --git a/src/vendor/github.com/spf13/cobra/doc/man_docs_test.go b/src/vendor/github.com/spf13/cobra/doc/man_docs_test.go new file mode 100644 index 00000000..26b8fcc6 --- /dev/null +++ b/src/vendor/github.com/spf13/cobra/doc/man_docs_test.go @@ -0,0 +1,202 @@ +package doc + +import ( + "bufio" + "bytes" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/spf13/cobra" +) + +var _ = fmt.Println +var _ = os.Stderr + +func translate(in string) string { + return strings.Replace(in, "-", "\\-", -1) +} + +func TestGenManDoc(t *testing.T) { + c := initializeWithRootCmd() + // Need two commands to run the command alphabetical sort + cmdEcho.AddCommand(cmdTimes, cmdEchoSub, cmdDeprecated) + c.AddCommand(cmdPrint, cmdEcho) + cmdRootWithRun.PersistentFlags().StringVarP(&flags2a, "rootflag", "r", "two", strtwoParentHelp) + + out := new(bytes.Buffer) + + header := &GenManHeader{ + Title: "Project", + Section: "2", + } + // We generate on a subcommand so we have both subcommands and parents + if err := GenMan(cmdEcho, header, out); err != nil { + t.Fatal(err) + } + found := out.String() + + // Make sure parent has - in CommandPath() in SEE ALSO: + parentPath := cmdEcho.Parent().CommandPath() + dashParentPath := strings.Replace(parentPath, " ", "-", -1) + expected := translate(dashParentPath) + expected = expected + "(" + header.Section + ")" + checkStringContains(t, found, expected) + + // Our description + expected = translate(cmdEcho.Name()) + checkStringContains(t, found, expected) 
+ + // Better have our example + expected = translate(cmdEcho.Name()) + checkStringContains(t, found, expected) + + // A local flag + expected = "boolone" + checkStringContains(t, found, expected) + + // persistent flag on parent + expected = "rootflag" + checkStringContains(t, found, expected) + + // We better output info about our parent + expected = translate(cmdRootWithRun.Name()) + checkStringContains(t, found, expected) + + // And about subcommands + expected = translate(cmdEchoSub.Name()) + checkStringContains(t, found, expected) + + unexpected := translate(cmdDeprecated.Name()) + checkStringOmits(t, found, unexpected) + + // auto generated + expected = translate("Auto generated") + checkStringContains(t, found, expected) +} + +func TestGenManNoGenTag(t *testing.T) { + c := initializeWithRootCmd() + // Need two commands to run the command alphabetical sort + cmdEcho.AddCommand(cmdTimes, cmdEchoSub, cmdDeprecated) + c.AddCommand(cmdPrint, cmdEcho) + cmdRootWithRun.PersistentFlags().StringVarP(&flags2a, "rootflag", "r", "two", strtwoParentHelp) + cmdEcho.DisableAutoGenTag = true + out := new(bytes.Buffer) + + header := &GenManHeader{ + Title: "Project", + Section: "2", + } + // We generate on a subcommand so we have both subcommands and parents + if err := GenMan(cmdEcho, header, out); err != nil { + t.Fatal(err) + } + found := out.String() + + unexpected := translate("#HISTORY") + checkStringOmits(t, found, unexpected) +} + +func TestGenManSeeAlso(t *testing.T) { + noop := func(cmd *cobra.Command, args []string) {} + + top := &cobra.Command{Use: "top", Run: noop} + aaa := &cobra.Command{Use: "aaa", Run: noop, Hidden: true} // #229 + bbb := &cobra.Command{Use: "bbb", Run: noop} + ccc := &cobra.Command{Use: "ccc", Run: noop} + top.AddCommand(aaa, bbb, ccc) + + out := new(bytes.Buffer) + header := &GenManHeader{} + if err := GenMan(top, header, out); err != nil { + t.Fatal(err) + } + + scanner := bufio.NewScanner(out) + + if err := AssertLineFound(scanner, ".SH 
SEE ALSO"); err != nil { + t.Fatal(fmt.Errorf("Couldn't find SEE ALSO section header: %s", err.Error())) + } + + if err := AssertNextLineEquals(scanner, ".PP"); err != nil { + t.Fatal(fmt.Errorf("First line after SEE ALSO wasn't break-indent: %s", err.Error())) + } + + if err := AssertNextLineEquals(scanner, `\fBtop\-bbb(1)\fP, \fBtop\-ccc(1)\fP`); err != nil { + t.Fatal(fmt.Errorf("Second line after SEE ALSO wasn't correct: %s", err.Error())) + } +} + +func TestManPrintFlagsHidesShortDeperecated(t *testing.T) { + cmd := &cobra.Command{} + flags := cmd.Flags() + flags.StringP("foo", "f", "default", "Foo flag") + flags.MarkShorthandDeprecated("foo", "don't use it no more") + + out := new(bytes.Buffer) + manPrintFlags(out, flags) + + expected := "**--foo**=\"default\"\n\tFoo flag\n\n" + if out.String() != expected { + t.Fatalf("Expected %s, but got %s", expected, out.String()) + } +} + +func TestGenManTree(t *testing.T) { + cmd := &cobra.Command{ + Use: "do [OPTIONS] arg1 arg2", + } + header := &GenManHeader{Section: "2"} + tmpdir, err := ioutil.TempDir("", "test-gen-man-tree") + if err != nil { + t.Fatalf("Failed to create tempdir: %s", err.Error()) + } + defer os.RemoveAll(tmpdir) + + if err := GenManTree(cmd, header, tmpdir); err != nil { + t.Fatalf("GenManTree failed: %s", err.Error()) + } + + if _, err := os.Stat(filepath.Join(tmpdir, "do.2")); err != nil { + t.Fatalf("Expected file 'do.2' to exist") + } + + if header.Title != "" { + t.Fatalf("Expected header.Title to be unmodified") + } +} + +func AssertLineFound(scanner *bufio.Scanner, expectedLine string) error { + for scanner.Scan() { + line := scanner.Text() + if line == expectedLine { + return nil + } + } + + if err := scanner.Err(); err != nil { + return fmt.Errorf("AssertLineFound: scan failed: %s", err.Error()) + } + + return fmt.Errorf("AssertLineFound: hit EOF before finding %#v", expectedLine) +} + +func AssertNextLineEquals(scanner *bufio.Scanner, expectedLine string) error { + if scanner.Scan() { + 
line := scanner.Text() + if line == expectedLine { + return nil + } + return fmt.Errorf("AssertNextLineEquals: got %#v, not %#v", line, expectedLine) + } + + if err := scanner.Err(); err != nil { + return fmt.Errorf("AssertNextLineEquals: scan failed: %s", err.Error()) + } + + return fmt.Errorf("AssertNextLineEquals: hit EOF before finding %#v", expectedLine) +} diff --git a/src/vendor/github.com/spf13/cobra/doc/man_examples_test.go b/src/vendor/github.com/spf13/cobra/doc/man_examples_test.go new file mode 100644 index 00000000..3593853b --- /dev/null +++ b/src/vendor/github.com/spf13/cobra/doc/man_examples_test.go @@ -0,0 +1,35 @@ +package doc_test + +import ( + "bytes" + "fmt" + + "github.com/spf13/cobra" + "github.com/spf13/cobra/doc" +) + +func ExampleCommand_GenManTree() { + cmd := &cobra.Command{ + Use: "test", + Short: "my test program", + } + header := &doc.GenManHeader{ + Title: "MINE", + Section: "3", + } + doc.GenManTree(cmd, header, "/tmp") +} + +func ExampleCommand_GenMan() { + cmd := &cobra.Command{ + Use: "test", + Short: "my test program", + } + header := &doc.GenManHeader{ + Title: "MINE", + Section: "3", + } + out := new(bytes.Buffer) + doc.GenMan(cmd, header, out) + fmt.Print(out.String()) +} diff --git a/src/vendor/github.com/spf13/cobra/doc/md_docs.go b/src/vendor/github.com/spf13/cobra/doc/md_docs.go new file mode 100644 index 00000000..fa136318 --- /dev/null +++ b/src/vendor/github.com/spf13/cobra/doc/md_docs.go @@ -0,0 +1,175 @@ +//Copyright 2015 Red Hat Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package doc + +import ( + "fmt" + "io" + "os" + "path/filepath" + "sort" + "strings" + "time" + + "github.com/spf13/cobra" +) + +func printOptions(w io.Writer, cmd *cobra.Command, name string) error { + flags := cmd.NonInheritedFlags() + flags.SetOutput(w) + if flags.HasFlags() { + if _, err := fmt.Fprintf(w, "### Options\n\n```\n"); err != nil { + return err + } + flags.PrintDefaults() + if _, err := fmt.Fprintf(w, "```\n\n"); err != nil { + return err + } + } + + parentFlags := cmd.InheritedFlags() + parentFlags.SetOutput(w) + if parentFlags.HasFlags() { + if _, err := fmt.Fprintf(w, "### Options inherited from parent commands\n\n```\n"); err != nil { + return err + } + parentFlags.PrintDefaults() + if _, err := fmt.Fprintf(w, "```\n\n"); err != nil { + return err + } + } + return nil +} + +func GenMarkdown(cmd *cobra.Command, w io.Writer) error { + return GenMarkdownCustom(cmd, w, func(s string) string { return s }) +} + +func GenMarkdownCustom(cmd *cobra.Command, w io.Writer, linkHandler func(string) string) error { + name := cmd.CommandPath() + + short := cmd.Short + long := cmd.Long + if len(long) == 0 { + long = short + } + + if _, err := fmt.Fprintf(w, "## %s\n\n", name); err != nil { + return err + } + if _, err := fmt.Fprintf(w, "%s\n\n", short); err != nil { + return err + } + if _, err := fmt.Fprintf(w, "### Synopsis\n\n"); err != nil { + return err + } + if _, err := fmt.Fprintf(w, "\n%s\n\n", long); err != nil { + return err + } + + if cmd.Runnable() { + if _, err := fmt.Fprintf(w, "```\n%s\n```\n\n", cmd.UseLine()); err != nil { + return err + } + } + + if len(cmd.Example) > 0 { + if _, err := fmt.Fprintf(w, "### Examples\n\n"); err != nil { + return err + } + if _, err := fmt.Fprintf(w, "```\n%s\n```\n\n", cmd.Example); err != nil { + return err + } + } + + if err := printOptions(w, cmd, name); err != nil { + return err + } + if 
hasSeeAlso(cmd) { + if _, err := fmt.Fprintf(w, "### SEE ALSO\n"); err != nil { + return err + } + if cmd.HasParent() { + parent := cmd.Parent() + pname := parent.CommandPath() + link := pname + ".md" + link = strings.Replace(link, " ", "_", -1) + if _, err := fmt.Fprintf(w, "* [%s](%s)\t - %s\n", pname, linkHandler(link), parent.Short); err != nil { + return err + } + cmd.VisitParents(func(c *cobra.Command) { + if c.DisableAutoGenTag { + cmd.DisableAutoGenTag = c.DisableAutoGenTag + } + }) + } + + children := cmd.Commands() + sort.Sort(byName(children)) + + for _, child := range children { + if !child.IsAvailableCommand() || child.IsHelpCommand() { + continue + } + cname := name + " " + child.Name() + link := cname + ".md" + link = strings.Replace(link, " ", "_", -1) + if _, err := fmt.Fprintf(w, "* [%s](%s)\t - %s\n", cname, linkHandler(link), child.Short); err != nil { + return err + } + } + if _, err := fmt.Fprintf(w, "\n"); err != nil { + return err + } + } + if !cmd.DisableAutoGenTag { + if _, err := fmt.Fprintf(w, "###### Auto generated by spf13/cobra on %s\n", time.Now().Format("2-Jan-2006")); err != nil { + return err + } + } + return nil +} + +func GenMarkdownTree(cmd *cobra.Command, dir string) error { + identity := func(s string) string { return s } + emptyStr := func(s string) string { return "" } + return GenMarkdownTreeCustom(cmd, dir, emptyStr, identity) +} + +func GenMarkdownTreeCustom(cmd *cobra.Command, dir string, filePrepender, linkHandler func(string) string) error { + for _, c := range cmd.Commands() { + if !c.IsAvailableCommand() || c.IsHelpCommand() { + continue + } + if err := GenMarkdownTreeCustom(c, dir, filePrepender, linkHandler); err != nil { + return err + } + } + + basename := strings.Replace(cmd.CommandPath(), " ", "_", -1) + ".md" + filename := filepath.Join(dir, basename) + f, err := os.Create(filename) + if err != nil { + return err + } + defer f.Close() + + if _, err := io.WriteString(f, filePrepender(filename)); err != nil { + 
return err + } + if err := GenMarkdownCustom(cmd, f, linkHandler); err != nil { + return err + } + return nil +} diff --git a/src/vendor/github.com/spf13/cobra/doc/md_docs.md b/src/vendor/github.com/spf13/cobra/doc/md_docs.md new file mode 100644 index 00000000..0c3b96e2 --- /dev/null +++ b/src/vendor/github.com/spf13/cobra/doc/md_docs.md @@ -0,0 +1,104 @@ +# Generating Markdown Docs For Your Own cobra.Command + +Generating man pages from a cobra command is incredibly easy. An example is as follows: + +```go +package main + +import ( + "github.com/spf13/cobra" + "github.com/spf13/cobra/doc" +) + +func main() { + cmd := &cobra.Command{ + Use: "test", + Short: "my test program", + } + doc.GenMarkdownTree(cmd, "/tmp") +} +``` + +That will get you a Markdown document `/tmp/test.md` + +## Generate markdown docs for the entire command tree + +This program can actually generate docs for the kubectl command in the kubernetes project + +```go +package main + +import ( + "io/ioutil" + "os" + + kubectlcmd "k8s.io/kubernetes/pkg/kubectl/cmd" + cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" + + "github.com/spf13/cobra/doc" +) + +func main() { + cmd := kubectlcmd.NewKubectlCommand(cmdutil.NewFactory(nil), os.Stdin, ioutil.Discard, ioutil.Discard) + doc.GenMarkdownTree(cmd, "./") +} +``` + +This will generate a whole series of files, one for each command in the tree, in the directory specified (in this case "./") + +## Generate markdown docs for a single command + +You may wish to have more control over the output, or only generate for a single command, instead of the entire command tree. If this is the case you may prefer to `GenMarkdown` instead of `GenMarkdownTree` + +```go + out := new(bytes.Buffer) + doc.GenMarkdown(cmd, out) +``` + +This will write the markdown doc for ONLY "cmd" into the out, buffer. 
+ +## Customize the output + +Both `GenMarkdown` and `GenMarkdownTree` have alternate versions with callbacks to get some control of the output: + +```go +func GenMarkdownTreeCustom(cmd *Command, dir string, filePrepender, linkHandler func(string) string) error { + //... +} +``` + +```go +func GenMarkdownCustom(cmd *Command, out *bytes.Buffer, linkHandler func(string) string) error { + //... +} +``` + +The `filePrepender` will prepend the return value given the full filepath to the rendered Markdown file. A common use case is to add front matter to use the generated documentation with [Hugo](http://gohugo.io/): + +```go +const fmTemplate = `--- +date: %s +title: "%s" +slug: %s +url: %s +--- +` + +filePrepender := func(filename string) string { + now := time.Now().Format(time.RFC3339) + name := filepath.Base(filename) + base := strings.TrimSuffix(name, path.Ext(name)) + url := "/commands/" + strings.ToLower(base) + "/" + return fmt.Sprintf(fmTemplate, now, strings.Replace(base, "_", " ", -1), base, url) +} +``` + +The `linkHandler` can be used to customize the rendered internal links to the commands, given a filename: + +```go +linkHandler := func(name string) string { + base := strings.TrimSuffix(name, path.Ext(name)) + return "/commands/" + strings.ToLower(base) + "/" +} +``` + diff --git a/src/vendor/github.com/spf13/cobra/doc/md_docs_test.go b/src/vendor/github.com/spf13/cobra/doc/md_docs_test.go new file mode 100644 index 00000000..86ee0293 --- /dev/null +++ b/src/vendor/github.com/spf13/cobra/doc/md_docs_test.go @@ -0,0 +1,88 @@ +package doc + +import ( + "bytes" + "fmt" + "os" + "strings" + "testing" +) + +var _ = fmt.Println +var _ = os.Stderr + +func TestGenMdDoc(t *testing.T) { + c := initializeWithRootCmd() + // Need two commands to run the command alphabetical sort + cmdEcho.AddCommand(cmdTimes, cmdEchoSub, cmdDeprecated) + c.AddCommand(cmdPrint, cmdEcho) + cmdRootWithRun.PersistentFlags().StringVarP(&flags2a, "rootflag", "r", "two", strtwoParentHelp) + 
+ out := new(bytes.Buffer) + + // We generate on s subcommand so we have both subcommands and parents + if err := GenMarkdown(cmdEcho, out); err != nil { + t.Fatal(err) + } + found := out.String() + + // Our description + expected := cmdEcho.Long + if !strings.Contains(found, expected) { + t.Errorf("Unexpected response.\nExpecting to contain: \n %q\nGot:\n %q\n", expected, found) + } + + // Better have our example + expected = cmdEcho.Example + if !strings.Contains(found, expected) { + t.Errorf("Unexpected response.\nExpecting to contain: \n %q\nGot:\n %q\n", expected, found) + } + + // A local flag + expected = "boolone" + if !strings.Contains(found, expected) { + t.Errorf("Unexpected response.\nExpecting to contain: \n %q\nGot:\n %q\n", expected, found) + } + + // persistent flag on parent + expected = "rootflag" + if !strings.Contains(found, expected) { + t.Errorf("Unexpected response.\nExpecting to contain: \n %q\nGot:\n %q\n", expected, found) + } + + // We better output info about our parent + expected = cmdRootWithRun.Short + if !strings.Contains(found, expected) { + t.Errorf("Unexpected response.\nExpecting to contain: \n %q\nGot:\n %q\n", expected, found) + } + + // And about subcommands + expected = cmdEchoSub.Short + if !strings.Contains(found, expected) { + t.Errorf("Unexpected response.\nExpecting to contain: \n %q\nGot:\n %q\n", expected, found) + } + + unexpected := cmdDeprecated.Short + if strings.Contains(found, unexpected) { + t.Errorf("Unexpected response.\nFound: %v\nBut should not have!!\n", unexpected) + } +} + +func TestGenMdNoTag(t *testing.T) { + c := initializeWithRootCmd() + // Need two commands to run the command alphabetical sort + cmdEcho.AddCommand(cmdTimes, cmdEchoSub, cmdDeprecated) + c.AddCommand(cmdPrint, cmdEcho) + c.DisableAutoGenTag = true + cmdRootWithRun.PersistentFlags().StringVarP(&flags2a, "rootflag", "r", "two", strtwoParentHelp) + out := new(bytes.Buffer) + + if err := GenMarkdown(c, out); err != nil { + t.Fatal(err) + } 
+ found := out.String() + + unexpected := "Auto generated" + checkStringOmits(t, found, unexpected) + +} diff --git a/src/vendor/github.com/spf13/cobra/doc/util.go b/src/vendor/github.com/spf13/cobra/doc/util.go new file mode 100644 index 00000000..a1c6b89b --- /dev/null +++ b/src/vendor/github.com/spf13/cobra/doc/util.go @@ -0,0 +1,38 @@ +// Copyright 2015 Red Hat Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package doc + +import "github.com/spf13/cobra" + +// Test to see if we have a reason to print See Also information in docs +// Basically this is a test for a parent commend or a subcommand which is +// both not deprecated and not the autogenerated help command. 
+func hasSeeAlso(cmd *cobra.Command) bool { + if cmd.HasParent() { + return true + } + for _, c := range cmd.Commands() { + if !c.IsAvailableCommand() || c.IsHelpCommand() { + continue + } + return true + } + return false +} + +type byName []*cobra.Command + +func (s byName) Len() int { return len(s) } +func (s byName) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s byName) Less(i, j int) bool { return s[i].Name() < s[j].Name() } diff --git a/src/vendor/github.com/spf13/pflag/.travis.yml b/src/vendor/github.com/spf13/pflag/.travis.yml new file mode 100644 index 00000000..0a7c1362 --- /dev/null +++ b/src/vendor/github.com/spf13/pflag/.travis.yml @@ -0,0 +1,21 @@ +sudo: false + +language: go + +go: + - 1.5.4 + - 1.6.3 + - 1.7 + - tip + +matrix: + allow_failures: + - go: tip +install: + - go get github.com/golang/lint/golint + - export PATH=$GOPATH/bin:$PATH + - go install ./... + +script: + - verify/all.sh -v + - go test ./... diff --git a/src/vendor/github.com/spf13/pflag/LICENSE b/src/vendor/github.com/spf13/pflag/LICENSE new file mode 100644 index 00000000..63ed1cfe --- /dev/null +++ b/src/vendor/github.com/spf13/pflag/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2012 Alex Ogier. All rights reserved. +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/src/vendor/github.com/spf13/pflag/README.md b/src/vendor/github.com/spf13/pflag/README.md new file mode 100644 index 00000000..08ad9456 --- /dev/null +++ b/src/vendor/github.com/spf13/pflag/README.md @@ -0,0 +1,275 @@ +[![Build Status](https://travis-ci.org/spf13/pflag.svg?branch=master)](https://travis-ci.org/spf13/pflag) + +## Description + +pflag is a drop-in replacement for Go's flag package, implementing +POSIX/GNU-style --flags. + +pflag is compatible with the [GNU extensions to the POSIX recommendations +for command-line options][1]. For a more precise description, see the +"Command-line flag syntax" section below. + +[1]: http://www.gnu.org/software/libc/manual/html_node/Argument-Syntax.html + +pflag is available under the same style of BSD license as the Go language, +which can be found in the LICENSE file. + +## Installation + +pflag is available using the standard `go get` command. + +Install by running: + + go get github.com/spf13/pflag + +Run tests by running: + + go test github.com/spf13/pflag + +## Usage + +pflag is a drop-in replacement of Go's native flag package. If you import +pflag under the name "flag" then all code should continue to function +with no changes. 
+ +``` go +import flag "github.com/spf13/pflag" +``` + +There is one exception to this: if you directly instantiate the Flag struct +there is one more field "Shorthand" that you will need to set. +Most code never instantiates this struct directly, and instead uses +functions such as String(), BoolVar(), and Var(), and is therefore +unaffected. + +Define flags using flag.String(), Bool(), Int(), etc. + +This declares an integer flag, -flagname, stored in the pointer ip, with type *int. + +``` go +var ip *int = flag.Int("flagname", 1234, "help message for flagname") +``` + +If you like, you can bind the flag to a variable using the Var() functions. + +``` go +var flagvar int +func init() { + flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname") +} +``` + +Or you can create custom flags that satisfy the Value interface (with +pointer receivers) and couple them to flag parsing by + +``` go +flag.Var(&flagVal, "name", "help message for flagname") +``` + +For such flags, the default value is just the initial value of the variable. + +After all flags are defined, call + +``` go +flag.Parse() +``` + +to parse the command line into the defined flags. + +Flags may then be used directly. If you're using the flags themselves, +they are all pointers; if you bind to variables, they're values. + +``` go +fmt.Println("ip has value ", *ip) +fmt.Println("flagvar has value ", flagvar) +``` + +There are helpers function to get values later if you have the FlagSet but +it was difficult to keep up with all of the flag pointers in your code. +If you have a pflag.FlagSet with a flag called 'flagname' of type int you +can use GetInt() to get the int value. But notice that 'flagname' must exist +and it must be an int. GetString("flagname") will fail. + +``` go +i, err := flagset.GetInt("flagname") +``` + +After parsing, the arguments after the flag are available as the +slice flag.Args() or individually as flag.Arg(i). +The arguments are indexed from 0 through flag.NArg()-1. 
+ +The pflag package also defines some new functions that are not in flag, +that give one-letter shorthands for flags. You can use these by appending +'P' to the name of any function that defines a flag. + +``` go +var ip = flag.IntP("flagname", "f", 1234, "help message") +var flagvar bool +func init() { + flag.BoolVarP("boolname", "b", true, "help message") +} +flag.VarP(&flagVar, "varname", "v", 1234, "help message") +``` + +Shorthand letters can be used with single dashes on the command line. +Boolean shorthand flags can be combined with other shorthand flags. + +The default set of command-line flags is controlled by +top-level functions. The FlagSet type allows one to define +independent sets of flags, such as to implement subcommands +in a command-line interface. The methods of FlagSet are +analogous to the top-level functions for the command-line +flag set. + +## Setting no option default values for flags + +After you create a flag it is possible to set the pflag.NoOptDefVal for +the given flag. Doing this changes the meaning of the flag slightly. If +a flag has a NoOptDefVal and the flag is set on the command line without +an option the flag will be set to the NoOptDefVal. For example given: + +``` go +var ip = flag.IntP("flagname", "f", 1234, "help message") +flag.Lookup("flagname").NoOptDefVal = "4321" +``` + +Would result in something like + +| Parsed Arguments | Resulting Value | +| ------------- | ------------- | +| --flagname=1357 | ip=1357 | +| --flagname | ip=4321 | +| [nothing] | ip=1234 | + +## Command line flag syntax + +``` +--flag // boolean flags, or flags with no option default values +--flag x // only on flags without a default value +--flag=x +``` + +Unlike the flag package, a single dash before an option means something +different than a double dash. Single dashes signify a series of shorthand +letters for flags. 
All but the last shorthand letter must be boolean flags +or a flag with a default value + +``` +// boolean or flags where the 'no option default value' is set +-f +-f=true +-abc +but +-b true is INVALID + +// non-boolean and flags without a 'no option default value' +-n 1234 +-n=1234 +-n1234 + +// mixed +-abcs "hello" +-absd="hello" +-abcs1234 +``` + +Flag parsing stops after the terminator "--". Unlike the flag package, +flags can be interspersed with arguments anywhere on the command line +before this terminator. + +Integer flags accept 1234, 0664, 0x1234 and may be negative. +Boolean flags (in their long form) accept 1, 0, t, f, true, false, +TRUE, FALSE, True, False. +Duration flags accept any input valid for time.ParseDuration. + +## Mutating or "Normalizing" Flag names + +It is possible to set a custom flag name 'normalization function.' It allows flag names to be mutated both when created in the code and when used on the command line to some 'normalized' form. The 'normalized' form is used for comparison. Two examples of using the custom normalization func follow. + +**Example #1**: You want -, _, and . in flags to compare the same. aka --my-flag == --my_flag == --my.flag + +``` go +func wordSepNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName { + from := []string{"-", "_"} + to := "." + for _, sep := range from { + name = strings.Replace(name, sep, to, -1) + } + return pflag.NormalizedName(name) +} + +myFlagSet.SetNormalizeFunc(wordSepNormalizeFunc) +``` + +**Example #2**: You want to alias two flags. aka --old-flag-name == --new-flag-name + +``` go +func aliasNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName { + switch name { + case "old-flag-name": + name = "new-flag-name" + break + } + return pflag.NormalizedName(name) +} + +myFlagSet.SetNormalizeFunc(aliasNormalizeFunc) +``` + +## Deprecating a flag or its shorthand +It is possible to deprecate a flag, or just its shorthand. 
Deprecating a flag/shorthand hides it from help text and prints a usage message when the deprecated flag/shorthand is used. + +**Example #1**: You want to deprecate a flag named "badflag" as well as inform the users what flag they should use instead. +```go +// deprecate a flag by specifying its name and a usage message +flags.MarkDeprecated("badflag", "please use --good-flag instead") +``` +This hides "badflag" from help text, and prints `Flag --badflag has been deprecated, please use --good-flag instead` when "badflag" is used. + +**Example #2**: You want to keep a flag name "noshorthandflag" but deprecate its shortname "n". +```go +// deprecate a flag shorthand by specifying its flag name and a usage message +flags.MarkShorthandDeprecated("noshorthandflag", "please use --noshorthandflag only") +``` +This hides the shortname "n" from help text, and prints `Flag shorthand -n has been deprecated, please use --noshorthandflag only` when the shorthand "n" is used. + +Note that usage message is essential here, and it should not be empty. + +## Hidden flags +It is possible to mark a flag as hidden, meaning it will still function as normal, however will not show up in usage/help text. + +**Example**: You have a flag named "secretFlag" that you need for internal use only and don't want it showing up in help text, or for its usage text to be available. +```go +// hide a flag by specifying its name +flags.MarkHidden("secretFlag") +``` + +## Supporting Go flags when using pflag +In order to support flags defined using Go's `flag` package, they must be added to the `pflag` flagset. This is usually necessary +to support flags defined by third-party dependencies (e.g. `golang/glog`). 
+ +**Example**: You want to add the Go flags to the `CommandLine` flagset +```go +import ( + goflag "flag" + flag "github.com/spf13/pflag" +) + +var ip *int = flag.Int("flagname", 1234, "help message for flagname") + +func main() { + flag.CommandLine.AddGoFlagSet(goflag.CommandLine) + flag.Parse() +} +``` + +## More info + +You can see the full reference documentation of the pflag package +[at godoc.org][3], or through go's standard documentation system by +running `godoc -http=:6060` and browsing to +[http://localhost:6060/pkg/github.com/ogier/pflag][2] after +installation. + +[2]: http://localhost:6060/pkg/github.com/ogier/pflag +[3]: http://godoc.org/github.com/ogier/pflag diff --git a/src/vendor/github.com/spf13/pflag/bool.go b/src/vendor/github.com/spf13/pflag/bool.go new file mode 100644 index 00000000..c4c5c0bf --- /dev/null +++ b/src/vendor/github.com/spf13/pflag/bool.go @@ -0,0 +1,94 @@ +package pflag + +import "strconv" + +// optional interface to indicate boolean flags that can be +// supplied without "=value" text +type boolFlag interface { + Value + IsBoolFlag() bool +} + +// -- bool Value +type boolValue bool + +func newBoolValue(val bool, p *bool) *boolValue { + *p = val + return (*boolValue)(p) +} + +func (b *boolValue) Set(s string) error { + v, err := strconv.ParseBool(s) + *b = boolValue(v) + return err +} + +func (b *boolValue) Type() string { + return "bool" +} + +func (b *boolValue) String() string { return strconv.FormatBool(bool(*b)) } + +func (b *boolValue) IsBoolFlag() bool { return true } + +func boolConv(sval string) (interface{}, error) { + return strconv.ParseBool(sval) +} + +// GetBool return the bool value of a flag with the given name +func (f *FlagSet) GetBool(name string) (bool, error) { + val, err := f.getFlagType(name, "bool", boolConv) + if err != nil { + return false, err + } + return val.(bool), nil +} + +// BoolVar defines a bool flag with specified name, default value, and usage string. 
+// The argument p points to a bool variable in which to store the value of the flag. +func (f *FlagSet) BoolVar(p *bool, name string, value bool, usage string) { + f.BoolVarP(p, name, "", value, usage) +} + +// BoolVarP is like BoolVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) BoolVarP(p *bool, name, shorthand string, value bool, usage string) { + flag := f.VarPF(newBoolValue(value, p), name, shorthand, usage) + flag.NoOptDefVal = "true" +} + +// BoolVar defines a bool flag with specified name, default value, and usage string. +// The argument p points to a bool variable in which to store the value of the flag. +func BoolVar(p *bool, name string, value bool, usage string) { + BoolVarP(p, name, "", value, usage) +} + +// BoolVarP is like BoolVar, but accepts a shorthand letter that can be used after a single dash. +func BoolVarP(p *bool, name, shorthand string, value bool, usage string) { + flag := CommandLine.VarPF(newBoolValue(value, p), name, shorthand, usage) + flag.NoOptDefVal = "true" +} + +// Bool defines a bool flag with specified name, default value, and usage string. +// The return value is the address of a bool variable that stores the value of the flag. +func (f *FlagSet) Bool(name string, value bool, usage string) *bool { + return f.BoolP(name, "", value, usage) +} + +// BoolP is like Bool, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) BoolP(name, shorthand string, value bool, usage string) *bool { + p := new(bool) + f.BoolVarP(p, name, shorthand, value, usage) + return p +} + +// Bool defines a bool flag with specified name, default value, and usage string. +// The return value is the address of a bool variable that stores the value of the flag. +func Bool(name string, value bool, usage string) *bool { + return BoolP(name, "", value, usage) +} + +// BoolP is like Bool, but accepts a shorthand letter that can be used after a single dash. 
+func BoolP(name, shorthand string, value bool, usage string) *bool { + b := CommandLine.BoolP(name, shorthand, value, usage) + return b +} diff --git a/src/vendor/github.com/spf13/pflag/bool_test.go b/src/vendor/github.com/spf13/pflag/bool_test.go new file mode 100644 index 00000000..3e38a0f4 --- /dev/null +++ b/src/vendor/github.com/spf13/pflag/bool_test.go @@ -0,0 +1,180 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pflag + +import ( + "bytes" + "fmt" + "strconv" + "testing" +) + +// This value can be a boolean ("true", "false") or "maybe" +type triStateValue int + +const ( + triStateFalse triStateValue = 0 + triStateTrue triStateValue = 1 + triStateMaybe triStateValue = 2 +) + +const strTriStateMaybe = "maybe" + +func (v *triStateValue) IsBoolFlag() bool { + return true +} + +func (v *triStateValue) Get() interface{} { + return triStateValue(*v) +} + +func (v *triStateValue) Set(s string) error { + if s == strTriStateMaybe { + *v = triStateMaybe + return nil + } + boolVal, err := strconv.ParseBool(s) + if boolVal { + *v = triStateTrue + } else { + *v = triStateFalse + } + return err +} + +func (v *triStateValue) String() string { + if *v == triStateMaybe { + return strTriStateMaybe + } + return fmt.Sprintf("%v", bool(*v == triStateTrue)) +} + +// The type of the flag as required by the pflag.Value interface +func (v *triStateValue) Type() string { + return "version" +} + +func setUpFlagSet(tristate *triStateValue) *FlagSet { + f := NewFlagSet("test", ContinueOnError) + *tristate = triStateFalse + flag := f.VarPF(tristate, "tristate", "t", "tristate value (true, maybe or false)") + flag.NoOptDefVal = "true" + return f +} + +func TestExplicitTrue(t *testing.T) { + var tristate triStateValue + f := setUpFlagSet(&tristate) + err := f.Parse([]string{"--tristate=true"}) + if err != nil { + t.Fatal("expected no error; got", err) + } + if 
tristate != triStateTrue { + t.Fatal("expected", triStateTrue, "(triStateTrue) but got", tristate, "instead") + } +} + +func TestImplicitTrue(t *testing.T) { + var tristate triStateValue + f := setUpFlagSet(&tristate) + err := f.Parse([]string{"--tristate"}) + if err != nil { + t.Fatal("expected no error; got", err) + } + if tristate != triStateTrue { + t.Fatal("expected", triStateTrue, "(triStateTrue) but got", tristate, "instead") + } +} + +func TestShortFlag(t *testing.T) { + var tristate triStateValue + f := setUpFlagSet(&tristate) + err := f.Parse([]string{"-t"}) + if err != nil { + t.Fatal("expected no error; got", err) + } + if tristate != triStateTrue { + t.Fatal("expected", triStateTrue, "(triStateTrue) but got", tristate, "instead") + } +} + +func TestShortFlagExtraArgument(t *testing.T) { + var tristate triStateValue + f := setUpFlagSet(&tristate) + // The"maybe"turns into an arg, since short boolean options will only do true/false + err := f.Parse([]string{"-t", "maybe"}) + if err != nil { + t.Fatal("expected no error; got", err) + } + if tristate != triStateTrue { + t.Fatal("expected", triStateTrue, "(triStateTrue) but got", tristate, "instead") + } + args := f.Args() + if len(args) != 1 || args[0] != "maybe" { + t.Fatal("expected an extra 'maybe' argument to stick around") + } +} + +func TestExplicitMaybe(t *testing.T) { + var tristate triStateValue + f := setUpFlagSet(&tristate) + err := f.Parse([]string{"--tristate=maybe"}) + if err != nil { + t.Fatal("expected no error; got", err) + } + if tristate != triStateMaybe { + t.Fatal("expected", triStateMaybe, "(triStateMaybe) but got", tristate, "instead") + } +} + +func TestExplicitFalse(t *testing.T) { + var tristate triStateValue + f := setUpFlagSet(&tristate) + err := f.Parse([]string{"--tristate=false"}) + if err != nil { + t.Fatal("expected no error; got", err) + } + if tristate != triStateFalse { + t.Fatal("expected", triStateFalse, "(triStateFalse) but got", tristate, "instead") + } +} + +func 
TestImplicitFalse(t *testing.T) { + var tristate triStateValue + f := setUpFlagSet(&tristate) + err := f.Parse([]string{}) + if err != nil { + t.Fatal("expected no error; got", err) + } + if tristate != triStateFalse { + t.Fatal("expected", triStateFalse, "(triStateFalse) but got", tristate, "instead") + } +} + +func TestInvalidValue(t *testing.T) { + var tristate triStateValue + f := setUpFlagSet(&tristate) + var buf bytes.Buffer + f.SetOutput(&buf) + err := f.Parse([]string{"--tristate=invalid"}) + if err == nil { + t.Fatal("expected an error but did not get any, tristate has value", tristate) + } +} + +func TestBoolP(t *testing.T) { + b := BoolP("bool", "b", false, "bool value in CommandLine") + c := BoolP("c", "c", false, "other bool value") + args := []string{"--bool"} + if err := CommandLine.Parse(args); err != nil { + t.Error("expected no error, got ", err) + } + if *b != true { + t.Errorf("expected b=true got b=%v", *b) + } + if *c != false { + t.Errorf("expect c=false got c=%v", *c) + } +} diff --git a/src/vendor/github.com/spf13/pflag/count.go b/src/vendor/github.com/spf13/pflag/count.go new file mode 100644 index 00000000..d22be41f --- /dev/null +++ b/src/vendor/github.com/spf13/pflag/count.go @@ -0,0 +1,94 @@ +package pflag + +import "strconv" + +// -- count Value +type countValue int + +func newCountValue(val int, p *int) *countValue { + *p = val + return (*countValue)(p) +} + +func (i *countValue) Set(s string) error { + v, err := strconv.ParseInt(s, 0, 64) + // -1 means that no specific value was passed, so increment + if v == -1 { + *i = countValue(*i + 1) + } else { + *i = countValue(v) + } + return err +} + +func (i *countValue) Type() string { + return "count" +} + +func (i *countValue) String() string { return strconv.Itoa(int(*i)) } + +func countConv(sval string) (interface{}, error) { + i, err := strconv.Atoi(sval) + if err != nil { + return nil, err + } + return i, nil +} + +// GetCount return the int value of a flag with the given name +func 
(f *FlagSet) GetCount(name string) (int, error) { + val, err := f.getFlagType(name, "count", countConv) + if err != nil { + return 0, err + } + return val.(int), nil +} + +// CountVar defines a count flag with specified name, default value, and usage string. +// The argument p points to an int variable in which to store the value of the flag. +// A count flag will add 1 to its value evey time it is found on the command line +func (f *FlagSet) CountVar(p *int, name string, usage string) { + f.CountVarP(p, name, "", usage) +} + +// CountVarP is like CountVar only take a shorthand for the flag name. +func (f *FlagSet) CountVarP(p *int, name, shorthand string, usage string) { + flag := f.VarPF(newCountValue(0, p), name, shorthand, usage) + flag.NoOptDefVal = "-1" +} + +// CountVar like CountVar only the flag is placed on the CommandLine instead of a given flag set +func CountVar(p *int, name string, usage string) { + CommandLine.CountVar(p, name, usage) +} + +// CountVarP is like CountVar only take a shorthand for the flag name. +func CountVarP(p *int, name, shorthand string, usage string) { + CommandLine.CountVarP(p, name, shorthand, usage) +} + +// Count defines a count flag with specified name, default value, and usage string. +// The return value is the address of an int variable that stores the value of the flag. +// A count flag will add 1 to its value evey time it is found on the command line +func (f *FlagSet) Count(name string, usage string) *int { + p := new(int) + f.CountVarP(p, name, "", usage) + return p +} + +// CountP is like Count only takes a shorthand for the flag name. 
+func (f *FlagSet) CountP(name, shorthand string, usage string) *int { + p := new(int) + f.CountVarP(p, name, shorthand, usage) + return p +} + +// Count like Count only the flag is placed on the CommandLine isntead of a given flag set +func Count(name string, usage string) *int { + return CommandLine.CountP(name, "", usage) +} + +// CountP is like Count only takes a shorthand for the flag name. +func CountP(name, shorthand string, usage string) *int { + return CommandLine.CountP(name, shorthand, usage) +} diff --git a/src/vendor/github.com/spf13/pflag/count_test.go b/src/vendor/github.com/spf13/pflag/count_test.go new file mode 100644 index 00000000..716765cb --- /dev/null +++ b/src/vendor/github.com/spf13/pflag/count_test.go @@ -0,0 +1,55 @@ +package pflag + +import ( + "fmt" + "os" + "testing" +) + +var _ = fmt.Printf + +func setUpCount(c *int) *FlagSet { + f := NewFlagSet("test", ContinueOnError) + f.CountVarP(c, "verbose", "v", "a counter") + return f +} + +func TestCount(t *testing.T) { + testCases := []struct { + input []string + success bool + expected int + }{ + {[]string{"-vvv"}, true, 3}, + {[]string{"-v", "-v", "-v"}, true, 3}, + {[]string{"-v", "--verbose", "-v"}, true, 3}, + {[]string{"-v=3", "-v"}, true, 4}, + {[]string{"-v=a"}, false, 0}, + } + + devnull, _ := os.Open(os.DevNull) + os.Stderr = devnull + for i := range testCases { + var count int + f := setUpCount(&count) + + tc := &testCases[i] + + err := f.Parse(tc.input) + if err != nil && tc.success == true { + t.Errorf("expected success, got %q", err) + continue + } else if err == nil && tc.success == false { + t.Errorf("expected failure, got success") + continue + } else if tc.success { + c, err := f.GetCount("verbose") + if err != nil { + t.Errorf("Got error trying to fetch the counter flag") + } + if c != tc.expected { + t.Errorf("expected %q, got %q", tc.expected, c) + } + } + } +} diff --git a/src/vendor/github.com/spf13/pflag/duration.go b/src/vendor/github.com/spf13/pflag/duration.go new 
file mode 100644 index 00000000..e9debef8 --- /dev/null +++ b/src/vendor/github.com/spf13/pflag/duration.go @@ -0,0 +1,86 @@ +package pflag + +import ( + "time" +) + +// -- time.Duration Value +type durationValue time.Duration + +func newDurationValue(val time.Duration, p *time.Duration) *durationValue { + *p = val + return (*durationValue)(p) +} + +func (d *durationValue) Set(s string) error { + v, err := time.ParseDuration(s) + *d = durationValue(v) + return err +} + +func (d *durationValue) Type() string { + return "duration" +} + +func (d *durationValue) String() string { return (*time.Duration)(d).String() } + +func durationConv(sval string) (interface{}, error) { + return time.ParseDuration(sval) +} + +// GetDuration return the duration value of a flag with the given name +func (f *FlagSet) GetDuration(name string) (time.Duration, error) { + val, err := f.getFlagType(name, "duration", durationConv) + if err != nil { + return 0, err + } + return val.(time.Duration), nil +} + +// DurationVar defines a time.Duration flag with specified name, default value, and usage string. +// The argument p points to a time.Duration variable in which to store the value of the flag. +func (f *FlagSet) DurationVar(p *time.Duration, name string, value time.Duration, usage string) { + f.VarP(newDurationValue(value, p), name, "", usage) +} + +// DurationVarP is like DurationVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) DurationVarP(p *time.Duration, name, shorthand string, value time.Duration, usage string) { + f.VarP(newDurationValue(value, p), name, shorthand, usage) +} + +// DurationVar defines a time.Duration flag with specified name, default value, and usage string. +// The argument p points to a time.Duration variable in which to store the value of the flag. 
+func DurationVar(p *time.Duration, name string, value time.Duration, usage string) { + CommandLine.VarP(newDurationValue(value, p), name, "", usage) +} + +// DurationVarP is like DurationVar, but accepts a shorthand letter that can be used after a single dash. +func DurationVarP(p *time.Duration, name, shorthand string, value time.Duration, usage string) { + CommandLine.VarP(newDurationValue(value, p), name, shorthand, usage) +} + +// Duration defines a time.Duration flag with specified name, default value, and usage string. +// The return value is the address of a time.Duration variable that stores the value of the flag. +func (f *FlagSet) Duration(name string, value time.Duration, usage string) *time.Duration { + p := new(time.Duration) + f.DurationVarP(p, name, "", value, usage) + return p +} + +// DurationP is like Duration, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) DurationP(name, shorthand string, value time.Duration, usage string) *time.Duration { + p := new(time.Duration) + f.DurationVarP(p, name, shorthand, value, usage) + return p +} + +// Duration defines a time.Duration flag with specified name, default value, and usage string. +// The return value is the address of a time.Duration variable that stores the value of the flag. +func Duration(name string, value time.Duration, usage string) *time.Duration { + return CommandLine.DurationP(name, "", value, usage) +} + +// DurationP is like Duration, but accepts a shorthand letter that can be used after a single dash. +func DurationP(name, shorthand string, value time.Duration, usage string) *time.Duration { + return CommandLine.DurationP(name, shorthand, value, usage) +} diff --git a/src/vendor/github.com/spf13/pflag/example_test.go b/src/vendor/github.com/spf13/pflag/example_test.go new file mode 100644 index 00000000..9be7a49f --- /dev/null +++ b/src/vendor/github.com/spf13/pflag/example_test.go @@ -0,0 +1,77 @@ +// Copyright 2012 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// These examples demonstrate more intricate uses of the flag package. +package pflag_test + +import ( + "errors" + "fmt" + "strings" + "time" + + flag "github.com/spf13/pflag" +) + +// Example 1: A single string flag called "species" with default value "gopher". +var species = flag.String("species", "gopher", "the species we are studying") + +// Example 2: A flag with a shorthand letter. +var gopherType = flag.StringP("gopher_type", "g", "pocket", "the variety of gopher") + +// Example 3: A user-defined flag type, a slice of durations. +type interval []time.Duration + +// String is the method to format the flag's value, part of the flag.Value interface. +// The String method's output will be used in diagnostics. +func (i *interval) String() string { + return fmt.Sprint(*i) +} + +func (i *interval) Type() string { + return "interval" +} + +// Set is the method to set the flag value, part of the flag.Value interface. +// Set's argument is a string to be parsed to set the flag. +// It's a comma-separated list, so we split it. +func (i *interval) Set(value string) error { + // If we wanted to allow the flag to be set multiple times, + // accumulating values, we would delete this if statement. + // That would permit usages such as + // -deltaT 10s -deltaT 15s + // and other combinations. + if len(*i) > 0 { + return errors.New("interval flag already set") + } + for _, dt := range strings.Split(value, ",") { + duration, err := time.ParseDuration(dt) + if err != nil { + return err + } + *i = append(*i, duration) + } + return nil +} + +// Define a flag to accumulate durations. Because it has a special type, +// we need to use the Var function and therefore create the flag during +// init. + +var intervalFlag interval + +func init() { + // Tie the command-line flag to the intervalFlag variable and + // set a usage message. 
+ flag.Var(&intervalFlag, "deltaT", "comma-separated list of intervals to use between events") +} + +func Example() { + // All the interesting pieces are with the variables declared above, but + // to enable the flag package to see the flags defined there, one must + // execute, typically at the start of main (not init!): + // flag.Parse() + // We don't run it here because this is not a main function and + // the testing suite has already parsed the flags. +} diff --git a/src/vendor/github.com/spf13/pflag/export_test.go b/src/vendor/github.com/spf13/pflag/export_test.go new file mode 100644 index 00000000..9318fee0 --- /dev/null +++ b/src/vendor/github.com/spf13/pflag/export_test.go @@ -0,0 +1,29 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pflag + +import ( + "io/ioutil" + "os" +) + +// Additional routines compiled into the package only during testing. + +// ResetForTesting clears all flag state and sets the usage function as directed. +// After calling ResetForTesting, parse errors in flag handling will not +// exit the program. +func ResetForTesting(usage func()) { + CommandLine = &FlagSet{ + name: os.Args[0], + errorHandling: ContinueOnError, + output: ioutil.Discard, + } + Usage = usage +} + +// GetCommandLine returns the default FlagSet. +func GetCommandLine() *FlagSet { + return CommandLine +} diff --git a/src/vendor/github.com/spf13/pflag/flag.go b/src/vendor/github.com/spf13/pflag/flag.go new file mode 100644 index 00000000..b0b0d46f --- /dev/null +++ b/src/vendor/github.com/spf13/pflag/flag.go @@ -0,0 +1,947 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package pflag is a drop-in replacement for Go's flag package, implementing +POSIX/GNU-style --flags. 
+ +pflag is compatible with the GNU extensions to the POSIX recommendations +for command-line options. See +http://www.gnu.org/software/libc/manual/html_node/Argument-Syntax.html + +Usage: + +pflag is a drop-in replacement of Go's native flag package. If you import +pflag under the name "flag" then all code should continue to function +with no changes. + + import flag "github.com/ogier/pflag" + + There is one exception to this: if you directly instantiate the Flag struct +there is one more field "Shorthand" that you will need to set. +Most code never instantiates this struct directly, and instead uses +functions such as String(), BoolVar(), and Var(), and is therefore +unaffected. + +Define flags using flag.String(), Bool(), Int(), etc. + +This declares an integer flag, -flagname, stored in the pointer ip, with type *int. + var ip = flag.Int("flagname", 1234, "help message for flagname") +If you like, you can bind the flag to a variable using the Var() functions. + var flagvar int + func init() { + flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname") + } +Or you can create custom flags that satisfy the Value interface (with +pointer receivers) and couple them to flag parsing by + flag.Var(&flagVal, "name", "help message for flagname") +For such flags, the default value is just the initial value of the variable. + +After all flags are defined, call + flag.Parse() +to parse the command line into the defined flags. + +Flags may then be used directly. If you're using the flags themselves, +they are all pointers; if you bind to variables, they're values. + fmt.Println("ip has value ", *ip) + fmt.Println("flagvar has value ", flagvar) + +After parsing, the arguments after the flag are available as the +slice flag.Args() or individually as flag.Arg(i). +The arguments are indexed from 0 through flag.NArg()-1. + +The pflag package also defines some new functions that are not in flag, +that give one-letter shorthands for flags. 
You can use these by appending +'P' to the name of any function that defines a flag. + var ip = flag.IntP("flagname", "f", 1234, "help message") + var flagvar bool + func init() { + flag.BoolVarP("boolname", "b", true, "help message") + } + flag.VarP(&flagVar, "varname", "v", 1234, "help message") +Shorthand letters can be used with single dashes on the command line. +Boolean shorthand flags can be combined with other shorthand flags. + +Command line flag syntax: + --flag // boolean flags only + --flag=x + +Unlike the flag package, a single dash before an option means something +different than a double dash. Single dashes signify a series of shorthand +letters for flags. All but the last shorthand letter must be boolean flags. + // boolean flags + -f + -abc + // non-boolean flags + -n 1234 + -Ifile + // mixed + -abcs "hello" + -abcn1234 + +Flag parsing stops after the terminator "--". Unlike the flag package, +flags can be interspersed with arguments anywhere on the command line +before this terminator. + +Integer flags accept 1234, 0664, 0x1234 and may be negative. +Boolean flags (in their long form) accept 1, 0, t, f, true, false, +TRUE, FALSE, True, False. +Duration flags accept any input valid for time.ParseDuration. + +The default set of command-line flags is controlled by +top-level functions. The FlagSet type allows one to define +independent sets of flags, such as to implement subcommands +in a command-line interface. The methods of FlagSet are +analogous to the top-level functions for the command-line +flag set. +*/ +package pflag + +import ( + "bytes" + "errors" + "fmt" + "io" + "os" + "sort" + "strings" +) + +// ErrHelp is the error returned if the flag -help is invoked but no such flag is defined. +var ErrHelp = errors.New("pflag: help requested") + +// ErrorHandling defines how to handle flag parsing errors. 
+type ErrorHandling int + +const ( + // ContinueOnError will return an err from Parse() if an error is found + ContinueOnError ErrorHandling = iota + // ExitOnError will call os.Exit(2) if an error is found when parsing + ExitOnError + // PanicOnError will panic() if an error is found when parsing flags + PanicOnError +) + +// NormalizedName is a flag name that has been normalized according to rules +// for the FlagSet (e.g. making '-' and '_' equivalent). +type NormalizedName string + +// A FlagSet represents a set of defined flags. +type FlagSet struct { + // Usage is the function called when an error occurs while parsing flags. + // The field is a function (not a method) that may be changed to point to + // a custom error handler. + Usage func() + + name string + parsed bool + actual map[NormalizedName]*Flag + formal map[NormalizedName]*Flag + shorthands map[byte]*Flag + args []string // arguments after flags + argsLenAtDash int // len(args) when a '--' was located when parsing, or -1 if no -- + exitOnError bool // does the program exit if there's an error? + errorHandling ErrorHandling + output io.Writer // nil means stderr; use out() accessor + interspersed bool // allow interspersed option/non-option args + normalizeNameFunc func(f *FlagSet, name string) NormalizedName +} + +// A Flag represents the state of a flag. 
+type Flag struct { + Name string // name as it appears on command line + Shorthand string // one-letter abbreviated flag + Usage string // help message + Value Value // value as set + DefValue string // default value (as text); for usage message + Changed bool // If the user set the value (or if left to default) + NoOptDefVal string //default value (as text); if the flag is on the command line without any options + Deprecated string // If this flag is deprecated, this string is the new or now thing to use + Hidden bool // used by cobra.Command to allow flags to be hidden from help/usage text + ShorthandDeprecated string // If the shorthand of this flag is deprecated, this string is the new or now thing to use + Annotations map[string][]string // used by cobra.Command bash autocomple code +} + +// Value is the interface to the dynamic value stored in a flag. +// (The default value is represented as a string.) +type Value interface { + String() string + Set(string) error + Type() string +} + +// sortFlags returns the flags as a slice in lexicographical sorted order. +func sortFlags(flags map[NormalizedName]*Flag) []*Flag { + list := make(sort.StringSlice, len(flags)) + i := 0 + for k := range flags { + list[i] = string(k) + i++ + } + list.Sort() + result := make([]*Flag, len(list)) + for i, name := range list { + result[i] = flags[NormalizedName(name)] + } + return result +} + +// SetNormalizeFunc allows you to add a function which can translate flag names. +// Flags added to the FlagSet will be translated and then when anything tries to +// look up the flag that will also be translated. So it would be possible to create +// a flag named "getURL" and have it translated to "geturl". A user could then pass +// "--getUrl" which may also be translated to "geturl" and everything will work. 
+func (f *FlagSet) SetNormalizeFunc(n func(f *FlagSet, name string) NormalizedName) { + f.normalizeNameFunc = n + for k, v := range f.formal { + delete(f.formal, k) + nname := f.normalizeFlagName(string(k)) + f.formal[nname] = v + v.Name = string(nname) + } +} + +// GetNormalizeFunc returns the previously set NormalizeFunc of a function which +// does no translation, if not set previously. +func (f *FlagSet) GetNormalizeFunc() func(f *FlagSet, name string) NormalizedName { + if f.normalizeNameFunc != nil { + return f.normalizeNameFunc + } + return func(f *FlagSet, name string) NormalizedName { return NormalizedName(name) } +} + +func (f *FlagSet) normalizeFlagName(name string) NormalizedName { + n := f.GetNormalizeFunc() + return n(f, name) +} + +func (f *FlagSet) out() io.Writer { + if f.output == nil { + return os.Stderr + } + return f.output +} + +// SetOutput sets the destination for usage and error messages. +// If output is nil, os.Stderr is used. +func (f *FlagSet) SetOutput(output io.Writer) { + f.output = output +} + +// VisitAll visits the flags in lexicographical order, calling fn for each. +// It visits all flags, even those not set. +func (f *FlagSet) VisitAll(fn func(*Flag)) { + for _, flag := range sortFlags(f.formal) { + fn(flag) + } +} + +// HasFlags returns a bool to indicate if the FlagSet has any flags definied. +func (f *FlagSet) HasFlags() bool { + return len(f.formal) > 0 +} + +// HasAvailableFlags returns a bool to indicate if the FlagSet has any flags +// definied that are not hidden or deprecated. +func (f *FlagSet) HasAvailableFlags() bool { + for _, flag := range f.formal { + if !flag.Hidden && len(flag.Deprecated) == 0 { + return true + } + } + return false +} + +// VisitAll visits the command-line flags in lexicographical order, calling +// fn for each. It visits all flags, even those not set. +func VisitAll(fn func(*Flag)) { + CommandLine.VisitAll(fn) +} + +// Visit visits the flags in lexicographical order, calling fn for each. 
+// It visits only those flags that have been set. +func (f *FlagSet) Visit(fn func(*Flag)) { + for _, flag := range sortFlags(f.actual) { + fn(flag) + } +} + +// Visit visits the command-line flags in lexicographical order, calling fn +// for each. It visits only those flags that have been set. +func Visit(fn func(*Flag)) { + CommandLine.Visit(fn) +} + +// Lookup returns the Flag structure of the named flag, returning nil if none exists. +func (f *FlagSet) Lookup(name string) *Flag { + return f.lookup(f.normalizeFlagName(name)) +} + +// lookup returns the Flag structure of the named flag, returning nil if none exists. +func (f *FlagSet) lookup(name NormalizedName) *Flag { + return f.formal[name] +} + +// func to return a given type for a given flag name +func (f *FlagSet) getFlagType(name string, ftype string, convFunc func(sval string) (interface{}, error)) (interface{}, error) { + flag := f.Lookup(name) + if flag == nil { + err := fmt.Errorf("flag accessed but not defined: %s", name) + return nil, err + } + + if flag.Value.Type() != ftype { + err := fmt.Errorf("trying to get %s value of flag of type %s", ftype, flag.Value.Type()) + return nil, err + } + + sval := flag.Value.String() + result, err := convFunc(sval) + if err != nil { + return nil, err + } + return result, nil +} + +// ArgsLenAtDash will return the length of f.Args at the moment when a -- was +// found during arg parsing. This allows your program to know which args were +// before the -- and which came after. +func (f *FlagSet) ArgsLenAtDash() int { + return f.argsLenAtDash +} + +// MarkDeprecated indicated that a flag is deprecated in your program. It will +// continue to function but will not show up in help or usage messages. Using +// this flag will also print the given usageMessage. 
+func (f *FlagSet) MarkDeprecated(name string, usageMessage string) error { + flag := f.Lookup(name) + if flag == nil { + return fmt.Errorf("flag %q does not exist", name) + } + if len(usageMessage) == 0 { + return fmt.Errorf("deprecated message for flag %q must be set", name) + } + flag.Deprecated = usageMessage + return nil +} + +// MarkShorthandDeprecated will mark the shorthand of a flag deprecated in your +// program. It will continue to function but will not show up in help or usage +// messages. Using this flag will also print the given usageMessage. +func (f *FlagSet) MarkShorthandDeprecated(name string, usageMessage string) error { + flag := f.Lookup(name) + if flag == nil { + return fmt.Errorf("flag %q does not exist", name) + } + if len(usageMessage) == 0 { + return fmt.Errorf("deprecated message for flag %q must be set", name) + } + flag.ShorthandDeprecated = usageMessage + return nil +} + +// MarkHidden sets a flag to 'hidden' in your program. It will continue to +// function but will not show up in help or usage messages. +func (f *FlagSet) MarkHidden(name string) error { + flag := f.Lookup(name) + if flag == nil { + return fmt.Errorf("flag %q does not exist", name) + } + flag.Hidden = true + return nil +} + +// Lookup returns the Flag structure of the named command-line flag, +// returning nil if none exists. +func Lookup(name string) *Flag { + return CommandLine.Lookup(name) +} + +// Set sets the value of the named flag. 
+func (f *FlagSet) Set(name, value string) error { + normalName := f.normalizeFlagName(name) + flag, ok := f.formal[normalName] + if !ok { + return fmt.Errorf("no such flag -%v", name) + } + err := flag.Value.Set(value) + if err != nil { + return err + } + if f.actual == nil { + f.actual = make(map[NormalizedName]*Flag) + } + f.actual[normalName] = flag + flag.Changed = true + if len(flag.Deprecated) > 0 { + fmt.Fprintf(os.Stderr, "Flag --%s has been deprecated, %s\n", flag.Name, flag.Deprecated) + } + return nil +} + +// SetAnnotation allows one to set arbitrary annotations on a flag in the FlagSet. +// This is sometimes used by spf13/cobra programs which want to generate additional +// bash completion information. +func (f *FlagSet) SetAnnotation(name, key string, values []string) error { + normalName := f.normalizeFlagName(name) + flag, ok := f.formal[normalName] + if !ok { + return fmt.Errorf("no such flag -%v", name) + } + if flag.Annotations == nil { + flag.Annotations = map[string][]string{} + } + flag.Annotations[key] = values + return nil +} + +// Changed returns true if the flag was explicitly set during Parse() and false +// otherwise +func (f *FlagSet) Changed(name string) bool { + flag := f.Lookup(name) + // If a flag doesn't exist, it wasn't changed.... + if flag == nil { + return false + } + return flag.Changed +} + +// Set sets the value of the named command-line flag. +func Set(name, value string) error { + return CommandLine.Set(name, value) +} + +// PrintDefaults prints, to standard error unless configured +// otherwise, the default values of all defined flags in the set. +func (f *FlagSet) PrintDefaults() { + usages := f.FlagUsages() + fmt.Fprintf(f.out(), "%s", usages) +} + +// defaultIsZeroValue returns true if the default value for this flag represents +// a zero value. 
+func (f *Flag) defaultIsZeroValue() bool { + switch f.Value.(type) { + case boolFlag: + return f.DefValue == "false" + case *durationValue: + // Beginning in Go 1.7, duration zero values are "0s" + return f.DefValue == "0" || f.DefValue == "0s" + case *intValue, *int8Value, *int32Value, *int64Value, *uintValue, *uint8Value, *uint16Value, *uint32Value, *uint64Value, *countValue, *float32Value, *float64Value: + return f.DefValue == "0" + case *stringValue: + return f.DefValue == "" + case *ipValue, *ipMaskValue, *ipNetValue: + return f.DefValue == "" + case *intSliceValue, *stringSliceValue, *stringArrayValue: + return f.DefValue == "[]" + default: + switch f.Value.String() { + case "false": + return true + case "": + return true + case "": + return true + case "0": + return true + } + return false + } +} + +// UnquoteUsage extracts a back-quoted name from the usage +// string for a flag and returns it and the un-quoted usage. +// Given "a `name` to show" it returns ("name", "a name to show"). +// If there are no back quotes, the name is an educated guess of the +// type of the flag's value, or the empty string if the flag is boolean. +func UnquoteUsage(flag *Flag) (name string, usage string) { + // Look for a back-quoted name, but avoid the strings package. + usage = flag.Usage + for i := 0; i < len(usage); i++ { + if usage[i] == '`' { + for j := i + 1; j < len(usage); j++ { + if usage[j] == '`' { + name = usage[i+1 : j] + usage = usage[:i] + name + usage[j+1:] + return name, usage + } + } + break // Only one back quote; use type name. 
+ } + } + + name = flag.Value.Type() + switch name { + case "bool": + name = "" + case "float64": + name = "float" + case "int64": + name = "int" + case "uint64": + name = "uint" + } + + return +} + +// FlagUsages Returns a string containing the usage information for all flags in +// the FlagSet +func (f *FlagSet) FlagUsages() string { + x := new(bytes.Buffer) + + lines := make([]string, 0, len(f.formal)) + + maxlen := 0 + f.VisitAll(func(flag *Flag) { + if len(flag.Deprecated) > 0 || flag.Hidden { + return + } + + line := "" + if len(flag.Shorthand) > 0 && len(flag.ShorthandDeprecated) == 0 { + line = fmt.Sprintf(" -%s, --%s", flag.Shorthand, flag.Name) + } else { + line = fmt.Sprintf(" --%s", flag.Name) + } + + varname, usage := UnquoteUsage(flag) + if len(varname) > 0 { + line += " " + varname + } + if len(flag.NoOptDefVal) > 0 { + switch flag.Value.Type() { + case "string": + line += fmt.Sprintf("[=%q]", flag.NoOptDefVal) + case "bool": + if flag.NoOptDefVal != "true" { + line += fmt.Sprintf("[=%s]", flag.NoOptDefVal) + } + default: + line += fmt.Sprintf("[=%s]", flag.NoOptDefVal) + } + } + + // This special character will be replaced with spacing once the + // correct alignment is calculated + line += "\x00" + if len(line) > maxlen { + maxlen = len(line) + } + + line += usage + if !flag.defaultIsZeroValue() { + if flag.Value.Type() == "string" { + line += fmt.Sprintf(" (default %q)", flag.DefValue) + } else { + line += fmt.Sprintf(" (default %s)", flag.DefValue) + } + } + + lines = append(lines, line) + }) + + for _, line := range lines { + sidx := strings.Index(line, "\x00") + spacing := strings.Repeat(" ", maxlen-sidx) + fmt.Fprintln(x, line[:sidx], spacing, line[sidx+1:]) + } + + return x.String() +} + +// PrintDefaults prints to standard error the default values of all defined command-line flags. +func PrintDefaults() { + CommandLine.PrintDefaults() +} + +// defaultUsage is the default function to print a usage message. 
+func defaultUsage(f *FlagSet) { + fmt.Fprintf(f.out(), "Usage of %s:\n", f.name) + f.PrintDefaults() +} + +// NOTE: Usage is not just defaultUsage(CommandLine) +// because it serves (via godoc flag Usage) as the example +// for how to write your own usage function. + +// Usage prints to standard error a usage message documenting all defined command-line flags. +// The function is a variable that may be changed to point to a custom function. +// By default it prints a simple header and calls PrintDefaults; for details about the +// format of the output and how to control it, see the documentation for PrintDefaults. +var Usage = func() { + fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0]) + PrintDefaults() +} + +// NFlag returns the number of flags that have been set. +func (f *FlagSet) NFlag() int { return len(f.actual) } + +// NFlag returns the number of command-line flags that have been set. +func NFlag() int { return len(CommandLine.actual) } + +// Arg returns the i'th argument. Arg(0) is the first remaining argument +// after flags have been processed. +func (f *FlagSet) Arg(i int) string { + if i < 0 || i >= len(f.args) { + return "" + } + return f.args[i] +} + +// Arg returns the i'th command-line argument. Arg(0) is the first remaining argument +// after flags have been processed. +func Arg(i int) string { + return CommandLine.Arg(i) +} + +// NArg is the number of arguments remaining after flags have been processed. +func (f *FlagSet) NArg() int { return len(f.args) } + +// NArg is the number of arguments remaining after flags have been processed. +func NArg() int { return len(CommandLine.args) } + +// Args returns the non-flag arguments. +func (f *FlagSet) Args() []string { return f.args } + +// Args returns the non-flag command-line arguments. +func Args() []string { return CommandLine.args } + +// Var defines a flag with the specified name and usage string. 
The type and +// value of the flag are represented by the first argument, of type Value, which +// typically holds a user-defined implementation of Value. For instance, the +// caller could create a flag that turns a comma-separated string into a slice +// of strings by giving the slice the methods of Value; in particular, Set would +// decompose the comma-separated string into the slice. +func (f *FlagSet) Var(value Value, name string, usage string) { + f.VarP(value, name, "", usage) +} + +// VarPF is like VarP, but returns the flag created +func (f *FlagSet) VarPF(value Value, name, shorthand, usage string) *Flag { + // Remember the default value as a string; it won't change. + flag := &Flag{ + Name: name, + Shorthand: shorthand, + Usage: usage, + Value: value, + DefValue: value.String(), + } + f.AddFlag(flag) + return flag +} + +// VarP is like Var, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) VarP(value Value, name, shorthand, usage string) { + _ = f.VarPF(value, name, shorthand, usage) +} + +// AddFlag will add the flag to the FlagSet +func (f *FlagSet) AddFlag(flag *Flag) { + // Call normalizeFlagName function only once + normalizedFlagName := f.normalizeFlagName(flag.Name) + + _, alreadythere := f.formal[normalizedFlagName] + if alreadythere { + msg := fmt.Sprintf("%s flag redefined: %s", f.name, flag.Name) + fmt.Fprintln(f.out(), msg) + panic(msg) // Happens only if flags are declared with identical names + } + if f.formal == nil { + f.formal = make(map[NormalizedName]*Flag) + } + + flag.Name = string(normalizedFlagName) + f.formal[normalizedFlagName] = flag + + if len(flag.Shorthand) == 0 { + return + } + if len(flag.Shorthand) > 1 { + fmt.Fprintf(f.out(), "%s shorthand more than ASCII character: %s\n", f.name, flag.Shorthand) + panic("shorthand is more than one character") + } + if f.shorthands == nil { + f.shorthands = make(map[byte]*Flag) + } + c := flag.Shorthand[0] + old, alreadythere := f.shorthands[c] + if 
alreadythere { + fmt.Fprintf(f.out(), "%s shorthand reused: %q for %s already used for %s\n", f.name, c, flag.Name, old.Name) + panic("shorthand redefinition") + } + f.shorthands[c] = flag +} + +// AddFlagSet adds one FlagSet to another. If a flag is already present in f +// the flag from newSet will be ignored +func (f *FlagSet) AddFlagSet(newSet *FlagSet) { + if newSet == nil { + return + } + newSet.VisitAll(func(flag *Flag) { + if f.Lookup(flag.Name) == nil { + f.AddFlag(flag) + } + }) +} + +// Var defines a flag with the specified name and usage string. The type and +// value of the flag are represented by the first argument, of type Value, which +// typically holds a user-defined implementation of Value. For instance, the +// caller could create a flag that turns a comma-separated string into a slice +// of strings by giving the slice the methods of Value; in particular, Set would +// decompose the comma-separated string into the slice. +func Var(value Value, name string, usage string) { + CommandLine.VarP(value, name, "", usage) +} + +// VarP is like Var, but accepts a shorthand letter that can be used after a single dash. +func VarP(value Value, name, shorthand, usage string) { + CommandLine.VarP(value, name, shorthand, usage) +} + +// failf prints to standard error a formatted error and usage message and +// returns the error. +func (f *FlagSet) failf(format string, a ...interface{}) error { + err := fmt.Errorf(format, a...) + fmt.Fprintln(f.out(), err) + f.usage() + return err +} + +// usage calls the Usage method for the flag set, or the usage function if +// the flag set is CommandLine. 
+func (f *FlagSet) usage() { + if f == CommandLine { + Usage() + } else if f.Usage == nil { + defaultUsage(f) + } else { + f.Usage() + } +} + +func (f *FlagSet) setFlag(flag *Flag, value string, origArg string) error { + if err := flag.Value.Set(value); err != nil { + return f.failf("invalid argument %q for %s: %v", value, origArg, err) + } + // mark as visited for Visit() + if f.actual == nil { + f.actual = make(map[NormalizedName]*Flag) + } + f.actual[f.normalizeFlagName(flag.Name)] = flag + flag.Changed = true + if len(flag.Deprecated) > 0 { + fmt.Fprintf(os.Stderr, "Flag --%s has been deprecated, %s\n", flag.Name, flag.Deprecated) + } + if len(flag.ShorthandDeprecated) > 0 && containsShorthand(origArg, flag.Shorthand) { + fmt.Fprintf(os.Stderr, "Flag shorthand -%s has been deprecated, %s\n", flag.Shorthand, flag.ShorthandDeprecated) + } + return nil +} + +func containsShorthand(arg, shorthand string) bool { + // filter out flags -- + if strings.HasPrefix(arg, "-") { + return false + } + arg = strings.SplitN(arg, "=", 2)[0] + return strings.Contains(arg, shorthand) +} + +func (f *FlagSet) parseLongArg(s string, args []string) (a []string, err error) { + a = args + name := s[2:] + if len(name) == 0 || name[0] == '-' || name[0] == '=' { + err = f.failf("bad flag syntax: %s", s) + return + } + split := strings.SplitN(name, "=", 2) + name = split[0] + flag, alreadythere := f.formal[f.normalizeFlagName(name)] + if !alreadythere { + if name == "help" { // special case for nice help message. 
+ f.usage() + return a, ErrHelp + } + err = f.failf("unknown flag: --%s", name) + return + } + var value string + if len(split) == 2 { + // '--flag=arg' + value = split[1] + } else if len(flag.NoOptDefVal) > 0 { + // '--flag' (arg was optional) + value = flag.NoOptDefVal + } else if len(a) > 0 { + // '--flag arg' + value = a[0] + a = a[1:] + } else { + // '--flag' (arg was required) + err = f.failf("flag needs an argument: %s", s) + return + } + err = f.setFlag(flag, value, s) + return +} + +func (f *FlagSet) parseSingleShortArg(shorthands string, args []string) (outShorts string, outArgs []string, err error) { + if strings.HasPrefix(shorthands, "test.") { + return + } + outArgs = args + outShorts = shorthands[1:] + c := shorthands[0] + + flag, alreadythere := f.shorthands[c] + if !alreadythere { + if c == 'h' { // special case for nice help message. + f.usage() + err = ErrHelp + return + } + //TODO continue on error + err = f.failf("unknown shorthand flag: %q in -%s", c, shorthands) + return + } + var value string + if len(shorthands) > 2 && shorthands[1] == '=' { + value = shorthands[2:] + outShorts = "" + } else if len(flag.NoOptDefVal) > 0 { + value = flag.NoOptDefVal + } else if len(shorthands) > 1 { + value = shorthands[1:] + outShorts = "" + } else if len(args) > 0 { + value = args[0] + outArgs = args[1:] + } else { + err = f.failf("flag needs an argument: %q in -%s", c, shorthands) + return + } + err = f.setFlag(flag, value, shorthands) + return +} + +func (f *FlagSet) parseShortArg(s string, args []string) (a []string, err error) { + a = args + shorthands := s[1:] + + for len(shorthands) > 0 { + shorthands, a, err = f.parseSingleShortArg(shorthands, args) + if err != nil { + return + } + } + + return +} + +func (f *FlagSet) parseArgs(args []string) (err error) { + for len(args) > 0 { + s := args[0] + args = args[1:] + if len(s) == 0 || s[0] != '-' || len(s) == 1 { + if !f.interspersed { + f.args = append(f.args, s) + f.args = append(f.args, args...) 
+ return nil + } + f.args = append(f.args, s) + continue + } + + if s[1] == '-' { + if len(s) == 2 { // "--" terminates the flags + f.argsLenAtDash = len(f.args) + f.args = append(f.args, args...) + break + } + args, err = f.parseLongArg(s, args) + } else { + args, err = f.parseShortArg(s, args) + } + if err != nil { + return + } + } + return +} + +// Parse parses flag definitions from the argument list, which should not +// include the command name. Must be called after all flags in the FlagSet +// are defined and before flags are accessed by the program. +// The return value will be ErrHelp if -help was set but not defined. +func (f *FlagSet) Parse(arguments []string) error { + f.parsed = true + f.args = make([]string, 0, len(arguments)) + err := f.parseArgs(arguments) + if err != nil { + switch f.errorHandling { + case ContinueOnError: + return err + case ExitOnError: + os.Exit(2) + case PanicOnError: + panic(err) + } + } + return nil +} + +// Parsed reports whether f.Parse has been called. +func (f *FlagSet) Parsed() bool { + return f.parsed +} + +// Parse parses the command-line flags from os.Args[1:]. Must be called +// after all flags are defined and before flags are accessed by the program. +func Parse() { + // Ignore errors; CommandLine is set for ExitOnError. + CommandLine.Parse(os.Args[1:]) +} + +// SetInterspersed sets whether to support interspersed option/non-option arguments. +func SetInterspersed(interspersed bool) { + CommandLine.SetInterspersed(interspersed) +} + +// Parsed returns true if the command-line flags have been parsed. +func Parsed() bool { + return CommandLine.Parsed() +} + +// CommandLine is the default set of command-line flags, parsed from os.Args. +var CommandLine = NewFlagSet(os.Args[0], ExitOnError) + +// NewFlagSet returns a new, empty flag set with the specified name and +// error handling property. 
+func NewFlagSet(name string, errorHandling ErrorHandling) *FlagSet { + f := &FlagSet{ + name: name, + errorHandling: errorHandling, + argsLenAtDash: -1, + interspersed: true, + } + return f +} + +// SetInterspersed sets whether to support interspersed option/non-option arguments. +func (f *FlagSet) SetInterspersed(interspersed bool) { + f.interspersed = interspersed +} + +// Init sets the name and error handling property for a flag set. +// By default, the zero FlagSet uses an empty name and the +// ContinueOnError error handling policy. +func (f *FlagSet) Init(name string, errorHandling ErrorHandling) { + f.name = name + f.errorHandling = errorHandling + f.argsLenAtDash = -1 +} diff --git a/src/vendor/github.com/spf13/pflag/flag_test.go b/src/vendor/github.com/spf13/pflag/flag_test.go new file mode 100644 index 00000000..b294fc76 --- /dev/null +++ b/src/vendor/github.com/spf13/pflag/flag_test.go @@ -0,0 +1,948 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package pflag + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net" + "os" + "reflect" + "sort" + "strconv" + "strings" + "testing" + "time" +) + +var ( + testBool = Bool("test_bool", false, "bool value") + testInt = Int("test_int", 0, "int value") + testInt64 = Int64("test_int64", 0, "int64 value") + testUint = Uint("test_uint", 0, "uint value") + testUint64 = Uint64("test_uint64", 0, "uint64 value") + testString = String("test_string", "0", "string value") + testFloat = Float64("test_float64", 0, "float64 value") + testDuration = Duration("test_duration", 0, "time.Duration value") + testOptionalInt = Int("test_optional_int", 0, "optional int value") + normalizeFlagNameInvocations = 0 +) + +func boolString(s string) string { + if s == "0" { + return "false" + } + return "true" +} + +func TestEverything(t *testing.T) { + m := make(map[string]*Flag) + desired := "0" + visitor := func(f *Flag) { + if len(f.Name) > 5 && f.Name[0:5] == "test_" { + m[f.Name] = f + ok := false + switch { + case f.Value.String() == desired: + ok = true + case f.Name == "test_bool" && f.Value.String() == boolString(desired): + ok = true + case f.Name == "test_duration" && f.Value.String() == desired+"s": + ok = true + } + if !ok { + t.Error("Visit: bad value", f.Value.String(), "for", f.Name) + } + } + } + VisitAll(visitor) + if len(m) != 9 { + t.Error("VisitAll misses some flags") + for k, v := range m { + t.Log(k, *v) + } + } + m = make(map[string]*Flag) + Visit(visitor) + if len(m) != 0 { + t.Errorf("Visit sees unset flags") + for k, v := range m { + t.Log(k, *v) + } + } + // Now set all flags + Set("test_bool", "true") + Set("test_int", "1") + Set("test_int64", "1") + Set("test_uint", "1") + Set("test_uint64", "1") + Set("test_string", "1") + Set("test_float64", "1") + Set("test_duration", "1s") + Set("test_optional_int", "1") + desired = "1" + Visit(visitor) + if len(m) != 9 { + t.Error("Visit fails after set") + for k, v := range m { + t.Log(k, *v) + } + } + // Now test 
they're visited in sort order. + var flagNames []string + Visit(func(f *Flag) { flagNames = append(flagNames, f.Name) }) + if !sort.StringsAreSorted(flagNames) { + t.Errorf("flag names not sorted: %v", flagNames) + } +} + +func TestUsage(t *testing.T) { + called := false + ResetForTesting(func() { called = true }) + if GetCommandLine().Parse([]string{"--x"}) == nil { + t.Error("parse did not fail for unknown flag") + } + if !called { + t.Error("did not call Usage for unknown flag") + } +} + +func TestAddFlagSet(t *testing.T) { + oldSet := NewFlagSet("old", ContinueOnError) + newSet := NewFlagSet("new", ContinueOnError) + + oldSet.String("flag1", "flag1", "flag1") + oldSet.String("flag2", "flag2", "flag2") + + newSet.String("flag2", "flag2", "flag2") + newSet.String("flag3", "flag3", "flag3") + + oldSet.AddFlagSet(newSet) + + if len(oldSet.formal) != 3 { + t.Errorf("Unexpected result adding a FlagSet to a FlagSet %v", oldSet) + } +} + +func TestAnnotation(t *testing.T) { + f := NewFlagSet("shorthand", ContinueOnError) + + if err := f.SetAnnotation("missing-flag", "key", nil); err == nil { + t.Errorf("Expected error setting annotation on non-existent flag") + } + + f.StringP("stringa", "a", "", "string value") + if err := f.SetAnnotation("stringa", "key", nil); err != nil { + t.Errorf("Unexpected error setting new nil annotation: %v", err) + } + if annotation := f.Lookup("stringa").Annotations["key"]; annotation != nil { + t.Errorf("Unexpected annotation: %v", annotation) + } + + f.StringP("stringb", "b", "", "string2 value") + if err := f.SetAnnotation("stringb", "key", []string{"value1"}); err != nil { + t.Errorf("Unexpected error setting new annotation: %v", err) + } + if annotation := f.Lookup("stringb").Annotations["key"]; !reflect.DeepEqual(annotation, []string{"value1"}) { + t.Errorf("Unexpected annotation: %v", annotation) + } + + if err := f.SetAnnotation("stringb", "key", []string{"value2"}); err != nil { + t.Errorf("Unexpected error updating annotation: 
%v", err) + } + if annotation := f.Lookup("stringb").Annotations["key"]; !reflect.DeepEqual(annotation, []string{"value2"}) { + t.Errorf("Unexpected annotation: %v", annotation) + } +} + +func testParse(f *FlagSet, t *testing.T) { + if f.Parsed() { + t.Error("f.Parse() = true before Parse") + } + boolFlag := f.Bool("bool", false, "bool value") + bool2Flag := f.Bool("bool2", false, "bool2 value") + bool3Flag := f.Bool("bool3", false, "bool3 value") + intFlag := f.Int("int", 0, "int value") + int8Flag := f.Int8("int8", 0, "int value") + int32Flag := f.Int32("int32", 0, "int value") + int64Flag := f.Int64("int64", 0, "int64 value") + uintFlag := f.Uint("uint", 0, "uint value") + uint8Flag := f.Uint8("uint8", 0, "uint value") + uint16Flag := f.Uint16("uint16", 0, "uint value") + uint32Flag := f.Uint32("uint32", 0, "uint value") + uint64Flag := f.Uint64("uint64", 0, "uint64 value") + stringFlag := f.String("string", "0", "string value") + float32Flag := f.Float32("float32", 0, "float32 value") + float64Flag := f.Float64("float64", 0, "float64 value") + ipFlag := f.IP("ip", net.ParseIP("127.0.0.1"), "ip value") + maskFlag := f.IPMask("mask", ParseIPv4Mask("0.0.0.0"), "mask value") + durationFlag := f.Duration("duration", 5*time.Second, "time.Duration value") + optionalIntNoValueFlag := f.Int("optional-int-no-value", 0, "int value") + f.Lookup("optional-int-no-value").NoOptDefVal = "9" + optionalIntWithValueFlag := f.Int("optional-int-with-value", 0, "int value") + f.Lookup("optional-int-no-value").NoOptDefVal = "9" + extra := "one-extra-argument" + args := []string{ + "--bool", + "--bool2=true", + "--bool3=false", + "--int=22", + "--int8=-8", + "--int32=-32", + "--int64=0x23", + "--uint", "24", + "--uint8=8", + "--uint16=16", + "--uint32=32", + "--uint64=25", + "--string=hello", + "--float32=-172e12", + "--float64=2718e28", + "--ip=10.11.12.13", + "--mask=255.255.255.0", + "--duration=2m", + "--optional-int-no-value", + "--optional-int-with-value=42", + extra, + } + if 
err := f.Parse(args); err != nil { + t.Fatal(err) + } + if !f.Parsed() { + t.Error("f.Parse() = false after Parse") + } + if *boolFlag != true { + t.Error("bool flag should be true, is ", *boolFlag) + } + if v, err := f.GetBool("bool"); err != nil || v != *boolFlag { + t.Error("GetBool does not work.") + } + if *bool2Flag != true { + t.Error("bool2 flag should be true, is ", *bool2Flag) + } + if *bool3Flag != false { + t.Error("bool3 flag should be false, is ", *bool2Flag) + } + if *intFlag != 22 { + t.Error("int flag should be 22, is ", *intFlag) + } + if v, err := f.GetInt("int"); err != nil || v != *intFlag { + t.Error("GetInt does not work.") + } + if *int8Flag != -8 { + t.Error("int8 flag should be 0x23, is ", *int8Flag) + } + if v, err := f.GetInt8("int8"); err != nil || v != *int8Flag { + t.Error("GetInt8 does not work.") + } + if *int32Flag != -32 { + t.Error("int32 flag should be 0x23, is ", *int32Flag) + } + if v, err := f.GetInt32("int32"); err != nil || v != *int32Flag { + t.Error("GetInt32 does not work.") + } + if *int64Flag != 0x23 { + t.Error("int64 flag should be 0x23, is ", *int64Flag) + } + if v, err := f.GetInt64("int64"); err != nil || v != *int64Flag { + t.Error("GetInt64 does not work.") + } + if *uintFlag != 24 { + t.Error("uint flag should be 24, is ", *uintFlag) + } + if v, err := f.GetUint("uint"); err != nil || v != *uintFlag { + t.Error("GetUint does not work.") + } + if *uint8Flag != 8 { + t.Error("uint8 flag should be 8, is ", *uint8Flag) + } + if v, err := f.GetUint8("uint8"); err != nil || v != *uint8Flag { + t.Error("GetUint8 does not work.") + } + if *uint16Flag != 16 { + t.Error("uint16 flag should be 16, is ", *uint16Flag) + } + if v, err := f.GetUint16("uint16"); err != nil || v != *uint16Flag { + t.Error("GetUint16 does not work.") + } + if *uint32Flag != 32 { + t.Error("uint32 flag should be 32, is ", *uint32Flag) + } + if v, err := f.GetUint32("uint32"); err != nil || v != *uint32Flag { + t.Error("GetUint32 does not work.") 
+ } + if *uint64Flag != 25 { + t.Error("uint64 flag should be 25, is ", *uint64Flag) + } + if v, err := f.GetUint64("uint64"); err != nil || v != *uint64Flag { + t.Error("GetUint64 does not work.") + } + if *stringFlag != "hello" { + t.Error("string flag should be `hello`, is ", *stringFlag) + } + if v, err := f.GetString("string"); err != nil || v != *stringFlag { + t.Error("GetString does not work.") + } + if *float32Flag != -172e12 { + t.Error("float32 flag should be -172e12, is ", *float32Flag) + } + if v, err := f.GetFloat32("float32"); err != nil || v != *float32Flag { + t.Errorf("GetFloat32 returned %v but float32Flag was %v", v, *float32Flag) + } + if *float64Flag != 2718e28 { + t.Error("float64 flag should be 2718e28, is ", *float64Flag) + } + if v, err := f.GetFloat64("float64"); err != nil || v != *float64Flag { + t.Errorf("GetFloat64 returned %v but float64Flag was %v", v, *float64Flag) + } + if !(*ipFlag).Equal(net.ParseIP("10.11.12.13")) { + t.Error("ip flag should be 10.11.12.13, is ", *ipFlag) + } + if v, err := f.GetIP("ip"); err != nil || !v.Equal(*ipFlag) { + t.Errorf("GetIP returned %v but ipFlag was %v", v, *ipFlag) + } + if (*maskFlag).String() != ParseIPv4Mask("255.255.255.0").String() { + t.Error("mask flag should be 255.255.255.0, is ", (*maskFlag).String()) + } + if v, err := f.GetIPv4Mask("mask"); err != nil || v.String() != (*maskFlag).String() { + t.Errorf("GetIP returned %v maskFlag was %v error was %v", v, *maskFlag, err) + } + if *durationFlag != 2*time.Minute { + t.Error("duration flag should be 2m, is ", *durationFlag) + } + if v, err := f.GetDuration("duration"); err != nil || v != *durationFlag { + t.Error("GetDuration does not work.") + } + if _, err := f.GetInt("duration"); err == nil { + t.Error("GetInt parsed a time.Duration?!?!") + } + if *optionalIntNoValueFlag != 9 { + t.Error("optional int flag should be the default value, is ", *optionalIntNoValueFlag) + } + if *optionalIntWithValueFlag != 42 { + t.Error("optional int 
flag should be 42, is ", *optionalIntWithValueFlag) + } + if len(f.Args()) != 1 { + t.Error("expected one argument, got", len(f.Args())) + } else if f.Args()[0] != extra { + t.Errorf("expected argument %q got %q", extra, f.Args()[0]) + } +} + +func TestShorthand(t *testing.T) { + f := NewFlagSet("shorthand", ContinueOnError) + if f.Parsed() { + t.Error("f.Parse() = true before Parse") + } + boolaFlag := f.BoolP("boola", "a", false, "bool value") + boolbFlag := f.BoolP("boolb", "b", false, "bool2 value") + boolcFlag := f.BoolP("boolc", "c", false, "bool3 value") + booldFlag := f.BoolP("boold", "d", false, "bool4 value") + stringaFlag := f.StringP("stringa", "s", "0", "string value") + stringzFlag := f.StringP("stringz", "z", "0", "string value") + extra := "interspersed-argument" + notaflag := "--i-look-like-a-flag" + args := []string{ + "-ab", + extra, + "-cs", + "hello", + "-z=something", + "-d=true", + "--", + notaflag, + } + f.SetOutput(ioutil.Discard) + if err := f.Parse(args); err != nil { + t.Error("expected no error, got ", err) + } + if !f.Parsed() { + t.Error("f.Parse() = false after Parse") + } + if *boolaFlag != true { + t.Error("boola flag should be true, is ", *boolaFlag) + } + if *boolbFlag != true { + t.Error("boolb flag should be true, is ", *boolbFlag) + } + if *boolcFlag != true { + t.Error("boolc flag should be true, is ", *boolcFlag) + } + if *booldFlag != true { + t.Error("boold flag should be true, is ", *booldFlag) + } + if *stringaFlag != "hello" { + t.Error("stringa flag should be `hello`, is ", *stringaFlag) + } + if *stringzFlag != "something" { + t.Error("stringz flag should be `something`, is ", *stringzFlag) + } + if len(f.Args()) != 2 { + t.Error("expected one argument, got", len(f.Args())) + } else if f.Args()[0] != extra { + t.Errorf("expected argument %q got %q", extra, f.Args()[0]) + } else if f.Args()[1] != notaflag { + t.Errorf("expected argument %q got %q", notaflag, f.Args()[1]) + } + if f.ArgsLenAtDash() != 1 { + 
t.Errorf("expected argsLenAtDash %d got %d", f.ArgsLenAtDash(), 1) + } +} + +func TestParse(t *testing.T) { + ResetForTesting(func() { t.Error("bad parse") }) + testParse(GetCommandLine(), t) +} + +func TestFlagSetParse(t *testing.T) { + testParse(NewFlagSet("test", ContinueOnError), t) +} + +func TestChangedHelper(t *testing.T) { + f := NewFlagSet("changedtest", ContinueOnError) + _ = f.Bool("changed", false, "changed bool") + _ = f.Bool("settrue", true, "true to true") + _ = f.Bool("setfalse", false, "false to false") + _ = f.Bool("unchanged", false, "unchanged bool") + + args := []string{"--changed", "--settrue", "--setfalse=false"} + if err := f.Parse(args); err != nil { + t.Error("f.Parse() = false after Parse") + } + if !f.Changed("changed") { + t.Errorf("--changed wasn't changed!") + } + if !f.Changed("settrue") { + t.Errorf("--settrue wasn't changed!") + } + if !f.Changed("setfalse") { + t.Errorf("--setfalse wasn't changed!") + } + if f.Changed("unchanged") { + t.Errorf("--unchanged was changed!") + } + if f.Changed("invalid") { + t.Errorf("--invalid was changed!") + } + if f.ArgsLenAtDash() != -1 { + t.Errorf("Expected argsLenAtDash: %d but got %d", -1, f.ArgsLenAtDash()) + } +} + +func replaceSeparators(name string, from []string, to string) string { + result := name + for _, sep := range from { + result = strings.Replace(result, sep, to, -1) + } + // Type convert to indicate normalization has been done. + return result +} + +func wordSepNormalizeFunc(f *FlagSet, name string) NormalizedName { + seps := []string{"-", "_"} + name = replaceSeparators(name, seps, ".") + normalizeFlagNameInvocations++ + + return NormalizedName(name) +} + +func testWordSepNormalizedNames(args []string, t *testing.T) { + f := NewFlagSet("normalized", ContinueOnError) + if f.Parsed() { + t.Error("f.Parse() = true before Parse") + } + withDashFlag := f.Bool("with-dash-flag", false, "bool value") + // Set this after some flags have been added and before others. 
+ f.SetNormalizeFunc(wordSepNormalizeFunc) + withUnderFlag := f.Bool("with_under_flag", false, "bool value") + withBothFlag := f.Bool("with-both_flag", false, "bool value") + if err := f.Parse(args); err != nil { + t.Fatal(err) + } + if !f.Parsed() { + t.Error("f.Parse() = false after Parse") + } + if *withDashFlag != true { + t.Error("withDashFlag flag should be true, is ", *withDashFlag) + } + if *withUnderFlag != true { + t.Error("withUnderFlag flag should be true, is ", *withUnderFlag) + } + if *withBothFlag != true { + t.Error("withBothFlag flag should be true, is ", *withBothFlag) + } +} + +func TestWordSepNormalizedNames(t *testing.T) { + args := []string{ + "--with-dash-flag", + "--with-under-flag", + "--with-both-flag", + } + testWordSepNormalizedNames(args, t) + + args = []string{ + "--with_dash_flag", + "--with_under_flag", + "--with_both_flag", + } + testWordSepNormalizedNames(args, t) + + args = []string{ + "--with-dash_flag", + "--with-under_flag", + "--with-both_flag", + } + testWordSepNormalizedNames(args, t) +} + +func aliasAndWordSepFlagNames(f *FlagSet, name string) NormalizedName { + seps := []string{"-", "_"} + + oldName := replaceSeparators("old-valid_flag", seps, ".") + newName := replaceSeparators("valid-flag", seps, ".") + + name = replaceSeparators(name, seps, ".") + switch name { + case oldName: + name = newName + break + } + + return NormalizedName(name) +} + +func TestCustomNormalizedNames(t *testing.T) { + f := NewFlagSet("normalized", ContinueOnError) + if f.Parsed() { + t.Error("f.Parse() = true before Parse") + } + + validFlag := f.Bool("valid-flag", false, "bool value") + f.SetNormalizeFunc(aliasAndWordSepFlagNames) + someOtherFlag := f.Bool("some-other-flag", false, "bool value") + + args := []string{"--old_valid_flag", "--some-other_flag"} + if err := f.Parse(args); err != nil { + t.Fatal(err) + } + + if *validFlag != true { + t.Errorf("validFlag is %v even though we set the alias --old_valid_falg", *validFlag) + } + if 
*someOtherFlag != true { + t.Error("someOtherFlag should be true, is ", *someOtherFlag) + } +} + +// Every flag we add, the name (displayed also in usage) should normalized +func TestNormalizationFuncShouldChangeFlagName(t *testing.T) { + // Test normalization after addition + f := NewFlagSet("normalized", ContinueOnError) + + f.Bool("valid_flag", false, "bool value") + if f.Lookup("valid_flag").Name != "valid_flag" { + t.Error("The new flag should have the name 'valid_flag' instead of ", f.Lookup("valid_flag").Name) + } + + f.SetNormalizeFunc(wordSepNormalizeFunc) + if f.Lookup("valid_flag").Name != "valid.flag" { + t.Error("The new flag should have the name 'valid.flag' instead of ", f.Lookup("valid_flag").Name) + } + + // Test normalization before addition + f = NewFlagSet("normalized", ContinueOnError) + f.SetNormalizeFunc(wordSepNormalizeFunc) + + f.Bool("valid_flag", false, "bool value") + if f.Lookup("valid_flag").Name != "valid.flag" { + t.Error("The new flag should have the name 'valid.flag' instead of ", f.Lookup("valid_flag").Name) + } +} + +// Declare a user-defined flag type. 
+type flagVar []string + +func (f *flagVar) String() string { + return fmt.Sprint([]string(*f)) +} + +func (f *flagVar) Set(value string) error { + *f = append(*f, value) + return nil +} + +func (f *flagVar) Type() string { + return "flagVar" +} + +func TestUserDefined(t *testing.T) { + var flags FlagSet + flags.Init("test", ContinueOnError) + var v flagVar + flags.VarP(&v, "v", "v", "usage") + if err := flags.Parse([]string{"--v=1", "-v2", "-v", "3"}); err != nil { + t.Error(err) + } + if len(v) != 3 { + t.Fatal("expected 3 args; got ", len(v)) + } + expect := "[1 2 3]" + if v.String() != expect { + t.Errorf("expected value %q got %q", expect, v.String()) + } +} + +func TestSetOutput(t *testing.T) { + var flags FlagSet + var buf bytes.Buffer + flags.SetOutput(&buf) + flags.Init("test", ContinueOnError) + flags.Parse([]string{"--unknown"}) + if out := buf.String(); !strings.Contains(out, "--unknown") { + t.Logf("expected output mentioning unknown; got %q", out) + } +} + +// This tests that one can reset the flags. This still works but not well, and is +// superseded by FlagSet. +func TestChangingArgs(t *testing.T) { + ResetForTesting(func() { t.Fatal("bad parse") }) + oldArgs := os.Args + defer func() { os.Args = oldArgs }() + os.Args = []string{"cmd", "--before", "subcmd"} + before := Bool("before", false, "") + if err := GetCommandLine().Parse(os.Args[1:]); err != nil { + t.Fatal(err) + } + cmd := Arg(0) + os.Args = []string{"subcmd", "--after", "args"} + after := Bool("after", false, "") + Parse() + args := Args() + + if !*before || cmd != "subcmd" || !*after || len(args) != 1 || args[0] != "args" { + t.Fatalf("expected true subcmd true [args] got %v %v %v %v", *before, cmd, *after, args) + } +} + +// Test that -help invokes the usage message and returns ErrHelp. 
+func TestHelp(t *testing.T) { + var helpCalled = false + fs := NewFlagSet("help test", ContinueOnError) + fs.Usage = func() { helpCalled = true } + var flag bool + fs.BoolVar(&flag, "flag", false, "regular flag") + // Regular flag invocation should work + err := fs.Parse([]string{"--flag=true"}) + if err != nil { + t.Fatal("expected no error; got ", err) + } + if !flag { + t.Error("flag was not set by --flag") + } + if helpCalled { + t.Error("help called for regular flag") + helpCalled = false // reset for next test + } + // Help flag should work as expected. + err = fs.Parse([]string{"--help"}) + if err == nil { + t.Fatal("error expected") + } + if err != ErrHelp { + t.Fatal("expected ErrHelp; got ", err) + } + if !helpCalled { + t.Fatal("help was not called") + } + // If we define a help flag, that should override. + var help bool + fs.BoolVar(&help, "help", false, "help flag") + helpCalled = false + err = fs.Parse([]string{"--help"}) + if err != nil { + t.Fatal("expected no error for defined --help; got ", err) + } + if helpCalled { + t.Fatal("help was called; should not have been for defined help flag") + } +} + +func TestNoInterspersed(t *testing.T) { + f := NewFlagSet("test", ContinueOnError) + f.SetInterspersed(false) + f.Bool("true", true, "always true") + f.Bool("false", false, "always false") + err := f.Parse([]string{"--true", "break", "--false"}) + if err != nil { + t.Fatal("expected no error; got ", err) + } + args := f.Args() + if len(args) != 2 || args[0] != "break" || args[1] != "--false" { + t.Fatal("expected interspersed options/non-options to fail") + } +} + +func TestTermination(t *testing.T) { + f := NewFlagSet("termination", ContinueOnError) + boolFlag := f.BoolP("bool", "l", false, "bool value") + if f.Parsed() { + t.Error("f.Parse() = true before Parse") + } + arg1 := "ls" + arg2 := "-l" + args := []string{ + "--", + arg1, + arg2, + } + f.SetOutput(ioutil.Discard) + if err := f.Parse(args); err != nil { + t.Fatal("expected no error; got ", 
err) + } + if !f.Parsed() { + t.Error("f.Parse() = false after Parse") + } + if *boolFlag { + t.Error("expected boolFlag=false, got true") + } + if len(f.Args()) != 2 { + t.Errorf("expected 2 arguments, got %d: %v", len(f.Args()), f.Args()) + } + if f.Args()[0] != arg1 { + t.Errorf("expected argument %q got %q", arg1, f.Args()[0]) + } + if f.Args()[1] != arg2 { + t.Errorf("expected argument %q got %q", arg2, f.Args()[1]) + } + if f.ArgsLenAtDash() != 0 { + t.Errorf("expected argsLenAtDash %d got %d", 0, f.ArgsLenAtDash()) + } +} + +func TestDeprecatedFlagInDocs(t *testing.T) { + f := NewFlagSet("bob", ContinueOnError) + f.Bool("badflag", true, "always true") + f.MarkDeprecated("badflag", "use --good-flag instead") + + out := new(bytes.Buffer) + f.SetOutput(out) + f.PrintDefaults() + + if strings.Contains(out.String(), "badflag") { + t.Errorf("found deprecated flag in usage!") + } +} + +func TestDeprecatedFlagShorthandInDocs(t *testing.T) { + f := NewFlagSet("bob", ContinueOnError) + name := "noshorthandflag" + f.BoolP(name, "n", true, "always true") + f.MarkShorthandDeprecated("noshorthandflag", fmt.Sprintf("use --%s instead", name)) + + out := new(bytes.Buffer) + f.SetOutput(out) + f.PrintDefaults() + + if strings.Contains(out.String(), "-n,") { + t.Errorf("found deprecated flag shorthand in usage!") + } +} + +func parseReturnStderr(t *testing.T, f *FlagSet, args []string) (string, error) { + oldStderr := os.Stderr + r, w, _ := os.Pipe() + os.Stderr = w + + err := f.Parse(args) + + outC := make(chan string) + // copy the output in a separate goroutine so printing can't block indefinitely + go func() { + var buf bytes.Buffer + io.Copy(&buf, r) + outC <- buf.String() + }() + + w.Close() + os.Stderr = oldStderr + out := <-outC + + return out, err +} + +func TestDeprecatedFlagUsage(t *testing.T) { + f := NewFlagSet("bob", ContinueOnError) + f.Bool("badflag", true, "always true") + usageMsg := "use --good-flag instead" + f.MarkDeprecated("badflag", usageMsg) + + args 
:= []string{"--badflag"} + out, err := parseReturnStderr(t, f, args) + if err != nil { + t.Fatal("expected no error; got ", err) + } + + if !strings.Contains(out, usageMsg) { + t.Errorf("usageMsg not printed when using a deprecated flag!") + } +} + +func TestDeprecatedFlagShorthandUsage(t *testing.T) { + f := NewFlagSet("bob", ContinueOnError) + name := "noshorthandflag" + f.BoolP(name, "n", true, "always true") + usageMsg := fmt.Sprintf("use --%s instead", name) + f.MarkShorthandDeprecated(name, usageMsg) + + args := []string{"-n"} + out, err := parseReturnStderr(t, f, args) + if err != nil { + t.Fatal("expected no error; got ", err) + } + + if !strings.Contains(out, usageMsg) { + t.Errorf("usageMsg not printed when using a deprecated flag!") + } +} + +func TestDeprecatedFlagUsageNormalized(t *testing.T) { + f := NewFlagSet("bob", ContinueOnError) + f.Bool("bad-double_flag", true, "always true") + f.SetNormalizeFunc(wordSepNormalizeFunc) + usageMsg := "use --good-flag instead" + f.MarkDeprecated("bad_double-flag", usageMsg) + + args := []string{"--bad_double_flag"} + out, err := parseReturnStderr(t, f, args) + if err != nil { + t.Fatal("expected no error; got ", err) + } + + if !strings.Contains(out, usageMsg) { + t.Errorf("usageMsg not printed when using a deprecated flag!") + } +} + +// Name normalization function should be called only once on flag addition +func TestMultipleNormalizeFlagNameInvocations(t *testing.T) { + normalizeFlagNameInvocations = 0 + + f := NewFlagSet("normalized", ContinueOnError) + f.SetNormalizeFunc(wordSepNormalizeFunc) + f.Bool("with_under_flag", false, "bool value") + + if normalizeFlagNameInvocations != 1 { + t.Fatal("Expected normalizeFlagNameInvocations to be 1; got ", normalizeFlagNameInvocations) + } +} + +// +func TestHiddenFlagInUsage(t *testing.T) { + f := NewFlagSet("bob", ContinueOnError) + f.Bool("secretFlag", true, "shhh") + f.MarkHidden("secretFlag") + + out := new(bytes.Buffer) + f.SetOutput(out) + f.PrintDefaults() + + 
if strings.Contains(out.String(), "secretFlag") { + t.Errorf("found hidden flag in usage!") + } +} + +// +func TestHiddenFlagUsage(t *testing.T) { + f := NewFlagSet("bob", ContinueOnError) + f.Bool("secretFlag", true, "shhh") + f.MarkHidden("secretFlag") + + args := []string{"--secretFlag"} + out, err := parseReturnStderr(t, f, args) + if err != nil { + t.Fatal("expected no error; got ", err) + } + + if strings.Contains(out, "shhh") { + t.Errorf("usage message printed when using a hidden flag!") + } +} + +const defaultOutput = ` --A for bootstrapping, allow 'any' type + --Alongflagname disable bounds checking + -C, --CCC a boolean defaulting to true (default true) + --D path set relative path for local imports + --F number a non-zero number (default 2.7) + --G float a float that defaults to zero + --IP ip IP address with no default + --IPMask ipMask Netmask address with no default + --IPNet ipNet IP network with no default + --Ints intSlice int slice with zero default + --N int a non-zero int (default 27) + --ND1 string[="bar"] a string with NoOptDefVal (default "foo") + --ND2 num[=4321] a num with NoOptDefVal (default 1234) + --StringArray stringArray string array with zero default + --StringSlice stringSlice string slice with zero default + --Z int an int that defaults to zero + --custom custom custom Value implementation + --customP custom a VarP with default (default 10) + --maxT timeout set timeout for dial +` + +// Custom value that satisfies the Value interface. 
+type customValue int + +func (cv *customValue) String() string { return fmt.Sprintf("%v", *cv) } + +func (cv *customValue) Set(s string) error { + v, err := strconv.ParseInt(s, 0, 64) + *cv = customValue(v) + return err +} + +func (cv *customValue) Type() string { return "custom" } + +func TestPrintDefaults(t *testing.T) { + fs := NewFlagSet("print defaults test", ContinueOnError) + var buf bytes.Buffer + fs.SetOutput(&buf) + fs.Bool("A", false, "for bootstrapping, allow 'any' type") + fs.Bool("Alongflagname", false, "disable bounds checking") + fs.BoolP("CCC", "C", true, "a boolean defaulting to true") + fs.String("D", "", "set relative `path` for local imports") + fs.Float64("F", 2.7, "a non-zero `number`") + fs.Float64("G", 0, "a float that defaults to zero") + fs.Int("N", 27, "a non-zero int") + fs.IntSlice("Ints", []int{}, "int slice with zero default") + fs.IP("IP", nil, "IP address with no default") + fs.IPMask("IPMask", nil, "Netmask address with no default") + fs.IPNet("IPNet", net.IPNet{}, "IP network with no default") + fs.Int("Z", 0, "an int that defaults to zero") + fs.Duration("maxT", 0, "set `timeout` for dial") + fs.String("ND1", "foo", "a string with NoOptDefVal") + fs.Lookup("ND1").NoOptDefVal = "bar" + fs.Int("ND2", 1234, "a `num` with NoOptDefVal") + fs.Lookup("ND2").NoOptDefVal = "4321" + fs.StringSlice("StringSlice", []string{}, "string slice with zero default") + fs.StringArray("StringArray", []string{}, "string array with zero default") + + var cv customValue + fs.Var(&cv, "custom", "custom Value implementation") + + cv2 := customValue(10) + fs.VarP(&cv2, "customP", "", "a VarP with default") + + fs.PrintDefaults() + got := buf.String() + if got != defaultOutput { + fmt.Println("\n" + got) + fmt.Println("\n" + defaultOutput) + t.Errorf("got %q want %q\n", got, defaultOutput) + } +} diff --git a/src/vendor/github.com/spf13/pflag/float32.go b/src/vendor/github.com/spf13/pflag/float32.go new file mode 100644 index 00000000..a243f81f --- 
/dev/null +++ b/src/vendor/github.com/spf13/pflag/float32.go @@ -0,0 +1,88 @@ +package pflag + +import "strconv" + +// -- float32 Value +type float32Value float32 + +func newFloat32Value(val float32, p *float32) *float32Value { + *p = val + return (*float32Value)(p) +} + +func (f *float32Value) Set(s string) error { + v, err := strconv.ParseFloat(s, 32) + *f = float32Value(v) + return err +} + +func (f *float32Value) Type() string { + return "float32" +} + +func (f *float32Value) String() string { return strconv.FormatFloat(float64(*f), 'g', -1, 32) } + +func float32Conv(sval string) (interface{}, error) { + v, err := strconv.ParseFloat(sval, 32) + if err != nil { + return 0, err + } + return float32(v), nil +} + +// GetFloat32 return the float32 value of a flag with the given name +func (f *FlagSet) GetFloat32(name string) (float32, error) { + val, err := f.getFlagType(name, "float32", float32Conv) + if err != nil { + return 0, err + } + return val.(float32), nil +} + +// Float32Var defines a float32 flag with specified name, default value, and usage string. +// The argument p points to a float32 variable in which to store the value of the flag. +func (f *FlagSet) Float32Var(p *float32, name string, value float32, usage string) { + f.VarP(newFloat32Value(value, p), name, "", usage) +} + +// Float32VarP is like Float32Var, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Float32VarP(p *float32, name, shorthand string, value float32, usage string) { + f.VarP(newFloat32Value(value, p), name, shorthand, usage) +} + +// Float32Var defines a float32 flag with specified name, default value, and usage string. +// The argument p points to a float32 variable in which to store the value of the flag. 
+func Float32Var(p *float32, name string, value float32, usage string) { + CommandLine.VarP(newFloat32Value(value, p), name, "", usage) +} + +// Float32VarP is like Float32Var, but accepts a shorthand letter that can be used after a single dash. +func Float32VarP(p *float32, name, shorthand string, value float32, usage string) { + CommandLine.VarP(newFloat32Value(value, p), name, shorthand, usage) +} + +// Float32 defines a float32 flag with specified name, default value, and usage string. +// The return value is the address of a float32 variable that stores the value of the flag. +func (f *FlagSet) Float32(name string, value float32, usage string) *float32 { + p := new(float32) + f.Float32VarP(p, name, "", value, usage) + return p +} + +// Float32P is like Float32, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Float32P(name, shorthand string, value float32, usage string) *float32 { + p := new(float32) + f.Float32VarP(p, name, shorthand, value, usage) + return p +} + +// Float32 defines a float32 flag with specified name, default value, and usage string. +// The return value is the address of a float32 variable that stores the value of the flag. +func Float32(name string, value float32, usage string) *float32 { + return CommandLine.Float32P(name, "", value, usage) +} + +// Float32P is like Float32, but accepts a shorthand letter that can be used after a single dash. 
+func Float32P(name, shorthand string, value float32, usage string) *float32 { + return CommandLine.Float32P(name, shorthand, value, usage) +} diff --git a/src/vendor/github.com/spf13/pflag/float64.go b/src/vendor/github.com/spf13/pflag/float64.go new file mode 100644 index 00000000..04b5492a --- /dev/null +++ b/src/vendor/github.com/spf13/pflag/float64.go @@ -0,0 +1,84 @@ +package pflag + +import "strconv" + +// -- float64 Value +type float64Value float64 + +func newFloat64Value(val float64, p *float64) *float64Value { + *p = val + return (*float64Value)(p) +} + +func (f *float64Value) Set(s string) error { + v, err := strconv.ParseFloat(s, 64) + *f = float64Value(v) + return err +} + +func (f *float64Value) Type() string { + return "float64" +} + +func (f *float64Value) String() string { return strconv.FormatFloat(float64(*f), 'g', -1, 64) } + +func float64Conv(sval string) (interface{}, error) { + return strconv.ParseFloat(sval, 64) +} + +// GetFloat64 return the float64 value of a flag with the given name +func (f *FlagSet) GetFloat64(name string) (float64, error) { + val, err := f.getFlagType(name, "float64", float64Conv) + if err != nil { + return 0, err + } + return val.(float64), nil +} + +// Float64Var defines a float64 flag with specified name, default value, and usage string. +// The argument p points to a float64 variable in which to store the value of the flag. +func (f *FlagSet) Float64Var(p *float64, name string, value float64, usage string) { + f.VarP(newFloat64Value(value, p), name, "", usage) +} + +// Float64VarP is like Float64Var, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Float64VarP(p *float64, name, shorthand string, value float64, usage string) { + f.VarP(newFloat64Value(value, p), name, shorthand, usage) +} + +// Float64Var defines a float64 flag with specified name, default value, and usage string. +// The argument p points to a float64 variable in which to store the value of the flag. 
+func Float64Var(p *float64, name string, value float64, usage string) { + CommandLine.VarP(newFloat64Value(value, p), name, "", usage) +} + +// Float64VarP is like Float64Var, but accepts a shorthand letter that can be used after a single dash. +func Float64VarP(p *float64, name, shorthand string, value float64, usage string) { + CommandLine.VarP(newFloat64Value(value, p), name, shorthand, usage) +} + +// Float64 defines a float64 flag with specified name, default value, and usage string. +// The return value is the address of a float64 variable that stores the value of the flag. +func (f *FlagSet) Float64(name string, value float64, usage string) *float64 { + p := new(float64) + f.Float64VarP(p, name, "", value, usage) + return p +} + +// Float64P is like Float64, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Float64P(name, shorthand string, value float64, usage string) *float64 { + p := new(float64) + f.Float64VarP(p, name, shorthand, value, usage) + return p +} + +// Float64 defines a float64 flag with specified name, default value, and usage string. +// The return value is the address of a float64 variable that stores the value of the flag. +func Float64(name string, value float64, usage string) *float64 { + return CommandLine.Float64P(name, "", value, usage) +} + +// Float64P is like Float64, but accepts a shorthand letter that can be used after a single dash. +func Float64P(name, shorthand string, value float64, usage string) *float64 { + return CommandLine.Float64P(name, shorthand, value, usage) +} diff --git a/src/vendor/github.com/spf13/pflag/golangflag.go b/src/vendor/github.com/spf13/pflag/golangflag.go new file mode 100644 index 00000000..b056147f --- /dev/null +++ b/src/vendor/github.com/spf13/pflag/golangflag.go @@ -0,0 +1,104 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package pflag + +import ( + goflag "flag" + "fmt" + "reflect" + "strings" +) + +var _ = fmt.Print + +// flagValueWrapper implements pflag.Value around a flag.Value. The main +// difference here is the addition of the Type method that returns a string +// name of the type. As this is generally unknown, we approximate that with +// reflection. +type flagValueWrapper struct { + inner goflag.Value + flagType string +} + +// We are just copying the boolFlag interface out of goflag as that is what +// they use to decide if a flag should get "true" when no arg is given. +type goBoolFlag interface { + goflag.Value + IsBoolFlag() bool +} + +func wrapFlagValue(v goflag.Value) Value { + // If the flag.Value happens to also be a pflag.Value, just use it directly. + if pv, ok := v.(Value); ok { + return pv + } + + pv := &flagValueWrapper{ + inner: v, + } + + t := reflect.TypeOf(v) + if t.Kind() == reflect.Interface || t.Kind() == reflect.Ptr { + t = t.Elem() + } + + pv.flagType = strings.TrimSuffix(t.Name(), "Value") + return pv +} + +func (v *flagValueWrapper) String() string { + return v.inner.String() +} + +func (v *flagValueWrapper) Set(s string) error { + return v.inner.Set(s) +} + +func (v *flagValueWrapper) Type() string { + return v.flagType +} + +// PFlagFromGoFlag will return a *pflag.Flag given a *flag.Flag +// If the *flag.Flag.Name was a single character (ex: `v`) it will be accessiblei +// with both `-v` and `--v` in flags. If the golang flag was more than a single +// character (ex: `verbose`) it will only be accessible via `--verbose` +func PFlagFromGoFlag(goflag *goflag.Flag) *Flag { + // Remember the default value as a string; it won't change. 
+ flag := &Flag{ + Name: goflag.Name, + Usage: goflag.Usage, + Value: wrapFlagValue(goflag.Value), + // Looks like golang flags don't set DefValue correctly :-( + //DefValue: goflag.DefValue, + DefValue: goflag.Value.String(), + } + // Ex: if the golang flag was -v, allow both -v and --v to work + if len(flag.Name) == 1 { + flag.Shorthand = flag.Name + } + if fv, ok := goflag.Value.(goBoolFlag); ok && fv.IsBoolFlag() { + flag.NoOptDefVal = "true" + } + return flag +} + +// AddGoFlag will add the given *flag.Flag to the pflag.FlagSet +func (f *FlagSet) AddGoFlag(goflag *goflag.Flag) { + if f.Lookup(goflag.Name) != nil { + return + } + newflag := PFlagFromGoFlag(goflag) + f.AddFlag(newflag) +} + +// AddGoFlagSet will add the given *flag.FlagSet to the pflag.FlagSet +func (f *FlagSet) AddGoFlagSet(newSet *goflag.FlagSet) { + if newSet == nil { + return + } + newSet.VisitAll(func(goflag *goflag.Flag) { + f.AddGoFlag(goflag) + }) +} diff --git a/src/vendor/github.com/spf13/pflag/golangflag_test.go b/src/vendor/github.com/spf13/pflag/golangflag_test.go new file mode 100644 index 00000000..77e2d7d8 --- /dev/null +++ b/src/vendor/github.com/spf13/pflag/golangflag_test.go @@ -0,0 +1,39 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package pflag + +import ( + goflag "flag" + "testing" +) + +func TestGoflags(t *testing.T) { + goflag.String("stringFlag", "stringFlag", "stringFlag") + goflag.Bool("boolFlag", false, "boolFlag") + + f := NewFlagSet("test", ContinueOnError) + + f.AddGoFlagSet(goflag.CommandLine) + err := f.Parse([]string{"--stringFlag=bob", "--boolFlag"}) + if err != nil { + t.Fatal("expected no error; get", err) + } + + getString, err := f.GetString("stringFlag") + if err != nil { + t.Fatal("expected no error; get", err) + } + if getString != "bob" { + t.Fatalf("expected getString=bob but got getString=%s", getString) + } + + getBool, err := f.GetBool("boolFlag") + if err != nil { + t.Fatal("expected no error; get", err) + } + if getBool != true { + t.Fatalf("expected getBool=true but got getBool=%v", getBool) + } +} diff --git a/src/vendor/github.com/spf13/pflag/int.go b/src/vendor/github.com/spf13/pflag/int.go new file mode 100644 index 00000000..1474b89d --- /dev/null +++ b/src/vendor/github.com/spf13/pflag/int.go @@ -0,0 +1,84 @@ +package pflag + +import "strconv" + +// -- int Value +type intValue int + +func newIntValue(val int, p *int) *intValue { + *p = val + return (*intValue)(p) +} + +func (i *intValue) Set(s string) error { + v, err := strconv.ParseInt(s, 0, 64) + *i = intValue(v) + return err +} + +func (i *intValue) Type() string { + return "int" +} + +func (i *intValue) String() string { return strconv.Itoa(int(*i)) } + +func intConv(sval string) (interface{}, error) { + return strconv.Atoi(sval) +} + +// GetInt return the int value of a flag with the given name +func (f *FlagSet) GetInt(name string) (int, error) { + val, err := f.getFlagType(name, "int", intConv) + if err != nil { + return 0, err + } + return val.(int), nil +} + +// IntVar defines an int flag with specified name, default value, and usage string. +// The argument p points to an int variable in which to store the value of the flag. 
+func (f *FlagSet) IntVar(p *int, name string, value int, usage string) { + f.VarP(newIntValue(value, p), name, "", usage) +} + +// IntVarP is like IntVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) IntVarP(p *int, name, shorthand string, value int, usage string) { + f.VarP(newIntValue(value, p), name, shorthand, usage) +} + +// IntVar defines an int flag with specified name, default value, and usage string. +// The argument p points to an int variable in which to store the value of the flag. +func IntVar(p *int, name string, value int, usage string) { + CommandLine.VarP(newIntValue(value, p), name, "", usage) +} + +// IntVarP is like IntVar, but accepts a shorthand letter that can be used after a single dash. +func IntVarP(p *int, name, shorthand string, value int, usage string) { + CommandLine.VarP(newIntValue(value, p), name, shorthand, usage) +} + +// Int defines an int flag with specified name, default value, and usage string. +// The return value is the address of an int variable that stores the value of the flag. +func (f *FlagSet) Int(name string, value int, usage string) *int { + p := new(int) + f.IntVarP(p, name, "", value, usage) + return p +} + +// IntP is like Int, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) IntP(name, shorthand string, value int, usage string) *int { + p := new(int) + f.IntVarP(p, name, shorthand, value, usage) + return p +} + +// Int defines an int flag with specified name, default value, and usage string. +// The return value is the address of an int variable that stores the value of the flag. +func Int(name string, value int, usage string) *int { + return CommandLine.IntP(name, "", value, usage) +} + +// IntP is like Int, but accepts a shorthand letter that can be used after a single dash. 
+func IntP(name, shorthand string, value int, usage string) *int { + return CommandLine.IntP(name, shorthand, value, usage) +} diff --git a/src/vendor/github.com/spf13/pflag/int32.go b/src/vendor/github.com/spf13/pflag/int32.go new file mode 100644 index 00000000..9b95944f --- /dev/null +++ b/src/vendor/github.com/spf13/pflag/int32.go @@ -0,0 +1,88 @@ +package pflag + +import "strconv" + +// -- int32 Value +type int32Value int32 + +func newInt32Value(val int32, p *int32) *int32Value { + *p = val + return (*int32Value)(p) +} + +func (i *int32Value) Set(s string) error { + v, err := strconv.ParseInt(s, 0, 32) + *i = int32Value(v) + return err +} + +func (i *int32Value) Type() string { + return "int32" +} + +func (i *int32Value) String() string { return strconv.FormatInt(int64(*i), 10) } + +func int32Conv(sval string) (interface{}, error) { + v, err := strconv.ParseInt(sval, 0, 32) + if err != nil { + return 0, err + } + return int32(v), nil +} + +// GetInt32 return the int32 value of a flag with the given name +func (f *FlagSet) GetInt32(name string) (int32, error) { + val, err := f.getFlagType(name, "int32", int32Conv) + if err != nil { + return 0, err + } + return val.(int32), nil +} + +// Int32Var defines an int32 flag with specified name, default value, and usage string. +// The argument p points to an int32 variable in which to store the value of the flag. +func (f *FlagSet) Int32Var(p *int32, name string, value int32, usage string) { + f.VarP(newInt32Value(value, p), name, "", usage) +} + +// Int32VarP is like Int32Var, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Int32VarP(p *int32, name, shorthand string, value int32, usage string) { + f.VarP(newInt32Value(value, p), name, shorthand, usage) +} + +// Int32Var defines an int32 flag with specified name, default value, and usage string. +// The argument p points to an int32 variable in which to store the value of the flag. 
+func Int32Var(p *int32, name string, value int32, usage string) { + CommandLine.VarP(newInt32Value(value, p), name, "", usage) +} + +// Int32VarP is like Int32Var, but accepts a shorthand letter that can be used after a single dash. +func Int32VarP(p *int32, name, shorthand string, value int32, usage string) { + CommandLine.VarP(newInt32Value(value, p), name, shorthand, usage) +} + +// Int32 defines an int32 flag with specified name, default value, and usage string. +// The return value is the address of an int32 variable that stores the value of the flag. +func (f *FlagSet) Int32(name string, value int32, usage string) *int32 { + p := new(int32) + f.Int32VarP(p, name, "", value, usage) + return p +} + +// Int32P is like Int32, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Int32P(name, shorthand string, value int32, usage string) *int32 { + p := new(int32) + f.Int32VarP(p, name, shorthand, value, usage) + return p +} + +// Int32 defines an int32 flag with specified name, default value, and usage string. +// The return value is the address of an int32 variable that stores the value of the flag. +func Int32(name string, value int32, usage string) *int32 { + return CommandLine.Int32P(name, "", value, usage) +} + +// Int32P is like Int32, but accepts a shorthand letter that can be used after a single dash. 
+func Int32P(name, shorthand string, value int32, usage string) *int32 { + return CommandLine.Int32P(name, shorthand, value, usage) +} diff --git a/src/vendor/github.com/spf13/pflag/int64.go b/src/vendor/github.com/spf13/pflag/int64.go new file mode 100644 index 00000000..0026d781 --- /dev/null +++ b/src/vendor/github.com/spf13/pflag/int64.go @@ -0,0 +1,84 @@ +package pflag + +import "strconv" + +// -- int64 Value +type int64Value int64 + +func newInt64Value(val int64, p *int64) *int64Value { + *p = val + return (*int64Value)(p) +} + +func (i *int64Value) Set(s string) error { + v, err := strconv.ParseInt(s, 0, 64) + *i = int64Value(v) + return err +} + +func (i *int64Value) Type() string { + return "int64" +} + +func (i *int64Value) String() string { return strconv.FormatInt(int64(*i), 10) } + +func int64Conv(sval string) (interface{}, error) { + return strconv.ParseInt(sval, 0, 64) +} + +// GetInt64 return the int64 value of a flag with the given name +func (f *FlagSet) GetInt64(name string) (int64, error) { + val, err := f.getFlagType(name, "int64", int64Conv) + if err != nil { + return 0, err + } + return val.(int64), nil +} + +// Int64Var defines an int64 flag with specified name, default value, and usage string. +// The argument p points to an int64 variable in which to store the value of the flag. +func (f *FlagSet) Int64Var(p *int64, name string, value int64, usage string) { + f.VarP(newInt64Value(value, p), name, "", usage) +} + +// Int64VarP is like Int64Var, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Int64VarP(p *int64, name, shorthand string, value int64, usage string) { + f.VarP(newInt64Value(value, p), name, shorthand, usage) +} + +// Int64Var defines an int64 flag with specified name, default value, and usage string. +// The argument p points to an int64 variable in which to store the value of the flag. 
+func Int64Var(p *int64, name string, value int64, usage string) { + CommandLine.VarP(newInt64Value(value, p), name, "", usage) +} + +// Int64VarP is like Int64Var, but accepts a shorthand letter that can be used after a single dash. +func Int64VarP(p *int64, name, shorthand string, value int64, usage string) { + CommandLine.VarP(newInt64Value(value, p), name, shorthand, usage) +} + +// Int64 defines an int64 flag with specified name, default value, and usage string. +// The return value is the address of an int64 variable that stores the value of the flag. +func (f *FlagSet) Int64(name string, value int64, usage string) *int64 { + p := new(int64) + f.Int64VarP(p, name, "", value, usage) + return p +} + +// Int64P is like Int64, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Int64P(name, shorthand string, value int64, usage string) *int64 { + p := new(int64) + f.Int64VarP(p, name, shorthand, value, usage) + return p +} + +// Int64 defines an int64 flag with specified name, default value, and usage string. +// The return value is the address of an int64 variable that stores the value of the flag. +func Int64(name string, value int64, usage string) *int64 { + return CommandLine.Int64P(name, "", value, usage) +} + +// Int64P is like Int64, but accepts a shorthand letter that can be used after a single dash. 
+func Int64P(name, shorthand string, value int64, usage string) *int64 { + return CommandLine.Int64P(name, shorthand, value, usage) +} diff --git a/src/vendor/github.com/spf13/pflag/int8.go b/src/vendor/github.com/spf13/pflag/int8.go new file mode 100644 index 00000000..4da92228 --- /dev/null +++ b/src/vendor/github.com/spf13/pflag/int8.go @@ -0,0 +1,88 @@ +package pflag + +import "strconv" + +// -- int8 Value +type int8Value int8 + +func newInt8Value(val int8, p *int8) *int8Value { + *p = val + return (*int8Value)(p) +} + +func (i *int8Value) Set(s string) error { + v, err := strconv.ParseInt(s, 0, 8) + *i = int8Value(v) + return err +} + +func (i *int8Value) Type() string { + return "int8" +} + +func (i *int8Value) String() string { return strconv.FormatInt(int64(*i), 10) } + +func int8Conv(sval string) (interface{}, error) { + v, err := strconv.ParseInt(sval, 0, 8) + if err != nil { + return 0, err + } + return int8(v), nil +} + +// GetInt8 return the int8 value of a flag with the given name +func (f *FlagSet) GetInt8(name string) (int8, error) { + val, err := f.getFlagType(name, "int8", int8Conv) + if err != nil { + return 0, err + } + return val.(int8), nil +} + +// Int8Var defines an int8 flag with specified name, default value, and usage string. +// The argument p points to an int8 variable in which to store the value of the flag. +func (f *FlagSet) Int8Var(p *int8, name string, value int8, usage string) { + f.VarP(newInt8Value(value, p), name, "", usage) +} + +// Int8VarP is like Int8Var, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Int8VarP(p *int8, name, shorthand string, value int8, usage string) { + f.VarP(newInt8Value(value, p), name, shorthand, usage) +} + +// Int8Var defines an int8 flag with specified name, default value, and usage string. +// The argument p points to an int8 variable in which to store the value of the flag. 
+func Int8Var(p *int8, name string, value int8, usage string) { + CommandLine.VarP(newInt8Value(value, p), name, "", usage) +} + +// Int8VarP is like Int8Var, but accepts a shorthand letter that can be used after a single dash. +func Int8VarP(p *int8, name, shorthand string, value int8, usage string) { + CommandLine.VarP(newInt8Value(value, p), name, shorthand, usage) +} + +// Int8 defines an int8 flag with specified name, default value, and usage string. +// The return value is the address of an int8 variable that stores the value of the flag. +func (f *FlagSet) Int8(name string, value int8, usage string) *int8 { + p := new(int8) + f.Int8VarP(p, name, "", value, usage) + return p +} + +// Int8P is like Int8, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Int8P(name, shorthand string, value int8, usage string) *int8 { + p := new(int8) + f.Int8VarP(p, name, shorthand, value, usage) + return p +} + +// Int8 defines an int8 flag with specified name, default value, and usage string. +// The return value is the address of an int8 variable that stores the value of the flag. +func Int8(name string, value int8, usage string) *int8 { + return CommandLine.Int8P(name, "", value, usage) +} + +// Int8P is like Int8, but accepts a shorthand letter that can be used after a single dash. 
+func Int8P(name, shorthand string, value int8, usage string) *int8 { + return CommandLine.Int8P(name, shorthand, value, usage) +} diff --git a/src/vendor/github.com/spf13/pflag/int_slice.go b/src/vendor/github.com/spf13/pflag/int_slice.go new file mode 100644 index 00000000..1e7c9edd --- /dev/null +++ b/src/vendor/github.com/spf13/pflag/int_slice.go @@ -0,0 +1,128 @@ +package pflag + +import ( + "fmt" + "strconv" + "strings" +) + +// -- intSlice Value +type intSliceValue struct { + value *[]int + changed bool +} + +func newIntSliceValue(val []int, p *[]int) *intSliceValue { + isv := new(intSliceValue) + isv.value = p + *isv.value = val + return isv +} + +func (s *intSliceValue) Set(val string) error { + ss := strings.Split(val, ",") + out := make([]int, len(ss)) + for i, d := range ss { + var err error + out[i], err = strconv.Atoi(d) + if err != nil { + return err + } + + } + if !s.changed { + *s.value = out + } else { + *s.value = append(*s.value, out...) + } + s.changed = true + return nil +} + +func (s *intSliceValue) Type() string { + return "intSlice" +} + +func (s *intSliceValue) String() string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = fmt.Sprintf("%d", d) + } + return "[" + strings.Join(out, ",") + "]" +} + +func intSliceConv(val string) (interface{}, error) { + val = strings.Trim(val, "[]") + // Empty string would cause a slice with one (empty) entry + if len(val) == 0 { + return []int{}, nil + } + ss := strings.Split(val, ",") + out := make([]int, len(ss)) + for i, d := range ss { + var err error + out[i], err = strconv.Atoi(d) + if err != nil { + return nil, err + } + + } + return out, nil +} + +// GetIntSlice return the []int value of a flag with the given name +func (f *FlagSet) GetIntSlice(name string) ([]int, error) { + val, err := f.getFlagType(name, "intSlice", intSliceConv) + if err != nil { + return []int{}, err + } + return val.([]int), nil +} + +// IntSliceVar defines a intSlice flag with specified 
name, default value, and usage string. +// The argument p points to a []int variable in which to store the value of the flag. +func (f *FlagSet) IntSliceVar(p *[]int, name string, value []int, usage string) { + f.VarP(newIntSliceValue(value, p), name, "", usage) +} + +// IntSliceVarP is like IntSliceVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) IntSliceVarP(p *[]int, name, shorthand string, value []int, usage string) { + f.VarP(newIntSliceValue(value, p), name, shorthand, usage) +} + +// IntSliceVar defines a int[] flag with specified name, default value, and usage string. +// The argument p points to a int[] variable in which to store the value of the flag. +func IntSliceVar(p *[]int, name string, value []int, usage string) { + CommandLine.VarP(newIntSliceValue(value, p), name, "", usage) +} + +// IntSliceVarP is like IntSliceVar, but accepts a shorthand letter that can be used after a single dash. +func IntSliceVarP(p *[]int, name, shorthand string, value []int, usage string) { + CommandLine.VarP(newIntSliceValue(value, p), name, shorthand, usage) +} + +// IntSlice defines a []int flag with specified name, default value, and usage string. +// The return value is the address of a []int variable that stores the value of the flag. +func (f *FlagSet) IntSlice(name string, value []int, usage string) *[]int { + p := []int{} + f.IntSliceVarP(&p, name, "", value, usage) + return &p +} + +// IntSliceP is like IntSlice, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) IntSliceP(name, shorthand string, value []int, usage string) *[]int { + p := []int{} + f.IntSliceVarP(&p, name, shorthand, value, usage) + return &p +} + +// IntSlice defines a []int flag with specified name, default value, and usage string. +// The return value is the address of a []int variable that stores the value of the flag. 
+func IntSlice(name string, value []int, usage string) *[]int { + return CommandLine.IntSliceP(name, "", value, usage) +} + +// IntSliceP is like IntSlice, but accepts a shorthand letter that can be used after a single dash. +func IntSliceP(name, shorthand string, value []int, usage string) *[]int { + return CommandLine.IntSliceP(name, shorthand, value, usage) +} diff --git a/src/vendor/github.com/spf13/pflag/int_slice_test.go b/src/vendor/github.com/spf13/pflag/int_slice_test.go new file mode 100644 index 00000000..745aecb9 --- /dev/null +++ b/src/vendor/github.com/spf13/pflag/int_slice_test.go @@ -0,0 +1,165 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pflag + +import ( + "fmt" + "strconv" + "strings" + "testing" +) + +func setUpISFlagSet(isp *[]int) *FlagSet { + f := NewFlagSet("test", ContinueOnError) + f.IntSliceVar(isp, "is", []int{}, "Command separated list!") + return f +} + +func setUpISFlagSetWithDefault(isp *[]int) *FlagSet { + f := NewFlagSet("test", ContinueOnError) + f.IntSliceVar(isp, "is", []int{0, 1}, "Command separated list!") + return f +} + +func TestEmptyIS(t *testing.T) { + var is []int + f := setUpISFlagSet(&is) + err := f.Parse([]string{}) + if err != nil { + t.Fatal("expected no error; got", err) + } + + getIS, err := f.GetIntSlice("is") + if err != nil { + t.Fatal("got an error from GetIntSlice():", err) + } + if len(getIS) != 0 { + t.Fatalf("got is %v with len=%d but expected length=0", getIS, len(getIS)) + } +} + +func TestIS(t *testing.T) { + var is []int + f := setUpISFlagSet(&is) + + vals := []string{"1", "2", "4", "3"} + arg := fmt.Sprintf("--is=%s", strings.Join(vals, ",")) + err := f.Parse([]string{arg}) + if err != nil { + t.Fatal("expected no error; got", err) + } + for i, v := range is { + d, err := strconv.Atoi(vals[i]) + if err != nil { + t.Fatalf("got error: %v", err) + } + if d != v { + 
t.Fatalf("expected is[%d] to be %s but got: %d", i, vals[i], v) + } + } + getIS, err := f.GetIntSlice("is") + if err != nil { + t.Fatalf("got error: %v", err) + } + for i, v := range getIS { + d, err := strconv.Atoi(vals[i]) + if err != nil { + t.Fatalf("got error: %v", err) + } + if d != v { + t.Fatalf("expected is[%d] to be %s but got: %d from GetIntSlice", i, vals[i], v) + } + } +} + +func TestISDefault(t *testing.T) { + var is []int + f := setUpISFlagSetWithDefault(&is) + + vals := []string{"0", "1"} + + err := f.Parse([]string{}) + if err != nil { + t.Fatal("expected no error; got", err) + } + for i, v := range is { + d, err := strconv.Atoi(vals[i]) + if err != nil { + t.Fatalf("got error: %v", err) + } + if d != v { + t.Fatalf("expected is[%d] to be %d but got: %d", i, d, v) + } + } + + getIS, err := f.GetIntSlice("is") + if err != nil { + t.Fatal("got an error from GetIntSlice():", err) + } + for i, v := range getIS { + d, err := strconv.Atoi(vals[i]) + if err != nil { + t.Fatal("got an error from GetIntSlice():", err) + } + if d != v { + t.Fatalf("expected is[%d] to be %d from GetIntSlice but got: %d", i, d, v) + } + } +} + +func TestISWithDefault(t *testing.T) { + var is []int + f := setUpISFlagSetWithDefault(&is) + + vals := []string{"1", "2"} + arg := fmt.Sprintf("--is=%s", strings.Join(vals, ",")) + err := f.Parse([]string{arg}) + if err != nil { + t.Fatal("expected no error; got", err) + } + for i, v := range is { + d, err := strconv.Atoi(vals[i]) + if err != nil { + t.Fatalf("got error: %v", err) + } + if d != v { + t.Fatalf("expected is[%d] to be %d but got: %d", i, d, v) + } + } + + getIS, err := f.GetIntSlice("is") + if err != nil { + t.Fatal("got an error from GetIntSlice():", err) + } + for i, v := range getIS { + d, err := strconv.Atoi(vals[i]) + if err != nil { + t.Fatalf("got error: %v", err) + } + if d != v { + t.Fatalf("expected is[%d] to be %d from GetIntSlice but got: %d", i, d, v) + } + } +} + +func TestISCalledTwice(t *testing.T) { + var 
is []int + f := setUpISFlagSet(&is) + + in := []string{"1,2", "3"} + expected := []int{1, 2, 3} + argfmt := "--is=%s" + arg1 := fmt.Sprintf(argfmt, in[0]) + arg2 := fmt.Sprintf(argfmt, in[1]) + err := f.Parse([]string{arg1, arg2}) + if err != nil { + t.Fatal("expected no error; got", err) + } + for i, v := range is { + if expected[i] != v { + t.Fatalf("expected is[%d] to be %d but got: %d", i, expected[i], v) + } + } +} diff --git a/src/vendor/github.com/spf13/pflag/ip.go b/src/vendor/github.com/spf13/pflag/ip.go new file mode 100644 index 00000000..88a17430 --- /dev/null +++ b/src/vendor/github.com/spf13/pflag/ip.go @@ -0,0 +1,96 @@ +package pflag + +import ( + "fmt" + "net" + "strings" +) + +var _ = strings.TrimSpace + +// -- net.IP value +type ipValue net.IP + +func newIPValue(val net.IP, p *net.IP) *ipValue { + *p = val + return (*ipValue)(p) +} + +func (i *ipValue) String() string { return net.IP(*i).String() } +func (i *ipValue) Set(s string) error { + ip := net.ParseIP(strings.TrimSpace(s)) + if ip == nil { + return fmt.Errorf("failed to parse IP: %q", s) + } + *i = ipValue(ip) + return nil +} + +func (i *ipValue) Type() string { + return "ip" +} + +func ipConv(sval string) (interface{}, error) { + ip := net.ParseIP(sval) + if ip != nil { + return ip, nil + } + return nil, fmt.Errorf("invalid string being converted to IP address: %s", sval) +} + +// GetIP return the net.IP value of a flag with the given name +func (f *FlagSet) GetIP(name string) (net.IP, error) { + val, err := f.getFlagType(name, "ip", ipConv) + if err != nil { + return nil, err + } + return val.(net.IP), nil +} + +// IPVar defines an net.IP flag with specified name, default value, and usage string. +// The argument p points to an net.IP variable in which to store the value of the flag. 
+func (f *FlagSet) IPVar(p *net.IP, name string, value net.IP, usage string) { + f.VarP(newIPValue(value, p), name, "", usage) +} + +// IPVarP is like IPVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) IPVarP(p *net.IP, name, shorthand string, value net.IP, usage string) { + f.VarP(newIPValue(value, p), name, shorthand, usage) +} + +// IPVar defines an net.IP flag with specified name, default value, and usage string. +// The argument p points to an net.IP variable in which to store the value of the flag. +func IPVar(p *net.IP, name string, value net.IP, usage string) { + CommandLine.VarP(newIPValue(value, p), name, "", usage) +} + +// IPVarP is like IPVar, but accepts a shorthand letter that can be used after a single dash. +func IPVarP(p *net.IP, name, shorthand string, value net.IP, usage string) { + CommandLine.VarP(newIPValue(value, p), name, shorthand, usage) +} + +// IP defines an net.IP flag with specified name, default value, and usage string. +// The return value is the address of an net.IP variable that stores the value of the flag. +func (f *FlagSet) IP(name string, value net.IP, usage string) *net.IP { + p := new(net.IP) + f.IPVarP(p, name, "", value, usage) + return p +} + +// IPP is like IP, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) IPP(name, shorthand string, value net.IP, usage string) *net.IP { + p := new(net.IP) + f.IPVarP(p, name, shorthand, value, usage) + return p +} + +// IP defines an net.IP flag with specified name, default value, and usage string. +// The return value is the address of an net.IP variable that stores the value of the flag. +func IP(name string, value net.IP, usage string) *net.IP { + return CommandLine.IPP(name, "", value, usage) +} + +// IPP is like IP, but accepts a shorthand letter that can be used after a single dash. 
+func IPP(name, shorthand string, value net.IP, usage string) *net.IP { + return CommandLine.IPP(name, shorthand, value, usage) +} diff --git a/src/vendor/github.com/spf13/pflag/ip_test.go b/src/vendor/github.com/spf13/pflag/ip_test.go new file mode 100644 index 00000000..1fec50e4 --- /dev/null +++ b/src/vendor/github.com/spf13/pflag/ip_test.go @@ -0,0 +1,63 @@ +package pflag + +import ( + "fmt" + "net" + "os" + "testing" +) + +func setUpIP(ip *net.IP) *FlagSet { + f := NewFlagSet("test", ContinueOnError) + f.IPVar(ip, "address", net.ParseIP("0.0.0.0"), "IP Address") + return f +} + +func TestIP(t *testing.T) { + testCases := []struct { + input string + success bool + expected string + }{ + {"0.0.0.0", true, "0.0.0.0"}, + {" 0.0.0.0 ", true, "0.0.0.0"}, + {"1.2.3.4", true, "1.2.3.4"}, + {"127.0.0.1", true, "127.0.0.1"}, + {"255.255.255.255", true, "255.255.255.255"}, + {"", false, ""}, + {"0", false, ""}, + {"localhost", false, ""}, + {"0.0.0", false, ""}, + {"0.0.0.", false, ""}, + {"0.0.0.0.", false, ""}, + {"0.0.0.256", false, ""}, + {"0 . 0 . 0 . 
0", false, ""}, + } + + devnull, _ := os.Open(os.DevNull) + os.Stderr = devnull + for i := range testCases { + var addr net.IP + f := setUpIP(&addr) + + tc := &testCases[i] + + arg := fmt.Sprintf("--address=%s", tc.input) + err := f.Parse([]string{arg}) + if err != nil && tc.success == true { + t.Errorf("expected success, got %q", err) + continue + } else if err == nil && tc.success == false { + t.Errorf("expected failure") + continue + } else if tc.success { + ip, err := f.GetIP("address") + if err != nil { + t.Errorf("Got error trying to fetch the IP flag: %v", err) + } + if ip.String() != tc.expected { + t.Errorf("expected %q, got %q", tc.expected, ip.String()) + } + } + } +} diff --git a/src/vendor/github.com/spf13/pflag/ipmask.go b/src/vendor/github.com/spf13/pflag/ipmask.go new file mode 100644 index 00000000..5bd44bd2 --- /dev/null +++ b/src/vendor/github.com/spf13/pflag/ipmask.go @@ -0,0 +1,122 @@ +package pflag + +import ( + "fmt" + "net" + "strconv" +) + +// -- net.IPMask value +type ipMaskValue net.IPMask + +func newIPMaskValue(val net.IPMask, p *net.IPMask) *ipMaskValue { + *p = val + return (*ipMaskValue)(p) +} + +func (i *ipMaskValue) String() string { return net.IPMask(*i).String() } +func (i *ipMaskValue) Set(s string) error { + ip := ParseIPv4Mask(s) + if ip == nil { + return fmt.Errorf("failed to parse IP mask: %q", s) + } + *i = ipMaskValue(ip) + return nil +} + +func (i *ipMaskValue) Type() string { + return "ipMask" +} + +// ParseIPv4Mask written in IP form (e.g. 255.255.255.0). +// This function should really belong to the net package. 
+func ParseIPv4Mask(s string) net.IPMask { + mask := net.ParseIP(s) + if mask == nil { + if len(s) != 8 { + return nil + } + // net.IPMask.String() actually outputs things like ffffff00 + // so write a horrible parser for that as well :-( + m := []int{} + for i := 0; i < 4; i++ { + b := "0x" + s[2*i:2*i+2] + d, err := strconv.ParseInt(b, 0, 0) + if err != nil { + return nil + } + m = append(m, int(d)) + } + s := fmt.Sprintf("%d.%d.%d.%d", m[0], m[1], m[2], m[3]) + mask = net.ParseIP(s) + if mask == nil { + return nil + } + } + return net.IPv4Mask(mask[12], mask[13], mask[14], mask[15]) +} + +func parseIPv4Mask(sval string) (interface{}, error) { + mask := ParseIPv4Mask(sval) + if mask == nil { + return nil, fmt.Errorf("unable to parse %s as net.IPMask", sval) + } + return mask, nil +} + +// GetIPv4Mask return the net.IPv4Mask value of a flag with the given name +func (f *FlagSet) GetIPv4Mask(name string) (net.IPMask, error) { + val, err := f.getFlagType(name, "ipMask", parseIPv4Mask) + if err != nil { + return nil, err + } + return val.(net.IPMask), nil +} + +// IPMaskVar defines an net.IPMask flag with specified name, default value, and usage string. +// The argument p points to an net.IPMask variable in which to store the value of the flag. +func (f *FlagSet) IPMaskVar(p *net.IPMask, name string, value net.IPMask, usage string) { + f.VarP(newIPMaskValue(value, p), name, "", usage) +} + +// IPMaskVarP is like IPMaskVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) IPMaskVarP(p *net.IPMask, name, shorthand string, value net.IPMask, usage string) { + f.VarP(newIPMaskValue(value, p), name, shorthand, usage) +} + +// IPMaskVar defines an net.IPMask flag with specified name, default value, and usage string. +// The argument p points to an net.IPMask variable in which to store the value of the flag. 
+func IPMaskVar(p *net.IPMask, name string, value net.IPMask, usage string) { + CommandLine.VarP(newIPMaskValue(value, p), name, "", usage) +} + +// IPMaskVarP is like IPMaskVar, but accepts a shorthand letter that can be used after a single dash. +func IPMaskVarP(p *net.IPMask, name, shorthand string, value net.IPMask, usage string) { + CommandLine.VarP(newIPMaskValue(value, p), name, shorthand, usage) +} + +// IPMask defines an net.IPMask flag with specified name, default value, and usage string. +// The return value is the address of an net.IPMask variable that stores the value of the flag. +func (f *FlagSet) IPMask(name string, value net.IPMask, usage string) *net.IPMask { + p := new(net.IPMask) + f.IPMaskVarP(p, name, "", value, usage) + return p +} + +// IPMaskP is like IPMask, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) IPMaskP(name, shorthand string, value net.IPMask, usage string) *net.IPMask { + p := new(net.IPMask) + f.IPMaskVarP(p, name, shorthand, value, usage) + return p +} + +// IPMask defines an net.IPMask flag with specified name, default value, and usage string. +// The return value is the address of an net.IPMask variable that stores the value of the flag. +func IPMask(name string, value net.IPMask, usage string) *net.IPMask { + return CommandLine.IPMaskP(name, "", value, usage) +} + +// IPMaskP is like IP, but accepts a shorthand letter that can be used after a single dash. +func IPMaskP(name, shorthand string, value net.IPMask, usage string) *net.IPMask { + return CommandLine.IPMaskP(name, shorthand, value, usage) +} diff --git a/src/vendor/github.com/spf13/pflag/ipnet.go b/src/vendor/github.com/spf13/pflag/ipnet.go new file mode 100644 index 00000000..149b764b --- /dev/null +++ b/src/vendor/github.com/spf13/pflag/ipnet.go @@ -0,0 +1,100 @@ +package pflag + +import ( + "fmt" + "net" + "strings" +) + +// IPNet adapts net.IPNet for use as a flag. 
+type ipNetValue net.IPNet + +func (ipnet ipNetValue) String() string { + n := net.IPNet(ipnet) + return n.String() +} + +func (ipnet *ipNetValue) Set(value string) error { + _, n, err := net.ParseCIDR(strings.TrimSpace(value)) + if err != nil { + return err + } + *ipnet = ipNetValue(*n) + return nil +} + +func (*ipNetValue) Type() string { + return "ipNet" +} + +var _ = strings.TrimSpace + +func newIPNetValue(val net.IPNet, p *net.IPNet) *ipNetValue { + *p = val + return (*ipNetValue)(p) +} + +func ipNetConv(sval string) (interface{}, error) { + _, n, err := net.ParseCIDR(strings.TrimSpace(sval)) + if err == nil { + return *n, nil + } + return nil, fmt.Errorf("invalid string being converted to IPNet: %s", sval) +} + +// GetIPNet return the net.IPNet value of a flag with the given name +func (f *FlagSet) GetIPNet(name string) (net.IPNet, error) { + val, err := f.getFlagType(name, "ipNet", ipNetConv) + if err != nil { + return net.IPNet{}, err + } + return val.(net.IPNet), nil +} + +// IPNetVar defines an net.IPNet flag with specified name, default value, and usage string. +// The argument p points to an net.IPNet variable in which to store the value of the flag. +func (f *FlagSet) IPNetVar(p *net.IPNet, name string, value net.IPNet, usage string) { + f.VarP(newIPNetValue(value, p), name, "", usage) +} + +// IPNetVarP is like IPNetVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) IPNetVarP(p *net.IPNet, name, shorthand string, value net.IPNet, usage string) { + f.VarP(newIPNetValue(value, p), name, shorthand, usage) +} + +// IPNetVar defines an net.IPNet flag with specified name, default value, and usage string. +// The argument p points to an net.IPNet variable in which to store the value of the flag. 
+func IPNetVar(p *net.IPNet, name string, value net.IPNet, usage string) { + CommandLine.VarP(newIPNetValue(value, p), name, "", usage) +} + +// IPNetVarP is like IPNetVar, but accepts a shorthand letter that can be used after a single dash. +func IPNetVarP(p *net.IPNet, name, shorthand string, value net.IPNet, usage string) { + CommandLine.VarP(newIPNetValue(value, p), name, shorthand, usage) +} + +// IPNet defines an net.IPNet flag with specified name, default value, and usage string. +// The return value is the address of an net.IPNet variable that stores the value of the flag. +func (f *FlagSet) IPNet(name string, value net.IPNet, usage string) *net.IPNet { + p := new(net.IPNet) + f.IPNetVarP(p, name, "", value, usage) + return p +} + +// IPNetP is like IPNet, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) IPNetP(name, shorthand string, value net.IPNet, usage string) *net.IPNet { + p := new(net.IPNet) + f.IPNetVarP(p, name, shorthand, value, usage) + return p +} + +// IPNet defines an net.IPNet flag with specified name, default value, and usage string. +// The return value is the address of an net.IPNet variable that stores the value of the flag. +func IPNet(name string, value net.IPNet, usage string) *net.IPNet { + return CommandLine.IPNetP(name, "", value, usage) +} + +// IPNetP is like IPNet, but accepts a shorthand letter that can be used after a single dash. 
+func IPNetP(name, shorthand string, value net.IPNet, usage string) *net.IPNet { + return CommandLine.IPNetP(name, shorthand, value, usage) +} diff --git a/src/vendor/github.com/spf13/pflag/ipnet_test.go b/src/vendor/github.com/spf13/pflag/ipnet_test.go new file mode 100644 index 00000000..335b6fa1 --- /dev/null +++ b/src/vendor/github.com/spf13/pflag/ipnet_test.go @@ -0,0 +1,70 @@ +package pflag + +import ( + "fmt" + "net" + "os" + "testing" +) + +func setUpIPNet(ip *net.IPNet) *FlagSet { + f := NewFlagSet("test", ContinueOnError) + _, def, _ := net.ParseCIDR("0.0.0.0/0") + f.IPNetVar(ip, "address", *def, "IP Address") + return f +} + +func TestIPNet(t *testing.T) { + testCases := []struct { + input string + success bool + expected string + }{ + {"0.0.0.0/0", true, "0.0.0.0/0"}, + {" 0.0.0.0/0 ", true, "0.0.0.0/0"}, + {"1.2.3.4/8", true, "1.0.0.0/8"}, + {"127.0.0.1/16", true, "127.0.0.0/16"}, + {"255.255.255.255/19", true, "255.255.224.0/19"}, + {"255.255.255.255/32", true, "255.255.255.255/32"}, + {"", false, ""}, + {"/0", false, ""}, + {"0", false, ""}, + {"0/0", false, ""}, + {"localhost/0", false, ""}, + {"0.0.0/4", false, ""}, + {"0.0.0./8", false, ""}, + {"0.0.0.0./12", false, ""}, + {"0.0.0.256/16", false, ""}, + {"0.0.0.0 /20", false, ""}, + {"0.0.0.0/ 24", false, ""}, + {"0 . 0 . 0 . 
0 / 28", false, ""}, + {"0.0.0.0/33", false, ""}, + } + + devnull, _ := os.Open(os.DevNull) + os.Stderr = devnull + for i := range testCases { + var addr net.IPNet + f := setUpIPNet(&addr) + + tc := &testCases[i] + + arg := fmt.Sprintf("--address=%s", tc.input) + err := f.Parse([]string{arg}) + if err != nil && tc.success == true { + t.Errorf("expected success, got %q", err) + continue + } else if err == nil && tc.success == false { + t.Errorf("expected failure") + continue + } else if tc.success { + ip, err := f.GetIPNet("address") + if err != nil { + t.Errorf("Got error trying to fetch the IP flag: %v", err) + } + if ip.String() != tc.expected { + t.Errorf("expected %q, got %q", tc.expected, ip.String()) + } + } + } +} diff --git a/src/vendor/github.com/spf13/pflag/string.go b/src/vendor/github.com/spf13/pflag/string.go new file mode 100644 index 00000000..04e0a26f --- /dev/null +++ b/src/vendor/github.com/spf13/pflag/string.go @@ -0,0 +1,80 @@ +package pflag + +// -- string Value +type stringValue string + +func newStringValue(val string, p *string) *stringValue { + *p = val + return (*stringValue)(p) +} + +func (s *stringValue) Set(val string) error { + *s = stringValue(val) + return nil +} +func (s *stringValue) Type() string { + return "string" +} + +func (s *stringValue) String() string { return string(*s) } + +func stringConv(sval string) (interface{}, error) { + return sval, nil +} + +// GetString return the string value of a flag with the given name +func (f *FlagSet) GetString(name string) (string, error) { + val, err := f.getFlagType(name, "string", stringConv) + if err != nil { + return "", err + } + return val.(string), nil +} + +// StringVar defines a string flag with specified name, default value, and usage string. +// The argument p points to a string variable in which to store the value of the flag. 
+func (f *FlagSet) StringVar(p *string, name string, value string, usage string) { + f.VarP(newStringValue(value, p), name, "", usage) +} + +// StringVarP is like StringVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) StringVarP(p *string, name, shorthand string, value string, usage string) { + f.VarP(newStringValue(value, p), name, shorthand, usage) +} + +// StringVar defines a string flag with specified name, default value, and usage string. +// The argument p points to a string variable in which to store the value of the flag. +func StringVar(p *string, name string, value string, usage string) { + CommandLine.VarP(newStringValue(value, p), name, "", usage) +} + +// StringVarP is like StringVar, but accepts a shorthand letter that can be used after a single dash. +func StringVarP(p *string, name, shorthand string, value string, usage string) { + CommandLine.VarP(newStringValue(value, p), name, shorthand, usage) +} + +// String defines a string flag with specified name, default value, and usage string. +// The return value is the address of a string variable that stores the value of the flag. +func (f *FlagSet) String(name string, value string, usage string) *string { + p := new(string) + f.StringVarP(p, name, "", value, usage) + return p +} + +// StringP is like String, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) StringP(name, shorthand string, value string, usage string) *string { + p := new(string) + f.StringVarP(p, name, shorthand, value, usage) + return p +} + +// String defines a string flag with specified name, default value, and usage string. +// The return value is the address of a string variable that stores the value of the flag. +func String(name string, value string, usage string) *string { + return CommandLine.StringP(name, "", value, usage) +} + +// StringP is like String, but accepts a shorthand letter that can be used after a single dash. 
+func StringP(name, shorthand string, value string, usage string) *string { + return CommandLine.StringP(name, shorthand, value, usage) +} diff --git a/src/vendor/github.com/spf13/pflag/string_array.go b/src/vendor/github.com/spf13/pflag/string_array.go new file mode 100644 index 00000000..f320f2ec --- /dev/null +++ b/src/vendor/github.com/spf13/pflag/string_array.go @@ -0,0 +1,110 @@ +package pflag + +import ( + "fmt" + "strings" +) + +var _ = fmt.Fprint + +// -- stringArray Value +type stringArrayValue struct { + value *[]string + changed bool +} + +func newStringArrayValue(val []string, p *[]string) *stringArrayValue { + ssv := new(stringArrayValue) + ssv.value = p + *ssv.value = val + return ssv +} + +func (s *stringArrayValue) Set(val string) error { + if !s.changed { + *s.value = []string{val} + s.changed = true + } else { + *s.value = append(*s.value, val) + } + return nil +} + +func (s *stringArrayValue) Type() string { + return "stringArray" +} + +func (s *stringArrayValue) String() string { + str, _ := writeAsCSV(*s.value) + return "[" + str + "]" +} + +func stringArrayConv(sval string) (interface{}, error) { + sval = strings.Trim(sval, "[]") + // An empty string would cause a array with one (empty) string + if len(sval) == 0 { + return []string{}, nil + } + return readAsCSV(sval) +} + +// GetStringArray return the []string value of a flag with the given name +func (f *FlagSet) GetStringArray(name string) ([]string, error) { + val, err := f.getFlagType(name, "stringArray", stringArrayConv) + if err != nil { + return []string{}, err + } + return val.([]string), nil +} + +// StringArrayVar defines a string flag with specified name, default value, and usage string. +// The argument p points to a []string variable in which to store the values of the multiple flags. 
+// The value of each argument will not try to be separated by comma +func (f *FlagSet) StringArrayVar(p *[]string, name string, value []string, usage string) { + f.VarP(newStringArrayValue(value, p), name, "", usage) +} + +// StringArrayVarP is like StringArrayVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) StringArrayVarP(p *[]string, name, shorthand string, value []string, usage string) { + f.VarP(newStringArrayValue(value, p), name, shorthand, usage) +} + +// StringArrayVar defines a string flag with specified name, default value, and usage string. +// The argument p points to a []string variable in which to store the value of the flag. +// The value of each argument will not try to be separated by comma +func StringArrayVar(p *[]string, name string, value []string, usage string) { + CommandLine.VarP(newStringArrayValue(value, p), name, "", usage) +} + +// StringArrayVarP is like StringArrayVar, but accepts a shorthand letter that can be used after a single dash. +func StringArrayVarP(p *[]string, name, shorthand string, value []string, usage string) { + CommandLine.VarP(newStringArrayValue(value, p), name, shorthand, usage) +} + +// StringArray defines a string flag with specified name, default value, and usage string. +// The return value is the address of a []string variable that stores the value of the flag. +// The value of each argument will not try to be separated by comma +func (f *FlagSet) StringArray(name string, value []string, usage string) *[]string { + p := []string{} + f.StringArrayVarP(&p, name, "", value, usage) + return &p +} + +// StringArrayP is like StringArray, but accepts a shorthand letter that can be used after a single dash. 
+func (f *FlagSet) StringArrayP(name, shorthand string, value []string, usage string) *[]string { + p := []string{} + f.StringArrayVarP(&p, name, shorthand, value, usage) + return &p +} + +// StringArray defines a string flag with specified name, default value, and usage string. +// The return value is the address of a []string variable that stores the value of the flag. +// The value of each argument will not try to be separated by comma +func StringArray(name string, value []string, usage string) *[]string { + return CommandLine.StringArrayP(name, "", value, usage) +} + +// StringArrayP is like StringArray, but accepts a shorthand letter that can be used after a single dash. +func StringArrayP(name, shorthand string, value []string, usage string) *[]string { + return CommandLine.StringArrayP(name, shorthand, value, usage) +} diff --git a/src/vendor/github.com/spf13/pflag/string_array_test.go b/src/vendor/github.com/spf13/pflag/string_array_test.go new file mode 100644 index 00000000..3e3eb74f --- /dev/null +++ b/src/vendor/github.com/spf13/pflag/string_array_test.go @@ -0,0 +1,194 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package pflag + +import ( + "fmt" + "testing" +) + +func setUpSAFlagSet(sap *[]string) *FlagSet { + f := NewFlagSet("test", ContinueOnError) + f.StringArrayVar(sap, "sa", []string{}, "Command separated list!") + return f +} + +func setUpSAFlagSetWithDefault(sap *[]string) *FlagSet { + f := NewFlagSet("test", ContinueOnError) + f.StringArrayVar(sap, "sa", []string{"default", "values"}, "Command separated list!") + return f +} + +func TestEmptySA(t *testing.T) { + var sa []string + f := setUpSAFlagSet(&sa) + err := f.Parse([]string{}) + if err != nil { + t.Fatal("expected no error; got", err) + } + + getSA, err := f.GetStringArray("sa") + if err != nil { + t.Fatal("got an error from GetStringArray():", err) + } + if len(getSA) != 0 { + t.Fatalf("got sa %v with len=%d but expected length=0", getSA, len(getSA)) + } +} + +func TestEmptySAValue(t *testing.T) { + var sa []string + f := setUpSAFlagSet(&sa) + err := f.Parse([]string{"--sa="}) + if err != nil { + t.Fatal("expected no error; got", err) + } + + getSA, err := f.GetStringArray("sa") + if err != nil { + t.Fatal("got an error from GetStringArray():", err) + } + if len(getSA) != 0 { + t.Fatalf("got sa %v with len=%d but expected length=0", getSA, len(getSA)) + } +} + +func TestSADefault(t *testing.T) { + var sa []string + f := setUpSAFlagSetWithDefault(&sa) + + vals := []string{"default", "values"} + + err := f.Parse([]string{}) + if err != nil { + t.Fatal("expected no error; got", err) + } + for i, v := range sa { + if vals[i] != v { + t.Fatalf("expected sa[%d] to be %s but got: %s", i, vals[i], v) + } + } + + getSA, err := f.GetStringArray("sa") + if err != nil { + t.Fatal("got an error from GetStringArray():", err) + } + for i, v := range getSA { + if vals[i] != v { + t.Fatalf("expected sa[%d] to be %s from GetStringArray but got: %s", i, vals[i], v) + } + } +} + +func TestSAWithDefault(t *testing.T) { + var sa []string + f := setUpSAFlagSetWithDefault(&sa) + + val := "one" + arg := fmt.Sprintf("--sa=%s", 
val) + err := f.Parse([]string{arg}) + if err != nil { + t.Fatal("expected no error; got", err) + } + + if len(sa) != 1 { + t.Fatalf("expected number of values to be %d but %d", 1, len(sa)) + } + + if sa[0] != val { + t.Fatalf("expected value to be %s but got: %s", sa[0], val) + } + + getSA, err := f.GetStringArray("sa") + if err != nil { + t.Fatal("got an error from GetStringArray():", err) + } + + if len(getSA) != 1 { + t.Fatalf("expected number of values to be %d but %d", 1, len(getSA)) + } + + if getSA[0] != val { + t.Fatalf("expected value to be %s but got: %s", getSA[0], val) + } +} + +func TestSACalledTwice(t *testing.T) { + var sa []string + f := setUpSAFlagSet(&sa) + + in := []string{"one", "two"} + expected := []string{"one", "two"} + argfmt := "--sa=%s" + arg1 := fmt.Sprintf(argfmt, in[0]) + arg2 := fmt.Sprintf(argfmt, in[1]) + err := f.Parse([]string{arg1, arg2}) + if err != nil { + t.Fatal("expected no error; got", err) + } + + if len(expected) != len(sa) { + t.Fatalf("expected number of sa to be %d but got: %d", len(expected), len(sa)) + } + for i, v := range sa { + if expected[i] != v { + t.Fatalf("expected sa[%d] to be %s but got: %s", i, expected[i], v) + } + } + + values, err := f.GetStringArray("sa") + if err != nil { + t.Fatal("expected no error; got", err) + } + + if len(expected) != len(values) { + t.Fatalf("expected number of values to be %d but got: %d", len(expected), len(sa)) + } + for i, v := range values { + if expected[i] != v { + t.Fatalf("expected got sa[%d] to be %s but got: %s", i, expected[i], v) + } + } +} + +func TestSAWithSpecialChar(t *testing.T) { + var sa []string + f := setUpSAFlagSet(&sa) + + in := []string{"one,two", `"three"`, `"four,five",six`, "seven eight"} + expected := []string{"one,two", `"three"`, `"four,five",six`, "seven eight"} + argfmt := "--sa=%s" + arg1 := fmt.Sprintf(argfmt, in[0]) + arg2 := fmt.Sprintf(argfmt, in[1]) + arg3 := fmt.Sprintf(argfmt, in[2]) + arg4 := fmt.Sprintf(argfmt, in[3]) + err := 
f.Parse([]string{arg1, arg2, arg3, arg4}) + if err != nil { + t.Fatal("expected no error; got", err) + } + + if len(expected) != len(sa) { + t.Fatalf("expected number of sa to be %d but got: %d", len(expected), len(sa)) + } + for i, v := range sa { + if expected[i] != v { + t.Fatalf("expected sa[%d] to be %s but got: %s", i, expected[i], v) + } + } + + values, err := f.GetStringArray("sa") + if err != nil { + t.Fatal("expected no error; got", err) + } + + if len(expected) != len(values) { + t.Fatalf("expected number of values to be %d but got: %d", len(expected), len(values)) + } + for i, v := range values { + if expected[i] != v { + t.Fatalf("expected got sa[%d] to be %s but got: %s", i, expected[i], v) + } + } +} diff --git a/src/vendor/github.com/spf13/pflag/string_slice.go b/src/vendor/github.com/spf13/pflag/string_slice.go new file mode 100644 index 00000000..51e3c5d2 --- /dev/null +++ b/src/vendor/github.com/spf13/pflag/string_slice.go @@ -0,0 +1,132 @@ +package pflag + +import ( + "bytes" + "encoding/csv" + "fmt" + "strings" +) + +var _ = fmt.Fprint + +// -- stringSlice Value +type stringSliceValue struct { + value *[]string + changed bool +} + +func newStringSliceValue(val []string, p *[]string) *stringSliceValue { + ssv := new(stringSliceValue) + ssv.value = p + *ssv.value = val + return ssv +} + +func readAsCSV(val string) ([]string, error) { + if val == "" { + return []string{}, nil + } + stringReader := strings.NewReader(val) + csvReader := csv.NewReader(stringReader) + return csvReader.Read() +} + +func writeAsCSV(vals []string) (string, error) { + b := &bytes.Buffer{} + w := csv.NewWriter(b) + err := w.Write(vals) + if err != nil { + return "", err + } + w.Flush() + return strings.TrimSuffix(b.String(), fmt.Sprintln()), nil +} + +func (s *stringSliceValue) Set(val string) error { + v, err := readAsCSV(val) + if err != nil { + return err + } + if !s.changed { + *s.value = v + } else { + *s.value = append(*s.value, v...) 
+ } + s.changed = true + return nil +} + +func (s *stringSliceValue) Type() string { + return "stringSlice" +} + +func (s *stringSliceValue) String() string { + str, _ := writeAsCSV(*s.value) + return "[" + str + "]" +} + +func stringSliceConv(sval string) (interface{}, error) { + sval = strings.Trim(sval, "[]") + // An empty string would cause a slice with one (empty) string + if len(sval) == 0 { + return []string{}, nil + } + return readAsCSV(sval) +} + +// GetStringSlice return the []string value of a flag with the given name +func (f *FlagSet) GetStringSlice(name string) ([]string, error) { + val, err := f.getFlagType(name, "stringSlice", stringSliceConv) + if err != nil { + return []string{}, err + } + return val.([]string), nil +} + +// StringSliceVar defines a string flag with specified name, default value, and usage string. +// The argument p points to a []string variable in which to store the value of the flag. +func (f *FlagSet) StringSliceVar(p *[]string, name string, value []string, usage string) { + f.VarP(newStringSliceValue(value, p), name, "", usage) +} + +// StringSliceVarP is like StringSliceVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) StringSliceVarP(p *[]string, name, shorthand string, value []string, usage string) { + f.VarP(newStringSliceValue(value, p), name, shorthand, usage) +} + +// StringSliceVar defines a string flag with specified name, default value, and usage string. +// The argument p points to a []string variable in which to store the value of the flag. +func StringSliceVar(p *[]string, name string, value []string, usage string) { + CommandLine.VarP(newStringSliceValue(value, p), name, "", usage) +} + +// StringSliceVarP is like StringSliceVar, but accepts a shorthand letter that can be used after a single dash. 
+func StringSliceVarP(p *[]string, name, shorthand string, value []string, usage string) { + CommandLine.VarP(newStringSliceValue(value, p), name, shorthand, usage) +} + +// StringSlice defines a string flag with specified name, default value, and usage string. +// The return value is the address of a []string variable that stores the value of the flag. +func (f *FlagSet) StringSlice(name string, value []string, usage string) *[]string { + p := []string{} + f.StringSliceVarP(&p, name, "", value, usage) + return &p +} + +// StringSliceP is like StringSlice, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) StringSliceP(name, shorthand string, value []string, usage string) *[]string { + p := []string{} + f.StringSliceVarP(&p, name, shorthand, value, usage) + return &p +} + +// StringSlice defines a string flag with specified name, default value, and usage string. +// The return value is the address of a []string variable that stores the value of the flag. +func StringSlice(name string, value []string, usage string) *[]string { + return CommandLine.StringSliceP(name, "", value, usage) +} + +// StringSliceP is like StringSlice, but accepts a shorthand letter that can be used after a single dash. +func StringSliceP(name, shorthand string, value []string, usage string) *[]string { + return CommandLine.StringSliceP(name, shorthand, value, usage) +} diff --git a/src/vendor/github.com/spf13/pflag/string_slice_test.go b/src/vendor/github.com/spf13/pflag/string_slice_test.go new file mode 100644 index 00000000..26118c70 --- /dev/null +++ b/src/vendor/github.com/spf13/pflag/string_slice_test.go @@ -0,0 +1,215 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package pflag + +import ( + "fmt" + "strings" + "testing" +) + +func setUpSSFlagSet(ssp *[]string) *FlagSet { + f := NewFlagSet("test", ContinueOnError) + f.StringSliceVar(ssp, "ss", []string{}, "Command separated list!") + return f +} + +func setUpSSFlagSetWithDefault(ssp *[]string) *FlagSet { + f := NewFlagSet("test", ContinueOnError) + f.StringSliceVar(ssp, "ss", []string{"default", "values"}, "Command separated list!") + return f +} + +func TestEmptySS(t *testing.T) { + var ss []string + f := setUpSSFlagSet(&ss) + err := f.Parse([]string{}) + if err != nil { + t.Fatal("expected no error; got", err) + } + + getSS, err := f.GetStringSlice("ss") + if err != nil { + t.Fatal("got an error from GetStringSlice():", err) + } + if len(getSS) != 0 { + t.Fatalf("got ss %v with len=%d but expected length=0", getSS, len(getSS)) + } +} + +func TestEmptySSValue(t *testing.T) { + var ss []string + f := setUpSSFlagSet(&ss) + err := f.Parse([]string{"--ss="}) + if err != nil { + t.Fatal("expected no error; got", err) + } + + getSS, err := f.GetStringSlice("ss") + if err != nil { + t.Fatal("got an error from GetStringSlice():", err) + } + if len(getSS) != 0 { + t.Fatalf("got ss %v with len=%d but expected length=0", getSS, len(getSS)) + } +} + +func TestSS(t *testing.T) { + var ss []string + f := setUpSSFlagSet(&ss) + + vals := []string{"one", "two", "4", "3"} + arg := fmt.Sprintf("--ss=%s", strings.Join(vals, ",")) + err := f.Parse([]string{arg}) + if err != nil { + t.Fatal("expected no error; got", err) + } + for i, v := range ss { + if vals[i] != v { + t.Fatalf("expected ss[%d] to be %s but got: %s", i, vals[i], v) + } + } + + getSS, err := f.GetStringSlice("ss") + if err != nil { + t.Fatal("got an error from GetStringSlice():", err) + } + for i, v := range getSS { + if vals[i] != v { + t.Fatalf("expected ss[%d] to be %s from GetStringSlice but got: %s", i, vals[i], v) + } + } +} + +func TestSSDefault(t *testing.T) { + var ss []string + f := setUpSSFlagSetWithDefault(&ss) + 
+ vals := []string{"default", "values"} + + err := f.Parse([]string{}) + if err != nil { + t.Fatal("expected no error; got", err) + } + for i, v := range ss { + if vals[i] != v { + t.Fatalf("expected ss[%d] to be %s but got: %s", i, vals[i], v) + } + } + + getSS, err := f.GetStringSlice("ss") + if err != nil { + t.Fatal("got an error from GetStringSlice():", err) + } + for i, v := range getSS { + if vals[i] != v { + t.Fatalf("expected ss[%d] to be %s from GetStringSlice but got: %s", i, vals[i], v) + } + } +} + +func TestSSWithDefault(t *testing.T) { + var ss []string + f := setUpSSFlagSetWithDefault(&ss) + + vals := []string{"one", "two", "4", "3"} + arg := fmt.Sprintf("--ss=%s", strings.Join(vals, ",")) + err := f.Parse([]string{arg}) + if err != nil { + t.Fatal("expected no error; got", err) + } + for i, v := range ss { + if vals[i] != v { + t.Fatalf("expected ss[%d] to be %s but got: %s", i, vals[i], v) + } + } + + getSS, err := f.GetStringSlice("ss") + if err != nil { + t.Fatal("got an error from GetStringSlice():", err) + } + for i, v := range getSS { + if vals[i] != v { + t.Fatalf("expected ss[%d] to be %s from GetStringSlice but got: %s", i, vals[i], v) + } + } +} + +func TestSSCalledTwice(t *testing.T) { + var ss []string + f := setUpSSFlagSet(&ss) + + in := []string{"one,two", "three"} + expected := []string{"one", "two", "three"} + argfmt := "--ss=%s" + arg1 := fmt.Sprintf(argfmt, in[0]) + arg2 := fmt.Sprintf(argfmt, in[1]) + err := f.Parse([]string{arg1, arg2}) + if err != nil { + t.Fatal("expected no error; got", err) + } + + if len(expected) != len(ss) { + t.Fatalf("expected number of ss to be %d but got: %d", len(expected), len(ss)) + } + for i, v := range ss { + if expected[i] != v { + t.Fatalf("expected ss[%d] to be %s but got: %s", i, expected[i], v) + } + } + + values, err := f.GetStringSlice("ss") + if err != nil { + t.Fatal("expected no error; got", err) + } + + if len(expected) != len(values) { + t.Fatalf("expected number of values to be %d 
but got: %d", len(expected), len(ss)) + } + for i, v := range values { + if expected[i] != v { + t.Fatalf("expected got ss[%d] to be %s but got: %s", i, expected[i], v) + } + } +} + +func TestSSWithComma(t *testing.T) { + var ss []string + f := setUpSSFlagSet(&ss) + + in := []string{`"one,two"`, `"three"`, `"four,five",six`} + expected := []string{"one,two", "three", "four,five", "six"} + argfmt := "--ss=%s" + arg1 := fmt.Sprintf(argfmt, in[0]) + arg2 := fmt.Sprintf(argfmt, in[1]) + arg3 := fmt.Sprintf(argfmt, in[2]) + err := f.Parse([]string{arg1, arg2, arg3}) + if err != nil { + t.Fatal("expected no error; got", err) + } + + if len(expected) != len(ss) { + t.Fatalf("expected number of ss to be %d but got: %d", len(expected), len(ss)) + } + for i, v := range ss { + if expected[i] != v { + t.Fatalf("expected ss[%d] to be %s but got: %s", i, expected[i], v) + } + } + + values, err := f.GetStringSlice("ss") + if err != nil { + t.Fatal("expected no error; got", err) + } + + if len(expected) != len(values) { + t.Fatalf("expected number of values to be %d but got: %d", len(expected), len(values)) + } + for i, v := range values { + if expected[i] != v { + t.Fatalf("expected got ss[%d] to be %s but got: %s", i, expected[i], v) + } + } +} diff --git a/src/vendor/github.com/spf13/pflag/uint.go b/src/vendor/github.com/spf13/pflag/uint.go new file mode 100644 index 00000000..dcbc2b75 --- /dev/null +++ b/src/vendor/github.com/spf13/pflag/uint.go @@ -0,0 +1,88 @@ +package pflag + +import "strconv" + +// -- uint Value +type uintValue uint + +func newUintValue(val uint, p *uint) *uintValue { + *p = val + return (*uintValue)(p) +} + +func (i *uintValue) Set(s string) error { + v, err := strconv.ParseUint(s, 0, 64) + *i = uintValue(v) + return err +} + +func (i *uintValue) Type() string { + return "uint" +} + +func (i *uintValue) String() string { return strconv.FormatUint(uint64(*i), 10) } + +func uintConv(sval string) (interface{}, error) { + v, err := strconv.ParseUint(sval, 0, 
0) + if err != nil { + return 0, err + } + return uint(v), nil +} + +// GetUint return the uint value of a flag with the given name +func (f *FlagSet) GetUint(name string) (uint, error) { + val, err := f.getFlagType(name, "uint", uintConv) + if err != nil { + return 0, err + } + return val.(uint), nil +} + +// UintVar defines a uint flag with specified name, default value, and usage string. +// The argument p points to a uint variable in which to store the value of the flag. +func (f *FlagSet) UintVar(p *uint, name string, value uint, usage string) { + f.VarP(newUintValue(value, p), name, "", usage) +} + +// UintVarP is like UintVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) UintVarP(p *uint, name, shorthand string, value uint, usage string) { + f.VarP(newUintValue(value, p), name, shorthand, usage) +} + +// UintVar defines a uint flag with specified name, default value, and usage string. +// The argument p points to a uint variable in which to store the value of the flag. +func UintVar(p *uint, name string, value uint, usage string) { + CommandLine.VarP(newUintValue(value, p), name, "", usage) +} + +// UintVarP is like UintVar, but accepts a shorthand letter that can be used after a single dash. +func UintVarP(p *uint, name, shorthand string, value uint, usage string) { + CommandLine.VarP(newUintValue(value, p), name, shorthand, usage) +} + +// Uint defines a uint flag with specified name, default value, and usage string. +// The return value is the address of a uint variable that stores the value of the flag. +func (f *FlagSet) Uint(name string, value uint, usage string) *uint { + p := new(uint) + f.UintVarP(p, name, "", value, usage) + return p +} + +// UintP is like Uint, but accepts a shorthand letter that can be used after a single dash. 
+func (f *FlagSet) UintP(name, shorthand string, value uint, usage string) *uint { + p := new(uint) + f.UintVarP(p, name, shorthand, value, usage) + return p +} + +// Uint defines a uint flag with specified name, default value, and usage string. +// The return value is the address of a uint variable that stores the value of the flag. +func Uint(name string, value uint, usage string) *uint { + return CommandLine.UintP(name, "", value, usage) +} + +// UintP is like Uint, but accepts a shorthand letter that can be used after a single dash. +func UintP(name, shorthand string, value uint, usage string) *uint { + return CommandLine.UintP(name, shorthand, value, usage) +} diff --git a/src/vendor/github.com/spf13/pflag/uint16.go b/src/vendor/github.com/spf13/pflag/uint16.go new file mode 100644 index 00000000..7e9914ed --- /dev/null +++ b/src/vendor/github.com/spf13/pflag/uint16.go @@ -0,0 +1,88 @@ +package pflag + +import "strconv" + +// -- uint16 value +type uint16Value uint16 + +func newUint16Value(val uint16, p *uint16) *uint16Value { + *p = val + return (*uint16Value)(p) +} + +func (i *uint16Value) Set(s string) error { + v, err := strconv.ParseUint(s, 0, 16) + *i = uint16Value(v) + return err +} + +func (i *uint16Value) Type() string { + return "uint16" +} + +func (i *uint16Value) String() string { return strconv.FormatUint(uint64(*i), 10) } + +func uint16Conv(sval string) (interface{}, error) { + v, err := strconv.ParseUint(sval, 0, 16) + if err != nil { + return 0, err + } + return uint16(v), nil +} + +// GetUint16 return the uint16 value of a flag with the given name +func (f *FlagSet) GetUint16(name string) (uint16, error) { + val, err := f.getFlagType(name, "uint16", uint16Conv) + if err != nil { + return 0, err + } + return val.(uint16), nil +} + +// Uint16Var defines a uint flag with specified name, default value, and usage string. +// The argument p points to a uint variable in which to store the value of the flag. 
+func (f *FlagSet) Uint16Var(p *uint16, name string, value uint16, usage string) { + f.VarP(newUint16Value(value, p), name, "", usage) +} + +// Uint16VarP is like Uint16Var, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Uint16VarP(p *uint16, name, shorthand string, value uint16, usage string) { + f.VarP(newUint16Value(value, p), name, shorthand, usage) +} + +// Uint16Var defines a uint flag with specified name, default value, and usage string. +// The argument p points to a uint variable in which to store the value of the flag. +func Uint16Var(p *uint16, name string, value uint16, usage string) { + CommandLine.VarP(newUint16Value(value, p), name, "", usage) +} + +// Uint16VarP is like Uint16Var, but accepts a shorthand letter that can be used after a single dash. +func Uint16VarP(p *uint16, name, shorthand string, value uint16, usage string) { + CommandLine.VarP(newUint16Value(value, p), name, shorthand, usage) +} + +// Uint16 defines a uint flag with specified name, default value, and usage string. +// The return value is the address of a uint variable that stores the value of the flag. +func (f *FlagSet) Uint16(name string, value uint16, usage string) *uint16 { + p := new(uint16) + f.Uint16VarP(p, name, "", value, usage) + return p +} + +// Uint16P is like Uint16, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Uint16P(name, shorthand string, value uint16, usage string) *uint16 { + p := new(uint16) + f.Uint16VarP(p, name, shorthand, value, usage) + return p +} + +// Uint16 defines a uint flag with specified name, default value, and usage string. +// The return value is the address of a uint variable that stores the value of the flag. +func Uint16(name string, value uint16, usage string) *uint16 { + return CommandLine.Uint16P(name, "", value, usage) +} + +// Uint16P is like Uint16, but accepts a shorthand letter that can be used after a single dash. 
+func Uint16P(name, shorthand string, value uint16, usage string) *uint16 { + return CommandLine.Uint16P(name, shorthand, value, usage) +} diff --git a/src/vendor/github.com/spf13/pflag/uint32.go b/src/vendor/github.com/spf13/pflag/uint32.go new file mode 100644 index 00000000..d8024539 --- /dev/null +++ b/src/vendor/github.com/spf13/pflag/uint32.go @@ -0,0 +1,88 @@ +package pflag + +import "strconv" + +// -- uint32 value +type uint32Value uint32 + +func newUint32Value(val uint32, p *uint32) *uint32Value { + *p = val + return (*uint32Value)(p) +} + +func (i *uint32Value) Set(s string) error { + v, err := strconv.ParseUint(s, 0, 32) + *i = uint32Value(v) + return err +} + +func (i *uint32Value) Type() string { + return "uint32" +} + +func (i *uint32Value) String() string { return strconv.FormatUint(uint64(*i), 10) } + +func uint32Conv(sval string) (interface{}, error) { + v, err := strconv.ParseUint(sval, 0, 32) + if err != nil { + return 0, err + } + return uint32(v), nil +} + +// GetUint32 return the uint32 value of a flag with the given name +func (f *FlagSet) GetUint32(name string) (uint32, error) { + val, err := f.getFlagType(name, "uint32", uint32Conv) + if err != nil { + return 0, err + } + return val.(uint32), nil +} + +// Uint32Var defines a uint32 flag with specified name, default value, and usage string. +// The argument p points to a uint32 variable in which to store the value of the flag. +func (f *FlagSet) Uint32Var(p *uint32, name string, value uint32, usage string) { + f.VarP(newUint32Value(value, p), name, "", usage) +} + +// Uint32VarP is like Uint32Var, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Uint32VarP(p *uint32, name, shorthand string, value uint32, usage string) { + f.VarP(newUint32Value(value, p), name, shorthand, usage) +} + +// Uint32Var defines a uint32 flag with specified name, default value, and usage string. 
+// The argument p points to a uint32 variable in which to store the value of the flag. +func Uint32Var(p *uint32, name string, value uint32, usage string) { + CommandLine.VarP(newUint32Value(value, p), name, "", usage) +} + +// Uint32VarP is like Uint32Var, but accepts a shorthand letter that can be used after a single dash. +func Uint32VarP(p *uint32, name, shorthand string, value uint32, usage string) { + CommandLine.VarP(newUint32Value(value, p), name, shorthand, usage) +} + +// Uint32 defines a uint32 flag with specified name, default value, and usage string. +// The return value is the address of a uint32 variable that stores the value of the flag. +func (f *FlagSet) Uint32(name string, value uint32, usage string) *uint32 { + p := new(uint32) + f.Uint32VarP(p, name, "", value, usage) + return p +} + +// Uint32P is like Uint32, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Uint32P(name, shorthand string, value uint32, usage string) *uint32 { + p := new(uint32) + f.Uint32VarP(p, name, shorthand, value, usage) + return p +} + +// Uint32 defines a uint32 flag with specified name, default value, and usage string. +// The return value is the address of a uint32 variable that stores the value of the flag. +func Uint32(name string, value uint32, usage string) *uint32 { + return CommandLine.Uint32P(name, "", value, usage) +} + +// Uint32P is like Uint32, but accepts a shorthand letter that can be used after a single dash. 
+func Uint32P(name, shorthand string, value uint32, usage string) *uint32 { + return CommandLine.Uint32P(name, shorthand, value, usage) +} diff --git a/src/vendor/github.com/spf13/pflag/uint64.go b/src/vendor/github.com/spf13/pflag/uint64.go new file mode 100644 index 00000000..f62240f2 --- /dev/null +++ b/src/vendor/github.com/spf13/pflag/uint64.go @@ -0,0 +1,88 @@ +package pflag + +import "strconv" + +// -- uint64 Value +type uint64Value uint64 + +func newUint64Value(val uint64, p *uint64) *uint64Value { + *p = val + return (*uint64Value)(p) +} + +func (i *uint64Value) Set(s string) error { + v, err := strconv.ParseUint(s, 0, 64) + *i = uint64Value(v) + return err +} + +func (i *uint64Value) Type() string { + return "uint64" +} + +func (i *uint64Value) String() string { return strconv.FormatUint(uint64(*i), 10) } + +func uint64Conv(sval string) (interface{}, error) { + v, err := strconv.ParseUint(sval, 0, 64) + if err != nil { + return 0, err + } + return uint64(v), nil +} + +// GetUint64 return the uint64 value of a flag with the given name +func (f *FlagSet) GetUint64(name string) (uint64, error) { + val, err := f.getFlagType(name, "uint64", uint64Conv) + if err != nil { + return 0, err + } + return val.(uint64), nil +} + +// Uint64Var defines a uint64 flag with specified name, default value, and usage string. +// The argument p points to a uint64 variable in which to store the value of the flag. +func (f *FlagSet) Uint64Var(p *uint64, name string, value uint64, usage string) { + f.VarP(newUint64Value(value, p), name, "", usage) +} + +// Uint64VarP is like Uint64Var, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Uint64VarP(p *uint64, name, shorthand string, value uint64, usage string) { + f.VarP(newUint64Value(value, p), name, shorthand, usage) +} + +// Uint64Var defines a uint64 flag with specified name, default value, and usage string. 
+// The argument p points to a uint64 variable in which to store the value of the flag. +func Uint64Var(p *uint64, name string, value uint64, usage string) { + CommandLine.VarP(newUint64Value(value, p), name, "", usage) +} + +// Uint64VarP is like Uint64Var, but accepts a shorthand letter that can be used after a single dash. +func Uint64VarP(p *uint64, name, shorthand string, value uint64, usage string) { + CommandLine.VarP(newUint64Value(value, p), name, shorthand, usage) +} + +// Uint64 defines a uint64 flag with specified name, default value, and usage string. +// The return value is the address of a uint64 variable that stores the value of the flag. +func (f *FlagSet) Uint64(name string, value uint64, usage string) *uint64 { + p := new(uint64) + f.Uint64VarP(p, name, "", value, usage) + return p +} + +// Uint64P is like Uint64, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Uint64P(name, shorthand string, value uint64, usage string) *uint64 { + p := new(uint64) + f.Uint64VarP(p, name, shorthand, value, usage) + return p +} + +// Uint64 defines a uint64 flag with specified name, default value, and usage string. +// The return value is the address of a uint64 variable that stores the value of the flag. +func Uint64(name string, value uint64, usage string) *uint64 { + return CommandLine.Uint64P(name, "", value, usage) +} + +// Uint64P is like Uint64, but accepts a shorthand letter that can be used after a single dash. 
+func Uint64P(name, shorthand string, value uint64, usage string) *uint64 { + return CommandLine.Uint64P(name, shorthand, value, usage) +} diff --git a/src/vendor/github.com/spf13/pflag/uint8.go b/src/vendor/github.com/spf13/pflag/uint8.go new file mode 100644 index 00000000..bb0e83c1 --- /dev/null +++ b/src/vendor/github.com/spf13/pflag/uint8.go @@ -0,0 +1,88 @@ +package pflag + +import "strconv" + +// -- uint8 Value +type uint8Value uint8 + +func newUint8Value(val uint8, p *uint8) *uint8Value { + *p = val + return (*uint8Value)(p) +} + +func (i *uint8Value) Set(s string) error { + v, err := strconv.ParseUint(s, 0, 8) + *i = uint8Value(v) + return err +} + +func (i *uint8Value) Type() string { + return "uint8" +} + +func (i *uint8Value) String() string { return strconv.FormatUint(uint64(*i), 10) } + +func uint8Conv(sval string) (interface{}, error) { + v, err := strconv.ParseUint(sval, 0, 8) + if err != nil { + return 0, err + } + return uint8(v), nil +} + +// GetUint8 return the uint8 value of a flag with the given name +func (f *FlagSet) GetUint8(name string) (uint8, error) { + val, err := f.getFlagType(name, "uint8", uint8Conv) + if err != nil { + return 0, err + } + return val.(uint8), nil +} + +// Uint8Var defines a uint8 flag with specified name, default value, and usage string. +// The argument p points to a uint8 variable in which to store the value of the flag. +func (f *FlagSet) Uint8Var(p *uint8, name string, value uint8, usage string) { + f.VarP(newUint8Value(value, p), name, "", usage) +} + +// Uint8VarP is like Uint8Var, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Uint8VarP(p *uint8, name, shorthand string, value uint8, usage string) { + f.VarP(newUint8Value(value, p), name, shorthand, usage) +} + +// Uint8Var defines a uint8 flag with specified name, default value, and usage string. +// The argument p points to a uint8 variable in which to store the value of the flag. 
+func Uint8Var(p *uint8, name string, value uint8, usage string) { + CommandLine.VarP(newUint8Value(value, p), name, "", usage) +} + +// Uint8VarP is like Uint8Var, but accepts a shorthand letter that can be used after a single dash. +func Uint8VarP(p *uint8, name, shorthand string, value uint8, usage string) { + CommandLine.VarP(newUint8Value(value, p), name, shorthand, usage) +} + +// Uint8 defines a uint8 flag with specified name, default value, and usage string. +// The return value is the address of a uint8 variable that stores the value of the flag. +func (f *FlagSet) Uint8(name string, value uint8, usage string) *uint8 { + p := new(uint8) + f.Uint8VarP(p, name, "", value, usage) + return p +} + +// Uint8P is like Uint8, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Uint8P(name, shorthand string, value uint8, usage string) *uint8 { + p := new(uint8) + f.Uint8VarP(p, name, shorthand, value, usage) + return p +} + +// Uint8 defines a uint8 flag with specified name, default value, and usage string. +// The return value is the address of a uint8 variable that stores the value of the flag. +func Uint8(name string, value uint8, usage string) *uint8 { + return CommandLine.Uint8P(name, "", value, usage) +} + +// Uint8P is like Uint8, but accepts a shorthand letter that can be used after a single dash. +func Uint8P(name, shorthand string, value uint8, usage string) *uint8 { + return CommandLine.Uint8P(name, shorthand, value, usage) +} diff --git a/src/vendor/github.com/spf13/pflag/verify/all.sh b/src/vendor/github.com/spf13/pflag/verify/all.sh new file mode 100755 index 00000000..739f89c0 --- /dev/null +++ b/src/vendor/github.com/spf13/pflag/verify/all.sh @@ -0,0 +1,69 @@ +#!/bin/bash + +set -o errexit +set -o nounset +set -o pipefail + +ROOT=$(dirname "${BASH_SOURCE}")/.. + +# Some useful colors. 
+if [[ -z "${color_start-}" ]]; then + declare -r color_start="\033[" + declare -r color_red="${color_start}0;31m" + declare -r color_yellow="${color_start}0;33m" + declare -r color_green="${color_start}0;32m" + declare -r color_norm="${color_start}0m" +fi + +SILENT=true + +function is-excluded { + for e in $EXCLUDE; do + if [[ $1 -ef ${BASH_SOURCE} ]]; then + return + fi + if [[ $1 -ef "$ROOT/hack/$e" ]]; then + return + fi + done + return 1 +} + +while getopts ":v" opt; do + case $opt in + v) + SILENT=false + ;; + \?) + echo "Invalid flag: -$OPTARG" >&2 + exit 1 + ;; + esac +done + +if $SILENT ; then + echo "Running in the silent mode, run with -v if you want to see script logs." +fi + +EXCLUDE="all.sh" + +ret=0 +for t in `ls $ROOT/verify/*.sh` +do + if is-excluded $t ; then + echo "Skipping $t" + continue + fi + if $SILENT ; then + echo -e "Verifying $t" + if bash "$t" &> /dev/null; then + echo -e "${color_green}SUCCESS${color_norm}" + else + echo -e "${color_red}FAILED${color_norm}" + ret=1 + fi + else + bash "$t" || ret=1 + fi +done +exit $ret diff --git a/src/vendor/github.com/spf13/pflag/verify/gofmt.sh b/src/vendor/github.com/spf13/pflag/verify/gofmt.sh new file mode 100755 index 00000000..f66acf80 --- /dev/null +++ b/src/vendor/github.com/spf13/pflag/verify/gofmt.sh @@ -0,0 +1,19 @@ +#!/bin/bash + +set -o errexit +set -o nounset +set -o pipefail + +ROOT=$(dirname "${BASH_SOURCE}")/.. + +pushd "${ROOT}" > /dev/null + +GOFMT=${GOFMT:-"gofmt"} +bad_files=$(find . -name '*.go' | xargs $GOFMT -s -l) +if [[ -n "${bad_files}" ]]; then + echo "!!! '$GOFMT' needs to be run on the following files: " + echo "${bad_files}" + exit 1 +fi + +# ex: ts=2 sw=2 et filetype=sh diff --git a/src/vendor/github.com/spf13/pflag/verify/golint.sh b/src/vendor/github.com/spf13/pflag/verify/golint.sh new file mode 100755 index 00000000..685c1778 --- /dev/null +++ b/src/vendor/github.com/spf13/pflag/verify/golint.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +ROOT=$(dirname "${BASH_SOURCE}")/.. 
+GOLINT=${GOLINT:-"golint"} + +pushd "${ROOT}" > /dev/null + bad_files=$($GOLINT -min_confidence=0.9 ./...) + if [[ -n "${bad_files}" ]]; then + echo "!!! '$GOLINT' problems: " + echo "${bad_files}" + exit 1 + fi +popd > /dev/null + +# ex: ts=2 sw=2 et filetype=sh diff --git a/src/vendor/github.com/stretchr/testify/.gitignore b/src/vendor/github.com/stretchr/testify/.gitignore new file mode 100644 index 00000000..5aacdb7c --- /dev/null +++ b/src/vendor/github.com/stretchr/testify/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe + +.DS_Store diff --git a/src/vendor/github.com/stretchr/testify/.travis.yml b/src/vendor/github.com/stretchr/testify/.travis.yml new file mode 100644 index 00000000..ffb9e0dd --- /dev/null +++ b/src/vendor/github.com/stretchr/testify/.travis.yml @@ -0,0 +1,16 @@ +language: go + +sudo: false + +go: + - 1.1 + - 1.2 + - 1.3 + - 1.4 + - 1.5 + - 1.6 + - 1.7 + - tip + +script: + - go test -v ./... diff --git a/src/vendor/github.com/stretchr/testify/Godeps/Godeps.json b/src/vendor/github.com/stretchr/testify/Godeps/Godeps.json new file mode 100644 index 00000000..df032ac3 --- /dev/null +++ b/src/vendor/github.com/stretchr/testify/Godeps/Godeps.json @@ -0,0 +1,23 @@ +{ + "ImportPath": "github.com/stretchr/testify", + "GoVersion": "go1.5", + "GodepVersion": "v74", + "Packages": [ + "./..." 
+ ], + "Deps": [ + { + "ImportPath": "github.com/davecgh/go-spew/spew", + "Comment": "v1.0.0-3-g6d21280", + "Rev": "6d212800a42e8ab5c146b8ace3490ee17e5225f9" + }, + { + "ImportPath": "github.com/pmezard/go-difflib/difflib", + "Rev": "d8ed2627bdf02c080bf22230dbb337003b7aba2d" + }, + { + "ImportPath": "github.com/stretchr/objx", + "Rev": "cbeaeb16a013161a98496fad62933b1d21786672" + } + ] +} diff --git a/src/vendor/github.com/stretchr/testify/Godeps/Readme b/src/vendor/github.com/stretchr/testify/Godeps/Readme new file mode 100644 index 00000000..4cdaa53d --- /dev/null +++ b/src/vendor/github.com/stretchr/testify/Godeps/Readme @@ -0,0 +1,5 @@ +This directory tree is generated automatically by godep. + +Please do not edit. + +See https://github.com/tools/godep for more information. diff --git a/src/vendor/github.com/stretchr/testify/LICENCE.txt b/src/vendor/github.com/stretchr/testify/LICENCE.txt new file mode 100644 index 00000000..473b670a --- /dev/null +++ b/src/vendor/github.com/stretchr/testify/LICENCE.txt @@ -0,0 +1,22 @@ +Copyright (c) 2012 - 2013 Mat Ryer and Tyler Bunnell + +Please consider promoting this project if you find it useful. + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without restriction, +including without limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of the Software, +and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included +in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT +OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE +OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/src/vendor/github.com/stretchr/testify/LICENSE b/src/vendor/github.com/stretchr/testify/LICENSE new file mode 100644 index 00000000..473b670a --- /dev/null +++ b/src/vendor/github.com/stretchr/testify/LICENSE @@ -0,0 +1,22 @@ +Copyright (c) 2012 - 2013 Mat Ryer and Tyler Bunnell + +Please consider promoting this project if you find it useful. + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without restriction, +including without limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of the Software, +and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included +in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT +OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE +OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/src/vendor/github.com/stretchr/testify/README.md b/src/vendor/github.com/stretchr/testify/README.md new file mode 100644 index 00000000..e57b1811 --- /dev/null +++ b/src/vendor/github.com/stretchr/testify/README.md @@ -0,0 +1,332 @@ +Testify - Thou Shalt Write Tests +================================ + +[![Build Status](https://travis-ci.org/stretchr/testify.svg)](https://travis-ci.org/stretchr/testify) [![Go Report Card](https://goreportcard.com/badge/github.com/stretchr/testify)](https://goreportcard.com/report/github.com/stretchr/testify) [![GoDoc](https://godoc.org/github.com/stretchr/testify?status.svg)](https://godoc.org/github.com/stretchr/testify) + +Go code (golang) set of packages that provide many tools for testifying that your code will behave as you intend. + +Features include: + + * [Easy assertions](#assert-package) + * [Mocking](#mock-package) + * [HTTP response trapping](#http-package) + * [Testing suite interfaces and functions](#suite-package) + +Get started: + + * Install testify with [one line of code](#installation), or [update it with another](#staying-up-to-date) + * For an introduction to writing test code in Go, see http://golang.org/doc/code.html#Testing + * Check out the API Documentation http://godoc.org/github.com/stretchr/testify + * To make your testing life easier, check out our other project, [gorc](http://github.com/stretchr/gorc) + * A little about [Test-Driven Development (TDD)](http://en.wikipedia.org/wiki/Test-driven_development) + + + +[`assert`](http://godoc.org/github.com/stretchr/testify/assert "API documentation") package +------------------------------------------------------------------------------------------- + +The `assert` package provides some helpful methods that allow you to write better test code in Go. 
+ + * Prints friendly, easy to read failure descriptions + * Allows for very readable code + * Optionally annotate each assertion with a message + +See it in action: + +```go +package yours + +import ( + "testing" + "github.com/stretchr/testify/assert" +) + +func TestSomething(t *testing.T) { + + // assert equality + assert.Equal(t, 123, 123, "they should be equal") + + // assert inequality + assert.NotEqual(t, 123, 456, "they should not be equal") + + // assert for nil (good for errors) + assert.Nil(t, object) + + // assert for not nil (good when you expect something) + if assert.NotNil(t, object) { + + // now we know that object isn't nil, we are safe to make + // further assertions without causing any errors + assert.Equal(t, "Something", object.Value) + + } + +} +``` + + * Every assert func takes the `testing.T` object as the first argument. This is how it writes the errors out through the normal `go test` capabilities. + * Every assert func returns a bool indicating whether the assertion was successful or not, this is useful for if you want to go on making further assertions under certain conditions. 
+
+if you assert many times, use the below:
+
+```go
+package yours
+
+import (
+  "testing"
+  "github.com/stretchr/testify/assert"
+)
+
+func TestSomething(t *testing.T) {
+  assert := assert.New(t)
+
+  // assert equality
+  assert.Equal(123, 123, "they should be equal")
+
+  // assert inequality
+  assert.NotEqual(123, 456, "they should not be equal")
+
+  // assert for nil (good for errors)
+  assert.Nil(object)
+
+  // assert for not nil (good when you expect something)
+  if assert.NotNil(object) {
+
+    // now we know that object isn't nil, we are safe to make
+    // further assertions without causing any errors
+    assert.Equal("Something", object.Value)
+  }
+}
+```
+
+[`require`](http://godoc.org/github.com/stretchr/testify/require "API documentation") package
+---------------------------------------------------------------------------------------------
+
+The `require` package provides the same global functions as the `assert` package, but instead of returning a boolean result they terminate the current test.
+
+See [t.FailNow](http://golang.org/pkg/testing/#T.FailNow) for details.
+
+
+[`http`](http://godoc.org/github.com/stretchr/testify/http "API documentation") package
+---------------------------------------------------------------------------------------
+
+The `http` package contains test objects useful for testing code that relies on the `net/http` package. Check out the [(deprecated) API documentation for the `http` package](http://godoc.org/github.com/stretchr/testify/http).
+
+We recommend you use [httptest](http://golang.org/pkg/net/http/httptest) instead.
+
+[`mock`](http://godoc.org/github.com/stretchr/testify/mock "API documentation") package
+----------------------------------------------------------------------------------------
+
+The `mock` package provides a mechanism for easily writing mock objects that can be used in place of real objects when writing test code. 
+ +An example test function that tests a piece of code that relies on an external object `testObj`, can setup expectations (testify) and assert that they indeed happened: + +```go +package yours + +import ( + "testing" + "github.com/stretchr/testify/mock" +) + +/* + Test objects +*/ + +// MyMockedObject is a mocked object that implements an interface +// that describes an object that the code I am testing relies on. +type MyMockedObject struct{ + mock.Mock +} + +// DoSomething is a method on MyMockedObject that implements some interface +// and just records the activity, and returns what the Mock object tells it to. +// +// In the real object, this method would do something useful, but since this +// is a mocked object - we're just going to stub it out. +// +// NOTE: This method is not being tested here, code that uses this object is. +func (m *MyMockedObject) DoSomething(number int) (bool, error) { + + args := m.Called(number) + return args.Bool(0), args.Error(1) + +} + +/* + Actual test functions +*/ + +// TestSomething is an example of how to use our test object to +// make assertions about some target code we are testing. +func TestSomething(t *testing.T) { + + // create an instance of our test object + testObj := new(MyMockedObject) + + // setup expectations + testObj.On("DoSomething", 123).Return(true, nil) + + // call the code we are testing + targetFuncThatDoesSomethingWithObj(testObj) + + // assert that the expectations were met + testObj.AssertExpectations(t) + +} +``` + +For more information on how to write mock code, check out the [API documentation for the `mock` package](http://godoc.org/github.com/stretchr/testify/mock). + +You can use the [mockery tool](http://github.com/vektra/mockery) to autogenerate the mock code against an interface as well, making using mocks much quicker. 
+ +[`suite`](http://godoc.org/github.com/stretchr/testify/suite "API documentation") package +----------------------------------------------------------------------------------------- + +The `suite` package provides functionality that you might be used to from more common object oriented languages. With it, you can build a testing suite as a struct, build setup/teardown methods and testing methods on your struct, and run them with 'go test' as per normal. + +An example suite is shown below: + +```go +// Basic imports +import ( + "testing" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" +) + +// Define the suite, and absorb the built-in basic suite +// functionality from testify - including a T() method which +// returns the current testing context +type ExampleTestSuite struct { + suite.Suite + VariableThatShouldStartAtFive int +} + +// Make sure that VariableThatShouldStartAtFive is set to five +// before each test +func (suite *ExampleTestSuite) SetupTest() { + suite.VariableThatShouldStartAtFive = 5 +} + +// All methods that begin with "Test" are run as tests within a +// suite. +func (suite *ExampleTestSuite) TestExample() { + assert.Equal(suite.T(), 5, suite.VariableThatShouldStartAtFive) +} + +// In order for 'go test' to run this suite, we need to create +// a normal test function and pass our suite to suite.Run +func TestExampleTestSuite(t *testing.T) { + suite.Run(t, new(ExampleTestSuite)) +} +``` + +For a more complete example, using all of the functionality provided by the suite package, look at our [example testing suite](https://github.com/stretchr/testify/blob/master/suite/suite_test.go) + +For more information on writing suites, check out the [API documentation for the `suite` package](http://godoc.org/github.com/stretchr/testify/suite). 
+ +`Suite` object has assertion methods: + +```go +// Basic imports +import ( + "testing" + "github.com/stretchr/testify/suite" +) + +// Define the suite, and absorb the built-in basic suite +// functionality from testify - including assertion methods. +type ExampleTestSuite struct { + suite.Suite + VariableThatShouldStartAtFive int +} + +// Make sure that VariableThatShouldStartAtFive is set to five +// before each test +func (suite *ExampleTestSuite) SetupTest() { + suite.VariableThatShouldStartAtFive = 5 +} + +// All methods that begin with "Test" are run as tests within a +// suite. +func (suite *ExampleTestSuite) TestExample() { + suite.Equal(suite.VariableThatShouldStartAtFive, 5) +} + +// In order for 'go test' to run this suite, we need to create +// a normal test function and pass our suite to suite.Run +func TestExampleTestSuite(t *testing.T) { + suite.Run(t, new(ExampleTestSuite)) +} +``` + +------ + +Installation +============ + +To install Testify, use `go get`: + + * Latest version: go get github.com/stretchr/testify + * Specific version: go get gopkg.in/stretchr/testify.v1 + +This will then make the following packages available to you: + + github.com/stretchr/testify/assert + github.com/stretchr/testify/mock + github.com/stretchr/testify/http + +Import the `testify/assert` package into your code using this template: + +```go +package yours + +import ( + "testing" + "github.com/stretchr/testify/assert" +) + +func TestSomething(t *testing.T) { + + assert.True(t, true, "True is true!") + +} +``` + +------ + +Staying up to date +================== + +To update Testify to the latest version, use `go get -u github.com/stretchr/testify`. + +------ + +Version History +=============== + + * 1.0 - New package versioning strategy adopted. + +------ + +Contributing +============ + +Please feel free to submit issues, fork the repository and send pull requests! 
+ +When submitting an issue, we ask that you please include a complete test function that demonstrates the issue. Extra credit for those using Testify to write the test code that demonstrates it. + +------ + +Licence +======= +Copyright (c) 2012 - 2013 Mat Ryer and Tyler Bunnell + +Please consider promoting this project if you find it useful. + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/src/vendor/github.com/stretchr/testify/_codegen/main.go b/src/vendor/github.com/stretchr/testify/_codegen/main.go new file mode 100644 index 00000000..7cc2531e --- /dev/null +++ b/src/vendor/github.com/stretchr/testify/_codegen/main.go @@ -0,0 +1,293 @@ +// This program reads all assertion functions from the assert package and +// automatically generates the corersponding requires and forwarded assertions + +package main + +import ( + "bytes" + "flag" + "fmt" + "go/ast" + "go/build" + "go/doc" + "go/format" + "go/importer" + "go/parser" + "go/token" + "go/types" + "io" + "io/ioutil" + "log" + "os" + "path" + "strings" + "text/template" + + "github.com/ernesto-jimenez/gogen/imports" +) + +var ( + pkg = flag.String("assert-path", "github.com/stretchr/testify/assert", "Path to the assert package") + outputPkg = flag.String("output-package", "", "package for the resulting code") + tmplFile = flag.String("template", "", "What file to load the function template from") + out = flag.String("out", "", "What file to write the source code to") +) + +func main() { + flag.Parse() + + scope, docs, err := parsePackageSource(*pkg) + if err != nil { + log.Fatal(err) + } + + importer, funcs, err := analyzeCode(scope, docs) + if err != nil { + log.Fatal(err) + } + + if err := generateCode(importer, funcs); err != nil { + log.Fatal(err) + } +} + +func generateCode(importer imports.Importer, funcs []testFunc) error { + buff := bytes.NewBuffer(nil) + + tmplHead, tmplFunc, err := parseTemplates() + if err != nil { + return err + } + + // Generate header + if err := tmplHead.Execute(buff, struct { + Name string + Imports map[string]string + }{ + *outputPkg, + importer.Imports(), + }); err != nil { + return err + } + + // Generate funcs + for _, fn := range funcs { + buff.Write([]byte("\n\n")) + if err := tmplFunc.Execute(buff, &fn); err != nil { + return err + } + } + + code, err := format.Source(buff.Bytes()) + if err != nil { + return err + } + + // Write file + output, err 
:= outputFile() + if err != nil { + return err + } + defer output.Close() + _, err = io.Copy(output, bytes.NewReader(code)) + return err +} + +func parseTemplates() (*template.Template, *template.Template, error) { + tmplHead, err := template.New("header").Parse(headerTemplate) + if err != nil { + return nil, nil, err + } + if *tmplFile != "" { + f, err := ioutil.ReadFile(*tmplFile) + if err != nil { + return nil, nil, err + } + funcTemplate = string(f) + } + tmpl, err := template.New("function").Parse(funcTemplate) + if err != nil { + return nil, nil, err + } + return tmplHead, tmpl, nil +} + +func outputFile() (*os.File, error) { + filename := *out + if filename == "-" || (filename == "" && *tmplFile == "") { + return os.Stdout, nil + } + if filename == "" { + filename = strings.TrimSuffix(strings.TrimSuffix(*tmplFile, ".tmpl"), ".go") + ".go" + } + return os.Create(filename) +} + +// analyzeCode takes the types scope and the docs and returns the import +// information and information about all the assertion functions. 
+func analyzeCode(scope *types.Scope, docs *doc.Package) (imports.Importer, []testFunc, error) { + testingT := scope.Lookup("TestingT").Type().Underlying().(*types.Interface) + + importer := imports.New(*outputPkg) + var funcs []testFunc + // Go through all the top level functions + for _, fdocs := range docs.Funcs { + // Find the function + obj := scope.Lookup(fdocs.Name) + + fn, ok := obj.(*types.Func) + if !ok { + continue + } + // Check function signatuer has at least two arguments + sig := fn.Type().(*types.Signature) + if sig.Params().Len() < 2 { + continue + } + // Check first argument is of type testingT + first, ok := sig.Params().At(0).Type().(*types.Named) + if !ok { + continue + } + firstType, ok := first.Underlying().(*types.Interface) + if !ok { + continue + } + if !types.Implements(firstType, testingT) { + continue + } + + funcs = append(funcs, testFunc{*outputPkg, fdocs, fn}) + importer.AddImportsFrom(sig.Params()) + } + return importer, funcs, nil +} + +// parsePackageSource returns the types scope and the package documentation from the pa +func parsePackageSource(pkg string) (*types.Scope, *doc.Package, error) { + pd, err := build.Import(pkg, ".", 0) + if err != nil { + return nil, nil, err + } + + fset := token.NewFileSet() + files := make(map[string]*ast.File) + fileList := make([]*ast.File, len(pd.GoFiles)) + for i, fname := range pd.GoFiles { + src, err := ioutil.ReadFile(path.Join(pd.SrcRoot, pd.ImportPath, fname)) + if err != nil { + return nil, nil, err + } + f, err := parser.ParseFile(fset, fname, src, parser.ParseComments|parser.AllErrors) + if err != nil { + return nil, nil, err + } + files[fname] = f + fileList[i] = f + } + + cfg := types.Config{ + Importer: importer.Default(), + } + info := types.Info{ + Defs: make(map[*ast.Ident]types.Object), + } + tp, err := cfg.Check(pkg, fset, fileList, &info) + if err != nil { + return nil, nil, err + } + + scope := tp.Scope() + + ap, _ := ast.NewPackage(fset, files, nil, nil) + docs := 
doc.New(ap, pkg, 0) + + return scope, docs, nil +} + +type testFunc struct { + CurrentPkg string + DocInfo *doc.Func + TypeInfo *types.Func +} + +func (f *testFunc) Qualifier(p *types.Package) string { + if p == nil || p.Name() == f.CurrentPkg { + return "" + } + return p.Name() +} + +func (f *testFunc) Params() string { + sig := f.TypeInfo.Type().(*types.Signature) + params := sig.Params() + p := "" + comma := "" + to := params.Len() + var i int + + if sig.Variadic() { + to-- + } + for i = 1; i < to; i++ { + param := params.At(i) + p += fmt.Sprintf("%s%s %s", comma, param.Name(), types.TypeString(param.Type(), f.Qualifier)) + comma = ", " + } + if sig.Variadic() { + param := params.At(params.Len() - 1) + p += fmt.Sprintf("%s%s ...%s", comma, param.Name(), types.TypeString(param.Type().(*types.Slice).Elem(), f.Qualifier)) + } + return p +} + +func (f *testFunc) ForwardedParams() string { + sig := f.TypeInfo.Type().(*types.Signature) + params := sig.Params() + p := "" + comma := "" + to := params.Len() + var i int + + if sig.Variadic() { + to-- + } + for i = 1; i < to; i++ { + param := params.At(i) + p += fmt.Sprintf("%s%s", comma, param.Name()) + comma = ", " + } + if sig.Variadic() { + param := params.At(params.Len() - 1) + p += fmt.Sprintf("%s%s...", comma, param.Name()) + } + return p +} + +func (f *testFunc) Comment() string { + return "// " + strings.Replace(strings.TrimSpace(f.DocInfo.Doc), "\n", "\n// ", -1) +} + +func (f *testFunc) CommentWithoutT(receiver string) string { + search := fmt.Sprintf("assert.%s(t, ", f.DocInfo.Name) + replace := fmt.Sprintf("%s.%s(", receiver, f.DocInfo.Name) + return strings.Replace(f.Comment(), search, replace, -1) +} + +var headerTemplate = `/* +* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen +* THIS FILE MUST NOT BE EDITED BY HAND +*/ + +package {{.Name}} + +import ( +{{range $path, $name := .Imports}} + {{$name}} "{{$path}}"{{end}} +) +` + +var funcTemplate = `{{.Comment}} +func (fwd 
*AssertionsForwarder) {{.DocInfo.Name}}({{.Params}}) bool { + return assert.{{.DocInfo.Name}}({{.ForwardedParams}}) +}` diff --git a/src/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/src/vendor/github.com/stretchr/testify/assert/assertion_forward.go new file mode 100644 index 00000000..29b71d17 --- /dev/null +++ b/src/vendor/github.com/stretchr/testify/assert/assertion_forward.go @@ -0,0 +1,346 @@ +/* +* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen +* THIS FILE MUST NOT BE EDITED BY HAND + */ + +package assert + +import ( + http "net/http" + url "net/url" + time "time" +) + +// Condition uses a Comparison to assert a complex condition. +func (a *Assertions) Condition(comp Comparison, msgAndArgs ...interface{}) bool { + return Condition(a.t, comp, msgAndArgs...) +} + +// Contains asserts that the specified string, list(array, slice...) or map contains the +// specified substring or element. +// +// a.Contains("Hello World", "World", "But 'Hello World' does contain 'World'") +// a.Contains(["Hello", "World"], "World", "But ["Hello", "World"] does contain 'World'") +// a.Contains({"Hello": "World"}, "Hello", "But {'Hello': 'World'} does contain 'Hello'") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool { + return Contains(a.t, s, contains, msgAndArgs...) +} + +// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// a.Empty(obj) +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool { + return Empty(a.t, object, msgAndArgs...) +} + +// Equal asserts that two objects are equal. +// +// a.Equal(123, 123, "123 and 123 should be equal") +// +// Returns whether the assertion was successful (true) or not (false). 
+func (a *Assertions) Equal(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
+	return Equal(a.t, expected, actual, msgAndArgs...)
+}
+
+// EqualError asserts that a function returned an error (i.e. not `nil`)
+// and that it is equal to the provided error.
+//
+// actualObj, err := SomeFunction()
+// a.EqualError(err, expectedErrorString, "An error was expected")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) bool {
+	return EqualError(a.t, theError, errString, msgAndArgs...)
+}
+
+// EqualValues asserts that two objects are equal or convertible to the same types
+// and equal.
+//
+// a.EqualValues(uint32(123), int32(123), "123 and 123 should be equal")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
+	return EqualValues(a.t, expected, actual, msgAndArgs...)
+}
+
+// Error asserts that a function returned an error (i.e. not `nil`).
+//
+// actualObj, err := SomeFunction()
+// if a.Error(err, "An error was expected") {
+//   assert.Equal(t, err, expectedError)
+// }
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Error(err error, msgAndArgs ...interface{}) bool {
+	return Error(a.t, err, msgAndArgs...)
+}
+
+// Exactly asserts that two objects are equal in value and type.
+//
+// a.Exactly(int32(123), int64(123), "123 and 123 should NOT be equal")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
+	return Exactly(a.t, expected, actual, msgAndArgs...)
+}
+
+// Fail reports a failure through the underlying TestingT.
+func (a *Assertions) Fail(failureMessage string, msgAndArgs ...interface{}) bool {
+	return Fail(a.t, failureMessage, msgAndArgs...)
+}
+
+// FailNow fails the test immediately.
+func (a *Assertions) FailNow(failureMessage string, msgAndArgs ...interface{}) bool {
+	return FailNow(a.t, failureMessage, msgAndArgs...)
+}
+
+// False asserts that the specified value is false.
+//
+// a.False(myBool, "myBool should be false")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) False(value bool, msgAndArgs ...interface{}) bool {
+	return False(a.t, value, msgAndArgs...)
+}
+
+// HTTPBodyContains asserts that a specified handler returns a
+// body that contains a string.
+//
+// a.HTTPBodyContains(myHandler, "www.google.com", nil, "I'm Feeling Lucky")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) bool {
+	return HTTPBodyContains(a.t, handler, method, url, values, str)
+}
+
+// HTTPBodyNotContains asserts that a specified handler returns a
+// body that does not contain a string.
+//
+// a.HTTPBodyNotContains(myHandler, "www.google.com", nil, "I'm Feeling Lucky")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) bool {
+	return HTTPBodyNotContains(a.t, handler, method, url, values, str)
+}
+
+// HTTPError asserts that a specified handler returns an error status code.
+//
+// a.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}})
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url string, values url.Values) bool { + return HTTPError(a.t, handler, method, url, values) +} + +// HTTPRedirect asserts that a specified handler returns a redirect status code. +// +// a.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url string, values url.Values) bool { + return HTTPRedirect(a.t, handler, method, url, values) +} + +// HTTPSuccess asserts that a specified handler returns a success status code. +// +// a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil) +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url string, values url.Values) bool { + return HTTPSuccess(a.t, handler, method, url, values) +} + +// Implements asserts that an object is implemented by the specified interface. +// +// a.Implements((*MyInterface)(nil), new(MyObject), "MyObject") +func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { + return Implements(a.t, interfaceObject, object, msgAndArgs...) +} + +// InDelta asserts that the two numerals are within delta of each other. +// +// a.InDelta(math.Pi, (22 / 7.0), 0.01) +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { + return InDelta(a.t, expected, actual, delta, msgAndArgs...) +} + +// InDeltaSlice is the same as InDelta, except it compares two slices. +func (a *Assertions) InDeltaSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { + return InDeltaSlice(a.t, expected, actual, delta, msgAndArgs...) 
+} + +// InEpsilon asserts that expected and actual have a relative error less than epsilon +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) InEpsilon(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { + return InEpsilon(a.t, expected, actual, epsilon, msgAndArgs...) +} + +// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices. +func (a *Assertions) InEpsilonSlice(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { + return InEpsilonSlice(a.t, expected, actual, epsilon, msgAndArgs...) +} + +// IsType asserts that the specified objects are of the same type. +func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { + return IsType(a.t, expectedType, object, msgAndArgs...) +} + +// JSONEq asserts that two JSON strings are equivalent. +// +// a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interface{}) bool { + return JSONEq(a.t, expected, actual, msgAndArgs...) +} + +// Len asserts that the specified object has specific length. +// Len also fails if the object has a type that len() not accept. +// +// a.Len(mySlice, 3, "The size of slice is not 3") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface{}) bool { + return Len(a.t, object, length, msgAndArgs...) +} + +// Nil asserts that the specified object is nil. +// +// a.Nil(err, "err should be nothing") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) bool { + return Nil(a.t, object, msgAndArgs...) 
+} + +// NoError asserts that a function returned no error (i.e. `nil`). +// +// actualObj, err := SomeFunction() +// if a.NoError(err) { +// assert.Equal(t, actualObj, expectedObj) +// } +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) bool { + return NoError(a.t, err, msgAndArgs...) +} + +// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the +// specified substring or element. +// +// a.NotContains("Hello World", "Earth", "But 'Hello World' does NOT contain 'Earth'") +// a.NotContains(["Hello", "World"], "Earth", "But ['Hello', 'World'] does NOT contain 'Earth'") +// a.NotContains({"Hello": "World"}, "Earth", "But {'Hello': 'World'} does NOT contain 'Earth'") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool { + return NotContains(a.t, s, contains, msgAndArgs...) +} + +// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// if a.NotEmpty(obj) { +// assert.Equal(t, "two", obj[1]) +// } +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) bool { + return NotEmpty(a.t, object, msgAndArgs...) +} + +// NotEqual asserts that the specified values are NOT equal. +// +// a.NotEqual(obj1, obj2, "two objects shouldn't be equal") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { + return NotEqual(a.t, expected, actual, msgAndArgs...) +} + +// NotNil asserts that the specified object is not nil. 
+// +// a.NotNil(err, "err should be something") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) bool { + return NotNil(a.t, object, msgAndArgs...) +} + +// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. +// +// a.NotPanics(func(){ +// RemainCalm() +// }, "Calling RemainCalm() should NOT panic") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) NotPanics(f PanicTestFunc, msgAndArgs ...interface{}) bool { + return NotPanics(a.t, f, msgAndArgs...) +} + +// NotRegexp asserts that a specified regexp does not match a string. +// +// a.NotRegexp(regexp.MustCompile("starts"), "it's starting") +// a.NotRegexp("^start", "it's not starting") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { + return NotRegexp(a.t, rx, str, msgAndArgs...) +} + +// NotZero asserts that i is not the zero value for its type and returns the truth. +func (a *Assertions) NotZero(i interface{}, msgAndArgs ...interface{}) bool { + return NotZero(a.t, i, msgAndArgs...) +} + +// Panics asserts that the code inside the specified PanicTestFunc panics. +// +// a.Panics(func(){ +// GoCrazy() +// }, "Calling GoCrazy() should panic") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Panics(f PanicTestFunc, msgAndArgs ...interface{}) bool { + return Panics(a.t, f, msgAndArgs...) +} + +// Regexp asserts that a specified regexp matches a string. +// +// a.Regexp(regexp.MustCompile("start"), "it's starting") +// a.Regexp("start...$", "it's not starting") +// +// Returns whether the assertion was successful (true) or not (false). 
+func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { + return Regexp(a.t, rx, str, msgAndArgs...) +} + +// True asserts that the specified value is true. +// +// a.True(myBool, "myBool should be true") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) True(value bool, msgAndArgs ...interface{}) bool { + return True(a.t, value, msgAndArgs...) +} + +// WithinDuration asserts that the two times are within duration delta of each other. +// +// a.WithinDuration(time.Now(), time.Now(), 10*time.Second, "The difference should not be more than 10s") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) WithinDuration(expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool { + return WithinDuration(a.t, expected, actual, delta, msgAndArgs...) +} + +// Zero asserts that i is the zero value for its type and returns the truth. +func (a *Assertions) Zero(i interface{}, msgAndArgs ...interface{}) bool { + return Zero(a.t, i, msgAndArgs...) 
+} diff --git a/src/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl b/src/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl new file mode 100644 index 00000000..99f9acfb --- /dev/null +++ b/src/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl @@ -0,0 +1,4 @@ +{{.CommentWithoutT "a"}} +func (a *Assertions) {{.DocInfo.Name}}({{.Params}}) bool { + return {{.DocInfo.Name}}(a.t, {{.ForwardedParams}}) +} diff --git a/src/vendor/github.com/stretchr/testify/assert/assertions.go b/src/vendor/github.com/stretchr/testify/assert/assertions.go new file mode 100644 index 00000000..4aaa4b05 --- /dev/null +++ b/src/vendor/github.com/stretchr/testify/assert/assertions.go @@ -0,0 +1,1060 @@ +package assert + +import ( + "bufio" + "bytes" + "encoding/json" + "fmt" + "math" + "reflect" + "regexp" + "runtime" + "strings" + "time" + "unicode" + "unicode/utf8" + + "github.com/davecgh/go-spew/spew" + "github.com/pmezard/go-difflib/difflib" +) + +func init() { + spew.Config.SortKeys = true +} + +// TestingT is an interface wrapper around *testing.T +type TestingT interface { + Errorf(format string, args ...interface{}) +} + +// Comparison a custom function that returns true on success and false on failure +type Comparison func() (success bool) + +/* + Helper functions +*/ + +// ObjectsAreEqual determines if two objects are considered equal. +// +// This function does no assertion of any kind. +func ObjectsAreEqual(expected, actual interface{}) bool { + + if expected == nil || actual == nil { + return expected == actual + } + + return reflect.DeepEqual(expected, actual) + +} + +// ObjectsAreEqualValues gets whether two objects are equal, or if their +// values are equal. 
+func ObjectsAreEqualValues(expected, actual interface{}) bool { + if ObjectsAreEqual(expected, actual) { + return true + } + + actualType := reflect.TypeOf(actual) + if actualType == nil { + return false + } + expectedValue := reflect.ValueOf(expected) + if expectedValue.IsValid() && expectedValue.Type().ConvertibleTo(actualType) { + // Attempt comparison after type conversion + return reflect.DeepEqual(expectedValue.Convert(actualType).Interface(), actual) + } + + return false +} + +/* CallerInfo is necessary because the assert functions use the testing object +internally, causing it to print the file:line of the assert method, rather than where +the problem actually occurred in calling code.*/ + +// CallerInfo returns an array of strings containing the file and line number +// of each stack frame leading from the current test to the assert call that +// failed. +func CallerInfo() []string { + + pc := uintptr(0) + file := "" + line := 0 + ok := false + name := "" + + callers := []string{} + for i := 0; ; i++ { + pc, file, line, ok = runtime.Caller(i) + if !ok { + // The breaks below failed to terminate the loop, and we ran off the + // end of the call stack. + break + } + + // This is a huge edge case, but it will panic if this is the case, see #180 + if file == "" { + break + } + + f := runtime.FuncForPC(pc) + if f == nil { + break + } + name = f.Name() + + // testing.tRunner is the standard library function that calls + // tests. Subtests are called directly by tRunner, without going through + // the Test/Benchmark/Example function that contains the t.Run calls, so + // with subtests we should break when we hit tRunner, without adding it + // to the list of callers. 
+ if name == "testing.tRunner" { + break + } + + parts := strings.Split(file, "/") + dir := parts[len(parts)-2] + file = parts[len(parts)-1] + if (dir != "assert" && dir != "mock" && dir != "require") || file == "mock_test.go" { + callers = append(callers, fmt.Sprintf("%s:%d", file, line)) + } + + // Drop the package + segments := strings.Split(name, ".") + name = segments[len(segments)-1] + if isTest(name, "Test") || + isTest(name, "Benchmark") || + isTest(name, "Example") { + break + } + } + + return callers +} + +// Stolen from the `go test` tool. +// isTest tells whether name looks like a test (or benchmark, according to prefix). +// It is a Test (say) if there is a character after Test that is not a lower-case letter. +// We don't want TesticularCancer. +func isTest(name, prefix string) bool { + if !strings.HasPrefix(name, prefix) { + return false + } + if len(name) == len(prefix) { // "Test" is ok + return true + } + rune, _ := utf8.DecodeRuneInString(name[len(prefix):]) + return !unicode.IsLower(rune) +} + +// getWhitespaceString returns a string that is long enough to overwrite the default +// output from the go testing framework. +func getWhitespaceString() string { + + _, file, line, ok := runtime.Caller(1) + if !ok { + return "" + } + parts := strings.Split(file, "/") + file = parts[len(parts)-1] + + return strings.Repeat(" ", len(fmt.Sprintf("%s:%d: ", file, line))) + +} + +func messageFromMsgAndArgs(msgAndArgs ...interface{}) string { + if len(msgAndArgs) == 0 || msgAndArgs == nil { + return "" + } + if len(msgAndArgs) == 1 { + return msgAndArgs[0].(string) + } + if len(msgAndArgs) > 1 { + return fmt.Sprintf(msgAndArgs[0].(string), msgAndArgs[1:]...) 
+ } + return "" +} + +// Indents all lines of the message by appending a number of tabs to each line, in an output format compatible with Go's +// test printing (see inner comment for specifics) +func indentMessageLines(message string, tabs int) string { + outBuf := new(bytes.Buffer) + + for i, scanner := 0, bufio.NewScanner(strings.NewReader(message)); scanner.Scan(); i++ { + if i != 0 { + outBuf.WriteRune('\n') + } + for ii := 0; ii < tabs; ii++ { + outBuf.WriteRune('\t') + // Bizarrely, all lines except the first need one fewer tabs prepended, so deliberately advance the counter + // by 1 prematurely. + if ii == 0 && i > 0 { + ii++ + } + } + outBuf.WriteString(scanner.Text()) + } + + return outBuf.String() +} + +type failNower interface { + FailNow() +} + +// FailNow fails test +func FailNow(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool { + Fail(t, failureMessage, msgAndArgs...) + + // We cannot extend TestingT with FailNow() and + // maintain backwards compatibility, so we fallback + // to panicking when FailNow is not available in + // TestingT. + // See issue #263 + + if t, ok := t.(failNower); ok { + t.FailNow() + } else { + panic("test failed and t is missing `FailNow()`") + } + return false +} + +// Fail reports a failure through +func Fail(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool { + + message := messageFromMsgAndArgs(msgAndArgs...) + + errorTrace := strings.Join(CallerInfo(), "\n\r\t\t\t") + if len(message) > 0 { + t.Errorf("\033[31m\r%s\r\tError Trace:\t%s\n"+ + "\r\tError:%s\n"+ + "\r\tMessages:\t%s\033[39m\n\r", + getWhitespaceString(), + errorTrace, + indentMessageLines(failureMessage, 2), + message) + } else { + t.Errorf("\033[31m\r%s\r\tError Trace:\t%s\033[39m\n"+ + "\r\tError:%s\n\r", + getWhitespaceString(), + errorTrace, + indentMessageLines(failureMessage, 2)) + } + + return false +} + +// Implements asserts that an object is implemented by the specified interface. 
+// +// assert.Implements(t, (*MyInterface)(nil), new(MyObject), "MyObject") +func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { + + interfaceType := reflect.TypeOf(interfaceObject).Elem() + + if !reflect.TypeOf(object).Implements(interfaceType) { + return Fail(t, fmt.Sprintf("%T must implement %v", object, interfaceType), msgAndArgs...) + } + + return true + +} + +// IsType asserts that the specified objects are of the same type. +func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { + + if !ObjectsAreEqual(reflect.TypeOf(object), reflect.TypeOf(expectedType)) { + return Fail(t, fmt.Sprintf("Object expected to be of type %v, but was %v", reflect.TypeOf(expectedType), reflect.TypeOf(object)), msgAndArgs...) + } + + return true +} + +// Equal asserts that two objects are equal. +// +// assert.Equal(t, 123, 123, "123 and 123 should be equal") +// +// Returns whether the assertion was successful (true) or not (false). +func Equal(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { + + if !ObjectsAreEqual(expected, actual) { + diff := diff(expected, actual) + expected, actual = formatUnequalValues(expected, actual) + return Fail(t, fmt.Sprintf("Not equal: \n"+ + "expected: %s\n"+ + "received: %s%s", expected, actual, diff), msgAndArgs...) + } + + return true + +} + +// formatUnequalValues takes two values of arbitrary types and returns string +// representations appropriate to be presented to the user. +// +// If the values are not of like type, the returned strings will be prefixed +// with the type name, and the value will be enclosed in parenthesis similar +// to a type conversion in the Go grammar. 
+func formatUnequalValues(expected, actual interface{}) (e string, a string) { + aType := reflect.TypeOf(expected) + bType := reflect.TypeOf(actual) + + if aType != bType && isNumericType(aType) && isNumericType(bType) { + return fmt.Sprintf("%v(%#v)", aType, expected), + fmt.Sprintf("%v(%#v)", bType, actual) + } + + return fmt.Sprintf("%#v", expected), + fmt.Sprintf("%#v", actual) +} + +func isNumericType(t reflect.Type) bool { + switch t.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return true + case reflect.Float32, reflect.Float64: + return true + } + + return false +} + +// EqualValues asserts that two objects are equal or convertable to the same types +// and equal. +// +// assert.EqualValues(t, uint32(123), int32(123), "123 and 123 should be equal") +// +// Returns whether the assertion was successful (true) or not (false). +func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { + + if !ObjectsAreEqualValues(expected, actual) { + diff := diff(expected, actual) + expected, actual = formatUnequalValues(expected, actual) + return Fail(t, fmt.Sprintf("Not equal: \n"+ + "expected: %s\n"+ + "received: %s%s", expected, actual, diff), msgAndArgs...) + } + + return true + +} + +// Exactly asserts that two objects are equal is value and type. +// +// assert.Exactly(t, int32(123), int64(123), "123 and 123 should NOT be equal") +// +// Returns whether the assertion was successful (true) or not (false). +func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { + + aType := reflect.TypeOf(expected) + bType := reflect.TypeOf(actual) + + if aType != bType { + return Fail(t, fmt.Sprintf("Types expected to match exactly\n\r\t%v != %v", aType, bType), msgAndArgs...) + } + + return Equal(t, expected, actual, msgAndArgs...) 
+ +} + +// NotNil asserts that the specified object is not nil. +// +// assert.NotNil(t, err, "err should be something") +// +// Returns whether the assertion was successful (true) or not (false). +func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { + if !isNil(object) { + return true + } + return Fail(t, "Expected value not to be nil.", msgAndArgs...) +} + +// isNil checks if a specified object is nil or not, without Failing. +func isNil(object interface{}) bool { + if object == nil { + return true + } + + value := reflect.ValueOf(object) + kind := value.Kind() + if kind >= reflect.Chan && kind <= reflect.Slice && value.IsNil() { + return true + } + + return false +} + +// Nil asserts that the specified object is nil. +// +// assert.Nil(t, err, "err should be nothing") +// +// Returns whether the assertion was successful (true) or not (false). +func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { + if isNil(object) { + return true + } + return Fail(t, fmt.Sprintf("Expected nil, but got: %#v", object), msgAndArgs...) +} + +var numericZeros = []interface{}{ + int(0), + int8(0), + int16(0), + int32(0), + int64(0), + uint(0), + uint8(0), + uint16(0), + uint32(0), + uint64(0), + float32(0), + float64(0), +} + +// isEmpty gets whether the specified object is considered empty or not. 
+func isEmpty(object interface{}) bool { + + if object == nil { + return true + } else if object == "" { + return true + } else if object == false { + return true + } + + for _, v := range numericZeros { + if object == v { + return true + } + } + + objValue := reflect.ValueOf(object) + + switch objValue.Kind() { + case reflect.Map: + fallthrough + case reflect.Slice, reflect.Chan: + { + return (objValue.Len() == 0) + } + case reflect.Struct: + switch object.(type) { + case time.Time: + return object.(time.Time).IsZero() + } + case reflect.Ptr: + { + if objValue.IsNil() { + return true + } + switch object.(type) { + case *time.Time: + return object.(*time.Time).IsZero() + default: + return false + } + } + } + return false +} + +// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// assert.Empty(t, obj) +// +// Returns whether the assertion was successful (true) or not (false). +func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { + + pass := isEmpty(object) + if !pass { + Fail(t, fmt.Sprintf("Should be empty, but was %v", object), msgAndArgs...) + } + + return pass + +} + +// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// if assert.NotEmpty(t, obj) { +// assert.Equal(t, "two", obj[1]) +// } +// +// Returns whether the assertion was successful (true) or not (false). +func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { + + pass := !isEmpty(object) + if !pass { + Fail(t, fmt.Sprintf("Should NOT be empty, but was %v", object), msgAndArgs...) + } + + return pass + +} + +// getLen try to get length of object. +// return (false, 0) if impossible. 
+func getLen(x interface{}) (ok bool, length int) { + v := reflect.ValueOf(x) + defer func() { + if e := recover(); e != nil { + ok = false + } + }() + return true, v.Len() +} + +// Len asserts that the specified object has specific length. +// Len also fails if the object has a type that len() not accept. +// +// assert.Len(t, mySlice, 3, "The size of slice is not 3") +// +// Returns whether the assertion was successful (true) or not (false). +func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) bool { + ok, l := getLen(object) + if !ok { + return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", object), msgAndArgs...) + } + + if l != length { + return Fail(t, fmt.Sprintf("\"%s\" should have %d item(s), but has %d", object, length, l), msgAndArgs...) + } + return true +} + +// True asserts that the specified value is true. +// +// assert.True(t, myBool, "myBool should be true") +// +// Returns whether the assertion was successful (true) or not (false). +func True(t TestingT, value bool, msgAndArgs ...interface{}) bool { + + if value != true { + return Fail(t, "Should be true", msgAndArgs...) + } + + return true + +} + +// False asserts that the specified value is false. +// +// assert.False(t, myBool, "myBool should be false") +// +// Returns whether the assertion was successful (true) or not (false). +func False(t TestingT, value bool, msgAndArgs ...interface{}) bool { + + if value != false { + return Fail(t, "Should be false", msgAndArgs...) + } + + return true + +} + +// NotEqual asserts that the specified values are NOT equal. +// +// assert.NotEqual(t, obj1, obj2, "two objects shouldn't be equal") +// +// Returns whether the assertion was successful (true) or not (false). +func NotEqual(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { + + if ObjectsAreEqual(expected, actual) { + return Fail(t, fmt.Sprintf("Should not be: %#v\n", actual), msgAndArgs...) 
+ } + + return true + +} + +// containsElement try loop over the list check if the list includes the element. +// return (false, false) if impossible. +// return (true, false) if element was not found. +// return (true, true) if element was found. +func includeElement(list interface{}, element interface{}) (ok, found bool) { + + listValue := reflect.ValueOf(list) + elementValue := reflect.ValueOf(element) + defer func() { + if e := recover(); e != nil { + ok = false + found = false + } + }() + + if reflect.TypeOf(list).Kind() == reflect.String { + return true, strings.Contains(listValue.String(), elementValue.String()) + } + + if reflect.TypeOf(list).Kind() == reflect.Map { + mapKeys := listValue.MapKeys() + for i := 0; i < len(mapKeys); i++ { + if ObjectsAreEqual(mapKeys[i].Interface(), element) { + return true, true + } + } + return true, false + } + + for i := 0; i < listValue.Len(); i++ { + if ObjectsAreEqual(listValue.Index(i).Interface(), element) { + return true, true + } + } + return true, false + +} + +// Contains asserts that the specified string, list(array, slice...) or map contains the +// specified substring or element. +// +// assert.Contains(t, "Hello World", "World", "But 'Hello World' does contain 'World'") +// assert.Contains(t, ["Hello", "World"], "World", "But ["Hello", "World"] does contain 'World'") +// assert.Contains(t, {"Hello": "World"}, "Hello", "But {'Hello': 'World'} does contain 'Hello'") +// +// Returns whether the assertion was successful (true) or not (false). +func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool { + + ok, found := includeElement(s, contains) + if !ok { + return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...) + } + if !found { + return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", s, contains), msgAndArgs...) + } + + return true + +} + +// NotContains asserts that the specified string, list(array, slice...) 
or map does NOT contain the +// specified substring or element. +// +// assert.NotContains(t, "Hello World", "Earth", "But 'Hello World' does NOT contain 'Earth'") +// assert.NotContains(t, ["Hello", "World"], "Earth", "But ['Hello', 'World'] does NOT contain 'Earth'") +// assert.NotContains(t, {"Hello": "World"}, "Earth", "But {'Hello': 'World'} does NOT contain 'Earth'") +// +// Returns whether the assertion was successful (true) or not (false). +func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool { + + ok, found := includeElement(s, contains) + if !ok { + return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...) + } + if found { + return Fail(t, fmt.Sprintf("\"%s\" should not contain \"%s\"", s, contains), msgAndArgs...) + } + + return true + +} + +// Condition uses a Comparison to assert a complex condition. +func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool { + result := comp() + if !result { + Fail(t, "Condition failed!", msgAndArgs...) + } + return result +} + +// PanicTestFunc defines a func that should be passed to the assert.Panics and assert.NotPanics +// methods, and represents a simple func that takes no arguments, and returns nothing. +type PanicTestFunc func() + +// didPanic returns true if the function passed to it panics. Otherwise, it returns false. +func didPanic(f PanicTestFunc) (bool, interface{}) { + + didPanic := false + var message interface{} + func() { + + defer func() { + if message = recover(); message != nil { + didPanic = true + } + }() + + // call the target function + f() + + }() + + return didPanic, message + +} + +// Panics asserts that the code inside the specified PanicTestFunc panics. +// +// assert.Panics(t, func(){ +// GoCrazy() +// }, "Calling GoCrazy() should panic") +// +// Returns whether the assertion was successful (true) or not (false). 
+func Panics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { + + if funcDidPanic, panicValue := didPanic(f); !funcDidPanic { + return Fail(t, fmt.Sprintf("func %#v should panic\n\r\tPanic value:\t%v", f, panicValue), msgAndArgs...) + } + + return true +} + +// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. +// +// assert.NotPanics(t, func(){ +// RemainCalm() +// }, "Calling RemainCalm() should NOT panic") +// +// Returns whether the assertion was successful (true) or not (false). +func NotPanics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { + + if funcDidPanic, panicValue := didPanic(f); funcDidPanic { + return Fail(t, fmt.Sprintf("func %#v should not panic\n\r\tPanic value:\t%v", f, panicValue), msgAndArgs...) + } + + return true +} + +// WithinDuration asserts that the two times are within duration delta of each other. +// +// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second, "The difference should not be more than 10s") +// +// Returns whether the assertion was successful (true) or not (false). +func WithinDuration(t TestingT, expected, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool { + + dt := expected.Sub(actual) + if dt < -delta || dt > delta { + return Fail(t, fmt.Sprintf("Max difference between %v and %v allowed is %v, but difference was %v", expected, actual, delta, dt), msgAndArgs...) 
+ } + + return true +} + +func toFloat(x interface{}) (float64, bool) { + var xf float64 + xok := true + + switch xn := x.(type) { + case uint8: + xf = float64(xn) + case uint16: + xf = float64(xn) + case uint32: + xf = float64(xn) + case uint64: + xf = float64(xn) + case int: + xf = float64(xn) + case int8: + xf = float64(xn) + case int16: + xf = float64(xn) + case int32: + xf = float64(xn) + case int64: + xf = float64(xn) + case float32: + xf = float64(xn) + case float64: + xf = float64(xn) + default: + xok = false + } + + return xf, xok +} + +// InDelta asserts that the two numerals are within delta of each other. +// +// assert.InDelta(t, math.Pi, (22 / 7.0), 0.01) +// +// Returns whether the assertion was successful (true) or not (false). +func InDelta(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { + + af, aok := toFloat(expected) + bf, bok := toFloat(actual) + + if !aok || !bok { + return Fail(t, fmt.Sprintf("Parameters must be numerical"), msgAndArgs...) + } + + if math.IsNaN(af) { + return Fail(t, fmt.Sprintf("Actual must not be NaN"), msgAndArgs...) + } + + if math.IsNaN(bf) { + return Fail(t, fmt.Sprintf("Expected %v with delta %v, but was NaN", expected, delta), msgAndArgs...) + } + + dt := af - bf + if dt < -delta || dt > delta { + return Fail(t, fmt.Sprintf("Max difference between %v and %v allowed is %v, but difference was %v", expected, actual, delta, dt), msgAndArgs...) + } + + return true +} + +// InDeltaSlice is the same as InDelta, except it compares two slices. +func InDeltaSlice(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { + if expected == nil || actual == nil || + reflect.TypeOf(actual).Kind() != reflect.Slice || + reflect.TypeOf(expected).Kind() != reflect.Slice { + return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...) 
+ } + + actualSlice := reflect.ValueOf(actual) + expectedSlice := reflect.ValueOf(expected) + + for i := 0; i < actualSlice.Len(); i++ { + result := InDelta(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), delta) + if !result { + return result + } + } + + return true +} + +func calcRelativeError(expected, actual interface{}) (float64, error) { + af, aok := toFloat(expected) + if !aok { + return 0, fmt.Errorf("expected value %q cannot be converted to float", expected) + } + if af == 0 { + return 0, fmt.Errorf("expected value must have a value other than zero to calculate the relative error") + } + bf, bok := toFloat(actual) + if !bok { + return 0, fmt.Errorf("expected value %q cannot be converted to float", actual) + } + + return math.Abs(af-bf) / math.Abs(af), nil +} + +// InEpsilon asserts that expected and actual have a relative error less than epsilon +// +// Returns whether the assertion was successful (true) or not (false). +func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { + actualEpsilon, err := calcRelativeError(expected, actual) + if err != nil { + return Fail(t, err.Error(), msgAndArgs...) + } + if actualEpsilon > epsilon { + return Fail(t, fmt.Sprintf("Relative error is too high: %#v (expected)\n"+ + " < %#v (actual)", actualEpsilon, epsilon), msgAndArgs...) + } + + return true +} + +// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices. +func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { + if expected == nil || actual == nil || + reflect.TypeOf(actual).Kind() != reflect.Slice || + reflect.TypeOf(expected).Kind() != reflect.Slice { + return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...) 
+ } + + actualSlice := reflect.ValueOf(actual) + expectedSlice := reflect.ValueOf(expected) + + for i := 0; i < actualSlice.Len(); i++ { + result := InEpsilon(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), epsilon) + if !result { + return result + } + } + + return true +} + +/* + Errors +*/ + +// NoError asserts that a function returned no error (i.e. `nil`). +// +// actualObj, err := SomeFunction() +// if assert.NoError(t, err) { +// assert.Equal(t, actualObj, expectedObj) +// } +// +// Returns whether the assertion was successful (true) or not (false). +func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool { + if err != nil { + return Fail(t, fmt.Sprintf("Received unexpected error:\n%+v", err), msgAndArgs...) + } + + return true +} + +// Error asserts that a function returned an error (i.e. not `nil`). +// +// actualObj, err := SomeFunction() +// if assert.Error(t, err, "An error was expected") { +// assert.Equal(t, err, expectedError) +// } +// +// Returns whether the assertion was successful (true) or not (false). +func Error(t TestingT, err error, msgAndArgs ...interface{}) bool { + + if err == nil { + return Fail(t, "An error is expected but got nil.", msgAndArgs...) + } + + return true +} + +// EqualError asserts that a function returned an error (i.e. not `nil`) +// and that it is equal to the provided error. +// +// actualObj, err := SomeFunction() +// assert.EqualError(t, err, expectedErrorString, "An error was expected") +// +// Returns whether the assertion was successful (true) or not (false). +func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) bool { + if !Error(t, theError, msgAndArgs...) 
{ + return false + } + expected := errString + actual := theError.Error() + // don't need to use deep equals here, we know they are both strings + if expected != actual { + return Fail(t, fmt.Sprintf("Error message not equal:\n"+ + "expected: %q\n"+ + "received: %q", expected, actual), msgAndArgs...) + } + return true +} + +// matchRegexp return true if a specified regexp matches a string. +func matchRegexp(rx interface{}, str interface{}) bool { + + var r *regexp.Regexp + if rr, ok := rx.(*regexp.Regexp); ok { + r = rr + } else { + r = regexp.MustCompile(fmt.Sprint(rx)) + } + + return (r.FindStringIndex(fmt.Sprint(str)) != nil) + +} + +// Regexp asserts that a specified regexp matches a string. +// +// assert.Regexp(t, regexp.MustCompile("start"), "it's starting") +// assert.Regexp(t, "start...$", "it's not starting") +// +// Returns whether the assertion was successful (true) or not (false). +func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { + + match := matchRegexp(rx, str) + + if !match { + Fail(t, fmt.Sprintf("Expect \"%v\" to match \"%v\"", str, rx), msgAndArgs...) + } + + return match +} + +// NotRegexp asserts that a specified regexp does not match a string. +// +// assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting") +// assert.NotRegexp(t, "^start", "it's not starting") +// +// Returns whether the assertion was successful (true) or not (false). +func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { + match := matchRegexp(rx, str) + + if match { + Fail(t, fmt.Sprintf("Expect \"%v\" to NOT match \"%v\"", str, rx), msgAndArgs...) + } + + return !match + +} + +// Zero asserts that i is the zero value for its type and returns the truth. 
+func Zero(t TestingT, i interface{}, msgAndArgs ...interface{}) bool { + if i != nil && !reflect.DeepEqual(i, reflect.Zero(reflect.TypeOf(i)).Interface()) { + return Fail(t, fmt.Sprintf("Should be zero, but was %v", i), msgAndArgs...) + } + return true +} + +// NotZero asserts that i is not the zero value for its type and returns the truth. +func NotZero(t TestingT, i interface{}, msgAndArgs ...interface{}) bool { + if i == nil || reflect.DeepEqual(i, reflect.Zero(reflect.TypeOf(i)).Interface()) { + return Fail(t, fmt.Sprintf("Should not be zero, but was %v", i), msgAndArgs...) + } + return true +} + +// JSONEq asserts that two JSON strings are equivalent. +// +// assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) +// +// Returns whether the assertion was successful (true) or not (false). +func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) bool { + var expectedJSONAsInterface, actualJSONAsInterface interface{} + + if err := json.Unmarshal([]byte(expected), &expectedJSONAsInterface); err != nil { + return Fail(t, fmt.Sprintf("Expected value ('%s') is not valid json.\nJSON parsing error: '%s'", expected, err.Error()), msgAndArgs...) + } + + if err := json.Unmarshal([]byte(actual), &actualJSONAsInterface); err != nil { + return Fail(t, fmt.Sprintf("Input ('%s') needs to be valid json.\nJSON parsing error: '%s'", actual, err.Error()), msgAndArgs...) + } + + return Equal(t, expectedJSONAsInterface, actualJSONAsInterface, msgAndArgs...) +} + +func typeAndKind(v interface{}) (reflect.Type, reflect.Kind) { + t := reflect.TypeOf(v) + k := t.Kind() + + if k == reflect.Ptr { + t = t.Elem() + k = t.Kind() + } + return t, k +} + +// diff returns a diff of both values as long as both are of the same type and +// are a struct, map, slice or array. Otherwise it returns an empty string. 
+func diff(expected interface{}, actual interface{}) string { + if expected == nil || actual == nil { + return "" + } + + et, ek := typeAndKind(expected) + at, _ := typeAndKind(actual) + + if et != at { + return "" + } + + if ek != reflect.Struct && ek != reflect.Map && ek != reflect.Slice && ek != reflect.Array { + return "" + } + + e := spew.Sdump(expected) + a := spew.Sdump(actual) + + diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{ + A: difflib.SplitLines(e), + B: difflib.SplitLines(a), + FromFile: "Expected", + FromDate: "", + ToFile: "Actual", + ToDate: "", + Context: 1, + }) + + return "\n\nDiff:\n" + diff +} diff --git a/src/vendor/github.com/stretchr/testify/assert/assertions_test.go b/src/vendor/github.com/stretchr/testify/assert/assertions_test.go new file mode 100644 index 00000000..7a2b0fb4 --- /dev/null +++ b/src/vendor/github.com/stretchr/testify/assert/assertions_test.go @@ -0,0 +1,1210 @@ +package assert + +import ( + "errors" + "io" + "math" + "os" + "reflect" + "regexp" + "testing" + "time" +) + +var ( + i interface{} + zeros = []interface{}{ + false, + byte(0), + complex64(0), + complex128(0), + float32(0), + float64(0), + int(0), + int8(0), + int16(0), + int32(0), + int64(0), + rune(0), + uint(0), + uint8(0), + uint16(0), + uint32(0), + uint64(0), + uintptr(0), + "", + [0]interface{}{}, + []interface{}(nil), + struct{ x int }{}, + (*interface{})(nil), + (func())(nil), + nil, + interface{}(nil), + map[interface{}]interface{}(nil), + (chan interface{})(nil), + (<-chan interface{})(nil), + (chan<- interface{})(nil), + } + nonZeros = []interface{}{ + true, + byte(1), + complex64(1), + complex128(1), + float32(1), + float64(1), + int(1), + int8(1), + int16(1), + int32(1), + int64(1), + rune(1), + uint(1), + uint8(1), + uint16(1), + uint32(1), + uint64(1), + uintptr(1), + "s", + [1]interface{}{1}, + []interface{}{}, + struct{ x int }{1}, + (*interface{})(&i), + (func())(func() {}), + interface{}(1), + map[interface{}]interface{}{}, + 
(chan interface{})(make(chan interface{})), + (<-chan interface{})(make(chan interface{})), + (chan<- interface{})(make(chan interface{})), + } +) + +// AssertionTesterInterface defines an interface to be used for testing assertion methods +type AssertionTesterInterface interface { + TestMethod() +} + +// AssertionTesterConformingObject is an object that conforms to the AssertionTesterInterface interface +type AssertionTesterConformingObject struct { +} + +func (a *AssertionTesterConformingObject) TestMethod() { +} + +// AssertionTesterNonConformingObject is an object that does not conform to the AssertionTesterInterface interface +type AssertionTesterNonConformingObject struct { +} + +func TestObjectsAreEqual(t *testing.T) { + + if !ObjectsAreEqual("Hello World", "Hello World") { + t.Error("objectsAreEqual should return true") + } + if !ObjectsAreEqual(123, 123) { + t.Error("objectsAreEqual should return true") + } + if !ObjectsAreEqual(123.5, 123.5) { + t.Error("objectsAreEqual should return true") + } + if !ObjectsAreEqual([]byte("Hello World"), []byte("Hello World")) { + t.Error("objectsAreEqual should return true") + } + if !ObjectsAreEqual(nil, nil) { + t.Error("objectsAreEqual should return true") + } + if ObjectsAreEqual(map[int]int{5: 10}, map[int]int{10: 20}) { + t.Error("objectsAreEqual should return false") + } + if ObjectsAreEqual('x', "x") { + t.Error("objectsAreEqual should return false") + } + if ObjectsAreEqual("x", 'x') { + t.Error("objectsAreEqual should return false") + } + if ObjectsAreEqual(0, 0.1) { + t.Error("objectsAreEqual should return false") + } + if ObjectsAreEqual(0.1, 0) { + t.Error("objectsAreEqual should return false") + } + if ObjectsAreEqual(uint32(10), int32(10)) { + t.Error("objectsAreEqual should return false") + } + if !ObjectsAreEqualValues(uint32(10), int32(10)) { + t.Error("ObjectsAreEqualValues should return true") + } + if ObjectsAreEqualValues(0, nil) { + t.Fail() + } + if ObjectsAreEqualValues(nil, 0) { + t.Fail() + } 
+ +} + +func TestImplements(t *testing.T) { + + mockT := new(testing.T) + + if !Implements(mockT, (*AssertionTesterInterface)(nil), new(AssertionTesterConformingObject)) { + t.Error("Implements method should return true: AssertionTesterConformingObject implements AssertionTesterInterface") + } + if Implements(mockT, (*AssertionTesterInterface)(nil), new(AssertionTesterNonConformingObject)) { + t.Error("Implements method should return false: AssertionTesterNonConformingObject does not implements AssertionTesterInterface") + } + +} + +func TestIsType(t *testing.T) { + + mockT := new(testing.T) + + if !IsType(mockT, new(AssertionTesterConformingObject), new(AssertionTesterConformingObject)) { + t.Error("IsType should return true: AssertionTesterConformingObject is the same type as AssertionTesterConformingObject") + } + if IsType(mockT, new(AssertionTesterConformingObject), new(AssertionTesterNonConformingObject)) { + t.Error("IsType should return false: AssertionTesterConformingObject is not the same type as AssertionTesterNonConformingObject") + } + +} + +func TestEqual(t *testing.T) { + + mockT := new(testing.T) + + if !Equal(mockT, "Hello World", "Hello World") { + t.Error("Equal should return true") + } + if !Equal(mockT, 123, 123) { + t.Error("Equal should return true") + } + if !Equal(mockT, 123.5, 123.5) { + t.Error("Equal should return true") + } + if !Equal(mockT, []byte("Hello World"), []byte("Hello World")) { + t.Error("Equal should return true") + } + if !Equal(mockT, nil, nil) { + t.Error("Equal should return true") + } + if !Equal(mockT, int32(123), int32(123)) { + t.Error("Equal should return true") + } + if !Equal(mockT, uint64(123), uint64(123)) { + t.Error("Equal should return true") + } + +} + +func TestFormatUnequalValues(t *testing.T) { + expected, actual := formatUnequalValues("foo", "bar") + Equal(t, `"foo"`, expected, "value should not include type") + Equal(t, `"bar"`, actual, "value should not include type") + + expected, actual = 
formatUnequalValues(123, 123) + Equal(t, `123`, expected, "value should not include type") + Equal(t, `123`, actual, "value should not include type") + + expected, actual = formatUnequalValues(int64(123), int32(123)) + Equal(t, `int64(123)`, expected, "value should include type") + Equal(t, `int32(123)`, actual, "value should include type") + + type testStructType struct { + Val string + } + + expected, actual = formatUnequalValues(&testStructType{Val: "test"}, &testStructType{Val: "test"}) + Equal(t, `&assert.testStructType{Val:"test"}`, expected, "value should not include type annotation") + Equal(t, `&assert.testStructType{Val:"test"}`, actual, "value should not include type annotation") +} + +func TestNotNil(t *testing.T) { + + mockT := new(testing.T) + + if !NotNil(mockT, new(AssertionTesterConformingObject)) { + t.Error("NotNil should return true: object is not nil") + } + if NotNil(mockT, nil) { + t.Error("NotNil should return false: object is nil") + } + if NotNil(mockT, (*struct{})(nil)) { + t.Error("NotNil should return false: object is (*struct{})(nil)") + } + +} + +func TestNil(t *testing.T) { + + mockT := new(testing.T) + + if !Nil(mockT, nil) { + t.Error("Nil should return true: object is nil") + } + if !Nil(mockT, (*struct{})(nil)) { + t.Error("Nil should return true: object is (*struct{})(nil)") + } + if Nil(mockT, new(AssertionTesterConformingObject)) { + t.Error("Nil should return false: object is not nil") + } + +} + +func TestTrue(t *testing.T) { + + mockT := new(testing.T) + + if !True(mockT, true) { + t.Error("True should return true") + } + if True(mockT, false) { + t.Error("True should return false") + } + +} + +func TestFalse(t *testing.T) { + + mockT := new(testing.T) + + if !False(mockT, false) { + t.Error("False should return true") + } + if False(mockT, true) { + t.Error("False should return false") + } + +} + +func TestExactly(t *testing.T) { + + mockT := new(testing.T) + + a := float32(1) + b := float64(1) + c := float32(1) + d := 
float32(2) + + if Exactly(mockT, a, b) { + t.Error("Exactly should return false") + } + if Exactly(mockT, a, d) { + t.Error("Exactly should return false") + } + if !Exactly(mockT, a, c) { + t.Error("Exactly should return true") + } + + if Exactly(mockT, nil, a) { + t.Error("Exactly should return false") + } + if Exactly(mockT, a, nil) { + t.Error("Exactly should return false") + } + +} + +func TestNotEqual(t *testing.T) { + + mockT := new(testing.T) + + if !NotEqual(mockT, "Hello World", "Hello World!") { + t.Error("NotEqual should return true") + } + if !NotEqual(mockT, 123, 1234) { + t.Error("NotEqual should return true") + } + if !NotEqual(mockT, 123.5, 123.55) { + t.Error("NotEqual should return true") + } + if !NotEqual(mockT, []byte("Hello World"), []byte("Hello World!")) { + t.Error("NotEqual should return true") + } + if !NotEqual(mockT, nil, new(AssertionTesterConformingObject)) { + t.Error("NotEqual should return true") + } + funcA := func() int { return 23 } + funcB := func() int { return 42 } + if !NotEqual(mockT, funcA, funcB) { + t.Error("NotEqual should return true") + } + + if NotEqual(mockT, "Hello World", "Hello World") { + t.Error("NotEqual should return false") + } + if NotEqual(mockT, 123, 123) { + t.Error("NotEqual should return false") + } + if NotEqual(mockT, 123.5, 123.5) { + t.Error("NotEqual should return false") + } + if NotEqual(mockT, []byte("Hello World"), []byte("Hello World")) { + t.Error("NotEqual should return false") + } + if NotEqual(mockT, new(AssertionTesterConformingObject), new(AssertionTesterConformingObject)) { + t.Error("NotEqual should return false") + } +} + +type A struct { + Name, Value string +} + +func TestContains(t *testing.T) { + + mockT := new(testing.T) + list := []string{"Foo", "Bar"} + complexList := []*A{ + {"b", "c"}, + {"d", "e"}, + {"g", "h"}, + {"j", "k"}, + } + simpleMap := map[interface{}]interface{}{"Foo": "Bar"} + + if !Contains(mockT, "Hello World", "Hello") { + t.Error("Contains should return true: 
\"Hello World\" contains \"Hello\"") + } + if Contains(mockT, "Hello World", "Salut") { + t.Error("Contains should return false: \"Hello World\" does not contain \"Salut\"") + } + + if !Contains(mockT, list, "Bar") { + t.Error("Contains should return true: \"[\"Foo\", \"Bar\"]\" contains \"Bar\"") + } + if Contains(mockT, list, "Salut") { + t.Error("Contains should return false: \"[\"Foo\", \"Bar\"]\" does not contain \"Salut\"") + } + if !Contains(mockT, complexList, &A{"g", "h"}) { + t.Error("Contains should return true: complexList contains {\"g\", \"h\"}") + } + if Contains(mockT, complexList, &A{"g", "e"}) { + t.Error("Contains should return false: complexList contains {\"g\", \"e\"}") + } + if Contains(mockT, complexList, &A{"g", "e"}) { + t.Error("Contains should return false: complexList contains {\"g\", \"e\"}") + } + if !Contains(mockT, simpleMap, "Foo") { + t.Error("Contains should return true: \"{\"Foo\": \"Bar\"}\" contains \"Foo\"") + } + if Contains(mockT, simpleMap, "Bar") { + t.Error("Contains should return false: \"{\"Foo\": \"Bar\"}\" does not contains \"Bar\"") + } +} + +func TestNotContains(t *testing.T) { + + mockT := new(testing.T) + list := []string{"Foo", "Bar"} + simpleMap := map[interface{}]interface{}{"Foo": "Bar"} + + if !NotContains(mockT, "Hello World", "Hello!") { + t.Error("NotContains should return true: \"Hello World\" does not contain \"Hello!\"") + } + if NotContains(mockT, "Hello World", "Hello") { + t.Error("NotContains should return false: \"Hello World\" contains \"Hello\"") + } + + if !NotContains(mockT, list, "Foo!") { + t.Error("NotContains should return true: \"[\"Foo\", \"Bar\"]\" does not contain \"Foo!\"") + } + if NotContains(mockT, list, "Foo") { + t.Error("NotContains should return false: \"[\"Foo\", \"Bar\"]\" contains \"Foo\"") + } + if NotContains(mockT, simpleMap, "Foo") { + t.Error("Contains should return true: \"{\"Foo\": \"Bar\"}\" contains \"Foo\"") + } + if !NotContains(mockT, simpleMap, "Bar") { + 
t.Error("Contains should return false: \"{\"Foo\": \"Bar\"}\" does not contains \"Bar\"") + } +} + +func Test_includeElement(t *testing.T) { + + list1 := []string{"Foo", "Bar"} + list2 := []int{1, 2} + simpleMap := map[interface{}]interface{}{"Foo": "Bar"} + + ok, found := includeElement("Hello World", "World") + True(t, ok) + True(t, found) + + ok, found = includeElement(list1, "Foo") + True(t, ok) + True(t, found) + + ok, found = includeElement(list1, "Bar") + True(t, ok) + True(t, found) + + ok, found = includeElement(list2, 1) + True(t, ok) + True(t, found) + + ok, found = includeElement(list2, 2) + True(t, ok) + True(t, found) + + ok, found = includeElement(list1, "Foo!") + True(t, ok) + False(t, found) + + ok, found = includeElement(list2, 3) + True(t, ok) + False(t, found) + + ok, found = includeElement(list2, "1") + True(t, ok) + False(t, found) + + ok, found = includeElement(simpleMap, "Foo") + True(t, ok) + True(t, found) + + ok, found = includeElement(simpleMap, "Bar") + True(t, ok) + False(t, found) + + ok, found = includeElement(1433, "1") + False(t, ok) + False(t, found) +} + +func TestCondition(t *testing.T) { + mockT := new(testing.T) + + if !Condition(mockT, func() bool { return true }, "Truth") { + t.Error("Condition should return true") + } + + if Condition(mockT, func() bool { return false }, "Lie") { + t.Error("Condition should return false") + } + +} + +func TestDidPanic(t *testing.T) { + + if funcDidPanic, _ := didPanic(func() { + panic("Panic!") + }); !funcDidPanic { + t.Error("didPanic should return true") + } + + if funcDidPanic, _ := didPanic(func() { + }); funcDidPanic { + t.Error("didPanic should return false") + } + +} + +func TestPanics(t *testing.T) { + + mockT := new(testing.T) + + if !Panics(mockT, func() { + panic("Panic!") + }) { + t.Error("Panics should return true") + } + + if Panics(mockT, func() { + }) { + t.Error("Panics should return false") + } + +} + +func TestNotPanics(t *testing.T) { + + mockT := new(testing.T) + + if 
!NotPanics(mockT, func() { + }) { + t.Error("NotPanics should return true") + } + + if NotPanics(mockT, func() { + panic("Panic!") + }) { + t.Error("NotPanics should return false") + } + +} + +func TestNoError(t *testing.T) { + + mockT := new(testing.T) + + // start with a nil error + var err error + + True(t, NoError(mockT, err), "NoError should return True for nil arg") + + // now set an error + err = errors.New("some error") + + False(t, NoError(mockT, err), "NoError with error should return False") + + // returning an empty error interface + err = func() error { + var err *customError + if err != nil { + t.Fatal("err should be nil here") + } + return err + }() + + if err == nil { // err is not nil here! + t.Errorf("Error should be nil due to empty interface %v", err) + } + + False(t, NoError(mockT, err), "NoError should fail with empty error interface") +} + +type customError struct{} + +func (*customError) Error() string { return "fail" } + +func TestError(t *testing.T) { + + mockT := new(testing.T) + + // start with a nil error + var err error + + False(t, Error(mockT, err), "Error should return False for nil arg") + + // now set an error + err = errors.New("some error") + + True(t, Error(mockT, err), "Error with error should return True") + + // returning an empty error interface + err = func() error { + var err *customError + if err != nil { + t.Fatal("err should be nil here") + } + return err + }() + + if err == nil { // err is not nil here! 
+ t.Errorf("Error should be nil due to empty interface %v", err) + } + + True(t, Error(mockT, err), "Error should pass with empty error interface") +} + +func TestEqualError(t *testing.T) { + mockT := new(testing.T) + + // start with a nil error + var err error + False(t, EqualError(mockT, err, ""), + "EqualError should return false for nil arg") + + // now set an error + err = errors.New("some error") + False(t, EqualError(mockT, err, "Not some error"), + "EqualError should return false for different error string") + True(t, EqualError(mockT, err, "some error"), + "EqualError should return true") +} + +func Test_isEmpty(t *testing.T) { + + chWithValue := make(chan struct{}, 1) + chWithValue <- struct{}{} + + True(t, isEmpty("")) + True(t, isEmpty(nil)) + True(t, isEmpty([]string{})) + True(t, isEmpty(0)) + True(t, isEmpty(int32(0))) + True(t, isEmpty(int64(0))) + True(t, isEmpty(false)) + True(t, isEmpty(map[string]string{})) + True(t, isEmpty(new(time.Time))) + True(t, isEmpty(time.Time{})) + True(t, isEmpty(make(chan struct{}))) + False(t, isEmpty("something")) + False(t, isEmpty(errors.New("something"))) + False(t, isEmpty([]string{"something"})) + False(t, isEmpty(1)) + False(t, isEmpty(true)) + False(t, isEmpty(map[string]string{"Hello": "World"})) + False(t, isEmpty(chWithValue)) + +} + +func TestEmpty(t *testing.T) { + + mockT := new(testing.T) + chWithValue := make(chan struct{}, 1) + chWithValue <- struct{}{} + var tiP *time.Time + var tiNP time.Time + var s *string + var f *os.File + + True(t, Empty(mockT, ""), "Empty string is empty") + True(t, Empty(mockT, nil), "Nil is empty") + True(t, Empty(mockT, []string{}), "Empty string array is empty") + True(t, Empty(mockT, 0), "Zero int value is empty") + True(t, Empty(mockT, false), "False value is empty") + True(t, Empty(mockT, make(chan struct{})), "Channel without values is empty") + True(t, Empty(mockT, s), "Nil string pointer is empty") + True(t, Empty(mockT, f), "Nil os.File pointer is empty") + 
True(t, Empty(mockT, tiP), "Nil time.Time pointer is empty") + True(t, Empty(mockT, tiNP), "time.Time is empty") + + False(t, Empty(mockT, "something"), "Non Empty string is not empty") + False(t, Empty(mockT, errors.New("something")), "Non nil object is not empty") + False(t, Empty(mockT, []string{"something"}), "Non empty string array is not empty") + False(t, Empty(mockT, 1), "Non-zero int value is not empty") + False(t, Empty(mockT, true), "True value is not empty") + False(t, Empty(mockT, chWithValue), "Channel with values is not empty") +} + +func TestNotEmpty(t *testing.T) { + + mockT := new(testing.T) + chWithValue := make(chan struct{}, 1) + chWithValue <- struct{}{} + + False(t, NotEmpty(mockT, ""), "Empty string is empty") + False(t, NotEmpty(mockT, nil), "Nil is empty") + False(t, NotEmpty(mockT, []string{}), "Empty string array is empty") + False(t, NotEmpty(mockT, 0), "Zero int value is empty") + False(t, NotEmpty(mockT, false), "False value is empty") + False(t, NotEmpty(mockT, make(chan struct{})), "Channel without values is empty") + + True(t, NotEmpty(mockT, "something"), "Non Empty string is not empty") + True(t, NotEmpty(mockT, errors.New("something")), "Non nil object is not empty") + True(t, NotEmpty(mockT, []string{"something"}), "Non empty string array is not empty") + True(t, NotEmpty(mockT, 1), "Non-zero int value is not empty") + True(t, NotEmpty(mockT, true), "True value is not empty") + True(t, NotEmpty(mockT, chWithValue), "Channel with values is not empty") +} + +func Test_getLen(t *testing.T) { + falseCases := []interface{}{ + nil, + 0, + true, + false, + 'A', + struct{}{}, + } + for _, v := range falseCases { + ok, l := getLen(v) + False(t, ok, "Expected getLen fail to get length of %#v", v) + Equal(t, 0, l, "getLen should return 0 for %#v", v) + } + + ch := make(chan int, 5) + ch <- 1 + ch <- 2 + ch <- 3 + trueCases := []struct { + v interface{} + l int + }{ + {[]int{1, 2, 3}, 3}, + {[...]int{1, 2, 3}, 3}, + {"ABC", 3}, + 
{map[int]int{1: 2, 2: 4, 3: 6}, 3}, + {ch, 3}, + + {[]int{}, 0}, + {map[int]int{}, 0}, + {make(chan int), 0}, + + {[]int(nil), 0}, + {map[int]int(nil), 0}, + {(chan int)(nil), 0}, + } + + for _, c := range trueCases { + ok, l := getLen(c.v) + True(t, ok, "Expected getLen success to get length of %#v", c.v) + Equal(t, c.l, l) + } +} + +func TestLen(t *testing.T) { + mockT := new(testing.T) + + False(t, Len(mockT, nil, 0), "nil does not have length") + False(t, Len(mockT, 0, 0), "int does not have length") + False(t, Len(mockT, true, 0), "true does not have length") + False(t, Len(mockT, false, 0), "false does not have length") + False(t, Len(mockT, 'A', 0), "Rune does not have length") + False(t, Len(mockT, struct{}{}, 0), "Struct does not have length") + + ch := make(chan int, 5) + ch <- 1 + ch <- 2 + ch <- 3 + + cases := []struct { + v interface{} + l int + }{ + {[]int{1, 2, 3}, 3}, + {[...]int{1, 2, 3}, 3}, + {"ABC", 3}, + {map[int]int{1: 2, 2: 4, 3: 6}, 3}, + {ch, 3}, + + {[]int{}, 0}, + {map[int]int{}, 0}, + {make(chan int), 0}, + + {[]int(nil), 0}, + {map[int]int(nil), 0}, + {(chan int)(nil), 0}, + } + + for _, c := range cases { + True(t, Len(mockT, c.v, c.l), "%#v have %d items", c.v, c.l) + } + + cases = []struct { + v interface{} + l int + }{ + {[]int{1, 2, 3}, 4}, + {[...]int{1, 2, 3}, 2}, + {"ABC", 2}, + {map[int]int{1: 2, 2: 4, 3: 6}, 4}, + {ch, 2}, + + {[]int{}, 1}, + {map[int]int{}, 1}, + {make(chan int), 1}, + + {[]int(nil), 1}, + {map[int]int(nil), 1}, + {(chan int)(nil), 1}, + } + + for _, c := range cases { + False(t, Len(mockT, c.v, c.l), "%#v have %d items", c.v, c.l) + } +} + +func TestWithinDuration(t *testing.T) { + + mockT := new(testing.T) + a := time.Now() + b := a.Add(10 * time.Second) + + True(t, WithinDuration(mockT, a, b, 10*time.Second), "A 10s difference is within a 10s time difference") + True(t, WithinDuration(mockT, b, a, 10*time.Second), "A 10s difference is within a 10s time difference") + + False(t, WithinDuration(mockT, a, b, 
9*time.Second), "A 10s difference is not within a 9s time difference") + False(t, WithinDuration(mockT, b, a, 9*time.Second), "A 10s difference is not within a 9s time difference") + + False(t, WithinDuration(mockT, a, b, -9*time.Second), "A 10s difference is not within a 9s time difference") + False(t, WithinDuration(mockT, b, a, -9*time.Second), "A 10s difference is not within a 9s time difference") + + False(t, WithinDuration(mockT, a, b, -11*time.Second), "A 10s difference is not within a 9s time difference") + False(t, WithinDuration(mockT, b, a, -11*time.Second), "A 10s difference is not within a 9s time difference") +} + +func TestInDelta(t *testing.T) { + mockT := new(testing.T) + + True(t, InDelta(mockT, 1.001, 1, 0.01), "|1.001 - 1| <= 0.01") + True(t, InDelta(mockT, 1, 1.001, 0.01), "|1 - 1.001| <= 0.01") + True(t, InDelta(mockT, 1, 2, 1), "|1 - 2| <= 1") + False(t, InDelta(mockT, 1, 2, 0.5), "Expected |1 - 2| <= 0.5 to fail") + False(t, InDelta(mockT, 2, 1, 0.5), "Expected |2 - 1| <= 0.5 to fail") + False(t, InDelta(mockT, "", nil, 1), "Expected non numerals to fail") + False(t, InDelta(mockT, 42, math.NaN(), 0.01), "Expected NaN for actual to fail") + False(t, InDelta(mockT, math.NaN(), 42, 0.01), "Expected NaN for expected to fail") + + cases := []struct { + a, b interface{} + delta float64 + }{ + {uint8(2), uint8(1), 1}, + {uint16(2), uint16(1), 1}, + {uint32(2), uint32(1), 1}, + {uint64(2), uint64(1), 1}, + + {int(2), int(1), 1}, + {int8(2), int8(1), 1}, + {int16(2), int16(1), 1}, + {int32(2), int32(1), 1}, + {int64(2), int64(1), 1}, + + {float32(2), float32(1), 1}, + {float64(2), float64(1), 1}, + } + + for _, tc := range cases { + True(t, InDelta(mockT, tc.a, tc.b, tc.delta), "Expected |%V - %V| <= %v", tc.a, tc.b, tc.delta) + } +} + +func TestInDeltaSlice(t *testing.T) { + mockT := new(testing.T) + + True(t, InDeltaSlice(mockT, + []float64{1.001, 0.999}, + []float64{1, 1}, + 0.1), "{1.001, 0.009} is element-wise close to {1, 1} in delta=0.1") + + 
True(t, InDeltaSlice(mockT, + []float64{1, 2}, + []float64{0, 3}, + 1), "{1, 2} is element-wise close to {0, 3} in delta=1") + + False(t, InDeltaSlice(mockT, + []float64{1, 2}, + []float64{0, 3}, + 0.1), "{1, 2} is not element-wise close to {0, 3} in delta=0.1") + + False(t, InDeltaSlice(mockT, "", nil, 1), "Expected non numeral slices to fail") +} + +func TestInEpsilon(t *testing.T) { + mockT := new(testing.T) + + cases := []struct { + a, b interface{} + epsilon float64 + }{ + {uint8(2), uint16(2), .001}, + {2.1, 2.2, 0.1}, + {2.2, 2.1, 0.1}, + {-2.1, -2.2, 0.1}, + {-2.2, -2.1, 0.1}, + {uint64(100), uint8(101), 0.01}, + {0.1, -0.1, 2}, + {0.1, 0, 2}, + } + + for _, tc := range cases { + True(t, InEpsilon(t, tc.a, tc.b, tc.epsilon, "Expected %V and %V to have a relative difference of %v", tc.a, tc.b, tc.epsilon), "test: %q", tc) + } + + cases = []struct { + a, b interface{} + epsilon float64 + }{ + {uint8(2), int16(-2), .001}, + {uint64(100), uint8(102), 0.01}, + {2.1, 2.2, 0.001}, + {2.2, 2.1, 0.001}, + {2.1, -2.2, 1}, + {2.1, "bla-bla", 0}, + {0.1, -0.1, 1.99}, + {0, 0.1, 2}, // expected must be different to zero + } + + for _, tc := range cases { + False(t, InEpsilon(mockT, tc.a, tc.b, tc.epsilon, "Expected %V and %V to have a relative difference of %v", tc.a, tc.b, tc.epsilon)) + } + +} + +func TestInEpsilonSlice(t *testing.T) { + mockT := new(testing.T) + + True(t, InEpsilonSlice(mockT, + []float64{2.2, 2.0}, + []float64{2.1, 2.1}, + 0.06), "{2.2, 2.0} is element-wise close to {2.1, 2.1} in espilon=0.06") + + False(t, InEpsilonSlice(mockT, + []float64{2.2, 2.0}, + []float64{2.1, 2.1}, + 0.04), "{2.2, 2.0} is not element-wise close to {2.1, 2.1} in espilon=0.04") + + False(t, InEpsilonSlice(mockT, "", nil, 1), "Expected non numeral slices to fail") +} + +func TestRegexp(t *testing.T) { + mockT := new(testing.T) + + cases := []struct { + rx, str string + }{ + {"^start", "start of the line"}, + {"end$", "in the end"}, + {"[0-9]{3}[.-]?[0-9]{2}[.-]?[0-9]{2}", "My 
phone number is 650.12.34"}, + } + + for _, tc := range cases { + True(t, Regexp(mockT, tc.rx, tc.str)) + True(t, Regexp(mockT, regexp.MustCompile(tc.rx), tc.str)) + False(t, NotRegexp(mockT, tc.rx, tc.str)) + False(t, NotRegexp(mockT, regexp.MustCompile(tc.rx), tc.str)) + } + + cases = []struct { + rx, str string + }{ + {"^asdfastart", "Not the start of the line"}, + {"end$", "in the end."}, + {"[0-9]{3}[.-]?[0-9]{2}[.-]?[0-9]{2}", "My phone number is 650.12a.34"}, + } + + for _, tc := range cases { + False(t, Regexp(mockT, tc.rx, tc.str), "Expected \"%s\" to not match \"%s\"", tc.rx, tc.str) + False(t, Regexp(mockT, regexp.MustCompile(tc.rx), tc.str)) + True(t, NotRegexp(mockT, tc.rx, tc.str)) + True(t, NotRegexp(mockT, regexp.MustCompile(tc.rx), tc.str)) + } +} + +func testAutogeneratedFunction() { + defer func() { + if err := recover(); err == nil { + panic("did not panic") + } + CallerInfo() + }() + t := struct { + io.Closer + }{} + var c io.Closer + c = t + c.Close() +} + +func TestCallerInfoWithAutogeneratedFunctions(t *testing.T) { + NotPanics(t, func() { + testAutogeneratedFunction() + }) +} + +func TestZero(t *testing.T) { + mockT := new(testing.T) + + for _, test := range zeros { + True(t, Zero(mockT, test, "%#v is not the %v zero value", test, reflect.TypeOf(test))) + } + + for _, test := range nonZeros { + False(t, Zero(mockT, test, "%#v is not the %v zero value", test, reflect.TypeOf(test))) + } +} + +func TestNotZero(t *testing.T) { + mockT := new(testing.T) + + for _, test := range zeros { + False(t, NotZero(mockT, test, "%#v is not the %v zero value", test, reflect.TypeOf(test))) + } + + for _, test := range nonZeros { + True(t, NotZero(mockT, test, "%#v is not the %v zero value", test, reflect.TypeOf(test))) + } +} + +func TestJSONEq_EqualSONString(t *testing.T) { + mockT := new(testing.T) + True(t, JSONEq(mockT, `{"hello": "world", "foo": "bar"}`, `{"hello": "world", "foo": "bar"}`)) +} + +func TestJSONEq_EquivalentButNotEqual(t *testing.T) { + 
mockT := new(testing.T) + True(t, JSONEq(mockT, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)) +} + +func TestJSONEq_HashOfArraysAndHashes(t *testing.T) { + mockT := new(testing.T) + True(t, JSONEq(mockT, "{\r\n\t\"numeric\": 1.5,\r\n\t\"array\": [{\"foo\": \"bar\"}, 1, \"string\", [\"nested\", \"array\", 5.5]],\r\n\t\"hash\": {\"nested\": \"hash\", \"nested_slice\": [\"this\", \"is\", \"nested\"]},\r\n\t\"string\": \"foo\"\r\n}", + "{\r\n\t\"numeric\": 1.5,\r\n\t\"hash\": {\"nested\": \"hash\", \"nested_slice\": [\"this\", \"is\", \"nested\"]},\r\n\t\"string\": \"foo\",\r\n\t\"array\": [{\"foo\": \"bar\"}, 1, \"string\", [\"nested\", \"array\", 5.5]]\r\n}")) +} + +func TestJSONEq_Array(t *testing.T) { + mockT := new(testing.T) + True(t, JSONEq(mockT, `["foo", {"hello": "world", "nested": "hash"}]`, `["foo", {"nested": "hash", "hello": "world"}]`)) +} + +func TestJSONEq_HashAndArrayNotEquivalent(t *testing.T) { + mockT := new(testing.T) + False(t, JSONEq(mockT, `["foo", {"hello": "world", "nested": "hash"}]`, `{"foo": "bar", {"nested": "hash", "hello": "world"}}`)) +} + +func TestJSONEq_HashesNotEquivalent(t *testing.T) { + mockT := new(testing.T) + False(t, JSONEq(mockT, `{"foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)) +} + +func TestJSONEq_ActualIsNotJSON(t *testing.T) { + mockT := new(testing.T) + False(t, JSONEq(mockT, `{"foo": "bar"}`, "Not JSON")) +} + +func TestJSONEq_ExpectedIsNotJSON(t *testing.T) { + mockT := new(testing.T) + False(t, JSONEq(mockT, "Not JSON", `{"foo": "bar", "hello": "world"}`)) +} + +func TestJSONEq_ExpectedAndActualNotJSON(t *testing.T) { + mockT := new(testing.T) + False(t, JSONEq(mockT, "Not JSON", "Not JSON")) +} + +func TestJSONEq_ArraysOfDifferentOrder(t *testing.T) { + mockT := new(testing.T) + False(t, JSONEq(mockT, `["foo", {"hello": "world", "nested": "hash"}]`, `[{ "hello": "world", "nested": "hash"}, "foo"]`)) +} + +func TestDiff(t *testing.T) { + expected := ` + +Diff: +--- Expected ++++ 
Actual +@@ -1,3 +1,3 @@ + (struct { foo string }) { +- foo: (string) (len=5) "hello" ++ foo: (string) (len=3) "bar" + } +` + actual := diff( + struct{ foo string }{"hello"}, + struct{ foo string }{"bar"}, + ) + Equal(t, expected, actual) + + expected = ` + +Diff: +--- Expected ++++ Actual +@@ -2,5 +2,5 @@ + (int) 1, +- (int) 2, + (int) 3, +- (int) 4 ++ (int) 5, ++ (int) 7 + } +` + actual = diff( + []int{1, 2, 3, 4}, + []int{1, 3, 5, 7}, + ) + Equal(t, expected, actual) + + expected = ` + +Diff: +--- Expected ++++ Actual +@@ -2,4 +2,4 @@ + (int) 1, +- (int) 2, +- (int) 3 ++ (int) 3, ++ (int) 5 + } +` + actual = diff( + []int{1, 2, 3, 4}[0:3], + []int{1, 3, 5, 7}[0:3], + ) + Equal(t, expected, actual) + + expected = ` + +Diff: +--- Expected ++++ Actual +@@ -1,6 +1,6 @@ + (map[string]int) (len=4) { +- (string) (len=4) "four": (int) 4, ++ (string) (len=4) "five": (int) 5, + (string) (len=3) "one": (int) 1, +- (string) (len=5) "three": (int) 3, +- (string) (len=3) "two": (int) 2 ++ (string) (len=5) "seven": (int) 7, ++ (string) (len=5) "three": (int) 3 + } +` + + actual = diff( + map[string]int{"one": 1, "two": 2, "three": 3, "four": 4}, + map[string]int{"one": 1, "three": 3, "five": 5, "seven": 7}, + ) + Equal(t, expected, actual) +} + +func TestDiffEmptyCases(t *testing.T) { + Equal(t, "", diff(nil, nil)) + Equal(t, "", diff(struct{ foo string }{}, nil)) + Equal(t, "", diff(nil, struct{ foo string }{})) + Equal(t, "", diff(1, 2)) + Equal(t, "", diff(1, 2)) + Equal(t, "", diff([]int{1}, []bool{true})) +} + +// Ensure there are no data races +func TestDiffRace(t *testing.T) { + t.Parallel() + + expected := map[string]string{ + "a": "A", + "b": "B", + "c": "C", + } + + actual := map[string]string{ + "d": "D", + "e": "E", + "f": "F", + } + + // run diffs in parallel simulating tests with t.Parallel() + numRoutines := 10 + rChans := make([]chan string, numRoutines) + for idx := range rChans { + rChans[idx] = make(chan string) + go func(ch chan string) { + defer close(ch) + 
ch <- diff(expected, actual) + }(rChans[idx]) + } + + for _, ch := range rChans { + for msg := range ch { + NotZero(t, msg) // dummy assert + } + } +} + +type mockTestingT struct { +} + +func (m *mockTestingT) Errorf(format string, args ...interface{}) {} + +func TestFailNowWithPlainTestingT(t *testing.T) { + mockT := &mockTestingT{} + + Panics(t, func() { + FailNow(mockT, "failed") + }, "should panic since mockT is missing FailNow()") +} + +type mockFailNowTestingT struct { +} + +func (m *mockFailNowTestingT) Errorf(format string, args ...interface{}) {} + +func (m *mockFailNowTestingT) FailNow() {} + +func TestFailNowWithFullTestingT(t *testing.T) { + mockT := &mockFailNowTestingT{} + + NotPanics(t, func() { + FailNow(mockT, "failed") + }, "should call mockT.FailNow() rather than panicking") +} diff --git a/src/vendor/github.com/stretchr/testify/assert/doc.go b/src/vendor/github.com/stretchr/testify/assert/doc.go new file mode 100644 index 00000000..c9dccc4d --- /dev/null +++ b/src/vendor/github.com/stretchr/testify/assert/doc.go @@ -0,0 +1,45 @@ +// Package assert provides a set of comprehensive testing tools for use with the normal Go testing system. +// +// Example Usage +// +// The following is a complete example using assert in a standard test function: +// import ( +// "testing" +// "github.com/stretchr/testify/assert" +// ) +// +// func TestSomething(t *testing.T) { +// +// var a string = "Hello" +// var b string = "Hello" +// +// assert.Equal(t, a, b, "The two words should be the same.") +// +// } +// +// if you assert many times, use the format below: +// +// import ( +// "testing" +// "github.com/stretchr/testify/assert" +// ) +// +// func TestSomething(t *testing.T) { +// assert := assert.New(t) +// +// var a string = "Hello" +// var b string = "Hello" +// +// assert.Equal(a, b, "The two words should be the same.") +// } +// +// Assertions +// +// Assertions allow you to easily write test code, and are global funcs in the `assert` package. 
+// All assertion functions take, as the first argument, the `*testing.T` object provided by the +// testing framework. This allows the assertion funcs to write the failings and other details to +// the correct place. +// +// Every assertion function also takes an optional string message as the final argument, +// allowing custom error messages to be appended to the message the assertion method outputs. +package assert diff --git a/src/vendor/github.com/stretchr/testify/assert/errors.go b/src/vendor/github.com/stretchr/testify/assert/errors.go new file mode 100644 index 00000000..ac9dc9d1 --- /dev/null +++ b/src/vendor/github.com/stretchr/testify/assert/errors.go @@ -0,0 +1,10 @@ +package assert + +import ( + "errors" +) + +// AnError is an error instance useful for testing. If the code does not care +// about error specifics, and only needs to return the error for example, this +// error should be used to make the test code more readable. +var AnError = errors.New("assert.AnError general error for testing") diff --git a/src/vendor/github.com/stretchr/testify/assert/forward_assertions.go b/src/vendor/github.com/stretchr/testify/assert/forward_assertions.go new file mode 100644 index 00000000..b867e95e --- /dev/null +++ b/src/vendor/github.com/stretchr/testify/assert/forward_assertions.go @@ -0,0 +1,16 @@ +package assert + +// Assertions provides assertion methods around the +// TestingT interface. +type Assertions struct { + t TestingT +} + +// New makes a new Assertions object for the specified TestingT. 
+func New(t TestingT) *Assertions { + return &Assertions{ + t: t, + } +} + +//go:generate go run ../_codegen/main.go -output-package=assert -template=assertion_forward.go.tmpl diff --git a/src/vendor/github.com/stretchr/testify/assert/forward_assertions_test.go b/src/vendor/github.com/stretchr/testify/assert/forward_assertions_test.go new file mode 100644 index 00000000..22e1df1d --- /dev/null +++ b/src/vendor/github.com/stretchr/testify/assert/forward_assertions_test.go @@ -0,0 +1,611 @@ +package assert + +import ( + "errors" + "regexp" + "testing" + "time" +) + +func TestImplementsWrapper(t *testing.T) { + assert := New(new(testing.T)) + + if !assert.Implements((*AssertionTesterInterface)(nil), new(AssertionTesterConformingObject)) { + t.Error("Implements method should return true: AssertionTesterConformingObject implements AssertionTesterInterface") + } + if assert.Implements((*AssertionTesterInterface)(nil), new(AssertionTesterNonConformingObject)) { + t.Error("Implements method should return false: AssertionTesterNonConformingObject does not implements AssertionTesterInterface") + } +} + +func TestIsTypeWrapper(t *testing.T) { + assert := New(new(testing.T)) + + if !assert.IsType(new(AssertionTesterConformingObject), new(AssertionTesterConformingObject)) { + t.Error("IsType should return true: AssertionTesterConformingObject is the same type as AssertionTesterConformingObject") + } + if assert.IsType(new(AssertionTesterConformingObject), new(AssertionTesterNonConformingObject)) { + t.Error("IsType should return false: AssertionTesterConformingObject is not the same type as AssertionTesterNonConformingObject") + } + +} + +func TestEqualWrapper(t *testing.T) { + assert := New(new(testing.T)) + + if !assert.Equal("Hello World", "Hello World") { + t.Error("Equal should return true") + } + if !assert.Equal(123, 123) { + t.Error("Equal should return true") + } + if !assert.Equal(123.5, 123.5) { + t.Error("Equal should return true") + } + if 
!assert.Equal([]byte("Hello World"), []byte("Hello World")) { + t.Error("Equal should return true") + } + if !assert.Equal(nil, nil) { + t.Error("Equal should return true") + } +} + +func TestEqualValuesWrapper(t *testing.T) { + assert := New(new(testing.T)) + + if !assert.EqualValues(uint32(10), int32(10)) { + t.Error("EqualValues should return true") + } +} + +func TestNotNilWrapper(t *testing.T) { + assert := New(new(testing.T)) + + if !assert.NotNil(new(AssertionTesterConformingObject)) { + t.Error("NotNil should return true: object is not nil") + } + if assert.NotNil(nil) { + t.Error("NotNil should return false: object is nil") + } + +} + +func TestNilWrapper(t *testing.T) { + assert := New(new(testing.T)) + + if !assert.Nil(nil) { + t.Error("Nil should return true: object is nil") + } + if assert.Nil(new(AssertionTesterConformingObject)) { + t.Error("Nil should return false: object is not nil") + } + +} + +func TestTrueWrapper(t *testing.T) { + assert := New(new(testing.T)) + + if !assert.True(true) { + t.Error("True should return true") + } + if assert.True(false) { + t.Error("True should return false") + } + +} + +func TestFalseWrapper(t *testing.T) { + assert := New(new(testing.T)) + + if !assert.False(false) { + t.Error("False should return true") + } + if assert.False(true) { + t.Error("False should return false") + } + +} + +func TestExactlyWrapper(t *testing.T) { + assert := New(new(testing.T)) + + a := float32(1) + b := float64(1) + c := float32(1) + d := float32(2) + + if assert.Exactly(a, b) { + t.Error("Exactly should return false") + } + if assert.Exactly(a, d) { + t.Error("Exactly should return false") + } + if !assert.Exactly(a, c) { + t.Error("Exactly should return true") + } + + if assert.Exactly(nil, a) { + t.Error("Exactly should return false") + } + if assert.Exactly(a, nil) { + t.Error("Exactly should return false") + } + +} + +func TestNotEqualWrapper(t *testing.T) { + + assert := New(new(testing.T)) + + if !assert.NotEqual("Hello World", 
"Hello World!") { + t.Error("NotEqual should return true") + } + if !assert.NotEqual(123, 1234) { + t.Error("NotEqual should return true") + } + if !assert.NotEqual(123.5, 123.55) { + t.Error("NotEqual should return true") + } + if !assert.NotEqual([]byte("Hello World"), []byte("Hello World!")) { + t.Error("NotEqual should return true") + } + if !assert.NotEqual(nil, new(AssertionTesterConformingObject)) { + t.Error("NotEqual should return true") + } +} + +func TestContainsWrapper(t *testing.T) { + + assert := New(new(testing.T)) + list := []string{"Foo", "Bar"} + + if !assert.Contains("Hello World", "Hello") { + t.Error("Contains should return true: \"Hello World\" contains \"Hello\"") + } + if assert.Contains("Hello World", "Salut") { + t.Error("Contains should return false: \"Hello World\" does not contain \"Salut\"") + } + + if !assert.Contains(list, "Foo") { + t.Error("Contains should return true: \"[\"Foo\", \"Bar\"]\" contains \"Foo\"") + } + if assert.Contains(list, "Salut") { + t.Error("Contains should return false: \"[\"Foo\", \"Bar\"]\" does not contain \"Salut\"") + } + +} + +func TestNotContainsWrapper(t *testing.T) { + + assert := New(new(testing.T)) + list := []string{"Foo", "Bar"} + + if !assert.NotContains("Hello World", "Hello!") { + t.Error("NotContains should return true: \"Hello World\" does not contain \"Hello!\"") + } + if assert.NotContains("Hello World", "Hello") { + t.Error("NotContains should return false: \"Hello World\" contains \"Hello\"") + } + + if !assert.NotContains(list, "Foo!") { + t.Error("NotContains should return true: \"[\"Foo\", \"Bar\"]\" does not contain \"Foo!\"") + } + if assert.NotContains(list, "Foo") { + t.Error("NotContains should return false: \"[\"Foo\", \"Bar\"]\" contains \"Foo\"") + } + +} + +func TestConditionWrapper(t *testing.T) { + + assert := New(new(testing.T)) + + if !assert.Condition(func() bool { return true }, "Truth") { + t.Error("Condition should return true") + } + + if assert.Condition(func() bool 
{ return false }, "Lie") { + t.Error("Condition should return false") + } + +} + +func TestDidPanicWrapper(t *testing.T) { + + if funcDidPanic, _ := didPanic(func() { + panic("Panic!") + }); !funcDidPanic { + t.Error("didPanic should return true") + } + + if funcDidPanic, _ := didPanic(func() { + }); funcDidPanic { + t.Error("didPanic should return false") + } + +} + +func TestPanicsWrapper(t *testing.T) { + + assert := New(new(testing.T)) + + if !assert.Panics(func() { + panic("Panic!") + }) { + t.Error("Panics should return true") + } + + if assert.Panics(func() { + }) { + t.Error("Panics should return false") + } + +} + +func TestNotPanicsWrapper(t *testing.T) { + + assert := New(new(testing.T)) + + if !assert.NotPanics(func() { + }) { + t.Error("NotPanics should return true") + } + + if assert.NotPanics(func() { + panic("Panic!") + }) { + t.Error("NotPanics should return false") + } + +} + +func TestNoErrorWrapper(t *testing.T) { + assert := New(t) + mockAssert := New(new(testing.T)) + + // start with a nil error + var err error + + assert.True(mockAssert.NoError(err), "NoError should return True for nil arg") + + // now set an error + err = errors.New("Some error") + + assert.False(mockAssert.NoError(err), "NoError with error should return False") + +} + +func TestErrorWrapper(t *testing.T) { + assert := New(t) + mockAssert := New(new(testing.T)) + + // start with a nil error + var err error + + assert.False(mockAssert.Error(err), "Error should return False for nil arg") + + // now set an error + err = errors.New("Some error") + + assert.True(mockAssert.Error(err), "Error with error should return True") + +} + +func TestEqualErrorWrapper(t *testing.T) { + assert := New(t) + mockAssert := New(new(testing.T)) + + // start with a nil error + var err error + assert.False(mockAssert.EqualError(err, ""), + "EqualError should return false for nil arg") + + // now set an error + err = errors.New("some error") + assert.False(mockAssert.EqualError(err, "Not some 
error"), + "EqualError should return false for different error string") + assert.True(mockAssert.EqualError(err, "some error"), + "EqualError should return true") +} + +func TestEmptyWrapper(t *testing.T) { + assert := New(t) + mockAssert := New(new(testing.T)) + + assert.True(mockAssert.Empty(""), "Empty string is empty") + assert.True(mockAssert.Empty(nil), "Nil is empty") + assert.True(mockAssert.Empty([]string{}), "Empty string array is empty") + assert.True(mockAssert.Empty(0), "Zero int value is empty") + assert.True(mockAssert.Empty(false), "False value is empty") + + assert.False(mockAssert.Empty("something"), "Non Empty string is not empty") + assert.False(mockAssert.Empty(errors.New("something")), "Non nil object is not empty") + assert.False(mockAssert.Empty([]string{"something"}), "Non empty string array is not empty") + assert.False(mockAssert.Empty(1), "Non-zero int value is not empty") + assert.False(mockAssert.Empty(true), "True value is not empty") + +} + +func TestNotEmptyWrapper(t *testing.T) { + assert := New(t) + mockAssert := New(new(testing.T)) + + assert.False(mockAssert.NotEmpty(""), "Empty string is empty") + assert.False(mockAssert.NotEmpty(nil), "Nil is empty") + assert.False(mockAssert.NotEmpty([]string{}), "Empty string array is empty") + assert.False(mockAssert.NotEmpty(0), "Zero int value is empty") + assert.False(mockAssert.NotEmpty(false), "False value is empty") + + assert.True(mockAssert.NotEmpty("something"), "Non Empty string is not empty") + assert.True(mockAssert.NotEmpty(errors.New("something")), "Non nil object is not empty") + assert.True(mockAssert.NotEmpty([]string{"something"}), "Non empty string array is not empty") + assert.True(mockAssert.NotEmpty(1), "Non-zero int value is not empty") + assert.True(mockAssert.NotEmpty(true), "True value is not empty") + +} + +func TestLenWrapper(t *testing.T) { + assert := New(t) + mockAssert := New(new(testing.T)) + + assert.False(mockAssert.Len(nil, 0), "nil does not have length") 
+ assert.False(mockAssert.Len(0, 0), "int does not have length") + assert.False(mockAssert.Len(true, 0), "true does not have length") + assert.False(mockAssert.Len(false, 0), "false does not have length") + assert.False(mockAssert.Len('A', 0), "Rune does not have length") + assert.False(mockAssert.Len(struct{}{}, 0), "Struct does not have length") + + ch := make(chan int, 5) + ch <- 1 + ch <- 2 + ch <- 3 + + cases := []struct { + v interface{} + l int + }{ + {[]int{1, 2, 3}, 3}, + {[...]int{1, 2, 3}, 3}, + {"ABC", 3}, + {map[int]int{1: 2, 2: 4, 3: 6}, 3}, + {ch, 3}, + + {[]int{}, 0}, + {map[int]int{}, 0}, + {make(chan int), 0}, + + {[]int(nil), 0}, + {map[int]int(nil), 0}, + {(chan int)(nil), 0}, + } + + for _, c := range cases { + assert.True(mockAssert.Len(c.v, c.l), "%#v have %d items", c.v, c.l) + } +} + +func TestWithinDurationWrapper(t *testing.T) { + assert := New(t) + mockAssert := New(new(testing.T)) + a := time.Now() + b := a.Add(10 * time.Second) + + assert.True(mockAssert.WithinDuration(a, b, 10*time.Second), "A 10s difference is within a 10s time difference") + assert.True(mockAssert.WithinDuration(b, a, 10*time.Second), "A 10s difference is within a 10s time difference") + + assert.False(mockAssert.WithinDuration(a, b, 9*time.Second), "A 10s difference is not within a 9s time difference") + assert.False(mockAssert.WithinDuration(b, a, 9*time.Second), "A 10s difference is not within a 9s time difference") + + assert.False(mockAssert.WithinDuration(a, b, -9*time.Second), "A 10s difference is not within a 9s time difference") + assert.False(mockAssert.WithinDuration(b, a, -9*time.Second), "A 10s difference is not within a 9s time difference") + + assert.False(mockAssert.WithinDuration(a, b, -11*time.Second), "A 10s difference is not within a 9s time difference") + assert.False(mockAssert.WithinDuration(b, a, -11*time.Second), "A 10s difference is not within a 9s time difference") +} + +func TestInDeltaWrapper(t *testing.T) { + assert := 
New(new(testing.T)) + + True(t, assert.InDelta(1.001, 1, 0.01), "|1.001 - 1| <= 0.01") + True(t, assert.InDelta(1, 1.001, 0.01), "|1 - 1.001| <= 0.01") + True(t, assert.InDelta(1, 2, 1), "|1 - 2| <= 1") + False(t, assert.InDelta(1, 2, 0.5), "Expected |1 - 2| <= 0.5 to fail") + False(t, assert.InDelta(2, 1, 0.5), "Expected |2 - 1| <= 0.5 to fail") + False(t, assert.InDelta("", nil, 1), "Expected non numerals to fail") + + cases := []struct { + a, b interface{} + delta float64 + }{ + {uint8(2), uint8(1), 1}, + {uint16(2), uint16(1), 1}, + {uint32(2), uint32(1), 1}, + {uint64(2), uint64(1), 1}, + + {int(2), int(1), 1}, + {int8(2), int8(1), 1}, + {int16(2), int16(1), 1}, + {int32(2), int32(1), 1}, + {int64(2), int64(1), 1}, + + {float32(2), float32(1), 1}, + {float64(2), float64(1), 1}, + } + + for _, tc := range cases { + True(t, assert.InDelta(tc.a, tc.b, tc.delta), "Expected |%V - %V| <= %v", tc.a, tc.b, tc.delta) + } +} + +func TestInEpsilonWrapper(t *testing.T) { + assert := New(new(testing.T)) + + cases := []struct { + a, b interface{} + epsilon float64 + }{ + {uint8(2), uint16(2), .001}, + {2.1, 2.2, 0.1}, + {2.2, 2.1, 0.1}, + {-2.1, -2.2, 0.1}, + {-2.2, -2.1, 0.1}, + {uint64(100), uint8(101), 0.01}, + {0.1, -0.1, 2}, + } + + for _, tc := range cases { + True(t, assert.InEpsilon(tc.a, tc.b, tc.epsilon, "Expected %V and %V to have a relative difference of %v", tc.a, tc.b, tc.epsilon)) + } + + cases = []struct { + a, b interface{} + epsilon float64 + }{ + {uint8(2), int16(-2), .001}, + {uint64(100), uint8(102), 0.01}, + {2.1, 2.2, 0.001}, + {2.2, 2.1, 0.001}, + {2.1, -2.2, 1}, + {2.1, "bla-bla", 0}, + {0.1, -0.1, 1.99}, + } + + for _, tc := range cases { + False(t, assert.InEpsilon(tc.a, tc.b, tc.epsilon, "Expected %V and %V to have a relative difference of %v", tc.a, tc.b, tc.epsilon)) + } +} + +func TestRegexpWrapper(t *testing.T) { + + assert := New(new(testing.T)) + + cases := []struct { + rx, str string + }{ + {"^start", "start of the line"}, + {"end$", "in 
the end"}, + {"[0-9]{3}[.-]?[0-9]{2}[.-]?[0-9]{2}", "My phone number is 650.12.34"}, + } + + for _, tc := range cases { + True(t, assert.Regexp(tc.rx, tc.str)) + True(t, assert.Regexp(regexp.MustCompile(tc.rx), tc.str)) + False(t, assert.NotRegexp(tc.rx, tc.str)) + False(t, assert.NotRegexp(regexp.MustCompile(tc.rx), tc.str)) + } + + cases = []struct { + rx, str string + }{ + {"^asdfastart", "Not the start of the line"}, + {"end$", "in the end."}, + {"[0-9]{3}[.-]?[0-9]{2}[.-]?[0-9]{2}", "My phone number is 650.12a.34"}, + } + + for _, tc := range cases { + False(t, assert.Regexp(tc.rx, tc.str), "Expected \"%s\" to not match \"%s\"", tc.rx, tc.str) + False(t, assert.Regexp(regexp.MustCompile(tc.rx), tc.str)) + True(t, assert.NotRegexp(tc.rx, tc.str)) + True(t, assert.NotRegexp(regexp.MustCompile(tc.rx), tc.str)) + } +} + +func TestZeroWrapper(t *testing.T) { + assert := New(t) + mockAssert := New(new(testing.T)) + + for _, test := range zeros { + assert.True(mockAssert.Zero(test), "Zero should return true for %v", test) + } + + for _, test := range nonZeros { + assert.False(mockAssert.Zero(test), "Zero should return false for %v", test) + } +} + +func TestNotZeroWrapper(t *testing.T) { + assert := New(t) + mockAssert := New(new(testing.T)) + + for _, test := range zeros { + assert.False(mockAssert.NotZero(test), "Zero should return true for %v", test) + } + + for _, test := range nonZeros { + assert.True(mockAssert.NotZero(test), "Zero should return false for %v", test) + } +} + +func TestJSONEqWrapper_EqualSONString(t *testing.T) { + assert := New(new(testing.T)) + if !assert.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"hello": "world", "foo": "bar"}`) { + t.Error("JSONEq should return true") + } + +} + +func TestJSONEqWrapper_EquivalentButNotEqual(t *testing.T) { + assert := New(new(testing.T)) + if !assert.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) { + t.Error("JSONEq should return true") + } + +} + +func 
TestJSONEqWrapper_HashOfArraysAndHashes(t *testing.T) { + assert := New(new(testing.T)) + if !assert.JSONEq("{\r\n\t\"numeric\": 1.5,\r\n\t\"array\": [{\"foo\": \"bar\"}, 1, \"string\", [\"nested\", \"array\", 5.5]],\r\n\t\"hash\": {\"nested\": \"hash\", \"nested_slice\": [\"this\", \"is\", \"nested\"]},\r\n\t\"string\": \"foo\"\r\n}", + "{\r\n\t\"numeric\": 1.5,\r\n\t\"hash\": {\"nested\": \"hash\", \"nested_slice\": [\"this\", \"is\", \"nested\"]},\r\n\t\"string\": \"foo\",\r\n\t\"array\": [{\"foo\": \"bar\"}, 1, \"string\", [\"nested\", \"array\", 5.5]]\r\n}") { + t.Error("JSONEq should return true") + } +} + +func TestJSONEqWrapper_Array(t *testing.T) { + assert := New(new(testing.T)) + if !assert.JSONEq(`["foo", {"hello": "world", "nested": "hash"}]`, `["foo", {"nested": "hash", "hello": "world"}]`) { + t.Error("JSONEq should return true") + } + +} + +func TestJSONEqWrapper_HashAndArrayNotEquivalent(t *testing.T) { + assert := New(new(testing.T)) + if assert.JSONEq(`["foo", {"hello": "world", "nested": "hash"}]`, `{"foo": "bar", {"nested": "hash", "hello": "world"}}`) { + t.Error("JSONEq should return false") + } +} + +func TestJSONEqWrapper_HashesNotEquivalent(t *testing.T) { + assert := New(new(testing.T)) + if assert.JSONEq(`{"foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) { + t.Error("JSONEq should return false") + } +} + +func TestJSONEqWrapper_ActualIsNotJSON(t *testing.T) { + assert := New(new(testing.T)) + if assert.JSONEq(`{"foo": "bar"}`, "Not JSON") { + t.Error("JSONEq should return false") + } +} + +func TestJSONEqWrapper_ExpectedIsNotJSON(t *testing.T) { + assert := New(new(testing.T)) + if assert.JSONEq("Not JSON", `{"foo": "bar", "hello": "world"}`) { + t.Error("JSONEq should return false") + } +} + +func TestJSONEqWrapper_ExpectedAndActualNotJSON(t *testing.T) { + assert := New(new(testing.T)) + if assert.JSONEq("Not JSON", "Not JSON") { + t.Error("JSONEq should return false") + } +} + +func TestJSONEqWrapper_ArraysOfDifferentOrder(t 
*testing.T) { + assert := New(new(testing.T)) + if assert.JSONEq(`["foo", {"hello": "world", "nested": "hash"}]`, `[{ "hello": "world", "nested": "hash"}, "foo"]`) { + t.Error("JSONEq should return false") + } +} diff --git a/src/vendor/github.com/stretchr/testify/assert/http_assertions.go b/src/vendor/github.com/stretchr/testify/assert/http_assertions.go new file mode 100644 index 00000000..fa7ab89b --- /dev/null +++ b/src/vendor/github.com/stretchr/testify/assert/http_assertions.go @@ -0,0 +1,106 @@ +package assert + +import ( + "fmt" + "net/http" + "net/http/httptest" + "net/url" + "strings" +) + +// httpCode is a helper that returns HTTP code of the response. It returns -1 +// if building a new request fails. +func httpCode(handler http.HandlerFunc, method, url string, values url.Values) int { + w := httptest.NewRecorder() + req, err := http.NewRequest(method, url+"?"+values.Encode(), nil) + if err != nil { + return -1 + } + handler(w, req) + return w.Code +} + +// HTTPSuccess asserts that a specified handler returns a success status code. +// +// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil) +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, values url.Values) bool { + code := httpCode(handler, method, url, values) + if code == -1 { + return false + } + return code >= http.StatusOK && code <= http.StatusPartialContent +} + +// HTTPRedirect asserts that a specified handler returns a redirect status code. +// +// assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true) or not (false). 
+func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, values url.Values) bool { + code := httpCode(handler, method, url, values) + if code == -1 { + return false + } + return code >= http.StatusMultipleChoices && code <= http.StatusTemporaryRedirect +} + +// HTTPError asserts that a specified handler returns an error status code. +// +// assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values url.Values) bool { + code := httpCode(handler, method, url, values) + if code == -1 { + return false + } + return code >= http.StatusBadRequest +} + +// HTTPBody is a helper that returns HTTP body of the response. It returns +// empty string if building a new request fails. +func HTTPBody(handler http.HandlerFunc, method, url string, values url.Values) string { + w := httptest.NewRecorder() + req, err := http.NewRequest(method, url+"?"+values.Encode(), nil) + if err != nil { + return "" + } + handler(w, req) + return w.Body.String() +} + +// HTTPBodyContains asserts that a specified handler returns a +// body that contains a string. +// +// assert.HTTPBodyContains(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky") +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}) bool { + body := HTTPBody(handler, method, url, values) + + contains := strings.Contains(body, fmt.Sprint(str)) + if !contains { + Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body)) + } + + return contains +} + +// HTTPBodyNotContains asserts that a specified handler returns a +// body that does not contain a string. 
+// +// assert.HTTPBodyNotContains(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky") +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}) bool { + body := HTTPBody(handler, method, url, values) + + contains := strings.Contains(body, fmt.Sprint(str)) + if contains { + Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body)) + } + + return !contains +} diff --git a/src/vendor/github.com/stretchr/testify/assert/http_assertions_test.go b/src/vendor/github.com/stretchr/testify/assert/http_assertions_test.go new file mode 100644 index 00000000..684c2d5d --- /dev/null +++ b/src/vendor/github.com/stretchr/testify/assert/http_assertions_test.go @@ -0,0 +1,86 @@ +package assert + +import ( + "fmt" + "net/http" + "net/url" + "testing" +) + +func httpOK(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) +} + +func httpRedirect(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusTemporaryRedirect) +} + +func httpError(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusInternalServerError) +} + +func TestHTTPStatuses(t *testing.T) { + assert := New(t) + mockT := new(testing.T) + + assert.Equal(HTTPSuccess(mockT, httpOK, "GET", "/", nil), true) + assert.Equal(HTTPSuccess(mockT, httpRedirect, "GET", "/", nil), false) + assert.Equal(HTTPSuccess(mockT, httpError, "GET", "/", nil), false) + + assert.Equal(HTTPRedirect(mockT, httpOK, "GET", "/", nil), false) + assert.Equal(HTTPRedirect(mockT, httpRedirect, "GET", "/", nil), true) + assert.Equal(HTTPRedirect(mockT, httpError, "GET", "/", nil), false) + + assert.Equal(HTTPError(mockT, httpOK, "GET", "/", nil), false) + assert.Equal(HTTPError(mockT, httpRedirect, "GET", "/", nil), false) + assert.Equal(HTTPError(mockT, httpError, "GET", "/", nil), true) +} + 
+func TestHTTPStatusesWrapper(t *testing.T) { + assert := New(t) + mockAssert := New(new(testing.T)) + + assert.Equal(mockAssert.HTTPSuccess(httpOK, "GET", "/", nil), true) + assert.Equal(mockAssert.HTTPSuccess(httpRedirect, "GET", "/", nil), false) + assert.Equal(mockAssert.HTTPSuccess(httpError, "GET", "/", nil), false) + + assert.Equal(mockAssert.HTTPRedirect(httpOK, "GET", "/", nil), false) + assert.Equal(mockAssert.HTTPRedirect(httpRedirect, "GET", "/", nil), true) + assert.Equal(mockAssert.HTTPRedirect(httpError, "GET", "/", nil), false) + + assert.Equal(mockAssert.HTTPError(httpOK, "GET", "/", nil), false) + assert.Equal(mockAssert.HTTPError(httpRedirect, "GET", "/", nil), false) + assert.Equal(mockAssert.HTTPError(httpError, "GET", "/", nil), true) +} + +func httpHelloName(w http.ResponseWriter, r *http.Request) { + name := r.FormValue("name") + w.Write([]byte(fmt.Sprintf("Hello, %s!", name))) +} + +func TestHttpBody(t *testing.T) { + assert := New(t) + mockT := new(testing.T) + + assert.True(HTTPBodyContains(mockT, httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "Hello, World!")) + assert.True(HTTPBodyContains(mockT, httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "World")) + assert.False(HTTPBodyContains(mockT, httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "world")) + + assert.False(HTTPBodyNotContains(mockT, httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "Hello, World!")) + assert.False(HTTPBodyNotContains(mockT, httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "World")) + assert.True(HTTPBodyNotContains(mockT, httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "world")) +} + +func TestHttpBodyWrappers(t *testing.T) { + assert := New(t) + mockAssert := New(new(testing.T)) + + assert.True(mockAssert.HTTPBodyContains(httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "Hello, World!")) + 
assert.True(mockAssert.HTTPBodyContains(httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "World")) + assert.False(mockAssert.HTTPBodyContains(httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "world")) + + assert.False(mockAssert.HTTPBodyNotContains(httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "Hello, World!")) + assert.False(mockAssert.HTTPBodyNotContains(httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "World")) + assert.True(mockAssert.HTTPBodyNotContains(httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "world")) + +} diff --git a/src/vendor/github.com/stretchr/testify/doc.go b/src/vendor/github.com/stretchr/testify/doc.go new file mode 100644 index 00000000..377d5cc5 --- /dev/null +++ b/src/vendor/github.com/stretchr/testify/doc.go @@ -0,0 +1,22 @@ +// Package testify is a set of packages that provide many tools for testifying that your code will behave as you intend. +// +// testify contains the following packages: +// +// The assert package provides a comprehensive set of assertion functions that tie in to the Go testing system. +// +// The http package contains tools to make it easier to test http activity using the Go testing system. +// +// The mock package provides a system by which it is possible to mock your objects and verify calls are happening as expected. +// +// The suite package provides a basic structure for using structs as testing suites, and methods on those structs as tests. It includes setup/teardown functionality in the way of interfaces. +package testify + +// blank imports help docs. 
+import ( + // assert package + _ "github.com/stretchr/testify/assert" + // http package + _ "github.com/stretchr/testify/http" + // mock package + _ "github.com/stretchr/testify/mock" +) diff --git a/src/vendor/github.com/stretchr/testify/http/doc.go b/src/vendor/github.com/stretchr/testify/http/doc.go new file mode 100644 index 00000000..695167c6 --- /dev/null +++ b/src/vendor/github.com/stretchr/testify/http/doc.go @@ -0,0 +1,2 @@ +// Package http DEPRECATED USE net/http/httptest +package http diff --git a/src/vendor/github.com/stretchr/testify/http/test_response_writer.go b/src/vendor/github.com/stretchr/testify/http/test_response_writer.go new file mode 100644 index 00000000..5c3f813f --- /dev/null +++ b/src/vendor/github.com/stretchr/testify/http/test_response_writer.go @@ -0,0 +1,49 @@ +package http + +import ( + "net/http" +) + +// TestResponseWriter DEPRECATED: We recommend you use http://golang.org/pkg/net/http/httptest instead. +type TestResponseWriter struct { + + // StatusCode is the last int written by the call to WriteHeader(int) + StatusCode int + + // Output is a string containing the written bytes using the Write([]byte) func. + Output string + + // header is the internal storage of the http.Header object + header http.Header +} + +// Header DEPRECATED: We recommend you use http://golang.org/pkg/net/http/httptest instead. +func (rw *TestResponseWriter) Header() http.Header { + + if rw.header == nil { + rw.header = make(http.Header) + } + + return rw.header +} + +// Write DEPRECATED: We recommend you use http://golang.org/pkg/net/http/httptest instead. +func (rw *TestResponseWriter) Write(bytes []byte) (int, error) { + + // assume 200 success if no header has been set + if rw.StatusCode == 0 { + rw.WriteHeader(200) + } + + // add these bytes to the output string + rw.Output = rw.Output + string(bytes) + + // return normal values + return 0, nil + +} + +// WriteHeader DEPRECATED: We recommend you use http://golang.org/pkg/net/http/httptest instead. 
+func (rw *TestResponseWriter) WriteHeader(i int) { + rw.StatusCode = i +} diff --git a/src/vendor/github.com/stretchr/testify/http/test_round_tripper.go b/src/vendor/github.com/stretchr/testify/http/test_round_tripper.go new file mode 100644 index 00000000..b1e32f1d --- /dev/null +++ b/src/vendor/github.com/stretchr/testify/http/test_round_tripper.go @@ -0,0 +1,17 @@ +package http + +import ( + "github.com/stretchr/testify/mock" + "net/http" +) + +// TestRoundTripper DEPRECATED USE net/http/httptest +type TestRoundTripper struct { + mock.Mock +} + +// RoundTrip DEPRECATED USE net/http/httptest +func (t *TestRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + args := t.Called(req) + return args.Get(0).(*http.Response), args.Error(1) +} diff --git a/src/vendor/github.com/stretchr/testify/mock/doc.go b/src/vendor/github.com/stretchr/testify/mock/doc.go new file mode 100644 index 00000000..7324128e --- /dev/null +++ b/src/vendor/github.com/stretchr/testify/mock/doc.go @@ -0,0 +1,44 @@ +// Package mock provides a system by which it is possible to mock your objects +// and verify calls are happening as expected. +// +// Example Usage +// +// The mock package provides an object, Mock, that tracks activity on another object. It is usually +// embedded into a test object as shown below: +// +// type MyTestObject struct { +// // add a Mock object instance +// mock.Mock +// +// // other fields go here as normal +// } +// +// When implementing the methods of an interface, you wire your functions up +// to call the Mock.Called(args...) method, and return the appropriate values. 
+// +// For example, to mock a method that saves the name and age of a person and returns +// the year of their birth or an error, you might write this: +// +// func (o *MyTestObject) SavePersonDetails(firstname, lastname string, age int) (int, error) { +// args := o.Called(firstname, lastname, age) +// return args.Int(0), args.Error(1) +// } +// +// The Int, Error and Bool methods are examples of strongly typed getters that take the argument +// index position. Given this argument list: +// +// (12, true, "Something") +// +// You could read them out strongly typed like this: +// +// args.Int(0) +// args.Bool(1) +// args.String(2) +// +// For objects of your own type, use the generic Arguments.Get(index) method and make a type assertion: +// +// return args.Get(0).(*MyObject), args.Get(1).(*AnotherObjectOfMine) +// +// This may cause a panic if the object you are getting is nil (the type assertion will fail), in those +// cases you should check for nil first. +package mock diff --git a/src/vendor/github.com/stretchr/testify/mock/mock.go b/src/vendor/github.com/stretchr/testify/mock/mock.go new file mode 100644 index 00000000..ad6812eb --- /dev/null +++ b/src/vendor/github.com/stretchr/testify/mock/mock.go @@ -0,0 +1,763 @@ +package mock + +import ( + "fmt" + "reflect" + "regexp" + "runtime" + "strings" + "sync" + "time" + + "github.com/davecgh/go-spew/spew" + "github.com/pmezard/go-difflib/difflib" + "github.com/stretchr/objx" + "github.com/stretchr/testify/assert" +) + +func init() { + spew.Config.SortKeys = true +} + +// TestingT is an interface wrapper around *testing.T +type TestingT interface { + Logf(format string, args ...interface{}) + Errorf(format string, args ...interface{}) + FailNow() +} + +/* + Call +*/ + +// Call represents a method call and is used for setting expectations, +// as well as recording activity. +type Call struct { + Parent *Mock + + // The name of the method that was or will be called. 
+ Method string + + // Holds the arguments of the method. + Arguments Arguments + + // Holds the arguments that should be returned when + // this method is called. + ReturnArguments Arguments + + // The number of times to return the return arguments when setting + // expectations. 0 means to always return the value. + Repeatability int + + // Amount of times this call has been called + totalCalls int + + // Holds a channel that will be used to block the Return until it either + // receives a message or is closed. nil means it returns immediately. + WaitFor <-chan time.Time + + // Holds a handler used to manipulate arguments content that are passed by + // reference. It's useful when mocking methods such as unmarshalers or + // decoders. + RunFn func(Arguments) +} + +func newCall(parent *Mock, methodName string, methodArguments ...interface{}) *Call { + return &Call{ + Parent: parent, + Method: methodName, + Arguments: methodArguments, + ReturnArguments: make([]interface{}, 0), + Repeatability: 0, + WaitFor: nil, + RunFn: nil, + } +} + +func (c *Call) lock() { + c.Parent.mutex.Lock() +} + +func (c *Call) unlock() { + c.Parent.mutex.Unlock() +} + +// Return specifies the return arguments for the expectation. +// +// Mock.On("DoSomething").Return(errors.New("failed")) +func (c *Call) Return(returnArguments ...interface{}) *Call { + c.lock() + defer c.unlock() + + c.ReturnArguments = returnArguments + + return c +} + +// Once indicates that that the mock should only return the value once. +// +// Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Once() +func (c *Call) Once() *Call { + return c.Times(1) +} + +// Twice indicates that that the mock should only return the value twice. +// +// Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Twice() +func (c *Call) Twice() *Call { + return c.Times(2) +} + +// Times indicates that that the mock should only return the indicated number +// of times. 
+// +// Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Times(5) +func (c *Call) Times(i int) *Call { + c.lock() + defer c.unlock() + c.Repeatability = i + return c +} + +// WaitUntil sets the channel that will block the mock's return until its closed +// or a message is received. +// +// Mock.On("MyMethod", arg1, arg2).WaitUntil(time.After(time.Second)) +func (c *Call) WaitUntil(w <-chan time.Time) *Call { + c.lock() + defer c.unlock() + c.WaitFor = w + return c +} + +// After sets how long to block until the call returns +// +// Mock.On("MyMethod", arg1, arg2).After(time.Second) +func (c *Call) After(d time.Duration) *Call { + return c.WaitUntil(time.After(d)) +} + +// Run sets a handler to be called before returning. It can be used when +// mocking a method such as unmarshalers that takes a pointer to a struct and +// sets properties in such struct +// +// Mock.On("Unmarshal", AnythingOfType("*map[string]interface{}").Return().Run(func(args Arguments) { +// arg := args.Get(0).(*map[string]interface{}) +// arg["foo"] = "bar" +// }) +func (c *Call) Run(fn func(Arguments)) *Call { + c.lock() + defer c.unlock() + c.RunFn = fn + return c +} + +// On chains a new expectation description onto the mocked interface. This +// allows syntax like. +// +// Mock. +// On("MyMethod", 1).Return(nil). +// On("MyOtherMethod", 'a', 'b', 'c').Return(errors.New("Some Error")) +func (c *Call) On(methodName string, arguments ...interface{}) *Call { + return c.Parent.On(methodName, arguments...) +} + +// Mock is the workhorse used to track activity on another object. +// For an example of its usage, refer to the "Example Usage" section at the top +// of this document. +type Mock struct { + // Represents the calls that are expected of + // an object. + ExpectedCalls []*Call + + // Holds the calls that were made to this mocked object. + Calls []Call + + // TestData holds any data that might be useful for testing. 
Testify ignores
+	// this data completely allowing you to do whatever you like with it.
+	testData objx.Map
+
+	mutex sync.Mutex
+}
+
+// TestData holds any data that might be useful for testing. Testify ignores
+// this data completely allowing you to do whatever you like with it.
+func (m *Mock) TestData() objx.Map {
+
+	if m.testData == nil {
+		m.testData = make(objx.Map)
+	}
+
+	return m.testData
+}
+
+/*
+	Setting expectations
+*/
+
+// On starts a description of an expectation of the specified method
+// being called.
+//
+//     Mock.On("MyMethod", arg1, arg2)
+func (m *Mock) On(methodName string, arguments ...interface{}) *Call {
+	for _, arg := range arguments {
+		if v := reflect.ValueOf(arg); v.Kind() == reflect.Func {
+			panic(fmt.Sprintf("cannot use Func in expectations. Use mock.AnythingOfType(\"%T\")", arg))
+		}
+	}
+
+	m.mutex.Lock()
+	defer m.mutex.Unlock()
+	c := newCall(m, methodName, arguments...)
+	m.ExpectedCalls = append(m.ExpectedCalls, c)
+	return c
+}
+
+// /*
+// 	Recording and responding to activity
+// */
+
+func (m *Mock) findExpectedCall(method string, arguments ...interface{}) (int, *Call) {
+	m.mutex.Lock()
+	defer m.mutex.Unlock()
+	for i, call := range m.ExpectedCalls {
+		if call.Method == method && call.Repeatability > -1 {
+
+			_, diffCount := call.Arguments.Diff(arguments)
+			if diffCount == 0 {
+				return i, call
+			}
+
+		}
+	}
+	return -1, nil
+}
+
+func (m *Mock) findClosestCall(method string, arguments ...interface{}) (bool, *Call) {
+	diffCount := 0
+	var closestCall *Call
+
+	for _, call := range m.expectedCalls() {
+		if call.Method == method {
+
+			_, tempDiffCount := call.Arguments.Diff(arguments)
+			if tempDiffCount < diffCount || diffCount == 0 {
+				diffCount = tempDiffCount
+				closestCall = call
+			}
+
+		}
+	}
+
+	if closestCall == nil {
+		return false, nil
+	}
+
+	return true, closestCall
+}
+
+func callString(method string, arguments Arguments, includeArgumentValues bool) string {
+
+	var argValsString string
+	if 
includeArgumentValues { + var argVals []string + for argIndex, arg := range arguments { + argVals = append(argVals, fmt.Sprintf("%d: %#v", argIndex, arg)) + } + argValsString = fmt.Sprintf("\n\t\t%s", strings.Join(argVals, "\n\t\t")) + } + + return fmt.Sprintf("%s(%s)%s", method, arguments.String(), argValsString) +} + +// Called tells the mock object that a method has been called, and gets an array +// of arguments to return. Panics if the call is unexpected (i.e. not preceded by +// appropriate .On .Return() calls) +// If Call.WaitFor is set, blocks until the channel is closed or receives a message. +func (m *Mock) Called(arguments ...interface{}) Arguments { + // get the calling function's name + pc, _, _, ok := runtime.Caller(1) + if !ok { + panic("Couldn't get the caller information") + } + functionPath := runtime.FuncForPC(pc).Name() + //Next four lines are required to use GCCGO function naming conventions. + //For Ex: github_com_docker_libkv_store_mock.WatchTree.pN39_github_com_docker_libkv_store_mock.Mock + //uses inteface information unlike golang github.com/docker/libkv/store/mock.(*Mock).WatchTree + //With GCCGO we need to remove interface information starting from pN
. + re := regexp.MustCompile("\\.pN\\d+_") + if re.MatchString(functionPath) { + functionPath = re.Split(functionPath, -1)[0] + } + parts := strings.Split(functionPath, ".") + functionName := parts[len(parts)-1] + + found, call := m.findExpectedCall(functionName, arguments...) + + if found < 0 { + // we have to fail here - because we don't know what to do + // as the return arguments. This is because: + // + // a) this is a totally unexpected call to this method, + // b) the arguments are not what was expected, or + // c) the developer has forgotten to add an accompanying On...Return pair. + + closestFound, closestCall := m.findClosestCall(functionName, arguments...) + + if closestFound { + panic(fmt.Sprintf("\n\nmock: Unexpected Method Call\n-----------------------------\n\n%s\n\nThe closest call I have is: \n\n%s\n\n%s\n", callString(functionName, arguments, true), callString(functionName, closestCall.Arguments, true), diffArguments(arguments, closestCall.Arguments))) + } else { + panic(fmt.Sprintf("\nassert: mock: I don't know what to return because the method call was unexpected.\n\tEither do Mock.On(\"%s\").Return(...) 
first, or remove the %s() call.\n\tThis method was unexpected:\n\t\t%s\n\tat: %s", functionName, functionName, callString(functionName, arguments, true), assert.CallerInfo())) + } + } else { + m.mutex.Lock() + switch { + case call.Repeatability == 1: + call.Repeatability = -1 + call.totalCalls++ + + case call.Repeatability > 1: + call.Repeatability-- + call.totalCalls++ + + case call.Repeatability == 0: + call.totalCalls++ + } + m.mutex.Unlock() + } + + // add the call + m.mutex.Lock() + m.Calls = append(m.Calls, *newCall(m, functionName, arguments...)) + m.mutex.Unlock() + + // block if specified + if call.WaitFor != nil { + <-call.WaitFor + } + + if call.RunFn != nil { + call.RunFn(arguments) + } + + return call.ReturnArguments +} + +/* + Assertions +*/ + +type assertExpectationser interface { + AssertExpectations(TestingT) bool +} + +// AssertExpectationsForObjects asserts that everything specified with On and Return +// of the specified objects was in fact called as expected. +// +// Calls may have occurred in any order. +func AssertExpectationsForObjects(t TestingT, testObjects ...interface{}) bool { + for _, obj := range testObjects { + if m, ok := obj.(Mock); ok { + t.Logf("Deprecated mock.AssertExpectationsForObjects(myMock.Mock) use mock.AssertExpectationsForObjects(myMock)") + obj = &m + } + m := obj.(assertExpectationser) + if !m.AssertExpectations(t) { + return false + } + } + return true +} + +// AssertExpectations asserts that everything specified with On and Return was +// in fact called as expected. Calls may have occurred in any order. 
+func (m *Mock) AssertExpectations(t TestingT) bool { + var somethingMissing bool + var failedExpectations int + + // iterate through each expectation + expectedCalls := m.expectedCalls() + for _, expectedCall := range expectedCalls { + if !m.methodWasCalled(expectedCall.Method, expectedCall.Arguments) && expectedCall.totalCalls == 0 { + somethingMissing = true + failedExpectations++ + t.Logf("\u274C\t%s(%s)", expectedCall.Method, expectedCall.Arguments.String()) + } else { + m.mutex.Lock() + if expectedCall.Repeatability > 0 { + somethingMissing = true + failedExpectations++ + } else { + t.Logf("\u2705\t%s(%s)", expectedCall.Method, expectedCall.Arguments.String()) + } + m.mutex.Unlock() + } + } + + if somethingMissing { + t.Errorf("FAIL: %d out of %d expectation(s) were met.\n\tThe code you are testing needs to make %d more call(s).\n\tat: %s", len(expectedCalls)-failedExpectations, len(expectedCalls), failedExpectations, assert.CallerInfo()) + } + + return !somethingMissing +} + +// AssertNumberOfCalls asserts that the method was called expectedCalls times. +func (m *Mock) AssertNumberOfCalls(t TestingT, methodName string, expectedCalls int) bool { + var actualCalls int + for _, call := range m.calls() { + if call.Method == methodName { + actualCalls++ + } + } + return assert.Equal(t, expectedCalls, actualCalls, fmt.Sprintf("Expected number of calls (%d) does not match the actual number of calls (%d).", expectedCalls, actualCalls)) +} + +// AssertCalled asserts that the method was called. +// It can produce a false result when an argument is a pointer type and the underlying value changed after calling the mocked method. 
+func (m *Mock) AssertCalled(t TestingT, methodName string, arguments ...interface{}) bool { + if !assert.True(t, m.methodWasCalled(methodName, arguments), fmt.Sprintf("The \"%s\" method should have been called with %d argument(s), but was not.", methodName, len(arguments))) { + t.Logf("%v", m.expectedCalls()) + return false + } + return true +} + +// AssertNotCalled asserts that the method was not called. +// It can produce a false result when an argument is a pointer type and the underlying value changed after calling the mocked method. +func (m *Mock) AssertNotCalled(t TestingT, methodName string, arguments ...interface{}) bool { + if !assert.False(t, m.methodWasCalled(methodName, arguments), fmt.Sprintf("The \"%s\" method was called with %d argument(s), but should NOT have been.", methodName, len(arguments))) { + t.Logf("%v", m.expectedCalls()) + return false + } + return true +} + +func (m *Mock) methodWasCalled(methodName string, expected []interface{}) bool { + for _, call := range m.calls() { + if call.Method == methodName { + + _, differences := Arguments(expected).Diff(call.Arguments) + + if differences == 0 { + // found the expected call + return true + } + + } + } + // we didn't find the expected call + return false +} + +func (m *Mock) expectedCalls() []*Call { + m.mutex.Lock() + defer m.mutex.Unlock() + return append([]*Call{}, m.ExpectedCalls...) +} + +func (m *Mock) calls() []Call { + m.mutex.Lock() + defer m.mutex.Unlock() + return append([]Call{}, m.Calls...) +} + +/* + Arguments +*/ + +// Arguments holds an array of method arguments or return values. +type Arguments []interface{} + +const ( + // Anything is used in Diff and Assert when the argument being tested + // shouldn't be taken into consideration. + Anything string = "mock.Anything" +) + +// AnythingOfTypeArgument is a string that contains the type of an argument +// for use when type checking. Used in Diff and Assert. 
+type AnythingOfTypeArgument string + +// AnythingOfType returns an AnythingOfTypeArgument object containing the +// name of the type to check for. Used in Diff and Assert. +// +// For example: +// Assert(t, AnythingOfType("string"), AnythingOfType("int")) +func AnythingOfType(t string) AnythingOfTypeArgument { + return AnythingOfTypeArgument(t) +} + +// argumentMatcher performs custom argument matching, returning whether or +// not the argument is matched by the expectation fixture function. +type argumentMatcher struct { + // fn is a function which accepts one argument, and returns a bool. + fn reflect.Value +} + +func (f argumentMatcher) Matches(argument interface{}) bool { + expectType := f.fn.Type().In(0) + + if reflect.TypeOf(argument).AssignableTo(expectType) { + result := f.fn.Call([]reflect.Value{reflect.ValueOf(argument)}) + return result[0].Bool() + } + return false +} + +func (f argumentMatcher) String() string { + return fmt.Sprintf("func(%s) bool", f.fn.Type().In(0).Name()) +} + +// MatchedBy can be used to match a mock call based on only certain properties +// from a complex struct or some calculation. It takes a function that will be +// evaluated with the called argument and will return true when there's a match +// and false otherwise. +// +// Example: +// m.On("Do", MatchedBy(func(req *http.Request) bool { return req.Host == "example.com" })) +// +// |fn|, must be a function accepting a single argument (of the expected type) +// which returns a bool. If |fn| doesn't match the required signature, +// MathedBy() panics. 
+func MatchedBy(fn interface{}) argumentMatcher { + fnType := reflect.TypeOf(fn) + + if fnType.Kind() != reflect.Func { + panic(fmt.Sprintf("assert: arguments: %s is not a func", fn)) + } + if fnType.NumIn() != 1 { + panic(fmt.Sprintf("assert: arguments: %s does not take exactly one argument", fn)) + } + if fnType.NumOut() != 1 || fnType.Out(0).Kind() != reflect.Bool { + panic(fmt.Sprintf("assert: arguments: %s does not return a bool", fn)) + } + + return argumentMatcher{fn: reflect.ValueOf(fn)} +} + +// Get Returns the argument at the specified index. +func (args Arguments) Get(index int) interface{} { + if index+1 > len(args) { + panic(fmt.Sprintf("assert: arguments: Cannot call Get(%d) because there are %d argument(s).", index, len(args))) + } + return args[index] +} + +// Is gets whether the objects match the arguments specified. +func (args Arguments) Is(objects ...interface{}) bool { + for i, obj := range args { + if obj != objects[i] { + return false + } + } + return true +} + +// Diff gets a string describing the differences between the arguments +// and the specified objects. +// +// Returns the diff string and number of differences found. 
+func (args Arguments) Diff(objects []interface{}) (string, int) { + + var output = "\n" + var differences int + + var maxArgCount = len(args) + if len(objects) > maxArgCount { + maxArgCount = len(objects) + } + + for i := 0; i < maxArgCount; i++ { + var actual, expected interface{} + + if len(objects) <= i { + actual = "(Missing)" + } else { + actual = objects[i] + } + + if len(args) <= i { + expected = "(Missing)" + } else { + expected = args[i] + } + + if matcher, ok := expected.(argumentMatcher); ok { + if matcher.Matches(actual) { + output = fmt.Sprintf("%s\t%d: \u2705 %s matched by %s\n", output, i, actual, matcher) + } else { + differences++ + output = fmt.Sprintf("%s\t%d: \u2705 %s not matched by %s\n", output, i, actual, matcher) + } + } else if reflect.TypeOf(expected) == reflect.TypeOf((*AnythingOfTypeArgument)(nil)).Elem() { + + // type checking + if reflect.TypeOf(actual).Name() != string(expected.(AnythingOfTypeArgument)) && reflect.TypeOf(actual).String() != string(expected.(AnythingOfTypeArgument)) { + // not match + differences++ + output = fmt.Sprintf("%s\t%d: \u274C type %s != type %s - %s\n", output, i, expected, reflect.TypeOf(actual).Name(), actual) + } + + } else { + + // normal checking + + if assert.ObjectsAreEqual(expected, Anything) || assert.ObjectsAreEqual(actual, Anything) || assert.ObjectsAreEqual(actual, expected) { + // match + output = fmt.Sprintf("%s\t%d: \u2705 %s == %s\n", output, i, actual, expected) + } else { + // not match + differences++ + output = fmt.Sprintf("%s\t%d: \u274C %s != %s\n", output, i, actual, expected) + } + } + + } + + if differences == 0 { + return "No differences.", differences + } + + return output, differences + +} + +// Assert compares the arguments with the specified objects and fails if +// they do not exactly match. 
+func (args Arguments) Assert(t TestingT, objects ...interface{}) bool { + + // get the differences + diff, diffCount := args.Diff(objects) + + if diffCount == 0 { + return true + } + + // there are differences... report them... + t.Logf(diff) + t.Errorf("%sArguments do not match.", assert.CallerInfo()) + + return false + +} + +// String gets the argument at the specified index. Panics if there is no argument, or +// if the argument is of the wrong type. +// +// If no index is provided, String() returns a complete string representation +// of the arguments. +func (args Arguments) String(indexOrNil ...int) string { + + if len(indexOrNil) == 0 { + // normal String() method - return a string representation of the args + var argsStr []string + for _, arg := range args { + argsStr = append(argsStr, fmt.Sprintf("%s", reflect.TypeOf(arg))) + } + return strings.Join(argsStr, ",") + } else if len(indexOrNil) == 1 { + // Index has been specified - get the argument at that index + var index = indexOrNil[0] + var s string + var ok bool + if s, ok = args.Get(index).(string); !ok { + panic(fmt.Sprintf("assert: arguments: String(%d) failed because object wasn't correct type: %s", index, args.Get(index))) + } + return s + } + + panic(fmt.Sprintf("assert: arguments: Wrong number of arguments passed to String. Must be 0 or 1, not %d", len(indexOrNil))) + +} + +// Int gets the argument at the specified index. Panics if there is no argument, or +// if the argument is of the wrong type. +func (args Arguments) Int(index int) int { + var s int + var ok bool + if s, ok = args.Get(index).(int); !ok { + panic(fmt.Sprintf("assert: arguments: Int(%d) failed because object wasn't correct type: %v", index, args.Get(index))) + } + return s +} + +// Error gets the argument at the specified index. Panics if there is no argument, or +// if the argument is of the wrong type. 
+func (args Arguments) Error(index int) error { + obj := args.Get(index) + var s error + var ok bool + if obj == nil { + return nil + } + if s, ok = obj.(error); !ok { + panic(fmt.Sprintf("assert: arguments: Error(%d) failed because object wasn't correct type: %v", index, args.Get(index))) + } + return s +} + +// Bool gets the argument at the specified index. Panics if there is no argument, or +// if the argument is of the wrong type. +func (args Arguments) Bool(index int) bool { + var s bool + var ok bool + if s, ok = args.Get(index).(bool); !ok { + panic(fmt.Sprintf("assert: arguments: Bool(%d) failed because object wasn't correct type: %v", index, args.Get(index))) + } + return s +} + +func typeAndKind(v interface{}) (reflect.Type, reflect.Kind) { + t := reflect.TypeOf(v) + k := t.Kind() + + if k == reflect.Ptr { + t = t.Elem() + k = t.Kind() + } + return t, k +} + +func diffArguments(expected Arguments, actual Arguments) string { + for x := range expected { + if diffString := diff(expected[x], actual[x]); diffString != "" { + return fmt.Sprintf("Difference found in argument %v:\n\n%s", x, diffString) + } + } + + return "" +} + +// diff returns a diff of both values as long as both are of the same type and +// are a struct, map, slice or array. Otherwise it returns an empty string. 
+func diff(expected interface{}, actual interface{}) string { + if expected == nil || actual == nil { + return "" + } + + et, ek := typeAndKind(expected) + at, _ := typeAndKind(actual) + + if et != at { + return "" + } + + if ek != reflect.Struct && ek != reflect.Map && ek != reflect.Slice && ek != reflect.Array { + return "" + } + + e := spew.Sdump(expected) + a := spew.Sdump(actual) + + diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{ + A: difflib.SplitLines(e), + B: difflib.SplitLines(a), + FromFile: "Expected", + FromDate: "", + ToFile: "Actual", + ToDate: "", + Context: 1, + }) + + return diff +} diff --git a/src/vendor/github.com/stretchr/testify/mock/mock_test.go b/src/vendor/github.com/stretchr/testify/mock/mock_test.go new file mode 100644 index 00000000..8cb4615d --- /dev/null +++ b/src/vendor/github.com/stretchr/testify/mock/mock_test.go @@ -0,0 +1,1132 @@ +package mock + +import ( + "errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "testing" + "time" +) + +/* + Test objects +*/ + +// ExampleInterface represents an example interface. 
+type ExampleInterface interface { + TheExampleMethod(a, b, c int) (int, error) +} + +// TestExampleImplementation is a test implementation of ExampleInterface +type TestExampleImplementation struct { + Mock +} + +func (i *TestExampleImplementation) TheExampleMethod(a, b, c int) (int, error) { + args := i.Called(a, b, c) + return args.Int(0), errors.New("Whoops") +} + +func (i *TestExampleImplementation) TheExampleMethod2(yesorno bool) { + i.Called(yesorno) +} + +type ExampleType struct { + ran bool +} + +func (i *TestExampleImplementation) TheExampleMethod3(et *ExampleType) error { + args := i.Called(et) + return args.Error(0) +} + +func (i *TestExampleImplementation) TheExampleMethodFunc(fn func(string) error) error { + args := i.Called(fn) + return args.Error(0) +} + +func (i *TestExampleImplementation) TheExampleMethodVariadic(a ...int) error { + args := i.Called(a) + return args.Error(0) +} + +func (i *TestExampleImplementation) TheExampleMethodVariadicInterface(a ...interface{}) error { + args := i.Called(a) + return args.Error(0) +} + +type ExampleFuncType func(string) error + +func (i *TestExampleImplementation) TheExampleMethodFuncType(fn ExampleFuncType) error { + args := i.Called(fn) + return args.Error(0) +} + +/* + Mock +*/ + +func Test_Mock_TestData(t *testing.T) { + + var mockedService = new(TestExampleImplementation) + + if assert.NotNil(t, mockedService.TestData()) { + + mockedService.TestData().Set("something", 123) + assert.Equal(t, 123, mockedService.TestData().Get("something").Data()) + } +} + +func Test_Mock_On(t *testing.T) { + + // make a test impl object + var mockedService = new(TestExampleImplementation) + + c := mockedService.On("TheExampleMethod") + assert.Equal(t, []*Call{c}, mockedService.ExpectedCalls) + assert.Equal(t, "TheExampleMethod", c.Method) +} + +func Test_Mock_Chained_On(t *testing.T) { + // make a test impl object + var mockedService = new(TestExampleImplementation) + + mockedService. + On("TheExampleMethod", 1, 2, 3). 
+ Return(0). + On("TheExampleMethod3", AnythingOfType("*mock.ExampleType")). + Return(nil) + + expectedCalls := []*Call{ + &Call{ + Parent: &mockedService.Mock, + Method: "TheExampleMethod", + Arguments: []interface{}{1, 2, 3}, + ReturnArguments: []interface{}{0}, + }, + &Call{ + Parent: &mockedService.Mock, + Method: "TheExampleMethod3", + Arguments: []interface{}{AnythingOfType("*mock.ExampleType")}, + ReturnArguments: []interface{}{nil}, + }, + } + assert.Equal(t, expectedCalls, mockedService.ExpectedCalls) +} + +func Test_Mock_On_WithArgs(t *testing.T) { + + // make a test impl object + var mockedService = new(TestExampleImplementation) + + c := mockedService.On("TheExampleMethod", 1, 2, 3, 4) + + assert.Equal(t, []*Call{c}, mockedService.ExpectedCalls) + assert.Equal(t, "TheExampleMethod", c.Method) + assert.Equal(t, Arguments{1, 2, 3, 4}, c.Arguments) +} + +func Test_Mock_On_WithFuncArg(t *testing.T) { + + // make a test impl object + var mockedService = new(TestExampleImplementation) + + c := mockedService. + On("TheExampleMethodFunc", AnythingOfType("func(string) error")). 
+ Return(nil) + + assert.Equal(t, []*Call{c}, mockedService.ExpectedCalls) + assert.Equal(t, "TheExampleMethodFunc", c.Method) + assert.Equal(t, 1, len(c.Arguments)) + assert.Equal(t, AnythingOfType("func(string) error"), c.Arguments[0]) + + fn := func(string) error { return nil } + + assert.NotPanics(t, func() { + mockedService.TheExampleMethodFunc(fn) + }) +} + +func Test_Mock_On_WithIntArgMatcher(t *testing.T) { + var mockedService TestExampleImplementation + + mockedService.On("TheExampleMethod", + MatchedBy(func(a int) bool { + return a == 1 + }), MatchedBy(func(b int) bool { + return b == 2 + }), MatchedBy(func(c int) bool { + return c == 3 + })).Return(0, nil) + + assert.Panics(t, func() { + mockedService.TheExampleMethod(1, 2, 4) + }) + assert.Panics(t, func() { + mockedService.TheExampleMethod(2, 2, 3) + }) + assert.NotPanics(t, func() { + mockedService.TheExampleMethod(1, 2, 3) + }) +} + +func Test_Mock_On_WithPtrArgMatcher(t *testing.T) { + var mockedService TestExampleImplementation + + mockedService.On("TheExampleMethod3", + MatchedBy(func(a *ExampleType) bool { return a.ran == true }), + ).Return(nil) + + mockedService.On("TheExampleMethod3", + MatchedBy(func(a *ExampleType) bool { return a.ran == false }), + ).Return(errors.New("error")) + + assert.Equal(t, mockedService.TheExampleMethod3(&ExampleType{true}), nil) + assert.EqualError(t, mockedService.TheExampleMethod3(&ExampleType{false}), "error") +} + +func Test_Mock_On_WithFuncArgMatcher(t *testing.T) { + var mockedService TestExampleImplementation + + fixture1, fixture2 := errors.New("fixture1"), errors.New("fixture2") + + mockedService.On("TheExampleMethodFunc", + MatchedBy(func(a func(string) error) bool { return a("string") == fixture1 }), + ).Return(errors.New("fixture1")) + + mockedService.On("TheExampleMethodFunc", + MatchedBy(func(a func(string) error) bool { return a("string") == fixture2 }), + ).Return(errors.New("fixture2")) + + assert.EqualError(t, mockedService.TheExampleMethodFunc( + 
func(string) error { return fixture1 }), "fixture1") + assert.EqualError(t, mockedService.TheExampleMethodFunc( + func(string) error { return fixture2 }), "fixture2") +} + +func Test_Mock_On_WithVariadicFunc(t *testing.T) { + + // make a test impl object + var mockedService = new(TestExampleImplementation) + + c := mockedService. + On("TheExampleMethodVariadic", []int{1, 2, 3}). + Return(nil) + + assert.Equal(t, []*Call{c}, mockedService.ExpectedCalls) + assert.Equal(t, 1, len(c.Arguments)) + assert.Equal(t, []int{1, 2, 3}, c.Arguments[0]) + + assert.NotPanics(t, func() { + mockedService.TheExampleMethodVariadic(1, 2, 3) + }) + assert.Panics(t, func() { + mockedService.TheExampleMethodVariadic(1, 2) + }) + +} + +func Test_Mock_On_WithVariadicFuncWithInterface(t *testing.T) { + + // make a test impl object + var mockedService = new(TestExampleImplementation) + + c := mockedService.On("TheExampleMethodVariadicInterface", []interface{}{1, 2, 3}). + Return(nil) + + assert.Equal(t, []*Call{c}, mockedService.ExpectedCalls) + assert.Equal(t, 1, len(c.Arguments)) + assert.Equal(t, []interface{}{1, 2, 3}, c.Arguments[0]) + + assert.NotPanics(t, func() { + mockedService.TheExampleMethodVariadicInterface(1, 2, 3) + }) + assert.Panics(t, func() { + mockedService.TheExampleMethodVariadicInterface(1, 2) + }) + +} + +func Test_Mock_On_WithVariadicFuncWithEmptyInterfaceArray(t *testing.T) { + + // make a test impl object + var mockedService = new(TestExampleImplementation) + + var expected []interface{} + c := mockedService. + On("TheExampleMethodVariadicInterface", expected). 
+ Return(nil) + + assert.Equal(t, []*Call{c}, mockedService.ExpectedCalls) + assert.Equal(t, 1, len(c.Arguments)) + assert.Equal(t, expected, c.Arguments[0]) + + assert.NotPanics(t, func() { + mockedService.TheExampleMethodVariadicInterface() + }) + assert.Panics(t, func() { + mockedService.TheExampleMethodVariadicInterface(1, 2) + }) + +} + +func Test_Mock_On_WithFuncPanics(t *testing.T) { + // make a test impl object + var mockedService = new(TestExampleImplementation) + + assert.Panics(t, func() { + mockedService.On("TheExampleMethodFunc", func(string) error { return nil }) + }) +} + +func Test_Mock_On_WithFuncTypeArg(t *testing.T) { + + // make a test impl object + var mockedService = new(TestExampleImplementation) + + c := mockedService. + On("TheExampleMethodFuncType", AnythingOfType("mock.ExampleFuncType")). + Return(nil) + + assert.Equal(t, []*Call{c}, mockedService.ExpectedCalls) + assert.Equal(t, 1, len(c.Arguments)) + assert.Equal(t, AnythingOfType("mock.ExampleFuncType"), c.Arguments[0]) + + fn := func(string) error { return nil } + assert.NotPanics(t, func() { + mockedService.TheExampleMethodFuncType(fn) + }) +} + +func Test_Mock_Return(t *testing.T) { + + // make a test impl object + var mockedService = new(TestExampleImplementation) + + c := mockedService. + On("TheExampleMethod", "A", "B", true). 
+ Return(1, "two", true) + + require.Equal(t, []*Call{c}, mockedService.ExpectedCalls) + + call := mockedService.ExpectedCalls[0] + + assert.Equal(t, "TheExampleMethod", call.Method) + assert.Equal(t, "A", call.Arguments[0]) + assert.Equal(t, "B", call.Arguments[1]) + assert.Equal(t, true, call.Arguments[2]) + assert.Equal(t, 1, call.ReturnArguments[0]) + assert.Equal(t, "two", call.ReturnArguments[1]) + assert.Equal(t, true, call.ReturnArguments[2]) + assert.Equal(t, 0, call.Repeatability) + assert.Nil(t, call.WaitFor) +} + +func Test_Mock_Return_WaitUntil(t *testing.T) { + + // make a test impl object + var mockedService = new(TestExampleImplementation) + ch := time.After(time.Second) + + c := mockedService.Mock. + On("TheExampleMethod", "A", "B", true). + WaitUntil(ch). + Return(1, "two", true) + + // assert that the call was created + require.Equal(t, []*Call{c}, mockedService.ExpectedCalls) + + call := mockedService.ExpectedCalls[0] + + assert.Equal(t, "TheExampleMethod", call.Method) + assert.Equal(t, "A", call.Arguments[0]) + assert.Equal(t, "B", call.Arguments[1]) + assert.Equal(t, true, call.Arguments[2]) + assert.Equal(t, 1, call.ReturnArguments[0]) + assert.Equal(t, "two", call.ReturnArguments[1]) + assert.Equal(t, true, call.ReturnArguments[2]) + assert.Equal(t, 0, call.Repeatability) + assert.Equal(t, ch, call.WaitFor) +} + +func Test_Mock_Return_After(t *testing.T) { + + // make a test impl object + var mockedService = new(TestExampleImplementation) + + c := mockedService.Mock. + On("TheExampleMethod", "A", "B", true). + Return(1, "two", true). 
+ After(time.Second) + + require.Equal(t, []*Call{c}, mockedService.ExpectedCalls) + + call := mockedService.Mock.ExpectedCalls[0] + + assert.Equal(t, "TheExampleMethod", call.Method) + assert.Equal(t, "A", call.Arguments[0]) + assert.Equal(t, "B", call.Arguments[1]) + assert.Equal(t, true, call.Arguments[2]) + assert.Equal(t, 1, call.ReturnArguments[0]) + assert.Equal(t, "two", call.ReturnArguments[1]) + assert.Equal(t, true, call.ReturnArguments[2]) + assert.Equal(t, 0, call.Repeatability) + assert.NotEqual(t, nil, call.WaitFor) + +} + +func Test_Mock_Return_Run(t *testing.T) { + + // make a test impl object + var mockedService = new(TestExampleImplementation) + + fn := func(args Arguments) { + arg := args.Get(0).(*ExampleType) + arg.ran = true + } + + c := mockedService.Mock. + On("TheExampleMethod3", AnythingOfType("*mock.ExampleType")). + Return(nil). + Run(fn) + + require.Equal(t, []*Call{c}, mockedService.ExpectedCalls) + + call := mockedService.Mock.ExpectedCalls[0] + + assert.Equal(t, "TheExampleMethod3", call.Method) + assert.Equal(t, AnythingOfType("*mock.ExampleType"), call.Arguments[0]) + assert.Equal(t, nil, call.ReturnArguments[0]) + assert.Equal(t, 0, call.Repeatability) + assert.NotEqual(t, nil, call.WaitFor) + assert.NotNil(t, call.Run) + + et := ExampleType{} + assert.Equal(t, false, et.ran) + mockedService.TheExampleMethod3(&et) + assert.Equal(t, true, et.ran) +} + +func Test_Mock_Return_Run_Out_Of_Order(t *testing.T) { + // make a test impl object + var mockedService = new(TestExampleImplementation) + f := func(args Arguments) { + arg := args.Get(0).(*ExampleType) + arg.ran = true + } + + c := mockedService.Mock. + On("TheExampleMethod3", AnythingOfType("*mock.ExampleType")). + Run(f). 
+ Return(nil) + + require.Equal(t, []*Call{c}, mockedService.ExpectedCalls) + + call := mockedService.Mock.ExpectedCalls[0] + + assert.Equal(t, "TheExampleMethod3", call.Method) + assert.Equal(t, AnythingOfType("*mock.ExampleType"), call.Arguments[0]) + assert.Equal(t, nil, call.ReturnArguments[0]) + assert.Equal(t, 0, call.Repeatability) + assert.NotEqual(t, nil, call.WaitFor) + assert.NotNil(t, call.Run) +} + +func Test_Mock_Return_Once(t *testing.T) { + + // make a test impl object + var mockedService = new(TestExampleImplementation) + + c := mockedService.On("TheExampleMethod", "A", "B", true). + Return(1, "two", true). + Once() + + require.Equal(t, []*Call{c}, mockedService.ExpectedCalls) + + call := mockedService.ExpectedCalls[0] + + assert.Equal(t, "TheExampleMethod", call.Method) + assert.Equal(t, "A", call.Arguments[0]) + assert.Equal(t, "B", call.Arguments[1]) + assert.Equal(t, true, call.Arguments[2]) + assert.Equal(t, 1, call.ReturnArguments[0]) + assert.Equal(t, "two", call.ReturnArguments[1]) + assert.Equal(t, true, call.ReturnArguments[2]) + assert.Equal(t, 1, call.Repeatability) + assert.Nil(t, call.WaitFor) +} + +func Test_Mock_Return_Twice(t *testing.T) { + + // make a test impl object + var mockedService = new(TestExampleImplementation) + + c := mockedService. + On("TheExampleMethod", "A", "B", true). + Return(1, "two", true). 
+ Twice() + + require.Equal(t, []*Call{c}, mockedService.ExpectedCalls) + + call := mockedService.ExpectedCalls[0] + + assert.Equal(t, "TheExampleMethod", call.Method) + assert.Equal(t, "A", call.Arguments[0]) + assert.Equal(t, "B", call.Arguments[1]) + assert.Equal(t, true, call.Arguments[2]) + assert.Equal(t, 1, call.ReturnArguments[0]) + assert.Equal(t, "two", call.ReturnArguments[1]) + assert.Equal(t, true, call.ReturnArguments[2]) + assert.Equal(t, 2, call.Repeatability) + assert.Nil(t, call.WaitFor) +} + +func Test_Mock_Return_Times(t *testing.T) { + + // make a test impl object + var mockedService = new(TestExampleImplementation) + + c := mockedService. + On("TheExampleMethod", "A", "B", true). + Return(1, "two", true). + Times(5) + + require.Equal(t, []*Call{c}, mockedService.ExpectedCalls) + + call := mockedService.ExpectedCalls[0] + + assert.Equal(t, "TheExampleMethod", call.Method) + assert.Equal(t, "A", call.Arguments[0]) + assert.Equal(t, "B", call.Arguments[1]) + assert.Equal(t, true, call.Arguments[2]) + assert.Equal(t, 1, call.ReturnArguments[0]) + assert.Equal(t, "two", call.ReturnArguments[1]) + assert.Equal(t, true, call.ReturnArguments[2]) + assert.Equal(t, 5, call.Repeatability) + assert.Nil(t, call.WaitFor) +} + +func Test_Mock_Return_Nothing(t *testing.T) { + + // make a test impl object + var mockedService = new(TestExampleImplementation) + + c := mockedService. + On("TheExampleMethod", "A", "B", true). 
+ Return() + + require.Equal(t, []*Call{c}, mockedService.ExpectedCalls) + + call := mockedService.ExpectedCalls[0] + + assert.Equal(t, "TheExampleMethod", call.Method) + assert.Equal(t, "A", call.Arguments[0]) + assert.Equal(t, "B", call.Arguments[1]) + assert.Equal(t, true, call.Arguments[2]) + assert.Equal(t, 0, len(call.ReturnArguments)) +} + +func Test_Mock_findExpectedCall(t *testing.T) { + + m := new(Mock) + m.On("One", 1).Return("one") + m.On("Two", 2).Return("two") + m.On("Two", 3).Return("three") + + f, c := m.findExpectedCall("Two", 3) + + if assert.Equal(t, 2, f) { + if assert.NotNil(t, c) { + assert.Equal(t, "Two", c.Method) + assert.Equal(t, 3, c.Arguments[0]) + assert.Equal(t, "three", c.ReturnArguments[0]) + } + } + +} + +func Test_Mock_findExpectedCall_For_Unknown_Method(t *testing.T) { + + m := new(Mock) + m.On("One", 1).Return("one") + m.On("Two", 2).Return("two") + m.On("Two", 3).Return("three") + + f, _ := m.findExpectedCall("Two") + + assert.Equal(t, -1, f) + +} + +func Test_Mock_findExpectedCall_Respects_Repeatability(t *testing.T) { + + m := new(Mock) + m.On("One", 1).Return("one") + m.On("Two", 2).Return("two").Once() + m.On("Two", 3).Return("three").Twice() + m.On("Two", 3).Return("three").Times(8) + + f, c := m.findExpectedCall("Two", 3) + + if assert.Equal(t, 2, f) { + if assert.NotNil(t, c) { + assert.Equal(t, "Two", c.Method) + assert.Equal(t, 3, c.Arguments[0]) + assert.Equal(t, "three", c.ReturnArguments[0]) + } + } + +} + +func Test_callString(t *testing.T) { + + assert.Equal(t, `Method(int,bool,string)`, callString("Method", []interface{}{1, true, "something"}, false)) + +} + +func Test_Mock_Called(t *testing.T) { + + var mockedService = new(TestExampleImplementation) + + mockedService.On("Test_Mock_Called", 1, 2, 3).Return(5, "6", true) + + returnArguments := mockedService.Called(1, 2, 3) + + if assert.Equal(t, 1, len(mockedService.Calls)) { + assert.Equal(t, "Test_Mock_Called", mockedService.Calls[0].Method) + assert.Equal(t, 1, 
mockedService.Calls[0].Arguments[0]) + assert.Equal(t, 2, mockedService.Calls[0].Arguments[1]) + assert.Equal(t, 3, mockedService.Calls[0].Arguments[2]) + } + + if assert.Equal(t, 3, len(returnArguments)) { + assert.Equal(t, 5, returnArguments[0]) + assert.Equal(t, "6", returnArguments[1]) + assert.Equal(t, true, returnArguments[2]) + } + +} + +func asyncCall(m *Mock, ch chan Arguments) { + ch <- m.Called(1, 2, 3) +} + +func Test_Mock_Called_blocks(t *testing.T) { + + var mockedService = new(TestExampleImplementation) + + mockedService.Mock.On("asyncCall", 1, 2, 3).Return(5, "6", true).After(2 * time.Millisecond) + + ch := make(chan Arguments) + + go asyncCall(&mockedService.Mock, ch) + + select { + case <-ch: + t.Fatal("should have waited") + case <-time.After(1 * time.Millisecond): + } + + returnArguments := <-ch + + if assert.Equal(t, 1, len(mockedService.Mock.Calls)) { + assert.Equal(t, "asyncCall", mockedService.Mock.Calls[0].Method) + assert.Equal(t, 1, mockedService.Mock.Calls[0].Arguments[0]) + assert.Equal(t, 2, mockedService.Mock.Calls[0].Arguments[1]) + assert.Equal(t, 3, mockedService.Mock.Calls[0].Arguments[2]) + } + + if assert.Equal(t, 3, len(returnArguments)) { + assert.Equal(t, 5, returnArguments[0]) + assert.Equal(t, "6", returnArguments[1]) + assert.Equal(t, true, returnArguments[2]) + } + +} + +func Test_Mock_Called_For_Bounded_Repeatability(t *testing.T) { + + var mockedService = new(TestExampleImplementation) + + mockedService. + On("Test_Mock_Called_For_Bounded_Repeatability", 1, 2, 3). + Return(5, "6", true). + Once() + mockedService. + On("Test_Mock_Called_For_Bounded_Repeatability", 1, 2, 3). 
+ Return(-1, "hi", false) + + returnArguments1 := mockedService.Called(1, 2, 3) + returnArguments2 := mockedService.Called(1, 2, 3) + + if assert.Equal(t, 2, len(mockedService.Calls)) { + assert.Equal(t, "Test_Mock_Called_For_Bounded_Repeatability", mockedService.Calls[0].Method) + assert.Equal(t, 1, mockedService.Calls[0].Arguments[0]) + assert.Equal(t, 2, mockedService.Calls[0].Arguments[1]) + assert.Equal(t, 3, mockedService.Calls[0].Arguments[2]) + + assert.Equal(t, "Test_Mock_Called_For_Bounded_Repeatability", mockedService.Calls[1].Method) + assert.Equal(t, 1, mockedService.Calls[1].Arguments[0]) + assert.Equal(t, 2, mockedService.Calls[1].Arguments[1]) + assert.Equal(t, 3, mockedService.Calls[1].Arguments[2]) + } + + if assert.Equal(t, 3, len(returnArguments1)) { + assert.Equal(t, 5, returnArguments1[0]) + assert.Equal(t, "6", returnArguments1[1]) + assert.Equal(t, true, returnArguments1[2]) + } + + if assert.Equal(t, 3, len(returnArguments2)) { + assert.Equal(t, -1, returnArguments2[0]) + assert.Equal(t, "hi", returnArguments2[1]) + assert.Equal(t, false, returnArguments2[2]) + } + +} + +func Test_Mock_Called_For_SetTime_Expectation(t *testing.T) { + + var mockedService = new(TestExampleImplementation) + + mockedService.On("TheExampleMethod", 1, 2, 3).Return(5, "6", true).Times(4) + + mockedService.TheExampleMethod(1, 2, 3) + mockedService.TheExampleMethod(1, 2, 3) + mockedService.TheExampleMethod(1, 2, 3) + mockedService.TheExampleMethod(1, 2, 3) + assert.Panics(t, func() { + mockedService.TheExampleMethod(1, 2, 3) + }) + +} + +func Test_Mock_Called_Unexpected(t *testing.T) { + + var mockedService = new(TestExampleImplementation) + + // make sure it panics if no expectation was made + assert.Panics(t, func() { + mockedService.Called(1, 2, 3) + }, "Calling unexpected method should panic") + +} + +func Test_AssertExpectationsForObjects_Helper(t *testing.T) { + + var mockedService1 = new(TestExampleImplementation) + var mockedService2 = 
new(TestExampleImplementation) + var mockedService3 = new(TestExampleImplementation) + + mockedService1.On("Test_AssertExpectationsForObjects_Helper", 1).Return() + mockedService2.On("Test_AssertExpectationsForObjects_Helper", 2).Return() + mockedService3.On("Test_AssertExpectationsForObjects_Helper", 3).Return() + + mockedService1.Called(1) + mockedService2.Called(2) + mockedService3.Called(3) + + assert.True(t, AssertExpectationsForObjects(t, mockedService1.Mock, mockedService2.Mock, mockedService3.Mock)) + assert.True(t, AssertExpectationsForObjects(t, mockedService1, mockedService2, mockedService3)) + +} + +func Test_AssertExpectationsForObjects_Helper_Failed(t *testing.T) { + + var mockedService1 = new(TestExampleImplementation) + var mockedService2 = new(TestExampleImplementation) + var mockedService3 = new(TestExampleImplementation) + + mockedService1.On("Test_AssertExpectationsForObjects_Helper_Failed", 1).Return() + mockedService2.On("Test_AssertExpectationsForObjects_Helper_Failed", 2).Return() + mockedService3.On("Test_AssertExpectationsForObjects_Helper_Failed", 3).Return() + + mockedService1.Called(1) + mockedService3.Called(3) + + tt := new(testing.T) + assert.False(t, AssertExpectationsForObjects(tt, mockedService1.Mock, mockedService2.Mock, mockedService3.Mock)) + assert.False(t, AssertExpectationsForObjects(tt, mockedService1, mockedService2, mockedService3)) + +} + +func Test_Mock_AssertExpectations(t *testing.T) { + + var mockedService = new(TestExampleImplementation) + + mockedService.On("Test_Mock_AssertExpectations", 1, 2, 3).Return(5, 6, 7) + + tt := new(testing.T) + assert.False(t, mockedService.AssertExpectations(tt)) + + // make the call now + mockedService.Called(1, 2, 3) + + // now assert expectations + assert.True(t, mockedService.AssertExpectations(tt)) + +} + +func Test_Mock_AssertExpectations_Placeholder_NoArgs(t *testing.T) { + + var mockedService = new(TestExampleImplementation) + + 
mockedService.On("Test_Mock_AssertExpectations_Placeholder_NoArgs").Return(5, 6, 7).Once() + mockedService.On("Test_Mock_AssertExpectations_Placeholder_NoArgs").Return(7, 6, 5) + + tt := new(testing.T) + assert.False(t, mockedService.AssertExpectations(tt)) + + // make the call now + mockedService.Called() + + // now assert expectations + assert.True(t, mockedService.AssertExpectations(tt)) + +} + +func Test_Mock_AssertExpectations_Placeholder(t *testing.T) { + + var mockedService = new(TestExampleImplementation) + + mockedService.On("Test_Mock_AssertExpectations_Placeholder", 1, 2, 3).Return(5, 6, 7).Once() + mockedService.On("Test_Mock_AssertExpectations_Placeholder", 3, 2, 1).Return(7, 6, 5) + + tt := new(testing.T) + assert.False(t, mockedService.AssertExpectations(tt)) + + // make the call now + mockedService.Called(1, 2, 3) + + // now assert expectations + assert.False(t, mockedService.AssertExpectations(tt)) + + // make call to the second expectation + mockedService.Called(3, 2, 1) + + // now assert expectations again + assert.True(t, mockedService.AssertExpectations(tt)) +} + +func Test_Mock_AssertExpectations_With_Pointers(t *testing.T) { + + var mockedService = new(TestExampleImplementation) + + mockedService.On("Test_Mock_AssertExpectations_With_Pointers", &struct{ Foo int }{1}).Return(1) + mockedService.On("Test_Mock_AssertExpectations_With_Pointers", &struct{ Foo int }{2}).Return(2) + + tt := new(testing.T) + assert.False(t, mockedService.AssertExpectations(tt)) + + s := struct{ Foo int }{1} + // make the calls now + mockedService.Called(&s) + s.Foo = 2 + mockedService.Called(&s) + + // now assert expectations + assert.True(t, mockedService.AssertExpectations(tt)) + +} + +func Test_Mock_AssertExpectationsCustomType(t *testing.T) { + + var mockedService = new(TestExampleImplementation) + + mockedService.On("TheExampleMethod3", AnythingOfType("*mock.ExampleType")).Return(nil).Once() + + tt := new(testing.T) + assert.False(t, 
mockedService.AssertExpectations(tt)) + + // make the call now + mockedService.TheExampleMethod3(&ExampleType{}) + + // now assert expectations + assert.True(t, mockedService.AssertExpectations(tt)) + +} + +func Test_Mock_AssertExpectations_With_Repeatability(t *testing.T) { + + var mockedService = new(TestExampleImplementation) + + mockedService.On("Test_Mock_AssertExpectations_With_Repeatability", 1, 2, 3).Return(5, 6, 7).Twice() + + tt := new(testing.T) + assert.False(t, mockedService.AssertExpectations(tt)) + + // make the call now + mockedService.Called(1, 2, 3) + + assert.False(t, mockedService.AssertExpectations(tt)) + + mockedService.Called(1, 2, 3) + + // now assert expectations + assert.True(t, mockedService.AssertExpectations(tt)) + +} + +func Test_Mock_TwoCallsWithDifferentArguments(t *testing.T) { + + var mockedService = new(TestExampleImplementation) + + mockedService.On("Test_Mock_TwoCallsWithDifferentArguments", 1, 2, 3).Return(5, 6, 7) + mockedService.On("Test_Mock_TwoCallsWithDifferentArguments", 4, 5, 6).Return(5, 6, 7) + + args1 := mockedService.Called(1, 2, 3) + assert.Equal(t, 5, args1.Int(0)) + assert.Equal(t, 6, args1.Int(1)) + assert.Equal(t, 7, args1.Int(2)) + + args2 := mockedService.Called(4, 5, 6) + assert.Equal(t, 5, args2.Int(0)) + assert.Equal(t, 6, args2.Int(1)) + assert.Equal(t, 7, args2.Int(2)) + +} + +func Test_Mock_AssertNumberOfCalls(t *testing.T) { + + var mockedService = new(TestExampleImplementation) + + mockedService.On("Test_Mock_AssertNumberOfCalls", 1, 2, 3).Return(5, 6, 7) + + mockedService.Called(1, 2, 3) + assert.True(t, mockedService.AssertNumberOfCalls(t, "Test_Mock_AssertNumberOfCalls", 1)) + + mockedService.Called(1, 2, 3) + assert.True(t, mockedService.AssertNumberOfCalls(t, "Test_Mock_AssertNumberOfCalls", 2)) + +} + +func Test_Mock_AssertCalled(t *testing.T) { + + var mockedService = new(TestExampleImplementation) + + mockedService.On("Test_Mock_AssertCalled", 1, 2, 3).Return(5, 6, 7) + + 
mockedService.Called(1, 2, 3) + + assert.True(t, mockedService.AssertCalled(t, "Test_Mock_AssertCalled", 1, 2, 3)) + +} + +func Test_Mock_AssertCalled_WithAnythingOfTypeArgument(t *testing.T) { + + var mockedService = new(TestExampleImplementation) + + mockedService. + On("Test_Mock_AssertCalled_WithAnythingOfTypeArgument", Anything, Anything, Anything). + Return() + + mockedService.Called(1, "two", []uint8("three")) + + assert.True(t, mockedService.AssertCalled(t, "Test_Mock_AssertCalled_WithAnythingOfTypeArgument", AnythingOfType("int"), AnythingOfType("string"), AnythingOfType("[]uint8"))) + +} + +func Test_Mock_AssertCalled_WithArguments(t *testing.T) { + + var mockedService = new(TestExampleImplementation) + + mockedService.On("Test_Mock_AssertCalled_WithArguments", 1, 2, 3).Return(5, 6, 7) + + mockedService.Called(1, 2, 3) + + tt := new(testing.T) + assert.True(t, mockedService.AssertCalled(tt, "Test_Mock_AssertCalled_WithArguments", 1, 2, 3)) + assert.False(t, mockedService.AssertCalled(tt, "Test_Mock_AssertCalled_WithArguments", 2, 3, 4)) + +} + +func Test_Mock_AssertCalled_WithArguments_With_Repeatability(t *testing.T) { + + var mockedService = new(TestExampleImplementation) + + mockedService.On("Test_Mock_AssertCalled_WithArguments_With_Repeatability", 1, 2, 3).Return(5, 6, 7).Once() + mockedService.On("Test_Mock_AssertCalled_WithArguments_With_Repeatability", 2, 3, 4).Return(5, 6, 7).Once() + + mockedService.Called(1, 2, 3) + mockedService.Called(2, 3, 4) + + tt := new(testing.T) + assert.True(t, mockedService.AssertCalled(tt, "Test_Mock_AssertCalled_WithArguments_With_Repeatability", 1, 2, 3)) + assert.True(t, mockedService.AssertCalled(tt, "Test_Mock_AssertCalled_WithArguments_With_Repeatability", 2, 3, 4)) + assert.False(t, mockedService.AssertCalled(tt, "Test_Mock_AssertCalled_WithArguments_With_Repeatability", 3, 4, 5)) + +} + +func Test_Mock_AssertNotCalled(t *testing.T) { + + var mockedService = new(TestExampleImplementation) + + 
mockedService.On("Test_Mock_AssertNotCalled", 1, 2, 3).Return(5, 6, 7) + + mockedService.Called(1, 2, 3) + + assert.True(t, mockedService.AssertNotCalled(t, "Test_Mock_NotCalled")) + +} + +/* + Arguments helper methods +*/ +func Test_Arguments_Get(t *testing.T) { + + var args = Arguments([]interface{}{"string", 123, true}) + + assert.Equal(t, "string", args.Get(0).(string)) + assert.Equal(t, 123, args.Get(1).(int)) + assert.Equal(t, true, args.Get(2).(bool)) + +} + +func Test_Arguments_Is(t *testing.T) { + + var args = Arguments([]interface{}{"string", 123, true}) + + assert.True(t, args.Is("string", 123, true)) + assert.False(t, args.Is("wrong", 456, false)) + +} + +func Test_Arguments_Diff(t *testing.T) { + + var args = Arguments([]interface{}{"Hello World", 123, true}) + var diff string + var count int + diff, count = args.Diff([]interface{}{"Hello World", 456, "false"}) + + assert.Equal(t, 2, count) + assert.Contains(t, diff, `%!s(int=456) != %!s(int=123)`) + assert.Contains(t, diff, `false != %!s(bool=true)`) + +} + +func Test_Arguments_Diff_DifferentNumberOfArgs(t *testing.T) { + + var args = Arguments([]interface{}{"string", 123, true}) + var diff string + var count int + diff, count = args.Diff([]interface{}{"string", 456, "false", "extra"}) + + assert.Equal(t, 3, count) + assert.Contains(t, diff, `extra != (Missing)`) + +} + +func Test_Arguments_Diff_WithAnythingArgument(t *testing.T) { + + var args = Arguments([]interface{}{"string", 123, true}) + var count int + _, count = args.Diff([]interface{}{"string", Anything, true}) + + assert.Equal(t, 0, count) + +} + +func Test_Arguments_Diff_WithAnythingArgument_InActualToo(t *testing.T) { + + var args = Arguments([]interface{}{"string", Anything, true}) + var count int + _, count = args.Diff([]interface{}{"string", 123, true}) + + assert.Equal(t, 0, count) + +} + +func Test_Arguments_Diff_WithAnythingOfTypeArgument(t *testing.T) { + + var args = Arguments([]interface{}{"string", AnythingOfType("int"), true}) + 
var count int + _, count = args.Diff([]interface{}{"string", 123, true}) + + assert.Equal(t, 0, count) + +} + +func Test_Arguments_Diff_WithAnythingOfTypeArgument_Failing(t *testing.T) { + + var args = Arguments([]interface{}{"string", AnythingOfType("string"), true}) + var count int + var diff string + diff, count = args.Diff([]interface{}{"string", 123, true}) + + assert.Equal(t, 1, count) + assert.Contains(t, diff, `string != type int - %!s(int=123)`) + +} + +func Test_Arguments_Diff_WithArgMatcher(t *testing.T) { + matchFn := func(a int) bool { + return a == 123 + } + var args = Arguments([]interface{}{"string", MatchedBy(matchFn), true}) + + diff, count := args.Diff([]interface{}{"string", 124, true}) + assert.Equal(t, 1, count) + assert.Contains(t, diff, `%!s(int=124) not matched by func(int) bool`) + + diff, count = args.Diff([]interface{}{"string", false, true}) + assert.Equal(t, 1, count) + assert.Contains(t, diff, `%!s(bool=false) not matched by func(int) bool`) + + diff, count = args.Diff([]interface{}{"string", 123, false}) + assert.Contains(t, diff, `%!s(int=123) matched by func(int) bool`) + + diff, count = args.Diff([]interface{}{"string", 123, true}) + assert.Equal(t, 0, count) + assert.Contains(t, diff, `No differences.`) +} + +func Test_Arguments_Assert(t *testing.T) { + + var args = Arguments([]interface{}{"string", 123, true}) + + assert.True(t, args.Assert(t, "string", 123, true)) + +} + +func Test_Arguments_String_Representation(t *testing.T) { + + var args = Arguments([]interface{}{"string", 123, true}) + assert.Equal(t, `string,int,bool`, args.String()) + +} + +func Test_Arguments_String(t *testing.T) { + + var args = Arguments([]interface{}{"string", 123, true}) + assert.Equal(t, "string", args.String(0)) + +} + +func Test_Arguments_Error(t *testing.T) { + + var err = errors.New("An Error") + var args = Arguments([]interface{}{"string", 123, true, err}) + assert.Equal(t, err, args.Error(3)) + +} + +func Test_Arguments_Error_Nil(t 
*testing.T) { + + var args = Arguments([]interface{}{"string", 123, true, nil}) + assert.Equal(t, nil, args.Error(3)) + +} + +func Test_Arguments_Int(t *testing.T) { + + var args = Arguments([]interface{}{"string", 123, true}) + assert.Equal(t, 123, args.Int(1)) + +} + +func Test_Arguments_Bool(t *testing.T) { + + var args = Arguments([]interface{}{"string", 123, true}) + assert.Equal(t, true, args.Bool(2)) + +} diff --git a/src/vendor/github.com/stretchr/testify/package_test.go b/src/vendor/github.com/stretchr/testify/package_test.go new file mode 100644 index 00000000..7ac5d6d8 --- /dev/null +++ b/src/vendor/github.com/stretchr/testify/package_test.go @@ -0,0 +1,12 @@ +package testify + +import ( + "github.com/stretchr/testify/assert" + "testing" +) + +func TestImports(t *testing.T) { + if assert.Equal(t, 1, 1) != true { + t.Error("Something is wrong.") + } +} diff --git a/src/vendor/github.com/stretchr/testify/require/doc.go b/src/vendor/github.com/stretchr/testify/require/doc.go new file mode 100644 index 00000000..169de392 --- /dev/null +++ b/src/vendor/github.com/stretchr/testify/require/doc.go @@ -0,0 +1,28 @@ +// Package require implements the same assertions as the `assert` package but +// stops test execution when a test fails. +// +// Example Usage +// +// The following is a complete example using require in a standard test function: +// import ( +// "testing" +// "github.com/stretchr/testify/require" +// ) +// +// func TestSomething(t *testing.T) { +// +// var a string = "Hello" +// var b string = "Hello" +// +// require.Equal(t, a, b, "The two words should be the same.") +// +// } +// +// Assertions +// +// The `require` package have same global functions as in the `assert` package, +// but instead of returning a boolean result they call `t.FailNow()`. +// +// Every assertion function also takes an optional string message as the final argument, +// allowing custom error messages to be appended to the message the assertion method outputs. 
+package require diff --git a/src/vendor/github.com/stretchr/testify/require/forward_requirements.go b/src/vendor/github.com/stretchr/testify/require/forward_requirements.go new file mode 100644 index 00000000..d3c2ab9b --- /dev/null +++ b/src/vendor/github.com/stretchr/testify/require/forward_requirements.go @@ -0,0 +1,16 @@ +package require + +// Assertions provides assertion methods around the +// TestingT interface. +type Assertions struct { + t TestingT +} + +// New makes a new Assertions object for the specified TestingT. +func New(t TestingT) *Assertions { + return &Assertions{ + t: t, + } +} + +//go:generate go run ../_codegen/main.go -output-package=require -template=require_forward.go.tmpl diff --git a/src/vendor/github.com/stretchr/testify/require/forward_requirements_test.go b/src/vendor/github.com/stretchr/testify/require/forward_requirements_test.go new file mode 100644 index 00000000..b120ae3b --- /dev/null +++ b/src/vendor/github.com/stretchr/testify/require/forward_requirements_test.go @@ -0,0 +1,385 @@ +package require + +import ( + "errors" + "testing" + "time" +) + +func TestImplementsWrapper(t *testing.T) { + require := New(t) + + require.Implements((*AssertionTesterInterface)(nil), new(AssertionTesterConformingObject)) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.Implements((*AssertionTesterInterface)(nil), new(AssertionTesterNonConformingObject)) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestIsTypeWrapper(t *testing.T) { + require := New(t) + require.IsType(new(AssertionTesterConformingObject), new(AssertionTesterConformingObject)) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.IsType(new(AssertionTesterConformingObject), new(AssertionTesterNonConformingObject)) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestEqualWrapper(t *testing.T) { + require := New(t) + require.Equal(1, 1) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.Equal(1, 2) 
+ if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestNotEqualWrapper(t *testing.T) { + require := New(t) + require.NotEqual(1, 2) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.NotEqual(2, 2) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestExactlyWrapper(t *testing.T) { + require := New(t) + + a := float32(1) + b := float32(1) + c := float64(1) + + require.Exactly(a, b) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.Exactly(a, c) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestNotNilWrapper(t *testing.T) { + require := New(t) + require.NotNil(t, new(AssertionTesterConformingObject)) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.NotNil(nil) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestNilWrapper(t *testing.T) { + require := New(t) + require.Nil(nil) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.Nil(new(AssertionTesterConformingObject)) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestTrueWrapper(t *testing.T) { + require := New(t) + require.True(true) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.True(false) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestFalseWrapper(t *testing.T) { + require := New(t) + require.False(false) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.False(true) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestContainsWrapper(t *testing.T) { + require := New(t) + require.Contains("Hello World", "Hello") + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.Contains("Hello World", "Salut") + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestNotContainsWrapper(t *testing.T) { + require := New(t) + require.NotContains("Hello World", "Hello!") + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.NotContains("Hello World", "Hello") + if 
!mockT.Failed { + t.Error("Check should fail") + } +} + +func TestPanicsWrapper(t *testing.T) { + require := New(t) + require.Panics(func() { + panic("Panic!") + }) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.Panics(func() {}) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestNotPanicsWrapper(t *testing.T) { + require := New(t) + require.NotPanics(func() {}) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.NotPanics(func() { + panic("Panic!") + }) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestNoErrorWrapper(t *testing.T) { + require := New(t) + require.NoError(nil) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.NoError(errors.New("some error")) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestErrorWrapper(t *testing.T) { + require := New(t) + require.Error(errors.New("some error")) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.Error(nil) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestEqualErrorWrapper(t *testing.T) { + require := New(t) + require.EqualError(errors.New("some error"), "some error") + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.EqualError(errors.New("some error"), "Not some error") + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestEmptyWrapper(t *testing.T) { + require := New(t) + require.Empty("") + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.Empty("x") + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestNotEmptyWrapper(t *testing.T) { + require := New(t) + require.NotEmpty("x") + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.NotEmpty("") + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestWithinDurationWrapper(t *testing.T) { + require := New(t) + a := time.Now() + b := a.Add(10 * time.Second) + + require.WithinDuration(a, b, 15*time.Second) + + mockT := new(MockT) + 
mockRequire := New(mockT) + mockRequire.WithinDuration(a, b, 5*time.Second) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestInDeltaWrapper(t *testing.T) { + require := New(t) + require.InDelta(1.001, 1, 0.01) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.InDelta(1, 2, 0.5) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestZeroWrapper(t *testing.T) { + require := New(t) + require.Zero(0) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.Zero(1) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestNotZeroWrapper(t *testing.T) { + require := New(t) + require.NotZero(1) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.NotZero(0) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestJSONEqWrapper_EqualSONString(t *testing.T) { + mockT := new(MockT) + mockRequire := New(mockT) + + mockRequire.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"hello": "world", "foo": "bar"}`) + if mockT.Failed { + t.Error("Check should pass") + } +} + +func TestJSONEqWrapper_EquivalentButNotEqual(t *testing.T) { + mockT := new(MockT) + mockRequire := New(mockT) + + mockRequire.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) + if mockT.Failed { + t.Error("Check should pass") + } +} + +func TestJSONEqWrapper_HashOfArraysAndHashes(t *testing.T) { + mockT := new(MockT) + mockRequire := New(mockT) + + mockRequire.JSONEq("{\r\n\t\"numeric\": 1.5,\r\n\t\"array\": [{\"foo\": \"bar\"}, 1, \"string\", [\"nested\", \"array\", 5.5]],\r\n\t\"hash\": {\"nested\": \"hash\", \"nested_slice\": [\"this\", \"is\", \"nested\"]},\r\n\t\"string\": \"foo\"\r\n}", + "{\r\n\t\"numeric\": 1.5,\r\n\t\"hash\": {\"nested\": \"hash\", \"nested_slice\": [\"this\", \"is\", \"nested\"]},\r\n\t\"string\": \"foo\",\r\n\t\"array\": [{\"foo\": \"bar\"}, 1, \"string\", [\"nested\", \"array\", 5.5]]\r\n}") + if mockT.Failed { + t.Error("Check should pass") + } +} + +func 
TestJSONEqWrapper_Array(t *testing.T) { + mockT := new(MockT) + mockRequire := New(mockT) + + mockRequire.JSONEq(`["foo", {"hello": "world", "nested": "hash"}]`, `["foo", {"nested": "hash", "hello": "world"}]`) + if mockT.Failed { + t.Error("Check should pass") + } +} + +func TestJSONEqWrapper_HashAndArrayNotEquivalent(t *testing.T) { + mockT := new(MockT) + mockRequire := New(mockT) + + mockRequire.JSONEq(`["foo", {"hello": "world", "nested": "hash"}]`, `{"foo": "bar", {"nested": "hash", "hello": "world"}}`) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestJSONEqWrapper_HashesNotEquivalent(t *testing.T) { + mockT := new(MockT) + mockRequire := New(mockT) + + mockRequire.JSONEq(`{"foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestJSONEqWrapper_ActualIsNotJSON(t *testing.T) { + mockT := new(MockT) + mockRequire := New(mockT) + + mockRequire.JSONEq(`{"foo": "bar"}`, "Not JSON") + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestJSONEqWrapper_ExpectedIsNotJSON(t *testing.T) { + mockT := new(MockT) + mockRequire := New(mockT) + + mockRequire.JSONEq("Not JSON", `{"foo": "bar", "hello": "world"}`) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestJSONEqWrapper_ExpectedAndActualNotJSON(t *testing.T) { + mockT := new(MockT) + mockRequire := New(mockT) + + mockRequire.JSONEq("Not JSON", "Not JSON") + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestJSONEqWrapper_ArraysOfDifferentOrder(t *testing.T) { + mockT := new(MockT) + mockRequire := New(mockT) + + mockRequire.JSONEq(`["foo", {"hello": "world", "nested": "hash"}]`, `[{ "hello": "world", "nested": "hash"}, "foo"]`) + if !mockT.Failed { + t.Error("Check should fail") + } +} diff --git a/src/vendor/github.com/stretchr/testify/require/require.go b/src/vendor/github.com/stretchr/testify/require/require.go new file mode 100644 index 00000000..a0c40450 --- /dev/null +++ 
b/src/vendor/github.com/stretchr/testify/require/require.go @@ -0,0 +1,423 @@ +/* +* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen +* THIS FILE MUST NOT BE EDITED BY HAND + */ + +package require + +import ( + assert "github.com/stretchr/testify/assert" + http "net/http" + url "net/url" + time "time" +) + +// Condition uses a Comparison to assert a complex condition. +func Condition(t TestingT, comp assert.Comparison, msgAndArgs ...interface{}) { + if !assert.Condition(t, comp, msgAndArgs...) { + t.FailNow() + } +} + +// Contains asserts that the specified string, list(array, slice...) or map contains the +// specified substring or element. +// +// assert.Contains(t, "Hello World", "World", "But 'Hello World' does contain 'World'") +// assert.Contains(t, ["Hello", "World"], "World", "But ["Hello", "World"] does contain 'World'") +// assert.Contains(t, {"Hello": "World"}, "Hello", "But {'Hello': 'World'} does contain 'Hello'") +// +// Returns whether the assertion was successful (true) or not (false). +func Contains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...interface{}) { + if !assert.Contains(t, s, contains, msgAndArgs...) { + t.FailNow() + } +} + +// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// assert.Empty(t, obj) +// +// Returns whether the assertion was successful (true) or not (false). +func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) { + if !assert.Empty(t, object, msgAndArgs...) { + t.FailNow() + } +} + +// Equal asserts that two objects are equal. +// +// assert.Equal(t, 123, 123, "123 and 123 should be equal") +// +// Returns whether the assertion was successful (true) or not (false). +func Equal(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { + if !assert.Equal(t, expected, actual, msgAndArgs...) 
{ + t.FailNow() + } +} + +// EqualError asserts that a function returned an error (i.e. not `nil`) +// and that it is equal to the provided error. +// +// actualObj, err := SomeFunction() +// assert.EqualError(t, err, expectedErrorString, "An error was expected") +// +// Returns whether the assertion was successful (true) or not (false). +func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) { + if !assert.EqualError(t, theError, errString, msgAndArgs...) { + t.FailNow() + } +} + +// EqualValues asserts that two objects are equal or convertable to the same types +// and equal. +// +// assert.EqualValues(t, uint32(123), int32(123), "123 and 123 should be equal") +// +// Returns whether the assertion was successful (true) or not (false). +func EqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { + if !assert.EqualValues(t, expected, actual, msgAndArgs...) { + t.FailNow() + } +} + +// Error asserts that a function returned an error (i.e. not `nil`). +// +// actualObj, err := SomeFunction() +// if assert.Error(t, err, "An error was expected") { +// assert.Equal(t, err, expectedError) +// } +// +// Returns whether the assertion was successful (true) or not (false). +func Error(t TestingT, err error, msgAndArgs ...interface{}) { + if !assert.Error(t, err, msgAndArgs...) { + t.FailNow() + } +} + +// Exactly asserts that two objects are equal is value and type. +// +// assert.Exactly(t, int32(123), int64(123), "123 and 123 should NOT be equal") +// +// Returns whether the assertion was successful (true) or not (false). +func Exactly(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { + if !assert.Exactly(t, expected, actual, msgAndArgs...) { + t.FailNow() + } +} + +// Fail reports a failure through +func Fail(t TestingT, failureMessage string, msgAndArgs ...interface{}) { + if !assert.Fail(t, failureMessage, msgAndArgs...) 
{ + t.FailNow() + } +} + +// FailNow fails test +func FailNow(t TestingT, failureMessage string, msgAndArgs ...interface{}) { + if !assert.FailNow(t, failureMessage, msgAndArgs...) { + t.FailNow() + } +} + +// False asserts that the specified value is false. +// +// assert.False(t, myBool, "myBool should be false") +// +// Returns whether the assertion was successful (true) or not (false). +func False(t TestingT, value bool, msgAndArgs ...interface{}) { + if !assert.False(t, value, msgAndArgs...) { + t.FailNow() + } +} + +// HTTPBodyContains asserts that a specified handler returns a +// body that contains a string. +// +// assert.HTTPBodyContains(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky") +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) { + if !assert.HTTPBodyContains(t, handler, method, url, values, str) { + t.FailNow() + } +} + +// HTTPBodyNotContains asserts that a specified handler returns a +// body that does not contain a string. +// +// assert.HTTPBodyNotContains(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky") +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) { + if !assert.HTTPBodyNotContains(t, handler, method, url, values, str) { + t.FailNow() + } +} + +// HTTPError asserts that a specified handler returns an error status code. +// +// assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true) or not (false). 
+func HTTPError(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values) { + if !assert.HTTPError(t, handler, method, url, values) { + t.FailNow() + } +} + +// HTTPRedirect asserts that a specified handler returns a redirect status code. +// +// assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPRedirect(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values) { + if !assert.HTTPRedirect(t, handler, method, url, values) { + t.FailNow() + } +} + +// HTTPSuccess asserts that a specified handler returns a success status code. +// +// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil) +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPSuccess(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values) { + if !assert.HTTPSuccess(t, handler, method, url, values) { + t.FailNow() + } +} + +// Implements asserts that an object is implemented by the specified interface. +// +// assert.Implements(t, (*MyInterface)(nil), new(MyObject), "MyObject") +func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) { + if !assert.Implements(t, interfaceObject, object, msgAndArgs...) { + t.FailNow() + } +} + +// InDelta asserts that the two numerals are within delta of each other. +// +// assert.InDelta(t, math.Pi, (22 / 7.0), 0.01) +// +// Returns whether the assertion was successful (true) or not (false). +func InDelta(t TestingT, expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) { + if !assert.InDelta(t, expected, actual, delta, msgAndArgs...) { + t.FailNow() + } +} + +// InDeltaSlice is the same as InDelta, except it compares two slices. 
+func InDeltaSlice(t TestingT, expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) { + if !assert.InDeltaSlice(t, expected, actual, delta, msgAndArgs...) { + t.FailNow() + } +} + +// InEpsilon asserts that expected and actual have a relative error less than epsilon +// +// Returns whether the assertion was successful (true) or not (false). +func InEpsilon(t TestingT, expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) { + if !assert.InEpsilon(t, expected, actual, epsilon, msgAndArgs...) { + t.FailNow() + } +} + +// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices. +func InEpsilonSlice(t TestingT, expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) { + if !assert.InEpsilonSlice(t, expected, actual, epsilon, msgAndArgs...) { + t.FailNow() + } +} + +// IsType asserts that the specified objects are of the same type. +func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) { + if !assert.IsType(t, expectedType, object, msgAndArgs...) { + t.FailNow() + } +} + +// JSONEq asserts that two JSON strings are equivalent. +// +// assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) +// +// Returns whether the assertion was successful (true) or not (false). +func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) { + if !assert.JSONEq(t, expected, actual, msgAndArgs...) { + t.FailNow() + } +} + +// Len asserts that the specified object has specific length. +// Len also fails if the object has a type that len() not accept. +// +// assert.Len(t, mySlice, 3, "The size of slice is not 3") +// +// Returns whether the assertion was successful (true) or not (false). +func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) { + if !assert.Len(t, object, length, msgAndArgs...) 
{ + t.FailNow() + } +} + +// Nil asserts that the specified object is nil. +// +// assert.Nil(t, err, "err should be nothing") +// +// Returns whether the assertion was successful (true) or not (false). +func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) { + if !assert.Nil(t, object, msgAndArgs...) { + t.FailNow() + } +} + +// NoError asserts that a function returned no error (i.e. `nil`). +// +// actualObj, err := SomeFunction() +// if assert.NoError(t, err) { +// assert.Equal(t, actualObj, expectedObj) +// } +// +// Returns whether the assertion was successful (true) or not (false). +func NoError(t TestingT, err error, msgAndArgs ...interface{}) { + if !assert.NoError(t, err, msgAndArgs...) { + t.FailNow() + } +} + +// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the +// specified substring or element. +// +// assert.NotContains(t, "Hello World", "Earth", "But 'Hello World' does NOT contain 'Earth'") +// assert.NotContains(t, ["Hello", "World"], "Earth", "But ['Hello', 'World'] does NOT contain 'Earth'") +// assert.NotContains(t, {"Hello": "World"}, "Earth", "But {'Hello': 'World'} does NOT contain 'Earth'") +// +// Returns whether the assertion was successful (true) or not (false). +func NotContains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...interface{}) { + if !assert.NotContains(t, s, contains, msgAndArgs...) { + t.FailNow() + } +} + +// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// if assert.NotEmpty(t, obj) { +// assert.Equal(t, "two", obj[1]) +// } +// +// Returns whether the assertion was successful (true) or not (false). +func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) { + if !assert.NotEmpty(t, object, msgAndArgs...) { + t.FailNow() + } +} + +// NotEqual asserts that the specified values are NOT equal. 
+// +// assert.NotEqual(t, obj1, obj2, "two objects shouldn't be equal") +// +// Returns whether the assertion was successful (true) or not (false). +func NotEqual(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { + if !assert.NotEqual(t, expected, actual, msgAndArgs...) { + t.FailNow() + } +} + +// NotNil asserts that the specified object is not nil. +// +// assert.NotNil(t, err, "err should be something") +// +// Returns whether the assertion was successful (true) or not (false). +func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) { + if !assert.NotNil(t, object, msgAndArgs...) { + t.FailNow() + } +} + +// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. +// +// assert.NotPanics(t, func(){ +// RemainCalm() +// }, "Calling RemainCalm() should NOT panic") +// +// Returns whether the assertion was successful (true) or not (false). +func NotPanics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) { + if !assert.NotPanics(t, f, msgAndArgs...) { + t.FailNow() + } +} + +// NotRegexp asserts that a specified regexp does not match a string. +// +// assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting") +// assert.NotRegexp(t, "^start", "it's not starting") +// +// Returns whether the assertion was successful (true) or not (false). +func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) { + if !assert.NotRegexp(t, rx, str, msgAndArgs...) { + t.FailNow() + } +} + +// NotZero asserts that i is not the zero value for its type and returns the truth. +func NotZero(t TestingT, i interface{}, msgAndArgs ...interface{}) { + if !assert.NotZero(t, i, msgAndArgs...) { + t.FailNow() + } +} + +// Panics asserts that the code inside the specified PanicTestFunc panics. +// +// assert.Panics(t, func(){ +// GoCrazy() +// }, "Calling GoCrazy() should panic") +// +// Returns whether the assertion was successful (true) or not (false). 
+func Panics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) { + if !assert.Panics(t, f, msgAndArgs...) { + t.FailNow() + } +} + +// Regexp asserts that a specified regexp matches a string. +// +// assert.Regexp(t, regexp.MustCompile("start"), "it's starting") +// assert.Regexp(t, "start...$", "it's not starting") +// +// Returns whether the assertion was successful (true) or not (false). +func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) { + if !assert.Regexp(t, rx, str, msgAndArgs...) { + t.FailNow() + } +} + +// True asserts that the specified value is true. +// +// assert.True(t, myBool, "myBool should be true") +// +// Returns whether the assertion was successful (true) or not (false). +func True(t TestingT, value bool, msgAndArgs ...interface{}) { + if !assert.True(t, value, msgAndArgs...) { + t.FailNow() + } +} + +// WithinDuration asserts that the two times are within duration delta of each other. +// +// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second, "The difference should not be more than 10s") +// +// Returns whether the assertion was successful (true) or not (false). +func WithinDuration(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) { + if !assert.WithinDuration(t, expected, actual, delta, msgAndArgs...) { + t.FailNow() + } +} + +// Zero asserts that i is the zero value for its type and returns the truth. +func Zero(t TestingT, i interface{}, msgAndArgs ...interface{}) { + if !assert.Zero(t, i, msgAndArgs...) 
{ + t.FailNow() + } +} diff --git a/src/vendor/github.com/stretchr/testify/require/require.go.tmpl b/src/vendor/github.com/stretchr/testify/require/require.go.tmpl new file mode 100644 index 00000000..d2c38f6f --- /dev/null +++ b/src/vendor/github.com/stretchr/testify/require/require.go.tmpl @@ -0,0 +1,6 @@ +{{.Comment}} +func {{.DocInfo.Name}}(t TestingT, {{.Params}}) { + if !assert.{{.DocInfo.Name}}(t, {{.ForwardedParams}}) { + t.FailNow() + } +} diff --git a/src/vendor/github.com/stretchr/testify/require/require_forward.go b/src/vendor/github.com/stretchr/testify/require/require_forward.go new file mode 100644 index 00000000..83e9842e --- /dev/null +++ b/src/vendor/github.com/stretchr/testify/require/require_forward.go @@ -0,0 +1,347 @@ +/* +* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen +* THIS FILE MUST NOT BE EDITED BY HAND + */ + +package require + +import ( + assert "github.com/stretchr/testify/assert" + http "net/http" + url "net/url" + time "time" +) + +// Condition uses a Comparison to assert a complex condition. +func (a *Assertions) Condition(comp assert.Comparison, msgAndArgs ...interface{}) { + Condition(a.t, comp, msgAndArgs...) +} + +// Contains asserts that the specified string, list(array, slice...) or map contains the +// specified substring or element. +// +// a.Contains("Hello World", "World", "But 'Hello World' does contain 'World'") +// a.Contains(["Hello", "World"], "World", "But ["Hello", "World"] does contain 'World'") +// a.Contains({"Hello": "World"}, "Hello", "But {'Hello': 'World'} does contain 'Hello'") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs ...interface{}) { + Contains(a.t, s, contains, msgAndArgs...) +} + +// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either +// a slice or a channel with len == 0. 
+// +// a.Empty(obj) +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) { + Empty(a.t, object, msgAndArgs...) +} + +// Equal asserts that two objects are equal. +// +// a.Equal(123, 123, "123 and 123 should be equal") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Equal(expected interface{}, actual interface{}, msgAndArgs ...interface{}) { + Equal(a.t, expected, actual, msgAndArgs...) +} + +// EqualError asserts that a function returned an error (i.e. not `nil`) +// and that it is equal to the provided error. +// +// actualObj, err := SomeFunction() +// a.EqualError(err, expectedErrorString, "An error was expected") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) { + EqualError(a.t, theError, errString, msgAndArgs...) +} + +// EqualValues asserts that two objects are equal or convertable to the same types +// and equal. +// +// a.EqualValues(uint32(123), int32(123), "123 and 123 should be equal") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) { + EqualValues(a.t, expected, actual, msgAndArgs...) +} + +// Error asserts that a function returned an error (i.e. not `nil`). +// +// actualObj, err := SomeFunction() +// if a.Error(err, "An error was expected") { +// assert.Equal(t, err, expectedError) +// } +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Error(err error, msgAndArgs ...interface{}) { + Error(a.t, err, msgAndArgs...) +} + +// Exactly asserts that two objects are equal is value and type. 
+// +// a.Exactly(int32(123), int64(123), "123 and 123 should NOT be equal") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArgs ...interface{}) { + Exactly(a.t, expected, actual, msgAndArgs...) +} + +// Fail reports a failure through +func (a *Assertions) Fail(failureMessage string, msgAndArgs ...interface{}) { + Fail(a.t, failureMessage, msgAndArgs...) +} + +// FailNow fails test +func (a *Assertions) FailNow(failureMessage string, msgAndArgs ...interface{}) { + FailNow(a.t, failureMessage, msgAndArgs...) +} + +// False asserts that the specified value is false. +// +// a.False(myBool, "myBool should be false") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) False(value bool, msgAndArgs ...interface{}) { + False(a.t, value, msgAndArgs...) +} + +// HTTPBodyContains asserts that a specified handler returns a +// body that contains a string. +// +// a.HTTPBodyContains(myHandler, "www.google.com", nil, "I'm Feeling Lucky") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) { + HTTPBodyContains(a.t, handler, method, url, values, str) +} + +// HTTPBodyNotContains asserts that a specified handler returns a +// body that does not contain a string. +// +// a.HTTPBodyNotContains(myHandler, "www.google.com", nil, "I'm Feeling Lucky") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) { + HTTPBodyNotContains(a.t, handler, method, url, values, str) +} + +// HTTPError asserts that a specified handler returns an error status code. 
+// +// a.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url string, values url.Values) { + HTTPError(a.t, handler, method, url, values) +} + +// HTTPRedirect asserts that a specified handler returns a redirect status code. +// +// a.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url string, values url.Values) { + HTTPRedirect(a.t, handler, method, url, values) +} + +// HTTPSuccess asserts that a specified handler returns a success status code. +// +// a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil) +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url string, values url.Values) { + HTTPSuccess(a.t, handler, method, url, values) +} + +// Implements asserts that an object is implemented by the specified interface. +// +// a.Implements((*MyInterface)(nil), new(MyObject), "MyObject") +func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) { + Implements(a.t, interfaceObject, object, msgAndArgs...) +} + +// InDelta asserts that the two numerals are within delta of each other. +// +// a.InDelta(math.Pi, (22 / 7.0), 0.01) +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) { + InDelta(a.t, expected, actual, delta, msgAndArgs...) +} + +// InDeltaSlice is the same as InDelta, except it compares two slices. 
+func (a *Assertions) InDeltaSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) { + InDeltaSlice(a.t, expected, actual, delta, msgAndArgs...) +} + +// InEpsilon asserts that expected and actual have a relative error less than epsilon +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) InEpsilon(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) { + InEpsilon(a.t, expected, actual, epsilon, msgAndArgs...) +} + +// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices. +func (a *Assertions) InEpsilonSlice(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) { + InEpsilonSlice(a.t, expected, actual, epsilon, msgAndArgs...) +} + +// IsType asserts that the specified objects are of the same type. +func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) { + IsType(a.t, expectedType, object, msgAndArgs...) +} + +// JSONEq asserts that two JSON strings are equivalent. +// +// a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interface{}) { + JSONEq(a.t, expected, actual, msgAndArgs...) +} + +// Len asserts that the specified object has specific length. +// Len also fails if the object has a type that len() not accept. +// +// a.Len(mySlice, 3, "The size of slice is not 3") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface{}) { + Len(a.t, object, length, msgAndArgs...) +} + +// Nil asserts that the specified object is nil. +// +// a.Nil(err, "err should be nothing") +// +// Returns whether the assertion was successful (true) or not (false). 
+func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) { + Nil(a.t, object, msgAndArgs...) +} + +// NoError asserts that a function returned no error (i.e. `nil`). +// +// actualObj, err := SomeFunction() +// if a.NoError(err) { +// assert.Equal(t, actualObj, expectedObj) +// } +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) { + NoError(a.t, err, msgAndArgs...) +} + +// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the +// specified substring or element. +// +// a.NotContains("Hello World", "Earth", "But 'Hello World' does NOT contain 'Earth'") +// a.NotContains(["Hello", "World"], "Earth", "But ['Hello', 'World'] does NOT contain 'Earth'") +// a.NotContains({"Hello": "World"}, "Earth", "But {'Hello': 'World'} does NOT contain 'Earth'") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs ...interface{}) { + NotContains(a.t, s, contains, msgAndArgs...) +} + +// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// if a.NotEmpty(obj) { +// assert.Equal(t, "two", obj[1]) +// } +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) { + NotEmpty(a.t, object, msgAndArgs...) +} + +// NotEqual asserts that the specified values are NOT equal. +// +// a.NotEqual(obj1, obj2, "two objects shouldn't be equal") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndArgs ...interface{}) { + NotEqual(a.t, expected, actual, msgAndArgs...) +} + +// NotNil asserts that the specified object is not nil. 
+// +// a.NotNil(err, "err should be something") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) { + NotNil(a.t, object, msgAndArgs...) +} + +// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. +// +// a.NotPanics(func(){ +// RemainCalm() +// }, "Calling RemainCalm() should NOT panic") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) NotPanics(f assert.PanicTestFunc, msgAndArgs ...interface{}) { + NotPanics(a.t, f, msgAndArgs...) +} + +// NotRegexp asserts that a specified regexp does not match a string. +// +// a.NotRegexp(regexp.MustCompile("starts"), "it's starting") +// a.NotRegexp("^start", "it's not starting") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) { + NotRegexp(a.t, rx, str, msgAndArgs...) +} + +// NotZero asserts that i is not the zero value for its type and returns the truth. +func (a *Assertions) NotZero(i interface{}, msgAndArgs ...interface{}) { + NotZero(a.t, i, msgAndArgs...) +} + +// Panics asserts that the code inside the specified PanicTestFunc panics. +// +// a.Panics(func(){ +// GoCrazy() +// }, "Calling GoCrazy() should panic") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Panics(f assert.PanicTestFunc, msgAndArgs ...interface{}) { + Panics(a.t, f, msgAndArgs...) +} + +// Regexp asserts that a specified regexp matches a string. +// +// a.Regexp(regexp.MustCompile("start"), "it's starting") +// a.Regexp("start...$", "it's not starting") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) { + Regexp(a.t, rx, str, msgAndArgs...) 
+} + +// True asserts that the specified value is true. +// +// a.True(myBool, "myBool should be true") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) True(value bool, msgAndArgs ...interface{}) { + True(a.t, value, msgAndArgs...) +} + +// WithinDuration asserts that the two times are within duration delta of each other. +// +// a.WithinDuration(time.Now(), time.Now(), 10*time.Second, "The difference should not be more than 10s") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) WithinDuration(expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) { + WithinDuration(a.t, expected, actual, delta, msgAndArgs...) +} + +// Zero asserts that i is the zero value for its type and returns the truth. +func (a *Assertions) Zero(i interface{}, msgAndArgs ...interface{}) { + Zero(a.t, i, msgAndArgs...) +} diff --git a/src/vendor/github.com/stretchr/testify/require/require_forward.go.tmpl b/src/vendor/github.com/stretchr/testify/require/require_forward.go.tmpl new file mode 100644 index 00000000..b93569e0 --- /dev/null +++ b/src/vendor/github.com/stretchr/testify/require/require_forward.go.tmpl @@ -0,0 +1,4 @@ +{{.CommentWithoutT "a"}} +func (a *Assertions) {{.DocInfo.Name}}({{.Params}}) { + {{.DocInfo.Name}}(a.t, {{.ForwardedParams}}) +} diff --git a/src/vendor/github.com/stretchr/testify/require/requirements.go b/src/vendor/github.com/stretchr/testify/require/requirements.go new file mode 100644 index 00000000..41147562 --- /dev/null +++ b/src/vendor/github.com/stretchr/testify/require/requirements.go @@ -0,0 +1,9 @@ +package require + +// TestingT is an interface wrapper around *testing.T +type TestingT interface { + Errorf(format string, args ...interface{}) + FailNow() +} + +//go:generate go run ../_codegen/main.go -output-package=require -template=require.go.tmpl diff --git a/src/vendor/github.com/stretchr/testify/require/requirements_test.go 
b/src/vendor/github.com/stretchr/testify/require/requirements_test.go new file mode 100644 index 00000000..d2ccc99c --- /dev/null +++ b/src/vendor/github.com/stretchr/testify/require/requirements_test.go @@ -0,0 +1,369 @@ +package require + +import ( + "errors" + "testing" + "time" +) + +// AssertionTesterInterface defines an interface to be used for testing assertion methods +type AssertionTesterInterface interface { + TestMethod() +} + +// AssertionTesterConformingObject is an object that conforms to the AssertionTesterInterface interface +type AssertionTesterConformingObject struct { +} + +func (a *AssertionTesterConformingObject) TestMethod() { +} + +// AssertionTesterNonConformingObject is an object that does not conform to the AssertionTesterInterface interface +type AssertionTesterNonConformingObject struct { +} + +type MockT struct { + Failed bool +} + +func (t *MockT) FailNow() { + t.Failed = true +} + +func (t *MockT) Errorf(format string, args ...interface{}) { + _, _ = format, args +} + +func TestImplements(t *testing.T) { + + Implements(t, (*AssertionTesterInterface)(nil), new(AssertionTesterConformingObject)) + + mockT := new(MockT) + Implements(mockT, (*AssertionTesterInterface)(nil), new(AssertionTesterNonConformingObject)) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestIsType(t *testing.T) { + + IsType(t, new(AssertionTesterConformingObject), new(AssertionTesterConformingObject)) + + mockT := new(MockT) + IsType(mockT, new(AssertionTesterConformingObject), new(AssertionTesterNonConformingObject)) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestEqual(t *testing.T) { + + Equal(t, 1, 1) + + mockT := new(MockT) + Equal(mockT, 1, 2) + if !mockT.Failed { + t.Error("Check should fail") + } + +} + +func TestNotEqual(t *testing.T) { + + NotEqual(t, 1, 2) + mockT := new(MockT) + NotEqual(mockT, 2, 2) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestExactly(t *testing.T) { + + a := 
float32(1) + b := float32(1) + c := float64(1) + + Exactly(t, a, b) + + mockT := new(MockT) + Exactly(mockT, a, c) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestNotNil(t *testing.T) { + + NotNil(t, new(AssertionTesterConformingObject)) + + mockT := new(MockT) + NotNil(mockT, nil) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestNil(t *testing.T) { + + Nil(t, nil) + + mockT := new(MockT) + Nil(mockT, new(AssertionTesterConformingObject)) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestTrue(t *testing.T) { + + True(t, true) + + mockT := new(MockT) + True(mockT, false) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestFalse(t *testing.T) { + + False(t, false) + + mockT := new(MockT) + False(mockT, true) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestContains(t *testing.T) { + + Contains(t, "Hello World", "Hello") + + mockT := new(MockT) + Contains(mockT, "Hello World", "Salut") + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestNotContains(t *testing.T) { + + NotContains(t, "Hello World", "Hello!") + + mockT := new(MockT) + NotContains(mockT, "Hello World", "Hello") + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestPanics(t *testing.T) { + + Panics(t, func() { + panic("Panic!") + }) + + mockT := new(MockT) + Panics(mockT, func() {}) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestNotPanics(t *testing.T) { + + NotPanics(t, func() {}) + + mockT := new(MockT) + NotPanics(mockT, func() { + panic("Panic!") + }) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestNoError(t *testing.T) { + + NoError(t, nil) + + mockT := new(MockT) + NoError(mockT, errors.New("some error")) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestError(t *testing.T) { + + Error(t, errors.New("some error")) + + mockT := new(MockT) + Error(mockT, nil) + if !mockT.Failed { + 
t.Error("Check should fail") + } +} + +func TestEqualError(t *testing.T) { + + EqualError(t, errors.New("some error"), "some error") + + mockT := new(MockT) + EqualError(mockT, errors.New("some error"), "Not some error") + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestEmpty(t *testing.T) { + + Empty(t, "") + + mockT := new(MockT) + Empty(mockT, "x") + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestNotEmpty(t *testing.T) { + + NotEmpty(t, "x") + + mockT := new(MockT) + NotEmpty(mockT, "") + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestWithinDuration(t *testing.T) { + + a := time.Now() + b := a.Add(10 * time.Second) + + WithinDuration(t, a, b, 15*time.Second) + + mockT := new(MockT) + WithinDuration(mockT, a, b, 5*time.Second) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestInDelta(t *testing.T) { + + InDelta(t, 1.001, 1, 0.01) + + mockT := new(MockT) + InDelta(mockT, 1, 2, 0.5) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestZero(t *testing.T) { + + Zero(t, "") + + mockT := new(MockT) + Zero(mockT, "x") + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestNotZero(t *testing.T) { + + NotZero(t, "x") + + mockT := new(MockT) + NotZero(mockT, "") + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestJSONEq_EqualSONString(t *testing.T) { + mockT := new(MockT) + JSONEq(mockT, `{"hello": "world", "foo": "bar"}`, `{"hello": "world", "foo": "bar"}`) + if mockT.Failed { + t.Error("Check should pass") + } +} + +func TestJSONEq_EquivalentButNotEqual(t *testing.T) { + mockT := new(MockT) + JSONEq(mockT, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) + if mockT.Failed { + t.Error("Check should pass") + } +} + +func TestJSONEq_HashOfArraysAndHashes(t *testing.T) { + mockT := new(MockT) + JSONEq(mockT, "{\r\n\t\"numeric\": 1.5,\r\n\t\"array\": [{\"foo\": \"bar\"}, 1, \"string\", [\"nested\", \"array\", 
5.5]],\r\n\t\"hash\": {\"nested\": \"hash\", \"nested_slice\": [\"this\", \"is\", \"nested\"]},\r\n\t\"string\": \"foo\"\r\n}", + "{\r\n\t\"numeric\": 1.5,\r\n\t\"hash\": {\"nested\": \"hash\", \"nested_slice\": [\"this\", \"is\", \"nested\"]},\r\n\t\"string\": \"foo\",\r\n\t\"array\": [{\"foo\": \"bar\"}, 1, \"string\", [\"nested\", \"array\", 5.5]]\r\n}") + if mockT.Failed { + t.Error("Check should pass") + } +} + +func TestJSONEq_Array(t *testing.T) { + mockT := new(MockT) + JSONEq(mockT, `["foo", {"hello": "world", "nested": "hash"}]`, `["foo", {"nested": "hash", "hello": "world"}]`) + if mockT.Failed { + t.Error("Check should pass") + } +} + +func TestJSONEq_HashAndArrayNotEquivalent(t *testing.T) { + mockT := new(MockT) + JSONEq(mockT, `["foo", {"hello": "world", "nested": "hash"}]`, `{"foo": "bar", {"nested": "hash", "hello": "world"}}`) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestJSONEq_HashesNotEquivalent(t *testing.T) { + mockT := new(MockT) + JSONEq(mockT, `{"foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestJSONEq_ActualIsNotJSON(t *testing.T) { + mockT := new(MockT) + JSONEq(mockT, `{"foo": "bar"}`, "Not JSON") + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestJSONEq_ExpectedIsNotJSON(t *testing.T) { + mockT := new(MockT) + JSONEq(mockT, "Not JSON", `{"foo": "bar", "hello": "world"}`) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestJSONEq_ExpectedAndActualNotJSON(t *testing.T) { + mockT := new(MockT) + JSONEq(mockT, "Not JSON", "Not JSON") + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestJSONEq_ArraysOfDifferentOrder(t *testing.T) { + mockT := new(MockT) + JSONEq(mockT, `["foo", {"hello": "world", "nested": "hash"}]`, `[{ "hello": "world", "nested": "hash"}, "foo"]`) + if !mockT.Failed { + t.Error("Check should fail") + } +} diff --git a/src/vendor/github.com/stretchr/testify/suite/doc.go 
b/src/vendor/github.com/stretchr/testify/suite/doc.go new file mode 100644 index 00000000..f91a245d --- /dev/null +++ b/src/vendor/github.com/stretchr/testify/suite/doc.go @@ -0,0 +1,65 @@ +// Package suite contains logic for creating testing suite structs +// and running the methods on those structs as tests. The most useful +// piece of this package is that you can create setup/teardown methods +// on your testing suites, which will run before/after the whole suite +// or individual tests (depending on which interface(s) you +// implement). +// +// A testing suite is usually built by first extending the built-in +// suite functionality from suite.Suite in testify. Alternatively, +// you could reproduce that logic on your own if you wanted (you +// just need to implement the TestingSuite interface from +// suite/interfaces.go). +// +// After that, you can implement any of the interfaces in +// suite/interfaces.go to add setup/teardown functionality to your +// suite, and add any methods that start with "Test" to add tests. +// Methods that do not match any suite interfaces and do not begin +// with "Test" will not be run by testify, and can safely be used as +// helper methods. +// +// Once you've built your testing suite, you need to run the suite +// (using suite.Run from testify) inside any function that matches the +// identity that "go test" is already looking for (i.e. +// func(*testing.T)). +// +// Regular expression to select test suites specified command-line +// argument "-run". Regular expression to select the methods +// of test suites specified command-line argument "-m". +// Suite object has assertion methods. 
+// +// A crude example: +// // Basic imports +// import ( +// "testing" +// "github.com/stretchr/testify/assert" +// "github.com/stretchr/testify/suite" +// ) +// +// // Define the suite, and absorb the built-in basic suite +// // functionality from testify - including a T() method which +// // returns the current testing context +// type ExampleTestSuite struct { +// suite.Suite +// VariableThatShouldStartAtFive int +// } +// +// // Make sure that VariableThatShouldStartAtFive is set to five +// // before each test +// func (suite *ExampleTestSuite) SetupTest() { +// suite.VariableThatShouldStartAtFive = 5 +// } +// +// // All methods that begin with "Test" are run as tests within a +// // suite. +// func (suite *ExampleTestSuite) TestExample() { +// assert.Equal(suite.T(), 5, suite.VariableThatShouldStartAtFive) +// suite.Equal(5, suite.VariableThatShouldStartAtFive) +// } +// +// // In order for 'go test' to run this suite, we need to create +// // a normal test function and pass our suite to suite.Run +// func TestExampleTestSuite(t *testing.T) { +// suite.Run(t, new(ExampleTestSuite)) +// } +package suite diff --git a/src/vendor/github.com/stretchr/testify/suite/interfaces.go b/src/vendor/github.com/stretchr/testify/suite/interfaces.go new file mode 100644 index 00000000..20969472 --- /dev/null +++ b/src/vendor/github.com/stretchr/testify/suite/interfaces.go @@ -0,0 +1,34 @@ +package suite + +import "testing" + +// TestingSuite can store and return the current *testing.T context +// generated by 'go test'. +type TestingSuite interface { + T() *testing.T + SetT(*testing.T) +} + +// SetupAllSuite has a SetupSuite method, which will run before the +// tests in the suite are run. +type SetupAllSuite interface { + SetupSuite() +} + +// SetupTestSuite has a SetupTest method, which will run before each +// test in the suite. 
+type SetupTestSuite interface { + SetupTest() +} + +// TearDownAllSuite has a TearDownSuite method, which will run after +// all the tests in the suite have been run. +type TearDownAllSuite interface { + TearDownSuite() +} + +// TearDownTestSuite has a TearDownTest method, which will run after +// each test in the suite. +type TearDownTestSuite interface { + TearDownTest() +} diff --git a/src/vendor/github.com/stretchr/testify/suite/suite.go b/src/vendor/github.com/stretchr/testify/suite/suite.go new file mode 100644 index 00000000..db741300 --- /dev/null +++ b/src/vendor/github.com/stretchr/testify/suite/suite.go @@ -0,0 +1,115 @@ +package suite + +import ( + "flag" + "fmt" + "os" + "reflect" + "regexp" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var matchMethod = flag.String("testify.m", "", "regular expression to select tests of the testify suite to run") + +// Suite is a basic testing suite with methods for storing and +// retrieving the current *testing.T context. +type Suite struct { + *assert.Assertions + require *require.Assertions + t *testing.T +} + +// T retrieves the current *testing.T context. +func (suite *Suite) T() *testing.T { + return suite.t +} + +// SetT sets the current *testing.T context. +func (suite *Suite) SetT(t *testing.T) { + suite.t = t + suite.Assertions = assert.New(t) + suite.require = require.New(t) +} + +// Require returns a require context for suite. +func (suite *Suite) Require() *require.Assertions { + if suite.require == nil { + suite.require = require.New(suite.T()) + } + return suite.require +} + +// Assert returns an assert context for suite. Normally, you can call +// `suite.NoError(expected, actual)`, but for situations where the embedded +// methods are overridden (for example, you might want to override +// assert.Assertions with require.Assertions), this method is provided so you +// can call `suite.Assert().NoError()`. 
+func (suite *Suite) Assert() *assert.Assertions { + if suite.Assertions == nil { + suite.Assertions = assert.New(suite.T()) + } + return suite.Assertions +} + +// Run takes a testing suite and runs all of the tests attached +// to it. +func Run(t *testing.T, suite TestingSuite) { + suite.SetT(t) + + if setupAllSuite, ok := suite.(SetupAllSuite); ok { + setupAllSuite.SetupSuite() + } + defer func() { + if tearDownAllSuite, ok := suite.(TearDownAllSuite); ok { + tearDownAllSuite.TearDownSuite() + } + }() + + methodFinder := reflect.TypeOf(suite) + tests := []testing.InternalTest{} + for index := 0; index < methodFinder.NumMethod(); index++ { + method := methodFinder.Method(index) + ok, err := methodFilter(method.Name) + if err != nil { + fmt.Fprintf(os.Stderr, "testify: invalid regexp for -m: %s\n", err) + os.Exit(1) + } + if ok { + test := testing.InternalTest{ + Name: method.Name, + F: func(t *testing.T) { + parentT := suite.T() + suite.SetT(t) + if setupTestSuite, ok := suite.(SetupTestSuite); ok { + setupTestSuite.SetupTest() + } + defer func() { + if tearDownTestSuite, ok := suite.(TearDownTestSuite); ok { + tearDownTestSuite.TearDownTest() + } + suite.SetT(parentT) + }() + method.Func.Call([]reflect.Value{reflect.ValueOf(suite)}) + }, + } + tests = append(tests, test) + } + } + + if !testing.RunTests(func(_, _ string) (bool, error) { return true, nil }, + tests) { + t.Fail() + } +} + +// Filtering method according to set regular expression +// specified command-line argument -m +func methodFilter(name string) (bool, error) { + if ok, _ := regexp.MatchString("^Test", name); !ok { + return false, nil + } + return regexp.MatchString(*matchMethod, name) +} diff --git a/src/vendor/github.com/stretchr/testify/suite/suite_test.go b/src/vendor/github.com/stretchr/testify/suite/suite_test.go new file mode 100644 index 00000000..c7c4e88f --- /dev/null +++ b/src/vendor/github.com/stretchr/testify/suite/suite_test.go @@ -0,0 +1,239 @@ +package suite + +import ( + "errors" 
+ "io/ioutil" + "os" + "testing" + + "github.com/stretchr/testify/assert" +) + +// SuiteRequireTwice is intended to test the usage of suite.Require in two +// different tests +type SuiteRequireTwice struct{ Suite } + +// TestSuiteRequireTwice checks for regressions of issue #149 where +// suite.requirements was not initialised in suite.SetT() +// A regression would result on these tests panicking rather than failing. +func TestSuiteRequireTwice(t *testing.T) { + ok := testing.RunTests( + func(_, _ string) (bool, error) { return true, nil }, + []testing.InternalTest{{ + Name: "TestSuiteRequireTwice", + F: func(t *testing.T) { + suite := new(SuiteRequireTwice) + Run(t, suite) + }, + }}, + ) + assert.Equal(t, false, ok) +} + +func (s *SuiteRequireTwice) TestRequireOne() { + r := s.Require() + r.Equal(1, 2) +} + +func (s *SuiteRequireTwice) TestRequireTwo() { + r := s.Require() + r.Equal(1, 2) +} + +// This suite is intended to store values to make sure that only +// testing-suite-related methods are run. It's also a fully +// functional example of a testing suite, using setup/teardown methods +// and a helper method that is ignored by testify. To make this look +// more like a real world example, all tests in the suite perform some +// type of assertion. +type SuiteTester struct { + // Include our basic suite logic. + Suite + + // Keep counts of how many times each method is run. + SetupSuiteRunCount int + TearDownSuiteRunCount int + SetupTestRunCount int + TearDownTestRunCount int + TestOneRunCount int + TestTwoRunCount int + NonTestMethodRunCount int +} + +type SuiteSkipTester struct { + // Include our basic suite logic. + Suite + + // Keep counts of how many times each method is run. + SetupSuiteRunCount int + TearDownSuiteRunCount int +} + +// The SetupSuite method will be run by testify once, at the very +// start of the testing suite, before any tests are run. 
+func (suite *SuiteTester) SetupSuite() { + suite.SetupSuiteRunCount++ +} + +func (suite *SuiteSkipTester) SetupSuite() { + suite.SetupSuiteRunCount++ + suite.T().Skip() +} + +// The TearDownSuite method will be run by testify once, at the very +// end of the testing suite, after all tests have been run. +func (suite *SuiteTester) TearDownSuite() { + suite.TearDownSuiteRunCount++ +} + +func (suite *SuiteSkipTester) TearDownSuite() { + suite.TearDownSuiteRunCount++ +} + +// The SetupTest method will be run before every test in the suite. +func (suite *SuiteTester) SetupTest() { + suite.SetupTestRunCount++ +} + +// The TearDownTest method will be run after every test in the suite. +func (suite *SuiteTester) TearDownTest() { + suite.TearDownTestRunCount++ +} + +// Every method in a testing suite that begins with "Test" will be run +// as a test. TestOne is an example of a test. For the purposes of +// this example, we've included assertions in the tests, since most +// tests will issue assertions. +func (suite *SuiteTester) TestOne() { + beforeCount := suite.TestOneRunCount + suite.TestOneRunCount++ + assert.Equal(suite.T(), suite.TestOneRunCount, beforeCount+1) + suite.Equal(suite.TestOneRunCount, beforeCount+1) +} + +// TestTwo is another example of a test. +func (suite *SuiteTester) TestTwo() { + beforeCount := suite.TestTwoRunCount + suite.TestTwoRunCount++ + assert.NotEqual(suite.T(), suite.TestTwoRunCount, beforeCount) + suite.NotEqual(suite.TestTwoRunCount, beforeCount) +} + +func (suite *SuiteTester) TestSkip() { + suite.T().Skip() +} + +// NonTestMethod does not begin with "Test", so it will not be run by +// testify as a test in the suite. This is useful for creating helper +// methods for your tests. +func (suite *SuiteTester) NonTestMethod() { + suite.NonTestMethodRunCount++ +} + +// TestRunSuite will be run by the 'go test' command, so within it, we +// can run our suite using the Run(*testing.T, TestingSuite) function. 
+func TestRunSuite(t *testing.T) { + suiteTester := new(SuiteTester) + Run(t, suiteTester) + + // Normally, the test would end here. The following are simply + // some assertions to ensure that the Run function is working as + // intended - they are not part of the example. + + // The suite was only run once, so the SetupSuite and TearDownSuite + // methods should have each been run only once. + assert.Equal(t, suiteTester.SetupSuiteRunCount, 1) + assert.Equal(t, suiteTester.TearDownSuiteRunCount, 1) + + // There are three test methods (TestOne, TestTwo, and TestSkip), so + // the SetupTest and TearDownTest methods (which should be run once for + // each test) should have been run three times. + assert.Equal(t, suiteTester.SetupTestRunCount, 3) + assert.Equal(t, suiteTester.TearDownTestRunCount, 3) + + // Each test should have been run once. + assert.Equal(t, suiteTester.TestOneRunCount, 1) + assert.Equal(t, suiteTester.TestTwoRunCount, 1) + + // Methods that don't match the test method identifier shouldn't + // have been run at all. 
+ assert.Equal(t, suiteTester.NonTestMethodRunCount, 0) + + suiteSkipTester := new(SuiteSkipTester) + Run(t, suiteSkipTester) + + // The suite was only run once, so the SetupSuite and TearDownSuite + // methods should have each been run only once, even though SetupSuite + // called Skip() + assert.Equal(t, suiteSkipTester.SetupSuiteRunCount, 1) + assert.Equal(t, suiteSkipTester.TearDownSuiteRunCount, 1) + +} + +func TestSuiteGetters(t *testing.T) { + suite := new(SuiteTester) + suite.SetT(t) + assert.NotNil(t, suite.Assert()) + assert.Equal(t, suite.Assertions, suite.Assert()) + assert.NotNil(t, suite.Require()) + assert.Equal(t, suite.require, suite.Require()) +} + +type SuiteLoggingTester struct { + Suite +} + +func (s *SuiteLoggingTester) TestLoggingPass() { + s.T().Log("TESTLOGPASS") +} + +func (s *SuiteLoggingTester) TestLoggingFail() { + s.T().Log("TESTLOGFAIL") + assert.NotNil(s.T(), nil) // expected to fail +} + +type StdoutCapture struct { + oldStdout *os.File + readPipe *os.File +} + +func (sc *StdoutCapture) StartCapture() { + sc.oldStdout = os.Stdout + sc.readPipe, os.Stdout, _ = os.Pipe() +} + +func (sc *StdoutCapture) StopCapture() (string, error) { + if sc.oldStdout == nil || sc.readPipe == nil { + return "", errors.New("StartCapture not called before StopCapture") + } + os.Stdout.Close() + os.Stdout = sc.oldStdout + bytes, err := ioutil.ReadAll(sc.readPipe) + if err != nil { + return "", err + } + return string(bytes), nil +} + +func TestSuiteLogging(t *testing.T) { + testT := testing.T{} + + suiteLoggingTester := new(SuiteLoggingTester) + + capture := StdoutCapture{} + capture.StartCapture() + Run(&testT, suiteLoggingTester) + output, err := capture.StopCapture() + + assert.Nil(t, err, "Got an error trying to capture stdout!") + + // Failed tests' output is always printed + assert.Contains(t, output, "TESTLOGFAIL") + + if testing.Verbose() { + // In verbose mode, output from successful tests is also printed + assert.Contains(t, output, 
"TESTLOGPASS") + } else { + assert.NotContains(t, output, "TESTLOGPASS") + } +} diff --git a/src/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/LICENSE b/src/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/LICENSE new file mode 100644 index 00000000..bb673323 --- /dev/null +++ b/src/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/LICENSE @@ -0,0 +1,15 @@ +ISC License + +Copyright (c) 2012-2013 Dave Collins + +Permission to use, copy, modify, and distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/src/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/bypass.go b/src/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/bypass.go new file mode 100644 index 00000000..d42a0bc4 --- /dev/null +++ b/src/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/bypass.go @@ -0,0 +1,152 @@ +// Copyright (c) 2015 Dave Collins +// +// Permission to use, copy, modify, and distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +// NOTE: Due to the following build constraints, this file will only be compiled +// when the code is not running on Google App Engine, compiled by GopherJS, and +// "-tags safe" is not added to the go build command line. The "disableunsafe" +// tag is deprecated and thus should not be used. +// +build !js,!appengine,!safe,!disableunsafe + +package spew + +import ( + "reflect" + "unsafe" +) + +const ( + // UnsafeDisabled is a build-time constant which specifies whether or + // not access to the unsafe package is available. + UnsafeDisabled = false + + // ptrSize is the size of a pointer on the current arch. + ptrSize = unsafe.Sizeof((*byte)(nil)) +) + +var ( + // offsetPtr, offsetScalar, and offsetFlag are the offsets for the + // internal reflect.Value fields. These values are valid before golang + // commit ecccf07e7f9d which changed the format. The are also valid + // after commit 82f48826c6c7 which changed the format again to mirror + // the original format. Code in the init function updates these offsets + // as necessary. + offsetPtr = uintptr(ptrSize) + offsetScalar = uintptr(0) + offsetFlag = uintptr(ptrSize * 2) + + // flagKindWidth and flagKindShift indicate various bits that the + // reflect package uses internally to track kind information. + // + // flagRO indicates whether or not the value field of a reflect.Value is + // read-only. + // + // flagIndir indicates whether the value field of a reflect.Value is + // the actual data or a pointer to the data. + // + // These values are valid before golang commit 90a7c3c86944 which + // changed their positions. Code in the init function updates these + // flags as necessary. 
+ flagKindWidth = uintptr(5) + flagKindShift = uintptr(flagKindWidth - 1) + flagRO = uintptr(1 << 0) + flagIndir = uintptr(1 << 1) +) + +func init() { + // Older versions of reflect.Value stored small integers directly in the + // ptr field (which is named val in the older versions). Versions + // between commits ecccf07e7f9d and 82f48826c6c7 added a new field named + // scalar for this purpose which unfortunately came before the flag + // field, so the offset of the flag field is different for those + // versions. + // + // This code constructs a new reflect.Value from a known small integer + // and checks if the size of the reflect.Value struct indicates it has + // the scalar field. When it does, the offsets are updated accordingly. + vv := reflect.ValueOf(0xf00) + if unsafe.Sizeof(vv) == (ptrSize * 4) { + offsetScalar = ptrSize * 2 + offsetFlag = ptrSize * 3 + } + + // Commit 90a7c3c86944 changed the flag positions such that the low + // order bits are the kind. This code extracts the kind from the flags + // field and ensures it's the correct type. When it's not, the flag + // order has been changed to the newer format, so the flags are updated + // accordingly. + upf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag) + upfv := *(*uintptr)(upf) + flagKindMask := uintptr((1<>flagKindShift != uintptr(reflect.Int) { + flagKindShift = 0 + flagRO = 1 << 5 + flagIndir = 1 << 6 + + // Commit adf9b30e5594 modified the flags to separate the + // flagRO flag into two bits which specifies whether or not the + // field is embedded. This causes flagIndir to move over a bit + // and means that flagRO is the combination of either of the + // original flagRO bit and the new bit. + // + // This code detects the change by extracting what used to be + // the indirect bit to ensure it's set. When it's not, the flag + // order has been changed to the newer format, so the flags are + // updated accordingly. 
+ if upfv&flagIndir == 0 { + flagRO = 3 << 5 + flagIndir = 1 << 7 + } + } +} + +// unsafeReflectValue converts the passed reflect.Value into a one that bypasses +// the typical safety restrictions preventing access to unaddressable and +// unexported data. It works by digging the raw pointer to the underlying +// value out of the protected value and generating a new unprotected (unsafe) +// reflect.Value to it. +// +// This allows us to check for implementations of the Stringer and error +// interfaces to be used for pretty printing ordinarily unaddressable and +// inaccessible values such as unexported struct fields. +func unsafeReflectValue(v reflect.Value) (rv reflect.Value) { + indirects := 1 + vt := v.Type() + upv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr) + rvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag)) + if rvf&flagIndir != 0 { + vt = reflect.PtrTo(v.Type()) + indirects++ + } else if offsetScalar != 0 { + // The value is in the scalar field when it's not one of the + // reference types. 
+ switch vt.Kind() { + case reflect.Uintptr: + case reflect.Chan: + case reflect.Func: + case reflect.Map: + case reflect.Ptr: + case reflect.UnsafePointer: + default: + upv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + + offsetScalar) + } + } + + pv := reflect.NewAt(vt, upv) + rv = pv + for i := 0; i < indirects; i++ { + rv = rv.Elem() + } + return rv +} diff --git a/src/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/src/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go new file mode 100644 index 00000000..e47a4e79 --- /dev/null +++ b/src/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go @@ -0,0 +1,38 @@ +// Copyright (c) 2015 Dave Collins +// +// Permission to use, copy, modify, and distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +// NOTE: Due to the following build constraints, this file will only be compiled +// when the code is running on Google App Engine, compiled by GopherJS, or +// "-tags safe" is added to the go build command line. The "disableunsafe" +// tag is deprecated and thus should not be used. 
+// +build js appengine safe disableunsafe + +package spew + +import "reflect" + +const ( + // UnsafeDisabled is a build-time constant which specifies whether or + // not access to the unsafe package is available. + UnsafeDisabled = true +) + +// unsafeReflectValue typically converts the passed reflect.Value into a one +// that bypasses the typical safety restrictions preventing access to +// unaddressable and unexported data. However, doing this relies on access to +// the unsafe package. This is a stub version which simply returns the passed +// reflect.Value when the unsafe package is not available. +func unsafeReflectValue(v reflect.Value) reflect.Value { + return v +} diff --git a/src/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/common.go b/src/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/common.go new file mode 100644 index 00000000..14f02dc1 --- /dev/null +++ b/src/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/common.go @@ -0,0 +1,341 @@ +/* + * Copyright (c) 2013 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "fmt" + "io" + "reflect" + "sort" + "strconv" +) + +// Some constants in the form of bytes to avoid string overhead. 
This mirrors +// the technique used in the fmt package. +var ( + panicBytes = []byte("(PANIC=") + plusBytes = []byte("+") + iBytes = []byte("i") + trueBytes = []byte("true") + falseBytes = []byte("false") + interfaceBytes = []byte("(interface {})") + commaNewlineBytes = []byte(",\n") + newlineBytes = []byte("\n") + openBraceBytes = []byte("{") + openBraceNewlineBytes = []byte("{\n") + closeBraceBytes = []byte("}") + asteriskBytes = []byte("*") + colonBytes = []byte(":") + colonSpaceBytes = []byte(": ") + openParenBytes = []byte("(") + closeParenBytes = []byte(")") + spaceBytes = []byte(" ") + pointerChainBytes = []byte("->") + nilAngleBytes = []byte("") + maxNewlineBytes = []byte("\n") + maxShortBytes = []byte("") + circularBytes = []byte("") + circularShortBytes = []byte("") + invalidAngleBytes = []byte("") + openBracketBytes = []byte("[") + closeBracketBytes = []byte("]") + percentBytes = []byte("%") + precisionBytes = []byte(".") + openAngleBytes = []byte("<") + closeAngleBytes = []byte(">") + openMapBytes = []byte("map[") + closeMapBytes = []byte("]") + lenEqualsBytes = []byte("len=") + capEqualsBytes = []byte("cap=") +) + +// hexDigits is used to map a decimal value to a hex digit. +var hexDigits = "0123456789abcdef" + +// catchPanic handles any panics that might occur during the handleMethods +// calls. +func catchPanic(w io.Writer, v reflect.Value) { + if err := recover(); err != nil { + w.Write(panicBytes) + fmt.Fprintf(w, "%v", err) + w.Write(closeParenBytes) + } +} + +// handleMethods attempts to call the Error and String methods on the underlying +// type the passed reflect.Value represents and outputes the result to Writer w. +// +// It handles panics in any called methods by catching and displaying the error +// as the formatted value. +func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) { + // We need an interface to check if the type implements the error or + // Stringer interface. 
However, the reflect package won't give us an + // interface on certain things like unexported struct fields in order + // to enforce visibility rules. We use unsafe, when it's available, + // to bypass these restrictions since this package does not mutate the + // values. + if !v.CanInterface() { + if UnsafeDisabled { + return false + } + + v = unsafeReflectValue(v) + } + + // Choose whether or not to do error and Stringer interface lookups against + // the base type or a pointer to the base type depending on settings. + // Technically calling one of these methods with a pointer receiver can + // mutate the value, however, types which choose to satisify an error or + // Stringer interface with a pointer receiver should not be mutating their + // state inside these interface methods. + if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() { + v = unsafeReflectValue(v) + } + if v.CanAddr() { + v = v.Addr() + } + + // Is it an error or Stringer? + switch iface := v.Interface().(type) { + case error: + defer catchPanic(w, v) + if cs.ContinueOnMethod { + w.Write(openParenBytes) + w.Write([]byte(iface.Error())) + w.Write(closeParenBytes) + w.Write(spaceBytes) + return false + } + + w.Write([]byte(iface.Error())) + return true + + case fmt.Stringer: + defer catchPanic(w, v) + if cs.ContinueOnMethod { + w.Write(openParenBytes) + w.Write([]byte(iface.String())) + w.Write(closeParenBytes) + w.Write(spaceBytes) + return false + } + w.Write([]byte(iface.String())) + return true + } + return false +} + +// printBool outputs a boolean value as true or false to Writer w. +func printBool(w io.Writer, val bool) { + if val { + w.Write(trueBytes) + } else { + w.Write(falseBytes) + } +} + +// printInt outputs a signed integer value to Writer w. +func printInt(w io.Writer, val int64, base int) { + w.Write([]byte(strconv.FormatInt(val, base))) +} + +// printUint outputs an unsigned integer value to Writer w. 
+func printUint(w io.Writer, val uint64, base int) { + w.Write([]byte(strconv.FormatUint(val, base))) +} + +// printFloat outputs a floating point value using the specified precision, +// which is expected to be 32 or 64bit, to Writer w. +func printFloat(w io.Writer, val float64, precision int) { + w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision))) +} + +// printComplex outputs a complex value using the specified float precision +// for the real and imaginary parts to Writer w. +func printComplex(w io.Writer, c complex128, floatPrecision int) { + r := real(c) + w.Write(openParenBytes) + w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision))) + i := imag(c) + if i >= 0 { + w.Write(plusBytes) + } + w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision))) + w.Write(iBytes) + w.Write(closeParenBytes) +} + +// printHexPtr outputs a uintptr formatted as hexidecimal with a leading '0x' +// prefix to Writer w. +func printHexPtr(w io.Writer, p uintptr) { + // Null pointer. + num := uint64(p) + if num == 0 { + w.Write(nilAngleBytes) + return + } + + // Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix + buf := make([]byte, 18) + + // It's simpler to construct the hex string right to left. + base := uint64(16) + i := len(buf) - 1 + for num >= base { + buf[i] = hexDigits[num%base] + num /= base + i-- + } + buf[i] = hexDigits[num] + + // Add '0x' prefix. + i-- + buf[i] = 'x' + i-- + buf[i] = '0' + + // Strip unused leading bytes. + buf = buf[i:] + w.Write(buf) +} + +// valuesSorter implements sort.Interface to allow a slice of reflect.Value +// elements to be sorted. +type valuesSorter struct { + values []reflect.Value + strings []string // either nil or same len and values + cs *ConfigState +} + +// newValuesSorter initializes a valuesSorter instance, which holds a set of +// surrogate keys on which the data should be sorted. It uses flags in +// ConfigState to decide if and how to populate those surrogate keys. 
+func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface { + vs := &valuesSorter{values: values, cs: cs} + if canSortSimply(vs.values[0].Kind()) { + return vs + } + if !cs.DisableMethods { + vs.strings = make([]string, len(values)) + for i := range vs.values { + b := bytes.Buffer{} + if !handleMethods(cs, &b, vs.values[i]) { + vs.strings = nil + break + } + vs.strings[i] = b.String() + } + } + if vs.strings == nil && cs.SpewKeys { + vs.strings = make([]string, len(values)) + for i := range vs.values { + vs.strings[i] = Sprintf("%#v", vs.values[i].Interface()) + } + } + return vs +} + +// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted +// directly, or whether it should be considered for sorting by surrogate keys +// (if the ConfigState allows it). +func canSortSimply(kind reflect.Kind) bool { + // This switch parallels valueSortLess, except for the default case. + switch kind { + case reflect.Bool: + return true + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + return true + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + return true + case reflect.Float32, reflect.Float64: + return true + case reflect.String: + return true + case reflect.Uintptr: + return true + case reflect.Array: + return true + } + return false +} + +// Len returns the number of values in the slice. It is part of the +// sort.Interface implementation. +func (s *valuesSorter) Len() int { + return len(s.values) +} + +// Swap swaps the values at the passed indices. It is part of the +// sort.Interface implementation. +func (s *valuesSorter) Swap(i, j int) { + s.values[i], s.values[j] = s.values[j], s.values[i] + if s.strings != nil { + s.strings[i], s.strings[j] = s.strings[j], s.strings[i] + } +} + +// valueSortLess returns whether the first value should sort before the second +// value. It is used by valueSorter.Less as part of the sort.Interface +// implementation. 
+func valueSortLess(a, b reflect.Value) bool { + switch a.Kind() { + case reflect.Bool: + return !a.Bool() && b.Bool() + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + return a.Int() < b.Int() + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + return a.Uint() < b.Uint() + case reflect.Float32, reflect.Float64: + return a.Float() < b.Float() + case reflect.String: + return a.String() < b.String() + case reflect.Uintptr: + return a.Uint() < b.Uint() + case reflect.Array: + // Compare the contents of both arrays. + l := a.Len() + for i := 0; i < l; i++ { + av := a.Index(i) + bv := b.Index(i) + if av.Interface() == bv.Interface() { + continue + } + return valueSortLess(av, bv) + } + } + return a.String() < b.String() +} + +// Less returns whether the value at index i should sort before the +// value at index j. It is part of the sort.Interface implementation. +func (s *valuesSorter) Less(i, j int) bool { + if s.strings == nil { + return valueSortLess(s.values[i], s.values[j]) + } + return s.strings[i] < s.strings[j] +} + +// sortValues is a sort function that handles both native types and any type that +// can be converted to error or Stringer. Other inputs are sorted according to +// their Value.String() value to ensure display stability. 
+func sortValues(values []reflect.Value, cs *ConfigState) { + if len(values) == 0 { + return + } + sort.Sort(newValuesSorter(values, cs)) +} diff --git a/src/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/config.go b/src/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/config.go new file mode 100644 index 00000000..55528272 --- /dev/null +++ b/src/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/config.go @@ -0,0 +1,297 @@ +/* + * Copyright (c) 2013 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "fmt" + "io" + "os" +) + +// ConfigState houses the configuration options used by spew to format and +// display values. There is a global instance, Config, that is used to control +// all top-level Formatter and Dump functionality. Each ConfigState instance +// provides methods equivalent to the top-level functions. +// +// The zero value for ConfigState provides no indentation. You would typically +// want to set it to a space or a tab. +// +// Alternatively, you can use NewDefaultConfig to get a ConfigState instance +// with default settings. See the documentation of NewDefaultConfig for default +// values. 
+type ConfigState struct { + // Indent specifies the string to use for each indentation level. The + // global config instance that all top-level functions use set this to a + // single space by default. If you would like more indentation, you might + // set this to a tab with "\t" or perhaps two spaces with " ". + Indent string + + // MaxDepth controls the maximum number of levels to descend into nested + // data structures. The default, 0, means there is no limit. + // + // NOTE: Circular data structures are properly detected, so it is not + // necessary to set this value unless you specifically want to limit deeply + // nested data structures. + MaxDepth int + + // DisableMethods specifies whether or not error and Stringer interfaces are + // invoked for types that implement them. + DisableMethods bool + + // DisablePointerMethods specifies whether or not to check for and invoke + // error and Stringer interfaces on types which only accept a pointer + // receiver when the current type is not a pointer. + // + // NOTE: This might be an unsafe action since calling one of these methods + // with a pointer receiver could technically mutate the value, however, + // in practice, types which choose to satisify an error or Stringer + // interface with a pointer receiver should not be mutating their state + // inside these interface methods. As a result, this option relies on + // access to the unsafe package, so it will not have any effect when + // running in environments without access to the unsafe package such as + // Google App Engine or with the "safe" build tag specified. + DisablePointerMethods bool + + // ContinueOnMethod specifies whether or not recursion should continue once + // a custom error or Stringer interface is invoked. The default, false, + // means it will print the results of invoking the custom error or Stringer + // interface and return immediately instead of continuing to recurse into + // the internals of the data type. 
+ // + // NOTE: This flag does not have any effect if method invocation is disabled + // via the DisableMethods or DisablePointerMethods options. + ContinueOnMethod bool + + // SortKeys specifies map keys should be sorted before being printed. Use + // this to have a more deterministic, diffable output. Note that only + // native types (bool, int, uint, floats, uintptr and string) and types + // that support the error or Stringer interfaces (if methods are + // enabled) are supported, with other types sorted according to the + // reflect.Value.String() output which guarantees display stability. + SortKeys bool + + // SpewKeys specifies that, as a last resort attempt, map keys should + // be spewed to strings and sorted by those strings. This is only + // considered if SortKeys is true. + SpewKeys bool +} + +// Config is the active configuration of the top-level functions. +// The configuration can be changed by modifying the contents of spew.Config. +var Config = ConfigState{Indent: " "} + +// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the formatted string as a value that satisfies error. See NewFormatter +// for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) { + return fmt.Errorf(format, c.convertArgs(a)...) +} + +// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. 
+// +// This function is shorthand for the following syntax: +// +// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprint(w, c.convertArgs(a)...) +} + +// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { + return fmt.Fprintf(w, format, c.convertArgs(a)...) +} + +// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it +// passed with a Formatter interface returned by c.NewFormatter. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprintln(w, c.convertArgs(a)...) +} + +// Print is a wrapper for fmt.Print that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Print(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Print(a ...interface{}) (n int, err error) { + return fmt.Print(c.convertArgs(a)...) +} + +// Printf is a wrapper for fmt.Printf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. 
See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) { + return fmt.Printf(format, c.convertArgs(a)...) +} + +// Println is a wrapper for fmt.Println that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Println(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Println(a ...interface{}) (n int, err error) { + return fmt.Println(c.convertArgs(a)...) +} + +// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Sprint(a ...interface{}) string { + return fmt.Sprint(c.convertArgs(a)...) +} + +// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Sprintf(format string, a ...interface{}) string { + return fmt.Sprintf(format, c.convertArgs(a)...) +} + +// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it +// were passed with a Formatter interface returned by c.NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. 
+// +// This function is shorthand for the following syntax: +// +// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Sprintln(a ...interface{}) string { + return fmt.Sprintln(c.convertArgs(a)...) +} + +/* +NewFormatter returns a custom formatter that satisfies the fmt.Formatter +interface. As a result, it integrates cleanly with standard fmt package +printing functions. The formatter is useful for inline printing of smaller data +types similar to the standard %v format specifier. + +The custom formatter only responds to the %v (most compact), %+v (adds pointer +addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb +combinations. Any other verbs such as %x and %q will be sent to the the +standard fmt package for formatting. In addition, the custom formatter ignores +the width and precision arguments (however they will still work on the format +specifiers not handled by the custom formatter). + +Typically this function shouldn't be called directly. It is much easier to make +use of the custom formatter by calling one of the convenience functions such as +c.Printf, c.Println, or c.Printf. +*/ +func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter { + return newFormatter(c, v) +} + +// Fdump formats and displays the passed arguments to io.Writer w. It formats +// exactly the same as Dump. +func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) { + fdump(c, w, a...) +} + +/* +Dump displays the passed parameters to standard out with newlines, customizable +indentation, and additional debug information such as complete types and all +pointer addresses used to indirect to the final value. 
It provides the +following features over the built-in printing facilities provided by the fmt +package: + + * Pointers are dereferenced and followed + * Circular data structures are detected and handled properly + * Custom Stringer/error interfaces are optionally invoked, including + on unexported types + * Custom types which only implement the Stringer/error interfaces via + a pointer receiver are optionally invoked when passing non-pointer + variables + * Byte arrays and slices are dumped like the hexdump -C command which + includes offsets, byte values in hex, and ASCII output + +The configuration options are controlled by modifying the public members +of c. See ConfigState for options documentation. + +See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to +get the formatted result as a string. +*/ +func (c *ConfigState) Dump(a ...interface{}) { + fdump(c, os.Stdout, a...) +} + +// Sdump returns a string with the passed arguments formatted exactly the same +// as Dump. +func (c *ConfigState) Sdump(a ...interface{}) string { + var buf bytes.Buffer + fdump(c, &buf, a...) + return buf.String() +} + +// convertArgs accepts a slice of arguments and returns a slice of the same +// length with each argument converted to a spew Formatter interface using +// the ConfigState associated with s. +func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) { + formatters = make([]interface{}, len(args)) + for index, arg := range args { + formatters[index] = newFormatter(c, arg) + } + return formatters +} + +// NewDefaultConfig returns a ConfigState with the following default settings. 
+// +// Indent: " " +// MaxDepth: 0 +// DisableMethods: false +// DisablePointerMethods: false +// ContinueOnMethod: false +// SortKeys: false +func NewDefaultConfig() *ConfigState { + return &ConfigState{Indent: " "} +} diff --git a/src/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/doc.go b/src/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/doc.go new file mode 100644 index 00000000..5be0c406 --- /dev/null +++ b/src/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/doc.go @@ -0,0 +1,202 @@ +/* + * Copyright (c) 2013 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* +Package spew implements a deep pretty printer for Go data structures to aid in +debugging. 
+ +A quick overview of the additional features spew provides over the built-in +printing facilities for Go data types are as follows: + + * Pointers are dereferenced and followed + * Circular data structures are detected and handled properly + * Custom Stringer/error interfaces are optionally invoked, including + on unexported types + * Custom types which only implement the Stringer/error interfaces via + a pointer receiver are optionally invoked when passing non-pointer + variables + * Byte arrays and slices are dumped like the hexdump -C command which + includes offsets, byte values in hex, and ASCII output (only when using + Dump style) + +There are two different approaches spew allows for dumping Go data structures: + + * Dump style which prints with newlines, customizable indentation, + and additional debug information such as types and all pointer addresses + used to indirect to the final value + * A custom Formatter interface that integrates cleanly with the standard fmt + package and replaces %v, %+v, %#v, and %#+v to provide inline printing + similar to the default %v while providing the additional functionality + outlined above and passing unsupported format verbs such as %x and %q + along to fmt + +Quick Start + +This section demonstrates how to quickly get started with spew. See the +sections below for further details on formatting and configuration options. + +To dump a variable with full newlines, indentation, type, and pointer +information use Dump, Fdump, or Sdump: + spew.Dump(myVar1, myVar2, ...) + spew.Fdump(someWriter, myVar1, myVar2, ...) + str := spew.Sdump(myVar1, myVar2, ...) 
+ +Alternatively, if you would prefer to use format strings with a compacted inline +printing style, use the convenience wrappers Printf, Fprintf, etc with +%v (most compact), %+v (adds pointer addresses), %#v (adds types), or +%#+v (adds types and pointer addresses): + spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) + spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) + spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) + spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) + +Configuration Options + +Configuration of spew is handled by fields in the ConfigState type. For +convenience, all of the top-level functions use a global state available +via the spew.Config global. + +It is also possible to create a ConfigState instance that provides methods +equivalent to the top-level functions. This allows concurrent configuration +options. See the ConfigState documentation for more details. + +The following configuration options are available: + * Indent + String to use for each indentation level for Dump functions. + It is a single space by default. A popular alternative is "\t". + + * MaxDepth + Maximum number of levels to descend into nested data structures. + There is no limit by default. + + * DisableMethods + Disables invocation of error and Stringer interface methods. + Method invocation is enabled by default. + + * DisablePointerMethods + Disables invocation of error and Stringer interface methods on types + which only accept pointer receivers from non-pointer variables. + Pointer method invocation is enabled by default. + + * ContinueOnMethod + Enables recursion into types after invoking error and Stringer interface + methods. Recursion after method invocation is disabled by default. + + * SortKeys + Specifies map keys should be sorted before being printed. Use + this to have a more deterministic, diffable output. 
Note that + only native types (bool, int, uint, floats, uintptr and string) + and types which implement error or Stringer interfaces are + supported with other types sorted according to the + reflect.Value.String() output which guarantees display + stability. Natural map order is used by default. + + * SpewKeys + Specifies that, as a last resort attempt, map keys should be + spewed to strings and sorted by those strings. This is only + considered if SortKeys is true. + +Dump Usage + +Simply call spew.Dump with a list of variables you want to dump: + + spew.Dump(myVar1, myVar2, ...) + +You may also call spew.Fdump if you would prefer to output to an arbitrary +io.Writer. For example, to dump to standard error: + + spew.Fdump(os.Stderr, myVar1, myVar2, ...) + +A third option is to call spew.Sdump to get the formatted output as a string: + + str := spew.Sdump(myVar1, myVar2, ...) + +Sample Dump Output + +See the Dump example for details on the setup of the types and variables being +shown here. + + (main.Foo) { + unexportedField: (*main.Bar)(0xf84002e210)({ + flag: (main.Flag) flagTwo, + data: (uintptr) + }), + ExportedField: (map[interface {}]interface {}) (len=1) { + (string) (len=3) "one": (bool) true + } + } + +Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C +command as shown. + ([]uint8) (len=32 cap=32) { + 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... | + 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0| + 00000020 31 32 |12| + } + +Custom Formatter + +Spew provides a custom formatter that implements the fmt.Formatter interface +so that it integrates cleanly with standard fmt package printing functions. The +formatter is useful for inline printing of smaller data types similar to the +standard %v format specifier. 
+ +The custom formatter only responds to the %v (most compact), %+v (adds pointer +addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb +combinations. Any other verbs such as %x and %q will be sent to the the +standard fmt package for formatting. In addition, the custom formatter ignores +the width and precision arguments (however they will still work on the format +specifiers not handled by the custom formatter). + +Custom Formatter Usage + +The simplest way to make use of the spew custom formatter is to call one of the +convenience functions such as spew.Printf, spew.Println, or spew.Printf. The +functions have syntax you are most likely already familiar with: + + spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) + spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) + spew.Println(myVar, myVar2) + spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) + spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) + +See the Index for the full list convenience functions. + +Sample Formatter Output + +Double pointer to a uint8: + %v: <**>5 + %+v: <**>(0xf8400420d0->0xf8400420c8)5 + %#v: (**uint8)5 + %#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5 + +Pointer to circular struct with a uint8 field and a pointer to itself: + %v: <*>{1 <*>} + %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)} + %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)} + %#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)} + +See the Printf example for details on the setup of variables being shown +here. + +Errors + +Since it is possible for custom Stringer/error interfaces to panic, spew +detects them and handles them internally by printing the panic information +inline with the output. Since spew is intended to provide deep pretty printing +capabilities on structures, it intentionally does not return any errors. 
+*/ +package spew diff --git a/src/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/dump.go b/src/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/dump.go new file mode 100644 index 00000000..a0ff95e2 --- /dev/null +++ b/src/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/dump.go @@ -0,0 +1,509 @@ +/* + * Copyright (c) 2013 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "encoding/hex" + "fmt" + "io" + "os" + "reflect" + "regexp" + "strconv" + "strings" +) + +var ( + // uint8Type is a reflect.Type representing a uint8. It is used to + // convert cgo types to uint8 slices for hexdumping. + uint8Type = reflect.TypeOf(uint8(0)) + + // cCharRE is a regular expression that matches a cgo char. + // It is used to detect character arrays to hexdump them. + cCharRE = regexp.MustCompile("^.*\\._Ctype_char$") + + // cUnsignedCharRE is a regular expression that matches a cgo unsigned + // char. It is used to detect unsigned character arrays to hexdump + // them. + cUnsignedCharRE = regexp.MustCompile("^.*\\._Ctype_unsignedchar$") + + // cUint8tCharRE is a regular expression that matches a cgo uint8_t. + // It is used to detect uint8_t arrays to hexdump them. 
+ cUint8tCharRE = regexp.MustCompile("^.*\\._Ctype_uint8_t$") +) + +// dumpState contains information about the state of a dump operation. +type dumpState struct { + w io.Writer + depth int + pointers map[uintptr]int + ignoreNextType bool + ignoreNextIndent bool + cs *ConfigState +} + +// indent performs indentation according to the depth level and cs.Indent +// option. +func (d *dumpState) indent() { + if d.ignoreNextIndent { + d.ignoreNextIndent = false + return + } + d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth)) +} + +// unpackValue returns values inside of non-nil interfaces when possible. +// This is useful for data types like structs, arrays, slices, and maps which +// can contain varying types packed inside an interface. +func (d *dumpState) unpackValue(v reflect.Value) reflect.Value { + if v.Kind() == reflect.Interface && !v.IsNil() { + v = v.Elem() + } + return v +} + +// dumpPtr handles formatting of pointers by indirecting them as necessary. +func (d *dumpState) dumpPtr(v reflect.Value) { + // Remove pointers at or below the current depth from map used to detect + // circular refs. + for k, depth := range d.pointers { + if depth >= d.depth { + delete(d.pointers, k) + } + } + + // Keep list of all dereferenced pointers to show later. + pointerChain := make([]uintptr, 0) + + // Figure out how many levels of indirection there are by dereferencing + // pointers and unpacking interfaces down the chain while detecting circular + // references. 
+ nilFound := false + cycleFound := false + indirects := 0 + ve := v + for ve.Kind() == reflect.Ptr { + if ve.IsNil() { + nilFound = true + break + } + indirects++ + addr := ve.Pointer() + pointerChain = append(pointerChain, addr) + if pd, ok := d.pointers[addr]; ok && pd < d.depth { + cycleFound = true + indirects-- + break + } + d.pointers[addr] = d.depth + + ve = ve.Elem() + if ve.Kind() == reflect.Interface { + if ve.IsNil() { + nilFound = true + break + } + ve = ve.Elem() + } + } + + // Display type information. + d.w.Write(openParenBytes) + d.w.Write(bytes.Repeat(asteriskBytes, indirects)) + d.w.Write([]byte(ve.Type().String())) + d.w.Write(closeParenBytes) + + // Display pointer information. + if len(pointerChain) > 0 { + d.w.Write(openParenBytes) + for i, addr := range pointerChain { + if i > 0 { + d.w.Write(pointerChainBytes) + } + printHexPtr(d.w, addr) + } + d.w.Write(closeParenBytes) + } + + // Display dereferenced value. + d.w.Write(openParenBytes) + switch { + case nilFound == true: + d.w.Write(nilAngleBytes) + + case cycleFound == true: + d.w.Write(circularBytes) + + default: + d.ignoreNextType = true + d.dump(ve) + } + d.w.Write(closeParenBytes) +} + +// dumpSlice handles formatting of arrays and slices. Byte (uint8 under +// reflection) arrays and slices are dumped in hexdump -C fashion. +func (d *dumpState) dumpSlice(v reflect.Value) { + // Determine whether this type should be hex dumped or not. Also, + // for types which should be hexdumped, try to use the underlying data + // first, then fall back to trying to convert them to a uint8 slice. + var buf []uint8 + doConvert := false + doHexDump := false + numEntries := v.Len() + if numEntries > 0 { + vt := v.Index(0).Type() + vts := vt.String() + switch { + // C types that need to be converted. 
+ case cCharRE.MatchString(vts): + fallthrough + case cUnsignedCharRE.MatchString(vts): + fallthrough + case cUint8tCharRE.MatchString(vts): + doConvert = true + + // Try to use existing uint8 slices and fall back to converting + // and copying if that fails. + case vt.Kind() == reflect.Uint8: + // We need an addressable interface to convert the type + // to a byte slice. However, the reflect package won't + // give us an interface on certain things like + // unexported struct fields in order to enforce + // visibility rules. We use unsafe, when available, to + // bypass these restrictions since this package does not + // mutate the values. + vs := v + if !vs.CanInterface() || !vs.CanAddr() { + vs = unsafeReflectValue(vs) + } + if !UnsafeDisabled { + vs = vs.Slice(0, numEntries) + + // Use the existing uint8 slice if it can be + // type asserted. + iface := vs.Interface() + if slice, ok := iface.([]uint8); ok { + buf = slice + doHexDump = true + break + } + } + + // The underlying data needs to be converted if it can't + // be type asserted to a uint8 slice. + doConvert = true + } + + // Copy and convert the underlying type if needed. + if doConvert && vt.ConvertibleTo(uint8Type) { + // Convert and copy each element into a uint8 byte + // slice. + buf = make([]uint8, numEntries) + for i := 0; i < numEntries; i++ { + vv := v.Index(i) + buf[i] = uint8(vv.Convert(uint8Type).Uint()) + } + doHexDump = true + } + } + + // Hexdump the entire slice as needed. + if doHexDump { + indent := strings.Repeat(d.cs.Indent, d.depth) + str := indent + hex.Dump(buf) + str = strings.Replace(str, "\n", "\n"+indent, -1) + str = strings.TrimRight(str, d.cs.Indent) + d.w.Write([]byte(str)) + return + } + + // Recursively call dump for each item. + for i := 0; i < numEntries; i++ { + d.dump(d.unpackValue(v.Index(i))) + if i < (numEntries - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } +} + +// dump is the main workhorse for dumping a value. 
It uses the passed reflect +// value to figure out what kind of object we are dealing with and formats it +// appropriately. It is a recursive function, however circular data structures +// are detected and handled properly. +func (d *dumpState) dump(v reflect.Value) { + // Handle invalid reflect values immediately. + kind := v.Kind() + if kind == reflect.Invalid { + d.w.Write(invalidAngleBytes) + return + } + + // Handle pointers specially. + if kind == reflect.Ptr { + d.indent() + d.dumpPtr(v) + return + } + + // Print type information unless already handled elsewhere. + if !d.ignoreNextType { + d.indent() + d.w.Write(openParenBytes) + d.w.Write([]byte(v.Type().String())) + d.w.Write(closeParenBytes) + d.w.Write(spaceBytes) + } + d.ignoreNextType = false + + // Display length and capacity if the built-in len and cap functions + // work with the value's kind and the len/cap itself is non-zero. + valueLen, valueCap := 0, 0 + switch v.Kind() { + case reflect.Array, reflect.Slice, reflect.Chan: + valueLen, valueCap = v.Len(), v.Cap() + case reflect.Map, reflect.String: + valueLen = v.Len() + } + if valueLen != 0 || valueCap != 0 { + d.w.Write(openParenBytes) + if valueLen != 0 { + d.w.Write(lenEqualsBytes) + printInt(d.w, int64(valueLen), 10) + } + if valueCap != 0 { + if valueLen != 0 { + d.w.Write(spaceBytes) + } + d.w.Write(capEqualsBytes) + printInt(d.w, int64(valueCap), 10) + } + d.w.Write(closeParenBytes) + d.w.Write(spaceBytes) + } + + // Call Stringer/error interfaces if they exist and the handle methods flag + // is enabled + if !d.cs.DisableMethods { + if (kind != reflect.Invalid) && (kind != reflect.Interface) { + if handled := handleMethods(d.cs, d.w, v); handled { + return + } + } + } + + switch kind { + case reflect.Invalid: + // Do nothing. We should never get here since invalid has already + // been handled above. 
+ + case reflect.Bool: + printBool(d.w, v.Bool()) + + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + printInt(d.w, v.Int(), 10) + + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + printUint(d.w, v.Uint(), 10) + + case reflect.Float32: + printFloat(d.w, v.Float(), 32) + + case reflect.Float64: + printFloat(d.w, v.Float(), 64) + + case reflect.Complex64: + printComplex(d.w, v.Complex(), 32) + + case reflect.Complex128: + printComplex(d.w, v.Complex(), 64) + + case reflect.Slice: + if v.IsNil() { + d.w.Write(nilAngleBytes) + break + } + fallthrough + + case reflect.Array: + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + d.dumpSlice(v) + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.String: + d.w.Write([]byte(strconv.Quote(v.String()))) + + case reflect.Interface: + // The only time we should get here is for nil interfaces due to + // unpackValue calls. + if v.IsNil() { + d.w.Write(nilAngleBytes) + } + + case reflect.Ptr: + // Do nothing. We should never get here since pointers have already + // been handled above. 
+ + case reflect.Map: + // nil maps should be indicated as different than empty maps + if v.IsNil() { + d.w.Write(nilAngleBytes) + break + } + + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + numEntries := v.Len() + keys := v.MapKeys() + if d.cs.SortKeys { + sortValues(keys, d.cs) + } + for i, key := range keys { + d.dump(d.unpackValue(key)) + d.w.Write(colonSpaceBytes) + d.ignoreNextIndent = true + d.dump(d.unpackValue(v.MapIndex(key))) + if i < (numEntries - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.Struct: + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + vt := v.Type() + numFields := v.NumField() + for i := 0; i < numFields; i++ { + d.indent() + vtf := vt.Field(i) + d.w.Write([]byte(vtf.Name)) + d.w.Write(colonSpaceBytes) + d.ignoreNextIndent = true + d.dump(d.unpackValue(v.Field(i))) + if i < (numFields - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.Uintptr: + printHexPtr(d.w, uintptr(v.Uint())) + + case reflect.UnsafePointer, reflect.Chan, reflect.Func: + printHexPtr(d.w, v.Pointer()) + + // There were not any other types at the time this code was written, but + // fall back to letting the default fmt package handle it in case any new + // types are added. + default: + if v.CanInterface() { + fmt.Fprintf(d.w, "%v", v.Interface()) + } else { + fmt.Fprintf(d.w, "%v", v.String()) + } + } +} + +// fdump is a helper function to consolidate the logic from the various public +// methods which take varying writers and config states. 
+func fdump(cs *ConfigState, w io.Writer, a ...interface{}) { + for _, arg := range a { + if arg == nil { + w.Write(interfaceBytes) + w.Write(spaceBytes) + w.Write(nilAngleBytes) + w.Write(newlineBytes) + continue + } + + d := dumpState{w: w, cs: cs} + d.pointers = make(map[uintptr]int) + d.dump(reflect.ValueOf(arg)) + d.w.Write(newlineBytes) + } +} + +// Fdump formats and displays the passed arguments to io.Writer w. It formats +// exactly the same as Dump. +func Fdump(w io.Writer, a ...interface{}) { + fdump(&Config, w, a...) +} + +// Sdump returns a string with the passed arguments formatted exactly the same +// as Dump. +func Sdump(a ...interface{}) string { + var buf bytes.Buffer + fdump(&Config, &buf, a...) + return buf.String() +} + +/* +Dump displays the passed parameters to standard out with newlines, customizable +indentation, and additional debug information such as complete types and all +pointer addresses used to indirect to the final value. It provides the +following features over the built-in printing facilities provided by the fmt +package: + + * Pointers are dereferenced and followed + * Circular data structures are detected and handled properly + * Custom Stringer/error interfaces are optionally invoked, including + on unexported types + * Custom types which only implement the Stringer/error interfaces via + a pointer receiver are optionally invoked when passing non-pointer + variables + * Byte arrays and slices are dumped like the hexdump -C command which + includes offsets, byte values in hex, and ASCII output + +The configuration options are controlled by an exported package global, +spew.Config. See ConfigState for options documentation. + +See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to +get the formatted result as a string. +*/ +func Dump(a ...interface{}) { + fdump(&Config, os.Stdout, a...) 
+} diff --git a/src/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/format.go b/src/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/format.go new file mode 100644 index 00000000..ecf3b80e --- /dev/null +++ b/src/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/format.go @@ -0,0 +1,419 @@ +/* + * Copyright (c) 2013 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "fmt" + "reflect" + "strconv" + "strings" +) + +// supportedFlags is a list of all the character flags supported by fmt package. +const supportedFlags = "0-+# " + +// formatState implements the fmt.Formatter interface and contains information +// about the state of a formatting operation. The NewFormatter function can +// be used to get a new Formatter which can be used directly as arguments +// in standard fmt package printing calls. +type formatState struct { + value interface{} + fs fmt.State + depth int + pointers map[uintptr]int + ignoreNextType bool + cs *ConfigState +} + +// buildDefaultFormat recreates the original format string without precision +// and width information to pass in to fmt.Sprintf in the case of an +// unrecognized type. 
Unless new types are added to the language, this +// function won't ever be called. +func (f *formatState) buildDefaultFormat() (format string) { + buf := bytes.NewBuffer(percentBytes) + + for _, flag := range supportedFlags { + if f.fs.Flag(int(flag)) { + buf.WriteRune(flag) + } + } + + buf.WriteRune('v') + + format = buf.String() + return format +} + +// constructOrigFormat recreates the original format string including precision +// and width information to pass along to the standard fmt package. This allows +// automatic deferral of all format strings this package doesn't support. +func (f *formatState) constructOrigFormat(verb rune) (format string) { + buf := bytes.NewBuffer(percentBytes) + + for _, flag := range supportedFlags { + if f.fs.Flag(int(flag)) { + buf.WriteRune(flag) + } + } + + if width, ok := f.fs.Width(); ok { + buf.WriteString(strconv.Itoa(width)) + } + + if precision, ok := f.fs.Precision(); ok { + buf.Write(precisionBytes) + buf.WriteString(strconv.Itoa(precision)) + } + + buf.WriteRune(verb) + + format = buf.String() + return format +} + +// unpackValue returns values inside of non-nil interfaces when possible and +// ensures that types for values which have been unpacked from an interface +// are displayed when the show types flag is also set. +// This is useful for data types like structs, arrays, slices, and maps which +// can contain varying types packed inside an interface. +func (f *formatState) unpackValue(v reflect.Value) reflect.Value { + if v.Kind() == reflect.Interface { + f.ignoreNextType = false + if !v.IsNil() { + v = v.Elem() + } + } + return v +} + +// formatPtr handles formatting of pointers by indirecting them as necessary. +func (f *formatState) formatPtr(v reflect.Value) { + // Display nil if top level pointer is nil. 
+ showTypes := f.fs.Flag('#') + if v.IsNil() && (!showTypes || f.ignoreNextType) { + f.fs.Write(nilAngleBytes) + return + } + + // Remove pointers at or below the current depth from map used to detect + // circular refs. + for k, depth := range f.pointers { + if depth >= f.depth { + delete(f.pointers, k) + } + } + + // Keep list of all dereferenced pointers to possibly show later. + pointerChain := make([]uintptr, 0) + + // Figure out how many levels of indirection there are by derferencing + // pointers and unpacking interfaces down the chain while detecting circular + // references. + nilFound := false + cycleFound := false + indirects := 0 + ve := v + for ve.Kind() == reflect.Ptr { + if ve.IsNil() { + nilFound = true + break + } + indirects++ + addr := ve.Pointer() + pointerChain = append(pointerChain, addr) + if pd, ok := f.pointers[addr]; ok && pd < f.depth { + cycleFound = true + indirects-- + break + } + f.pointers[addr] = f.depth + + ve = ve.Elem() + if ve.Kind() == reflect.Interface { + if ve.IsNil() { + nilFound = true + break + } + ve = ve.Elem() + } + } + + // Display type or indirection level depending on flags. + if showTypes && !f.ignoreNextType { + f.fs.Write(openParenBytes) + f.fs.Write(bytes.Repeat(asteriskBytes, indirects)) + f.fs.Write([]byte(ve.Type().String())) + f.fs.Write(closeParenBytes) + } else { + if nilFound || cycleFound { + indirects += strings.Count(ve.Type().String(), "*") + } + f.fs.Write(openAngleBytes) + f.fs.Write([]byte(strings.Repeat("*", indirects))) + f.fs.Write(closeAngleBytes) + } + + // Display pointer information depending on flags. + if f.fs.Flag('+') && (len(pointerChain) > 0) { + f.fs.Write(openParenBytes) + for i, addr := range pointerChain { + if i > 0 { + f.fs.Write(pointerChainBytes) + } + printHexPtr(f.fs, addr) + } + f.fs.Write(closeParenBytes) + } + + // Display dereferenced value. 
+ switch { + case nilFound == true: + f.fs.Write(nilAngleBytes) + + case cycleFound == true: + f.fs.Write(circularShortBytes) + + default: + f.ignoreNextType = true + f.format(ve) + } +} + +// format is the main workhorse for providing the Formatter interface. It +// uses the passed reflect value to figure out what kind of object we are +// dealing with and formats it appropriately. It is a recursive function, +// however circular data structures are detected and handled properly. +func (f *formatState) format(v reflect.Value) { + // Handle invalid reflect values immediately. + kind := v.Kind() + if kind == reflect.Invalid { + f.fs.Write(invalidAngleBytes) + return + } + + // Handle pointers specially. + if kind == reflect.Ptr { + f.formatPtr(v) + return + } + + // Print type information unless already handled elsewhere. + if !f.ignoreNextType && f.fs.Flag('#') { + f.fs.Write(openParenBytes) + f.fs.Write([]byte(v.Type().String())) + f.fs.Write(closeParenBytes) + } + f.ignoreNextType = false + + // Call Stringer/error interfaces if they exist and the handle methods + // flag is enabled. + if !f.cs.DisableMethods { + if (kind != reflect.Invalid) && (kind != reflect.Interface) { + if handled := handleMethods(f.cs, f.fs, v); handled { + return + } + } + } + + switch kind { + case reflect.Invalid: + // Do nothing. We should never get here since invalid has already + // been handled above. 
+ + case reflect.Bool: + printBool(f.fs, v.Bool()) + + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + printInt(f.fs, v.Int(), 10) + + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + printUint(f.fs, v.Uint(), 10) + + case reflect.Float32: + printFloat(f.fs, v.Float(), 32) + + case reflect.Float64: + printFloat(f.fs, v.Float(), 64) + + case reflect.Complex64: + printComplex(f.fs, v.Complex(), 32) + + case reflect.Complex128: + printComplex(f.fs, v.Complex(), 64) + + case reflect.Slice: + if v.IsNil() { + f.fs.Write(nilAngleBytes) + break + } + fallthrough + + case reflect.Array: + f.fs.Write(openBracketBytes) + f.depth++ + if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { + f.fs.Write(maxShortBytes) + } else { + numEntries := v.Len() + for i := 0; i < numEntries; i++ { + if i > 0 { + f.fs.Write(spaceBytes) + } + f.ignoreNextType = true + f.format(f.unpackValue(v.Index(i))) + } + } + f.depth-- + f.fs.Write(closeBracketBytes) + + case reflect.String: + f.fs.Write([]byte(v.String())) + + case reflect.Interface: + // The only time we should get here is for nil interfaces due to + // unpackValue calls. + if v.IsNil() { + f.fs.Write(nilAngleBytes) + } + + case reflect.Ptr: + // Do nothing. We should never get here since pointers have already + // been handled above. 
+ + case reflect.Map: + // nil maps should be indicated as different than empty maps + if v.IsNil() { + f.fs.Write(nilAngleBytes) + break + } + + f.fs.Write(openMapBytes) + f.depth++ + if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { + f.fs.Write(maxShortBytes) + } else { + keys := v.MapKeys() + if f.cs.SortKeys { + sortValues(keys, f.cs) + } + for i, key := range keys { + if i > 0 { + f.fs.Write(spaceBytes) + } + f.ignoreNextType = true + f.format(f.unpackValue(key)) + f.fs.Write(colonBytes) + f.ignoreNextType = true + f.format(f.unpackValue(v.MapIndex(key))) + } + } + f.depth-- + f.fs.Write(closeMapBytes) + + case reflect.Struct: + numFields := v.NumField() + f.fs.Write(openBraceBytes) + f.depth++ + if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { + f.fs.Write(maxShortBytes) + } else { + vt := v.Type() + for i := 0; i < numFields; i++ { + if i > 0 { + f.fs.Write(spaceBytes) + } + vtf := vt.Field(i) + if f.fs.Flag('+') || f.fs.Flag('#') { + f.fs.Write([]byte(vtf.Name)) + f.fs.Write(colonBytes) + } + f.format(f.unpackValue(v.Field(i))) + } + } + f.depth-- + f.fs.Write(closeBraceBytes) + + case reflect.Uintptr: + printHexPtr(f.fs, uintptr(v.Uint())) + + case reflect.UnsafePointer, reflect.Chan, reflect.Func: + printHexPtr(f.fs, v.Pointer()) + + // There were not any other types at the time this code was written, but + // fall back to letting the default fmt package handle it if any get added. + default: + format := f.buildDefaultFormat() + if v.CanInterface() { + fmt.Fprintf(f.fs, format, v.Interface()) + } else { + fmt.Fprintf(f.fs, format, v.String()) + } + } +} + +// Format satisfies the fmt.Formatter interface. See NewFormatter for usage +// details. +func (f *formatState) Format(fs fmt.State, verb rune) { + f.fs = fs + + // Use standard formatting for verbs that are not v. 
+ if verb != 'v' { + format := f.constructOrigFormat(verb) + fmt.Fprintf(fs, format, f.value) + return + } + + if f.value == nil { + if fs.Flag('#') { + fs.Write(interfaceBytes) + } + fs.Write(nilAngleBytes) + return + } + + f.format(reflect.ValueOf(f.value)) +} + +// newFormatter is a helper function to consolidate the logic from the various +// public methods which take varying config states. +func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter { + fs := &formatState{value: v, cs: cs} + fs.pointers = make(map[uintptr]int) + return fs +} + +/* +NewFormatter returns a custom formatter that satisfies the fmt.Formatter +interface. As a result, it integrates cleanly with standard fmt package +printing functions. The formatter is useful for inline printing of smaller data +types similar to the standard %v format specifier. + +The custom formatter only responds to the %v (most compact), %+v (adds pointer +addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb +combinations. Any other verbs such as %x and %q will be sent to the the +standard fmt package for formatting. In addition, the custom formatter ignores +the width and precision arguments (however they will still work on the format +specifiers not handled by the custom formatter). + +Typically this function shouldn't be called directly. It is much easier to make +use of the custom formatter by calling one of the convenience functions such as +Printf, Println, or Fprintf. 
+*/ +func NewFormatter(v interface{}) fmt.Formatter { + return newFormatter(&Config, v) +} diff --git a/src/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/spew.go b/src/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/spew.go new file mode 100644 index 00000000..d8233f54 --- /dev/null +++ b/src/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/spew.go @@ -0,0 +1,148 @@ +/* + * Copyright (c) 2013 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "fmt" + "io" +) + +// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the formatted string as a value that satisfies error. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Errorf(format string, a ...interface{}) (err error) { + return fmt.Errorf(format, convertArgs(a)...) +} + +// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. 
It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b)) +func Fprint(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprint(w, convertArgs(a)...) +} + +// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { + return fmt.Fprintf(w, format, convertArgs(a)...) +} + +// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it +// passed with a default Formatter interface returned by NewFormatter. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b)) +func Fprintln(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprintln(w, convertArgs(a)...) +} + +// Print is a wrapper for fmt.Print that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b)) +func Print(a ...interface{}) (n int, err error) { + return fmt.Print(convertArgs(a)...) +} + +// Printf is a wrapper for fmt.Printf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. 
It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Printf(format string, a ...interface{}) (n int, err error) { + return fmt.Printf(format, convertArgs(a)...) +} + +// Println is a wrapper for fmt.Println that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b)) +func Println(a ...interface{}) (n int, err error) { + return fmt.Println(convertArgs(a)...) +} + +// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprint(a ...interface{}) string { + return fmt.Sprint(convertArgs(a)...) +} + +// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprintf(format string, a ...interface{}) string { + return fmt.Sprintf(format, convertArgs(a)...) +} + +// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it +// were passed with a default Formatter interface returned by NewFormatter. It +// returns the resulting string. 
See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprintln(a ...interface{}) string { + return fmt.Sprintln(convertArgs(a)...) +} + +// convertArgs accepts a slice of arguments and returns a slice of the same +// length with each argument converted to a default spew Formatter interface. +func convertArgs(args []interface{}) (formatters []interface{}) { + formatters = make([]interface{}, len(args)) + for index, arg := range args { + formatters[index] = NewFormatter(arg) + } + return formatters +} diff --git a/src/vendor/github.com/stretchr/testify/vendor/github.com/pmezard/go-difflib/LICENSE b/src/vendor/github.com/stretchr/testify/vendor/github.com/pmezard/go-difflib/LICENSE new file mode 100644 index 00000000..c67dad61 --- /dev/null +++ b/src/vendor/github.com/stretchr/testify/vendor/github.com/pmezard/go-difflib/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2013, Patrick Mezard +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + The names of its contributors may not be used to endorse or promote +products derived from this software without specific prior written +permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/src/vendor/github.com/stretchr/testify/vendor/github.com/pmezard/go-difflib/difflib/difflib.go b/src/vendor/github.com/stretchr/testify/vendor/github.com/pmezard/go-difflib/difflib/difflib.go new file mode 100644 index 00000000..64cc40fe --- /dev/null +++ b/src/vendor/github.com/stretchr/testify/vendor/github.com/pmezard/go-difflib/difflib/difflib.go @@ -0,0 +1,758 @@ +// Package difflib is a partial port of Python difflib module. +// +// It provides tools to compare sequences of strings and generate textual diffs. +// +// The following class and functions have been ported: +// +// - SequenceMatcher +// +// - unified_diff +// +// - context_diff +// +// Getting unified diffs was the main goal of the port. Keep in mind this code +// is mostly suitable to output text differences in a human friendly way, there +// are no guarantees generated diffs are consumable by patch(1). +package difflib + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strings" +) + +func min(a, b int) int { + if a < b { + return a + } + return b +} + +func max(a, b int) int { + if a > b { + return a + } + return b +} + +func calculateRatio(matches, length int) float64 { + if length > 0 { + return 2.0 * float64(matches) / float64(length) + } + return 1.0 +} + +type Match struct { + A int + B int + Size int +} + +type OpCode struct { + Tag byte + I1 int + I2 int + J1 int + J2 int +} + +// SequenceMatcher compares sequence of strings. 
The basic +// algorithm predates, and is a little fancier than, an algorithm +// published in the late 1980's by Ratcliff and Obershelp under the +// hyperbolic name "gestalt pattern matching". The basic idea is to find +// the longest contiguous matching subsequence that contains no "junk" +// elements (R-O doesn't address junk). The same idea is then applied +// recursively to the pieces of the sequences to the left and to the right +// of the matching subsequence. This does not yield minimal edit +// sequences, but does tend to yield matches that "look right" to people. +// +// SequenceMatcher tries to compute a "human-friendly diff" between two +// sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the +// longest *contiguous* & junk-free matching subsequence. That's what +// catches peoples' eyes. The Windows(tm) windiff has another interesting +// notion, pairing up elements that appear uniquely in each sequence. +// That, and the method here, appear to yield more intuitive difference +// reports than does diff. This method appears to be the least vulnerable +// to synching up on blocks of "junk lines", though (like blank lines in +// ordinary text files, or maybe "

" lines in HTML files). That may be +// because this is the only method of the 3 that has a *concept* of +// "junk" . +// +// Timing: Basic R-O is cubic time worst case and quadratic time expected +// case. SequenceMatcher is quadratic time for the worst case and has +// expected-case behavior dependent in a complicated way on how many +// elements the sequences have in common; best case time is linear. +type SequenceMatcher struct { + a []string + b []string + b2j map[string][]int + IsJunk func(string) bool + autoJunk bool + bJunk map[string]struct{} + matchingBlocks []Match + fullBCount map[string]int + bPopular map[string]struct{} + opCodes []OpCode +} + +func NewMatcher(a, b []string) *SequenceMatcher { + m := SequenceMatcher{autoJunk: true} + m.SetSeqs(a, b) + return &m +} + +func NewMatcherWithJunk(a, b []string, autoJunk bool, + isJunk func(string) bool) *SequenceMatcher { + + m := SequenceMatcher{IsJunk: isJunk, autoJunk: autoJunk} + m.SetSeqs(a, b) + return &m +} + +// Set two sequences to be compared. +func (m *SequenceMatcher) SetSeqs(a, b []string) { + m.SetSeq1(a) + m.SetSeq2(b) +} + +// Set the first sequence to be compared. The second sequence to be compared is +// not changed. +// +// SequenceMatcher computes and caches detailed information about the second +// sequence, so if you want to compare one sequence S against many sequences, +// use .SetSeq2(s) once and call .SetSeq1(x) repeatedly for each of the other +// sequences. +// +// See also SetSeqs() and SetSeq2(). +func (m *SequenceMatcher) SetSeq1(a []string) { + if &a == &m.a { + return + } + m.a = a + m.matchingBlocks = nil + m.opCodes = nil +} + +// Set the second sequence to be compared. The first sequence to be compared is +// not changed. 
+func (m *SequenceMatcher) SetSeq2(b []string) { + if &b == &m.b { + return + } + m.b = b + m.matchingBlocks = nil + m.opCodes = nil + m.fullBCount = nil + m.chainB() +} + +func (m *SequenceMatcher) chainB() { + // Populate line -> index mapping + b2j := map[string][]int{} + for i, s := range m.b { + indices := b2j[s] + indices = append(indices, i) + b2j[s] = indices + } + + // Purge junk elements + m.bJunk = map[string]struct{}{} + if m.IsJunk != nil { + junk := m.bJunk + for s, _ := range b2j { + if m.IsJunk(s) { + junk[s] = struct{}{} + } + } + for s, _ := range junk { + delete(b2j, s) + } + } + + // Purge remaining popular elements + popular := map[string]struct{}{} + n := len(m.b) + if m.autoJunk && n >= 200 { + ntest := n/100 + 1 + for s, indices := range b2j { + if len(indices) > ntest { + popular[s] = struct{}{} + } + } + for s, _ := range popular { + delete(b2j, s) + } + } + m.bPopular = popular + m.b2j = b2j +} + +func (m *SequenceMatcher) isBJunk(s string) bool { + _, ok := m.bJunk[s] + return ok +} + +// Find longest matching block in a[alo:ahi] and b[blo:bhi]. +// +// If IsJunk is not defined: +// +// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where +// alo <= i <= i+k <= ahi +// blo <= j <= j+k <= bhi +// and for all (i',j',k') meeting those conditions, +// k >= k' +// i <= i' +// and if i == i', j <= j' +// +// In other words, of all maximal matching blocks, return one that +// starts earliest in a, and of all those maximal matching blocks that +// start earliest in a, return the one that starts earliest in b. +// +// If IsJunk is defined, first the longest matching block is +// determined as above, but with the additional restriction that no +// junk element appears in the block. Then that block is extended as +// far as possible by matching (only) junk elements on both sides. So +// the resulting block never matches on junk except as identical junk +// happens to be adjacent to an "interesting" match. 
+// +// If no blocks match, return (alo, blo, 0). +func (m *SequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) Match { + // CAUTION: stripping common prefix or suffix would be incorrect. + // E.g., + // ab + // acab + // Longest matching block is "ab", but if common prefix is + // stripped, it's "a" (tied with "b"). UNIX(tm) diff does so + // strip, so ends up claiming that ab is changed to acab by + // inserting "ca" in the middle. That's minimal but unintuitive: + // "it's obvious" that someone inserted "ac" at the front. + // Windiff ends up at the same place as diff, but by pairing up + // the unique 'b's and then matching the first two 'a's. + besti, bestj, bestsize := alo, blo, 0 + + // find longest junk-free match + // during an iteration of the loop, j2len[j] = length of longest + // junk-free match ending with a[i-1] and b[j] + j2len := map[int]int{} + for i := alo; i != ahi; i++ { + // look at all instances of a[i] in b; note that because + // b2j has no junk keys, the loop is skipped if a[i] is junk + newj2len := map[int]int{} + for _, j := range m.b2j[m.a[i]] { + // a[i] matches b[j] + if j < blo { + continue + } + if j >= bhi { + break + } + k := j2len[j-1] + 1 + newj2len[j] = k + if k > bestsize { + besti, bestj, bestsize = i-k+1, j-k+1, k + } + } + j2len = newj2len + } + + // Extend the best by non-junk elements on each end. In particular, + // "popular" non-junk elements aren't in b2j, which greatly speeds + // the inner loop above, but also means "the best" match so far + // doesn't contain any junk *or* popular non-junk elements. 
+ for besti > alo && bestj > blo && !m.isBJunk(m.b[bestj-1]) && + m.a[besti-1] == m.b[bestj-1] { + besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 + } + for besti+bestsize < ahi && bestj+bestsize < bhi && + !m.isBJunk(m.b[bestj+bestsize]) && + m.a[besti+bestsize] == m.b[bestj+bestsize] { + bestsize += 1 + } + + // Now that we have a wholly interesting match (albeit possibly + // empty!), we may as well suck up the matching junk on each + // side of it too. Can't think of a good reason not to, and it + // saves post-processing the (possibly considerable) expense of + // figuring out what to do with it. In the case of an empty + // interesting match, this is clearly the right thing to do, + // because no other kind of match is possible in the regions. + for besti > alo && bestj > blo && m.isBJunk(m.b[bestj-1]) && + m.a[besti-1] == m.b[bestj-1] { + besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 + } + for besti+bestsize < ahi && bestj+bestsize < bhi && + m.isBJunk(m.b[bestj+bestsize]) && + m.a[besti+bestsize] == m.b[bestj+bestsize] { + bestsize += 1 + } + + return Match{A: besti, B: bestj, Size: bestsize} +} + +// Return list of triples describing matching subsequences. +// +// Each triple is of the form (i, j, n), and means that +// a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in +// i and in j. It's also guaranteed that if (i, j, n) and (i', j', n') are +// adjacent triples in the list, and the second is not the last triple in the +// list, then i+n != i' or j+n != j'. IOW, adjacent triples never describe +// adjacent equal blocks. +// +// The last triple is a dummy, (len(a), len(b), 0), and is the only +// triple with n==0. 
+func (m *SequenceMatcher) GetMatchingBlocks() []Match { + if m.matchingBlocks != nil { + return m.matchingBlocks + } + + var matchBlocks func(alo, ahi, blo, bhi int, matched []Match) []Match + matchBlocks = func(alo, ahi, blo, bhi int, matched []Match) []Match { + match := m.findLongestMatch(alo, ahi, blo, bhi) + i, j, k := match.A, match.B, match.Size + if match.Size > 0 { + if alo < i && blo < j { + matched = matchBlocks(alo, i, blo, j, matched) + } + matched = append(matched, match) + if i+k < ahi && j+k < bhi { + matched = matchBlocks(i+k, ahi, j+k, bhi, matched) + } + } + return matched + } + matched := matchBlocks(0, len(m.a), 0, len(m.b), nil) + + // It's possible that we have adjacent equal blocks in the + // matching_blocks list now. + nonAdjacent := []Match{} + i1, j1, k1 := 0, 0, 0 + for _, b := range matched { + // Is this block adjacent to i1, j1, k1? + i2, j2, k2 := b.A, b.B, b.Size + if i1+k1 == i2 && j1+k1 == j2 { + // Yes, so collapse them -- this just increases the length of + // the first block by the length of the second, and the first + // block so lengthened remains the block to compare against. + k1 += k2 + } else { + // Not adjacent. Remember the first block (k1==0 means it's + // the dummy we started with), and make the second block the + // new block to compare against. + if k1 > 0 { + nonAdjacent = append(nonAdjacent, Match{i1, j1, k1}) + } + i1, j1, k1 = i2, j2, k2 + } + } + if k1 > 0 { + nonAdjacent = append(nonAdjacent, Match{i1, j1, k1}) + } + + nonAdjacent = append(nonAdjacent, Match{len(m.a), len(m.b), 0}) + m.matchingBlocks = nonAdjacent + return m.matchingBlocks +} + +// Return list of 5-tuples describing how to turn a into b. +// +// Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple +// has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the +// tuple preceding it, and likewise for j1 == the previous j2. 
+// +// The tags are characters, with these meanings: +// +// 'r' (replace): a[i1:i2] should be replaced by b[j1:j2] +// +// 'd' (delete): a[i1:i2] should be deleted, j1==j2 in this case. +// +// 'i' (insert): b[j1:j2] should be inserted at a[i1:i1], i1==i2 in this case. +// +// 'e' (equal): a[i1:i2] == b[j1:j2] +func (m *SequenceMatcher) GetOpCodes() []OpCode { + if m.opCodes != nil { + return m.opCodes + } + i, j := 0, 0 + matching := m.GetMatchingBlocks() + opCodes := make([]OpCode, 0, len(matching)) + for _, m := range matching { + // invariant: we've pumped out correct diffs to change + // a[:i] into b[:j], and the next matching block is + // a[ai:ai+size] == b[bj:bj+size]. So we need to pump + // out a diff to change a[i:ai] into b[j:bj], pump out + // the matching block, and move (i,j) beyond the match + ai, bj, size := m.A, m.B, m.Size + tag := byte(0) + if i < ai && j < bj { + tag = 'r' + } else if i < ai { + tag = 'd' + } else if j < bj { + tag = 'i' + } + if tag > 0 { + opCodes = append(opCodes, OpCode{tag, i, ai, j, bj}) + } + i, j = ai+size, bj+size + // the list of matching blocks is terminated by a + // sentinel with size 0 + if size > 0 { + opCodes = append(opCodes, OpCode{'e', ai, i, bj, j}) + } + } + m.opCodes = opCodes + return m.opCodes +} + +// Isolate change clusters by eliminating ranges with no changes. +// +// Return a generator of groups with up to n lines of context. +// Each group is in the same format as returned by GetOpCodes(). +func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode { + if n < 0 { + n = 3 + } + codes := m.GetOpCodes() + if len(codes) == 0 { + codes = []OpCode{OpCode{'e', 0, 1, 0, 1}} + } + // Fixup leading and trailing groups if they show no changes. 
+ if codes[0].Tag == 'e' { + c := codes[0] + i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 + codes[0] = OpCode{c.Tag, max(i1, i2-n), i2, max(j1, j2-n), j2} + } + if codes[len(codes)-1].Tag == 'e' { + c := codes[len(codes)-1] + i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 + codes[len(codes)-1] = OpCode{c.Tag, i1, min(i2, i1+n), j1, min(j2, j1+n)} + } + nn := n + n + groups := [][]OpCode{} + group := []OpCode{} + for _, c := range codes { + i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 + // End the current group and start a new one whenever + // there is a large range with no changes. + if c.Tag == 'e' && i2-i1 > nn { + group = append(group, OpCode{c.Tag, i1, min(i2, i1+n), + j1, min(j2, j1+n)}) + groups = append(groups, group) + group = []OpCode{} + i1, j1 = max(i1, i2-n), max(j1, j2-n) + } + group = append(group, OpCode{c.Tag, i1, i2, j1, j2}) + } + if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') { + groups = append(groups, group) + } + return groups +} + +// Return a measure of the sequences' similarity (float in [0,1]). +// +// Where T is the total number of elements in both sequences, and +// M is the number of matches, this is 2.0*M / T. +// Note that this is 1 if the sequences are identical, and 0 if +// they have nothing in common. +// +// .Ratio() is expensive to compute if you haven't already computed +// .GetMatchingBlocks() or .GetOpCodes(), in which case you may +// want to try .QuickRatio() or .RealQuickRation() first to get an +// upper bound. +func (m *SequenceMatcher) Ratio() float64 { + matches := 0 + for _, m := range m.GetMatchingBlocks() { + matches += m.Size + } + return calculateRatio(matches, len(m.a)+len(m.b)) +} + +// Return an upper bound on ratio() relatively quickly. +// +// This isn't defined beyond that it is an upper bound on .Ratio(), and +// is faster to compute. 
+func (m *SequenceMatcher) QuickRatio() float64 { + // viewing a and b as multisets, set matches to the cardinality + // of their intersection; this counts the number of matches + // without regard to order, so is clearly an upper bound + if m.fullBCount == nil { + m.fullBCount = map[string]int{} + for _, s := range m.b { + m.fullBCount[s] = m.fullBCount[s] + 1 + } + } + + // avail[x] is the number of times x appears in 'b' less the + // number of times we've seen it in 'a' so far ... kinda + avail := map[string]int{} + matches := 0 + for _, s := range m.a { + n, ok := avail[s] + if !ok { + n = m.fullBCount[s] + } + avail[s] = n - 1 + if n > 0 { + matches += 1 + } + } + return calculateRatio(matches, len(m.a)+len(m.b)) +} + +// Return an upper bound on ratio() very quickly. +// +// This isn't defined beyond that it is an upper bound on .Ratio(), and +// is faster to compute than either .Ratio() or .QuickRatio(). +func (m *SequenceMatcher) RealQuickRatio() float64 { + la, lb := len(m.a), len(m.b) + return calculateRatio(min(la, lb), la+lb) +} + +// Convert range to the "ed" format +func formatRangeUnified(start, stop int) string { + // Per the diff spec at http://www.unix.org/single_unix_specification/ + beginning := start + 1 // lines start numbering with one + length := stop - start + if length == 1 { + return fmt.Sprintf("%d", beginning) + } + if length == 0 { + beginning -= 1 // empty ranges begin at line just before the range + } + return fmt.Sprintf("%d,%d", beginning, length) +} + +// Unified diff parameters +type UnifiedDiff struct { + A []string // First sequence lines + FromFile string // First file name + FromDate string // First file time + B []string // Second sequence lines + ToFile string // Second file name + ToDate string // Second file time + Eol string // Headers end of line, defaults to LF + Context int // Number of context lines +} + +// Compare two sequences of lines; generate the delta as a unified diff. 
+// +// Unified diffs are a compact way of showing line changes and a few +// lines of context. The number of context lines is set by 'n' which +// defaults to three. +// +// By default, the diff control lines (those with ---, +++, or @@) are +// created with a trailing newline. This is helpful so that inputs +// created from file.readlines() result in diffs that are suitable for +// file.writelines() since both the inputs and outputs have trailing +// newlines. +// +// For inputs that do not have trailing newlines, set the lineterm +// argument to "" so that the output will be uniformly newline free. +// +// The unidiff format normally has a header for filenames and modification +// times. Any or all of these may be specified using strings for +// 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'. +// The modification times are normally expressed in the ISO 8601 format. +func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error { + buf := bufio.NewWriter(writer) + defer buf.Flush() + w := func(format string, args ...interface{}) error { + _, err := buf.WriteString(fmt.Sprintf(format, args...)) + return err + } + + if len(diff.Eol) == 0 { + diff.Eol = "\n" + } + + started := false + m := NewMatcher(diff.A, diff.B) + for _, g := range m.GetGroupedOpCodes(diff.Context) { + if !started { + started = true + fromDate := "" + if len(diff.FromDate) > 0 { + fromDate = "\t" + diff.FromDate + } + toDate := "" + if len(diff.ToDate) > 0 { + toDate = "\t" + diff.ToDate + } + err := w("--- %s%s%s", diff.FromFile, fromDate, diff.Eol) + if err != nil { + return err + } + err = w("+++ %s%s%s", diff.ToFile, toDate, diff.Eol) + if err != nil { + return err + } + } + first, last := g[0], g[len(g)-1] + range1 := formatRangeUnified(first.I1, last.I2) + range2 := formatRangeUnified(first.J1, last.J2) + if err := w("@@ -%s +%s @@%s", range1, range2, diff.Eol); err != nil { + return err + } + for _, c := range g { + i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 + if c.Tag == 'e' { + 
for _, line := range diff.A[i1:i2] { + if err := w(" " + line); err != nil { + return err + } + } + continue + } + if c.Tag == 'r' || c.Tag == 'd' { + for _, line := range diff.A[i1:i2] { + if err := w("-" + line); err != nil { + return err + } + } + } + if c.Tag == 'r' || c.Tag == 'i' { + for _, line := range diff.B[j1:j2] { + if err := w("+" + line); err != nil { + return err + } + } + } + } + } + return nil +} + +// Like WriteUnifiedDiff but returns the diff a string. +func GetUnifiedDiffString(diff UnifiedDiff) (string, error) { + w := &bytes.Buffer{} + err := WriteUnifiedDiff(w, diff) + return string(w.Bytes()), err +} + +// Convert range to the "ed" format. +func formatRangeContext(start, stop int) string { + // Per the diff spec at http://www.unix.org/single_unix_specification/ + beginning := start + 1 // lines start numbering with one + length := stop - start + if length == 0 { + beginning -= 1 // empty ranges begin at line just before the range + } + if length <= 1 { + return fmt.Sprintf("%d", beginning) + } + return fmt.Sprintf("%d,%d", beginning, beginning+length-1) +} + +type ContextDiff UnifiedDiff + +// Compare two sequences of lines; generate the delta as a context diff. +// +// Context diffs are a compact way of showing line changes and a few +// lines of context. The number of context lines is set by diff.Context +// which defaults to three. +// +// By default, the diff control lines (those with *** or ---) are +// created with a trailing newline. +// +// For inputs that do not have trailing newlines, set the diff.Eol +// argument to "" so that the output will be uniformly newline free. +// +// The context diff format normally has a header for filenames and +// modification times. Any or all of these may be specified using +// strings for diff.FromFile, diff.ToFile, diff.FromDate, diff.ToDate. +// The modification times are normally expressed in the ISO 8601 format. +// If not specified, the strings default to blanks. 
+func WriteContextDiff(writer io.Writer, diff ContextDiff) error { + buf := bufio.NewWriter(writer) + defer buf.Flush() + var diffErr error + w := func(format string, args ...interface{}) { + _, err := buf.WriteString(fmt.Sprintf(format, args...)) + if diffErr == nil && err != nil { + diffErr = err + } + } + + if len(diff.Eol) == 0 { + diff.Eol = "\n" + } + + prefix := map[byte]string{ + 'i': "+ ", + 'd': "- ", + 'r': "! ", + 'e': " ", + } + + started := false + m := NewMatcher(diff.A, diff.B) + for _, g := range m.GetGroupedOpCodes(diff.Context) { + if !started { + started = true + fromDate := "" + if len(diff.FromDate) > 0 { + fromDate = "\t" + diff.FromDate + } + toDate := "" + if len(diff.ToDate) > 0 { + toDate = "\t" + diff.ToDate + } + w("*** %s%s%s", diff.FromFile, fromDate, diff.Eol) + w("--- %s%s%s", diff.ToFile, toDate, diff.Eol) + } + + first, last := g[0], g[len(g)-1] + w("***************" + diff.Eol) + + range1 := formatRangeContext(first.I1, last.I2) + w("*** %s ****%s", range1, diff.Eol) + for _, c := range g { + if c.Tag == 'r' || c.Tag == 'd' { + for _, cc := range g { + if cc.Tag == 'i' { + continue + } + for _, line := range diff.A[cc.I1:cc.I2] { + w(prefix[cc.Tag] + line) + } + } + break + } + } + + range2 := formatRangeContext(first.J1, last.J2) + w("--- %s ----%s", range2, diff.Eol) + for _, c := range g { + if c.Tag == 'r' || c.Tag == 'i' { + for _, cc := range g { + if cc.Tag == 'd' { + continue + } + for _, line := range diff.B[cc.J1:cc.J2] { + w(prefix[cc.Tag] + line) + } + } + break + } + } + } + return diffErr +} + +// Like WriteContextDiff but returns the diff a string. +func GetContextDiffString(diff ContextDiff) (string, error) { + w := &bytes.Buffer{} + err := WriteContextDiff(w, diff) + return string(w.Bytes()), err +} + +// Split a string on "\n" while preserving them. The output can be used +// as input for UnifiedDiff and ContextDiff structures. 
+func SplitLines(s string) []string { + lines := strings.SplitAfter(s, "\n") + lines[len(lines)-1] += "\n" + return lines +} diff --git a/src/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/.gitignore b/src/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/.gitignore new file mode 100644 index 00000000..00268614 --- /dev/null +++ b/src/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/.gitignore @@ -0,0 +1,22 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe diff --git a/src/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/LICENSE.md b/src/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/LICENSE.md new file mode 100644 index 00000000..21999458 --- /dev/null +++ b/src/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/LICENSE.md @@ -0,0 +1,23 @@ +objx - by Mat Ryer and Tyler Bunnell + +The MIT License (MIT) + +Copyright (c) 2014 Stretchr, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/src/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/README.md b/src/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/README.md new file mode 100644 index 00000000..4aa18068 --- /dev/null +++ b/src/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/README.md @@ -0,0 +1,3 @@ +# objx + + * Jump into the [API Documentation](http://godoc.org/github.com/stretchr/objx) diff --git a/src/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/accessors.go b/src/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/accessors.go new file mode 100644 index 00000000..721bcac7 --- /dev/null +++ b/src/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/accessors.go @@ -0,0 +1,179 @@ +package objx + +import ( + "fmt" + "regexp" + "strconv" + "strings" +) + +// arrayAccesRegexString is the regex used to extract the array number +// from the access path +const arrayAccesRegexString = `^(.+)\[([0-9]+)\]$` + +// arrayAccesRegex is the compiled arrayAccesRegexString +var arrayAccesRegex = regexp.MustCompile(arrayAccesRegexString) + +// Get gets the value using the specified selector and +// returns it inside a new Obj object. +// +// If it cannot find the value, Get will return a nil +// value inside an instance of Obj. +// +// Get can only operate directly on map[string]interface{} and []interface. 
+// +// Example +// +// To access the title of the third chapter of the second book, do: +// +// o.Get("books[1].chapters[2].title") +func (m Map) Get(selector string) *Value { + rawObj := access(m, selector, nil, false, false) + return &Value{data: rawObj} +} + +// Set sets the value using the specified selector and +// returns the object on which Set was called. +// +// Set can only operate directly on map[string]interface{} and []interface +// +// Example +// +// To set the title of the third chapter of the second book, do: +// +// o.Set("books[1].chapters[2].title","Time to Go") +func (m Map) Set(selector string, value interface{}) Map { + access(m, selector, value, true, false) + return m +} + +// access accesses the object using the selector and performs the +// appropriate action. +func access(current, selector, value interface{}, isSet, panics bool) interface{} { + + switch selector.(type) { + case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64: + + if array, ok := current.([]interface{}); ok { + index := intFromInterface(selector) + + if index >= len(array) { + if panics { + panic(fmt.Sprintf("objx: Index %d is out of range. Slice only contains %d items.", index, len(array))) + } + return nil + } + + return array[index] + } + + return nil + + case string: + + selStr := selector.(string) + selSegs := strings.SplitN(selStr, PathSeparator, 2) + thisSel := selSegs[0] + index := -1 + var err error + + // https://github.com/stretchr/objx/issues/12 + if strings.Contains(thisSel, "[") { + + arrayMatches := arrayAccesRegex.FindStringSubmatch(thisSel) + + if len(arrayMatches) > 0 { + + // Get the key into the map + thisSel = arrayMatches[1] + + // Get the index into the array at the key + index, err = strconv.Atoi(arrayMatches[2]) + + if err != nil { + // This should never happen. If it does, something has gone + // seriously wrong. Panic. + panic("objx: Array index is not an integer. 
Must use array[int].") + } + + } + } + + if curMap, ok := current.(Map); ok { + current = map[string]interface{}(curMap) + } + + // get the object in question + switch current.(type) { + case map[string]interface{}: + curMSI := current.(map[string]interface{}) + if len(selSegs) <= 1 && isSet { + curMSI[thisSel] = value + return nil + } else { + current = curMSI[thisSel] + } + default: + current = nil + } + + if current == nil && panics { + panic(fmt.Sprintf("objx: '%v' invalid on object.", selector)) + } + + // do we need to access the item of an array? + if index > -1 { + if array, ok := current.([]interface{}); ok { + if index < len(array) { + current = array[index] + } else { + if panics { + panic(fmt.Sprintf("objx: Index %d is out of range. Slice only contains %d items.", index, len(array))) + } + current = nil + } + } + } + + if len(selSegs) > 1 { + current = access(current, selSegs[1], value, isSet, panics) + } + + } + + return current + +} + +// intFromInterface converts an interface object to the largest +// representation of an unsigned integer using a type switch and +// assertions +func intFromInterface(selector interface{}) int { + var value int + switch selector.(type) { + case int: + value = selector.(int) + case int8: + value = int(selector.(int8)) + case int16: + value = int(selector.(int16)) + case int32: + value = int(selector.(int32)) + case int64: + value = int(selector.(int64)) + case uint: + value = int(selector.(uint)) + case uint8: + value = int(selector.(uint8)) + case uint16: + value = int(selector.(uint16)) + case uint32: + value = int(selector.(uint32)) + case uint64: + value = int(selector.(uint64)) + default: + panic("objx: array access argument is not an integer type (this should never happen)") + } + + return value +} diff --git a/src/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/codegen/array-access.txt b/src/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/codegen/array-access.txt new file 
mode 100644 index 00000000..30602347 --- /dev/null +++ b/src/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/codegen/array-access.txt @@ -0,0 +1,14 @@ + case []{1}: + a := object.([]{1}) + if isSet { + a[index] = value.({1}) + } else { + if index >= len(a) { + if panics { + panic(fmt.Sprintf("objx: Index %d is out of range because the []{1} only contains %d items.", index, len(a))) + } + return nil + } else { + return a[index] + } + } diff --git a/src/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/codegen/index.html b/src/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/codegen/index.html new file mode 100644 index 00000000..379ffc3c --- /dev/null +++ b/src/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/codegen/index.html @@ -0,0 +1,86 @@ + + + + Codegen + + + + + +

+ Template +

+

+ Use {x} as a placeholder for each argument. +

+ + +

+ Arguments (comma separated) +

+

+ One block per line +

+ + +

+ Output +

+ + + + + + + + diff --git a/src/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/codegen/template.txt b/src/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/codegen/template.txt new file mode 100644 index 00000000..b396900b --- /dev/null +++ b/src/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/codegen/template.txt @@ -0,0 +1,286 @@ +/* + {4} ({1} and []{1}) + -------------------------------------------------- +*/ + +// {4} gets the value as a {1}, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) {4}(optionalDefault ...{1}) {1} { + if s, ok := v.data.({1}); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return {3} +} + +// Must{4} gets the value as a {1}. +// +// Panics if the object is not a {1}. +func (v *Value) Must{4}() {1} { + return v.data.({1}) +} + +// {4}Slice gets the value as a []{1}, returns the optionalDefault +// value or nil if the value is not a []{1}. +func (v *Value) {4}Slice(optionalDefault ...[]{1}) []{1} { + if s, ok := v.data.([]{1}); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// Must{4}Slice gets the value as a []{1}. +// +// Panics if the object is not a []{1}. +func (v *Value) Must{4}Slice() []{1} { + return v.data.([]{1}) +} + +// Is{4} gets whether the object contained is a {1} or not. +func (v *Value) Is{4}() bool { + _, ok := v.data.({1}) + return ok +} + +// Is{4}Slice gets whether the object contained is a []{1} or not. +func (v *Value) Is{4}Slice() bool { + _, ok := v.data.([]{1}) + return ok +} + +// Each{4} calls the specified callback for each object +// in the []{1}. +// +// Panics if the object is the wrong type. 
+func (v *Value) Each{4}(callback func(int, {1}) bool) *Value { + + for index, val := range v.Must{4}Slice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// Where{4} uses the specified decider function to select items +// from the []{1}. The object contained in the result will contain +// only the selected items. +func (v *Value) Where{4}(decider func(int, {1}) bool) *Value { + + var selected []{1} + + v.Each{4}(func(index int, val {1}) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data:selected} + +} + +// Group{4} uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]{1}. +func (v *Value) Group{4}(grouper func(int, {1}) string) *Value { + + groups := make(map[string][]{1}) + + v.Each{4}(func(index int, val {1}) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]{1}, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data:groups} + +} + +// Replace{4} uses the specified function to replace each {1}s +// by iterating each item. The data in the returned result will be a +// []{1} containing the replaced items. +func (v *Value) Replace{4}(replacer func(int, {1}) {1}) *Value { + + arr := v.Must{4}Slice() + replaced := make([]{1}, len(arr)) + + v.Each{4}(func(index int, val {1}) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data:replaced} + +} + +// Collect{4} uses the specified collector function to collect a value +// for each of the {1}s in the slice. The data returned will be a +// []interface{}. 
+func (v *Value) Collect{4}(collector func(int, {1}) interface{}) *Value { + + arr := v.Must{4}Slice() + collected := make([]interface{}, len(arr)) + + v.Each{4}(func(index int, val {1}) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data:collected} +} + +// ************************************************************ +// TESTS +// ************************************************************ + +func Test{4}(t *testing.T) { + + val := {1}( {2} ) + m := map[string]interface{}{"value": val, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").{4}()) + assert.Equal(t, val, New(m).Get("value").Must{4}()) + assert.Equal(t, {1}({3}), New(m).Get("nothing").{4}()) + assert.Equal(t, val, New(m).Get("nothing").{4}({2})) + + assert.Panics(t, func() { + New(m).Get("age").Must{4}() + }) + +} + +func Test{4}Slice(t *testing.T) { + + val := {1}( {2} ) + m := map[string]interface{}{"value": []{1}{ val }, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").{4}Slice()[0]) + assert.Equal(t, val, New(m).Get("value").Must{4}Slice()[0]) + assert.Equal(t, []{1}(nil), New(m).Get("nothing").{4}Slice()) + assert.Equal(t, val, New(m).Get("nothing").{4}Slice( []{1}{ {1}({2}) } )[0]) + + assert.Panics(t, func() { + New(m).Get("nothing").Must{4}Slice() + }) + +} + +func TestIs{4}(t *testing.T) { + + var v *Value + + v = &Value{data: {1}({2})} + assert.True(t, v.Is{4}()) + + v = &Value{data: []{1}{ {1}({2}) }} + assert.True(t, v.Is{4}Slice()) + +} + +func TestEach{4}(t *testing.T) { + + v := &Value{data: []{1}{ {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}) }} + count := 0 + replacedVals := make([]{1}, 0) + assert.Equal(t, v, v.Each{4}(func(i int, val {1}) bool { + + count++ + replacedVals = append(replacedVals, val) + + // abort early + if i == 2 { + return false + } + + return true + + })) + + assert.Equal(t, count, 3) + assert.Equal(t, replacedVals[0], v.Must{4}Slice()[0]) + assert.Equal(t, replacedVals[1], v.Must{4}Slice()[1]) + 
assert.Equal(t, replacedVals[2], v.Must{4}Slice()[2]) + +} + +func TestWhere{4}(t *testing.T) { + + v := &Value{data: []{1}{ {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}) }} + + selected := v.Where{4}(func(i int, val {1}) bool { + return i%2==0 + }).Must{4}Slice() + + assert.Equal(t, 3, len(selected)) + +} + +func TestGroup{4}(t *testing.T) { + + v := &Value{data: []{1}{ {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}) }} + + grouped := v.Group{4}(func(i int, val {1}) string { + return fmt.Sprintf("%v", i%2==0) + }).data.(map[string][]{1}) + + assert.Equal(t, 2, len(grouped)) + assert.Equal(t, 3, len(grouped["true"])) + assert.Equal(t, 3, len(grouped["false"])) + +} + +func TestReplace{4}(t *testing.T) { + + v := &Value{data: []{1}{ {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}) }} + + rawArr := v.Must{4}Slice() + + replaced := v.Replace{4}(func(index int, val {1}) {1} { + if index < len(rawArr)-1 { + return rawArr[index+1] + } + return rawArr[0] + }) + + replacedArr := replaced.Must{4}Slice() + if assert.Equal(t, 6, len(replacedArr)) { + assert.Equal(t, replacedArr[0], rawArr[1]) + assert.Equal(t, replacedArr[1], rawArr[2]) + assert.Equal(t, replacedArr[2], rawArr[3]) + assert.Equal(t, replacedArr[3], rawArr[4]) + assert.Equal(t, replacedArr[4], rawArr[5]) + assert.Equal(t, replacedArr[5], rawArr[0]) + } + +} + +func TestCollect{4}(t *testing.T) { + + v := &Value{data: []{1}{ {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}) }} + + collected := v.Collect{4}(func(index int, val {1}) interface{} { + return index + }) + + collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { + assert.Equal(t, collectedArr[0], 0) + assert.Equal(t, collectedArr[1], 1) + assert.Equal(t, collectedArr[2], 2) + assert.Equal(t, collectedArr[3], 3) + assert.Equal(t, collectedArr[4], 4) + assert.Equal(t, collectedArr[5], 5) + } + +} diff --git 
a/src/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/codegen/types_list.txt b/src/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/codegen/types_list.txt new file mode 100644 index 00000000..069d43d8 --- /dev/null +++ b/src/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/codegen/types_list.txt @@ -0,0 +1,20 @@ +Interface,interface{},"something",nil,Inter +Map,map[string]interface{},map[string]interface{}{"name":"Tyler"},nil,MSI +ObjxMap,(Map),New(1),New(nil),ObjxMap +Bool,bool,true,false,Bool +String,string,"hello","",Str +Int,int,1,0,Int +Int8,int8,1,0,Int8 +Int16,int16,1,0,Int16 +Int32,int32,1,0,Int32 +Int64,int64,1,0,Int64 +Uint,uint,1,0,Uint +Uint8,uint8,1,0,Uint8 +Uint16,uint16,1,0,Uint16 +Uint32,uint32,1,0,Uint32 +Uint64,uint64,1,0,Uint64 +Uintptr,uintptr,1,0,Uintptr +Float32,float32,1,0,Float32 +Float64,float64,1,0,Float64 +Complex64,complex64,1,0,Complex64 +Complex128,complex128,1,0,Complex128 diff --git a/src/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/constants.go b/src/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/constants.go new file mode 100644 index 00000000..f9eb42a2 --- /dev/null +++ b/src/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/constants.go @@ -0,0 +1,13 @@ +package objx + +const ( + // PathSeparator is the character used to separate the elements + // of the keypath. + // + // For example, `location.address.city` + PathSeparator string = "." + + // SignatureSeparator is the character that is used to + // separate the Base64 string from the security signature. 
+ SignatureSeparator = "_" +) diff --git a/src/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/conversions.go b/src/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/conversions.go new file mode 100644 index 00000000..9cdfa9f9 --- /dev/null +++ b/src/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/conversions.go @@ -0,0 +1,117 @@ +package objx + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "net/url" +) + +// JSON converts the contained object to a JSON string +// representation +func (m Map) JSON() (string, error) { + + result, err := json.Marshal(m) + + if err != nil { + err = errors.New("objx: JSON encode failed with: " + err.Error()) + } + + return string(result), err + +} + +// MustJSON converts the contained object to a JSON string +// representation and panics if there is an error +func (m Map) MustJSON() string { + result, err := m.JSON() + if err != nil { + panic(err.Error()) + } + return result +} + +// Base64 converts the contained object to a Base64 string +// representation of the JSON string representation +func (m Map) Base64() (string, error) { + + var buf bytes.Buffer + + jsonData, err := m.JSON() + if err != nil { + return "", err + } + + encoder := base64.NewEncoder(base64.StdEncoding, &buf) + encoder.Write([]byte(jsonData)) + encoder.Close() + + return buf.String(), nil + +} + +// MustBase64 converts the contained object to a Base64 string +// representation of the JSON string representation and panics +// if there is an error +func (m Map) MustBase64() string { + result, err := m.Base64() + if err != nil { + panic(err.Error()) + } + return result +} + +// SignedBase64 converts the contained object to a Base64 string +// representation of the JSON string representation and signs it +// using the provided key. 
+func (m Map) SignedBase64(key string) (string, error) { + + base64, err := m.Base64() + if err != nil { + return "", err + } + + sig := HashWithKey(base64, key) + + return base64 + SignatureSeparator + sig, nil + +} + +// MustSignedBase64 converts the contained object to a Base64 string +// representation of the JSON string representation and signs it +// using the provided key and panics if there is an error +func (m Map) MustSignedBase64(key string) string { + result, err := m.SignedBase64(key) + if err != nil { + panic(err.Error()) + } + return result +} + +/* + URL Query + ------------------------------------------------ +*/ + +// URLValues creates a url.Values object from an Obj. This +// function requires that the wrapped object be a map[string]interface{} +func (m Map) URLValues() url.Values { + + vals := make(url.Values) + + for k, v := range m { + //TODO: can this be done without sprintf? + vals.Set(k, fmt.Sprintf("%v", v)) + } + + return vals +} + +// URLQuery gets an encoded URL query representing the given +// Obj. This function requires that the wrapped object be a +// map[string]interface{} +func (m Map) URLQuery() (string, error) { + return m.URLValues().Encode(), nil +} diff --git a/src/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/doc.go b/src/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/doc.go new file mode 100644 index 00000000..47bf85e4 --- /dev/null +++ b/src/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/doc.go @@ -0,0 +1,72 @@ +// objx - Go package for dealing with maps, slices, JSON and other data. +// +// Overview +// +// Objx provides the `objx.Map` type, which is a `map[string]interface{}` that exposes +// a powerful `Get` method (among others) that allows you to easily and quickly get +// access to data within the map, without having to worry too much about type assertions, +// missing data, default values etc. 
+// +// Pattern +// +// Objx uses a preditable pattern to make access data from within `map[string]interface{}'s +// easy. +// +// Call one of the `objx.` functions to create your `objx.Map` to get going: +// +// m, err := objx.FromJSON(json) +// +// NOTE: Any methods or functions with the `Must` prefix will panic if something goes wrong, +// the rest will be optimistic and try to figure things out without panicking. +// +// Use `Get` to access the value you're interested in. You can use dot and array +// notation too: +// +// m.Get("places[0].latlng") +// +// Once you have saught the `Value` you're interested in, you can use the `Is*` methods +// to determine its type. +// +// if m.Get("code").IsStr() { /* ... */ } +// +// Or you can just assume the type, and use one of the strong type methods to +// extract the real value: +// +// m.Get("code").Int() +// +// If there's no value there (or if it's the wrong type) then a default value +// will be returned, or you can be explicit about the default value. +// +// Get("code").Int(-1) +// +// If you're dealing with a slice of data as a value, Objx provides many useful +// methods for iterating, manipulating and selecting that data. You can find out more +// by exploring the index below. +// +// Reading data +// +// A simple example of how to use Objx: +// +// // use MustFromJSON to make an objx.Map from some JSON +// m := objx.MustFromJSON(`{"name": "Mat", "age": 30}`) +// +// // get the details +// name := m.Get("name").Str() +// age := m.Get("age").Int() +// +// // get their nickname (or use their name if they +// // don't have one) +// nickname := m.Get("nickname").Str(name) +// +// Ranging +// +// Since `objx.Map` is a `map[string]interface{}` you can treat it as such. For +// example, to `range` the data, do what you would expect: +// +// m := objx.MustFromJSON(json) +// for key, value := range m { +// +// /* ... do your magic ... 
*/ +// +// } +package objx diff --git a/src/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/map.go b/src/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/map.go new file mode 100644 index 00000000..eb6ed8e2 --- /dev/null +++ b/src/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/map.go @@ -0,0 +1,222 @@ +package objx + +import ( + "encoding/base64" + "encoding/json" + "errors" + "io/ioutil" + "net/url" + "strings" +) + +// MSIConvertable is an interface that defines methods for converting your +// custom types to a map[string]interface{} representation. +type MSIConvertable interface { + // MSI gets a map[string]interface{} (msi) representing the + // object. + MSI() map[string]interface{} +} + +// Map provides extended functionality for working with +// untyped data, in particular map[string]interface (msi). +type Map map[string]interface{} + +// Value returns the internal value instance +func (m Map) Value() *Value { + return &Value{data: m} +} + +// Nil represents a nil Map. +var Nil Map = New(nil) + +// New creates a new Map containing the map[string]interface{} in the data argument. +// If the data argument is not a map[string]interface, New attempts to call the +// MSI() method on the MSIConvertable interface to create one. +func New(data interface{}) Map { + if _, ok := data.(map[string]interface{}); !ok { + if converter, ok := data.(MSIConvertable); ok { + data = converter.MSI() + } else { + return nil + } + } + return Map(data.(map[string]interface{})) +} + +// MSI creates a map[string]interface{} and puts it inside a new Map. +// +// The arguments follow a key, value pattern. +// +// Panics +// +// Panics if any key arugment is non-string or if there are an odd number of arguments. 
+// +// Example +// +// To easily create Maps: +// +// m := objx.MSI("name", "Mat", "age", 29, "subobj", objx.MSI("active", true)) +// +// // creates an Map equivalent to +// m := objx.New(map[string]interface{}{"name": "Mat", "age": 29, "subobj": map[string]interface{}{"active": true}}) +func MSI(keyAndValuePairs ...interface{}) Map { + + newMap := make(map[string]interface{}) + keyAndValuePairsLen := len(keyAndValuePairs) + + if keyAndValuePairsLen%2 != 0 { + panic("objx: MSI must have an even number of arguments following the 'key, value' pattern.") + } + + for i := 0; i < keyAndValuePairsLen; i = i + 2 { + + key := keyAndValuePairs[i] + value := keyAndValuePairs[i+1] + + // make sure the key is a string + keyString, keyStringOK := key.(string) + if !keyStringOK { + panic("objx: MSI must follow 'string, interface{}' pattern. " + keyString + " is not a valid key.") + } + + newMap[keyString] = value + + } + + return New(newMap) +} + +// ****** Conversion Constructors + +// MustFromJSON creates a new Map containing the data specified in the +// jsonString. +// +// Panics if the JSON is invalid. +func MustFromJSON(jsonString string) Map { + o, err := FromJSON(jsonString) + + if err != nil { + panic("objx: MustFromJSON failed with error: " + err.Error()) + } + + return o +} + +// FromJSON creates a new Map containing the data specified in the +// jsonString. +// +// Returns an error if the JSON is invalid. +func FromJSON(jsonString string) (Map, error) { + + var data interface{} + err := json.Unmarshal([]byte(jsonString), &data) + + if err != nil { + return Nil, err + } + + return New(data), nil + +} + +// FromBase64 creates a new Obj containing the data specified +// in the Base64 string. 
+// +// The string is an encoded JSON string returned by Base64 +func FromBase64(base64String string) (Map, error) { + + decoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(base64String)) + + decoded, err := ioutil.ReadAll(decoder) + if err != nil { + return nil, err + } + + return FromJSON(string(decoded)) +} + +// MustFromBase64 creates a new Obj containing the data specified +// in the Base64 string and panics if there is an error. +// +// The string is an encoded JSON string returned by Base64 +func MustFromBase64(base64String string) Map { + + result, err := FromBase64(base64String) + + if err != nil { + panic("objx: MustFromBase64 failed with error: " + err.Error()) + } + + return result +} + +// FromSignedBase64 creates a new Obj containing the data specified +// in the Base64 string. +// +// The string is an encoded JSON string returned by SignedBase64 +func FromSignedBase64(base64String, key string) (Map, error) { + parts := strings.Split(base64String, SignatureSeparator) + if len(parts) != 2 { + return nil, errors.New("objx: Signed base64 string is malformed.") + } + + sig := HashWithKey(parts[0], key) + if parts[1] != sig { + return nil, errors.New("objx: Signature for base64 data does not match.") + } + + return FromBase64(parts[0]) +} + +// MustFromSignedBase64 creates a new Obj containing the data specified +// in the Base64 string and panics if there is an error. +// +// The string is an encoded JSON string returned by Base64 +func MustFromSignedBase64(base64String, key string) Map { + + result, err := FromSignedBase64(base64String, key) + + if err != nil { + panic("objx: MustFromSignedBase64 failed with error: " + err.Error()) + } + + return result +} + +// FromURLQuery generates a new Obj by parsing the specified +// query. +// +// For queries with multiple values, the first value is selected. 
+func FromURLQuery(query string) (Map, error) { + + vals, err := url.ParseQuery(query) + + if err != nil { + return nil, err + } + + m := make(map[string]interface{}) + for k, vals := range vals { + m[k] = vals[0] + } + + return New(m), nil +} + +// MustFromURLQuery generates a new Obj by parsing the specified +// query. +// +// For queries with multiple values, the first value is selected. +// +// Panics if it encounters an error +func MustFromURLQuery(query string) Map { + + o, err := FromURLQuery(query) + + if err != nil { + panic("objx: MustFromURLQuery failed with error: " + err.Error()) + } + + return o + +} diff --git a/src/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/mutations.go b/src/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/mutations.go new file mode 100644 index 00000000..b35c8639 --- /dev/null +++ b/src/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/mutations.go @@ -0,0 +1,81 @@ +package objx + +// Exclude returns a new Map with the keys in the specified []string +// excluded. +func (d Map) Exclude(exclude []string) Map { + + excluded := make(Map) + for k, v := range d { + var shouldInclude bool = true + for _, toExclude := range exclude { + if k == toExclude { + shouldInclude = false + break + } + } + if shouldInclude { + excluded[k] = v + } + } + + return excluded +} + +// Copy creates a shallow copy of the Obj. +func (m Map) Copy() Map { + copied := make(map[string]interface{}) + for k, v := range m { + copied[k] = v + } + return New(copied) +} + +// Merge blends the specified map with a copy of this map and returns the result. +// +// Keys that appear in both will be selected from the specified map. +// This method requires that the wrapped object be a map[string]interface{} +func (m Map) Merge(merge Map) Map { + return m.Copy().MergeHere(merge) +} + +// Merge blends the specified map with this map and returns the current map. 
+// +// Keys that appear in both will be selected from the specified map. The original map +// will be modified. This method requires that +// the wrapped object be a map[string]interface{} +func (m Map) MergeHere(merge Map) Map { + + for k, v := range merge { + m[k] = v + } + + return m + +} + +// Transform builds a new Obj giving the transformer a chance +// to change the keys and values as it goes. This method requires that +// the wrapped object be a map[string]interface{} +func (m Map) Transform(transformer func(key string, value interface{}) (string, interface{})) Map { + newMap := make(map[string]interface{}) + for k, v := range m { + modifiedKey, modifiedVal := transformer(k, v) + newMap[modifiedKey] = modifiedVal + } + return New(newMap) +} + +// TransformKeys builds a new map using the specified key mapping. +// +// Unspecified keys will be unaltered. +// This method requires that the wrapped object be a map[string]interface{} +func (m Map) TransformKeys(mapping map[string]string) Map { + return m.Transform(func(key string, value interface{}) (string, interface{}) { + + if newKey, ok := mapping[key]; ok { + return newKey, value + } + + return key, value + }) +} diff --git a/src/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/security.go b/src/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/security.go new file mode 100644 index 00000000..fdd6be9c --- /dev/null +++ b/src/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/security.go @@ -0,0 +1,14 @@ +package objx + +import ( + "crypto/sha1" + "encoding/hex" +) + +// HashWithKey hashes the specified string using the security +// key. 
+func HashWithKey(data, key string) string { + hash := sha1.New() + hash.Write([]byte(data + ":" + key)) + return hex.EncodeToString(hash.Sum(nil)) +} diff --git a/src/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/tests.go b/src/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/tests.go new file mode 100644 index 00000000..d9e0b479 --- /dev/null +++ b/src/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/tests.go @@ -0,0 +1,17 @@ +package objx + +// Has gets whether there is something at the specified selector +// or not. +// +// If m is nil, Has will always return false. +func (m Map) Has(selector string) bool { + if m == nil { + return false + } + return !m.Get(selector).IsNil() +} + +// IsNil gets whether the data is nil or not. +func (v *Value) IsNil() bool { + return v == nil || v.data == nil +} diff --git a/src/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/type_specific_codegen.go b/src/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/type_specific_codegen.go new file mode 100644 index 00000000..f3ecb29b --- /dev/null +++ b/src/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/type_specific_codegen.go @@ -0,0 +1,2881 @@ +package objx + +/* + Inter (interface{} and []interface{}) + -------------------------------------------------- +*/ + +// Inter gets the value as a interface{}, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Inter(optionalDefault ...interface{}) interface{} { + if s, ok := v.data.(interface{}); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustInter gets the value as a interface{}. +// +// Panics if the object is not a interface{}. 
+func (v *Value) MustInter() interface{} { + return v.data.(interface{}) +} + +// InterSlice gets the value as a []interface{}, returns the optionalDefault +// value or nil if the value is not a []interface{}. +func (v *Value) InterSlice(optionalDefault ...[]interface{}) []interface{} { + if s, ok := v.data.([]interface{}); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustInterSlice gets the value as a []interface{}. +// +// Panics if the object is not a []interface{}. +func (v *Value) MustInterSlice() []interface{} { + return v.data.([]interface{}) +} + +// IsInter gets whether the object contained is a interface{} or not. +func (v *Value) IsInter() bool { + _, ok := v.data.(interface{}) + return ok +} + +// IsInterSlice gets whether the object contained is a []interface{} or not. +func (v *Value) IsInterSlice() bool { + _, ok := v.data.([]interface{}) + return ok +} + +// EachInter calls the specified callback for each object +// in the []interface{}. +// +// Panics if the object is the wrong type. +func (v *Value) EachInter(callback func(int, interface{}) bool) *Value { + + for index, val := range v.MustInterSlice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereInter uses the specified decider function to select items +// from the []interface{}. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereInter(decider func(int, interface{}) bool) *Value { + + var selected []interface{} + + v.EachInter(func(index int, val interface{}) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupInter uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]interface{}. 
+func (v *Value) GroupInter(grouper func(int, interface{}) string) *Value { + + groups := make(map[string][]interface{}) + + v.EachInter(func(index int, val interface{}) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]interface{}, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceInter uses the specified function to replace each interface{}s +// by iterating each item. The data in the returned result will be a +// []interface{} containing the replaced items. +func (v *Value) ReplaceInter(replacer func(int, interface{}) interface{}) *Value { + + arr := v.MustInterSlice() + replaced := make([]interface{}, len(arr)) + + v.EachInter(func(index int, val interface{}) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectInter uses the specified collector function to collect a value +// for each of the interface{}s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectInter(collector func(int, interface{}) interface{}) *Value { + + arr := v.MustInterSlice() + collected := make([]interface{}, len(arr)) + + v.EachInter(func(index int, val interface{}) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + MSI (map[string]interface{} and []map[string]interface{}) + -------------------------------------------------- +*/ + +// MSI gets the value as a map[string]interface{}, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) MSI(optionalDefault ...map[string]interface{}) map[string]interface{} { + if s, ok := v.data.(map[string]interface{}); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustMSI gets the value as a map[string]interface{}. 
+// +// Panics if the object is not a map[string]interface{}. +func (v *Value) MustMSI() map[string]interface{} { + return v.data.(map[string]interface{}) +} + +// MSISlice gets the value as a []map[string]interface{}, returns the optionalDefault +// value or nil if the value is not a []map[string]interface{}. +func (v *Value) MSISlice(optionalDefault ...[]map[string]interface{}) []map[string]interface{} { + if s, ok := v.data.([]map[string]interface{}); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustMSISlice gets the value as a []map[string]interface{}. +// +// Panics if the object is not a []map[string]interface{}. +func (v *Value) MustMSISlice() []map[string]interface{} { + return v.data.([]map[string]interface{}) +} + +// IsMSI gets whether the object contained is a map[string]interface{} or not. +func (v *Value) IsMSI() bool { + _, ok := v.data.(map[string]interface{}) + return ok +} + +// IsMSISlice gets whether the object contained is a []map[string]interface{} or not. +func (v *Value) IsMSISlice() bool { + _, ok := v.data.([]map[string]interface{}) + return ok +} + +// EachMSI calls the specified callback for each object +// in the []map[string]interface{}. +// +// Panics if the object is the wrong type. +func (v *Value) EachMSI(callback func(int, map[string]interface{}) bool) *Value { + + for index, val := range v.MustMSISlice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereMSI uses the specified decider function to select items +// from the []map[string]interface{}. The object contained in the result will contain +// only the selected items. 
+func (v *Value) WhereMSI(decider func(int, map[string]interface{}) bool) *Value { + + var selected []map[string]interface{} + + v.EachMSI(func(index int, val map[string]interface{}) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupMSI uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]map[string]interface{}. +func (v *Value) GroupMSI(grouper func(int, map[string]interface{}) string) *Value { + + groups := make(map[string][]map[string]interface{}) + + v.EachMSI(func(index int, val map[string]interface{}) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]map[string]interface{}, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceMSI uses the specified function to replace each map[string]interface{}s +// by iterating each item. The data in the returned result will be a +// []map[string]interface{} containing the replaced items. +func (v *Value) ReplaceMSI(replacer func(int, map[string]interface{}) map[string]interface{}) *Value { + + arr := v.MustMSISlice() + replaced := make([]map[string]interface{}, len(arr)) + + v.EachMSI(func(index int, val map[string]interface{}) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectMSI uses the specified collector function to collect a value +// for each of the map[string]interface{}s in the slice. The data returned will be a +// []interface{}. 
+func (v *Value) CollectMSI(collector func(int, map[string]interface{}) interface{}) *Value { + + arr := v.MustMSISlice() + collected := make([]interface{}, len(arr)) + + v.EachMSI(func(index int, val map[string]interface{}) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + ObjxMap ((Map) and [](Map)) + -------------------------------------------------- +*/ + +// ObjxMap gets the value as a (Map), returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) ObjxMap(optionalDefault ...(Map)) Map { + if s, ok := v.data.((Map)); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return New(nil) +} + +// MustObjxMap gets the value as a (Map). +// +// Panics if the object is not a (Map). +func (v *Value) MustObjxMap() Map { + return v.data.((Map)) +} + +// ObjxMapSlice gets the value as a [](Map), returns the optionalDefault +// value or nil if the value is not a [](Map). +func (v *Value) ObjxMapSlice(optionalDefault ...[](Map)) [](Map) { + if s, ok := v.data.([](Map)); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustObjxMapSlice gets the value as a [](Map). +// +// Panics if the object is not a [](Map). +func (v *Value) MustObjxMapSlice() [](Map) { + return v.data.([](Map)) +} + +// IsObjxMap gets whether the object contained is a (Map) or not. +func (v *Value) IsObjxMap() bool { + _, ok := v.data.((Map)) + return ok +} + +// IsObjxMapSlice gets whether the object contained is a [](Map) or not. +func (v *Value) IsObjxMapSlice() bool { + _, ok := v.data.([](Map)) + return ok +} + +// EachObjxMap calls the specified callback for each object +// in the [](Map). +// +// Panics if the object is the wrong type. 
+func (v *Value) EachObjxMap(callback func(int, Map) bool) *Value { + + for index, val := range v.MustObjxMapSlice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereObjxMap uses the specified decider function to select items +// from the [](Map). The object contained in the result will contain +// only the selected items. +func (v *Value) WhereObjxMap(decider func(int, Map) bool) *Value { + + var selected [](Map) + + v.EachObjxMap(func(index int, val Map) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupObjxMap uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][](Map). +func (v *Value) GroupObjxMap(grouper func(int, Map) string) *Value { + + groups := make(map[string][](Map)) + + v.EachObjxMap(func(index int, val Map) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([](Map), 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceObjxMap uses the specified function to replace each (Map)s +// by iterating each item. The data in the returned result will be a +// [](Map) containing the replaced items. +func (v *Value) ReplaceObjxMap(replacer func(int, Map) Map) *Value { + + arr := v.MustObjxMapSlice() + replaced := make([](Map), len(arr)) + + v.EachObjxMap(func(index int, val Map) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectObjxMap uses the specified collector function to collect a value +// for each of the (Map)s in the slice. The data returned will be a +// []interface{}. 
+func (v *Value) CollectObjxMap(collector func(int, Map) interface{}) *Value { + + arr := v.MustObjxMapSlice() + collected := make([]interface{}, len(arr)) + + v.EachObjxMap(func(index int, val Map) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Bool (bool and []bool) + -------------------------------------------------- +*/ + +// Bool gets the value as a bool, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Bool(optionalDefault ...bool) bool { + if s, ok := v.data.(bool); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return false +} + +// MustBool gets the value as a bool. +// +// Panics if the object is not a bool. +func (v *Value) MustBool() bool { + return v.data.(bool) +} + +// BoolSlice gets the value as a []bool, returns the optionalDefault +// value or nil if the value is not a []bool. +func (v *Value) BoolSlice(optionalDefault ...[]bool) []bool { + if s, ok := v.data.([]bool); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustBoolSlice gets the value as a []bool. +// +// Panics if the object is not a []bool. +func (v *Value) MustBoolSlice() []bool { + return v.data.([]bool) +} + +// IsBool gets whether the object contained is a bool or not. +func (v *Value) IsBool() bool { + _, ok := v.data.(bool) + return ok +} + +// IsBoolSlice gets whether the object contained is a []bool or not. +func (v *Value) IsBoolSlice() bool { + _, ok := v.data.([]bool) + return ok +} + +// EachBool calls the specified callback for each object +// in the []bool. +// +// Panics if the object is the wrong type. 
+func (v *Value) EachBool(callback func(int, bool) bool) *Value { + + for index, val := range v.MustBoolSlice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereBool uses the specified decider function to select items +// from the []bool. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereBool(decider func(int, bool) bool) *Value { + + var selected []bool + + v.EachBool(func(index int, val bool) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupBool uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]bool. +func (v *Value) GroupBool(grouper func(int, bool) string) *Value { + + groups := make(map[string][]bool) + + v.EachBool(func(index int, val bool) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]bool, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceBool uses the specified function to replace each bools +// by iterating each item. The data in the returned result will be a +// []bool containing the replaced items. +func (v *Value) ReplaceBool(replacer func(int, bool) bool) *Value { + + arr := v.MustBoolSlice() + replaced := make([]bool, len(arr)) + + v.EachBool(func(index int, val bool) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectBool uses the specified collector function to collect a value +// for each of the bools in the slice. The data returned will be a +// []interface{}. 
+func (v *Value) CollectBool(collector func(int, bool) interface{}) *Value { + + arr := v.MustBoolSlice() + collected := make([]interface{}, len(arr)) + + v.EachBool(func(index int, val bool) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Str (string and []string) + -------------------------------------------------- +*/ + +// Str gets the value as a string, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Str(optionalDefault ...string) string { + if s, ok := v.data.(string); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return "" +} + +// MustStr gets the value as a string. +// +// Panics if the object is not a string. +func (v *Value) MustStr() string { + return v.data.(string) +} + +// StrSlice gets the value as a []string, returns the optionalDefault +// value or nil if the value is not a []string. +func (v *Value) StrSlice(optionalDefault ...[]string) []string { + if s, ok := v.data.([]string); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustStrSlice gets the value as a []string. +// +// Panics if the object is not a []string. +func (v *Value) MustStrSlice() []string { + return v.data.([]string) +} + +// IsStr gets whether the object contained is a string or not. +func (v *Value) IsStr() bool { + _, ok := v.data.(string) + return ok +} + +// IsStrSlice gets whether the object contained is a []string or not. +func (v *Value) IsStrSlice() bool { + _, ok := v.data.([]string) + return ok +} + +// EachStr calls the specified callback for each object +// in the []string. +// +// Panics if the object is the wrong type. 
+func (v *Value) EachStr(callback func(int, string) bool) *Value { + + for index, val := range v.MustStrSlice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereStr uses the specified decider function to select items +// from the []string. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereStr(decider func(int, string) bool) *Value { + + var selected []string + + v.EachStr(func(index int, val string) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupStr uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]string. +func (v *Value) GroupStr(grouper func(int, string) string) *Value { + + groups := make(map[string][]string) + + v.EachStr(func(index int, val string) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]string, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceStr uses the specified function to replace each strings +// by iterating each item. The data in the returned result will be a +// []string containing the replaced items. +func (v *Value) ReplaceStr(replacer func(int, string) string) *Value { + + arr := v.MustStrSlice() + replaced := make([]string, len(arr)) + + v.EachStr(func(index int, val string) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectStr uses the specified collector function to collect a value +// for each of the strings in the slice. The data returned will be a +// []interface{}. 
+func (v *Value) CollectStr(collector func(int, string) interface{}) *Value { + + arr := v.MustStrSlice() + collected := make([]interface{}, len(arr)) + + v.EachStr(func(index int, val string) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Int (int and []int) + -------------------------------------------------- +*/ + +// Int gets the value as a int, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Int(optionalDefault ...int) int { + if s, ok := v.data.(int); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustInt gets the value as a int. +// +// Panics if the object is not a int. +func (v *Value) MustInt() int { + return v.data.(int) +} + +// IntSlice gets the value as a []int, returns the optionalDefault +// value or nil if the value is not a []int. +func (v *Value) IntSlice(optionalDefault ...[]int) []int { + if s, ok := v.data.([]int); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustIntSlice gets the value as a []int. +// +// Panics if the object is not a []int. +func (v *Value) MustIntSlice() []int { + return v.data.([]int) +} + +// IsInt gets whether the object contained is a int or not. +func (v *Value) IsInt() bool { + _, ok := v.data.(int) + return ok +} + +// IsIntSlice gets whether the object contained is a []int or not. +func (v *Value) IsIntSlice() bool { + _, ok := v.data.([]int) + return ok +} + +// EachInt calls the specified callback for each object +// in the []int. +// +// Panics if the object is the wrong type. +func (v *Value) EachInt(callback func(int, int) bool) *Value { + + for index, val := range v.MustIntSlice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereInt uses the specified decider function to select items +// from the []int. 
The object contained in the result will contain +// only the selected items. +func (v *Value) WhereInt(decider func(int, int) bool) *Value { + + var selected []int + + v.EachInt(func(index int, val int) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupInt uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]int. +func (v *Value) GroupInt(grouper func(int, int) string) *Value { + + groups := make(map[string][]int) + + v.EachInt(func(index int, val int) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]int, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceInt uses the specified function to replace each ints +// by iterating each item. The data in the returned result will be a +// []int containing the replaced items. +func (v *Value) ReplaceInt(replacer func(int, int) int) *Value { + + arr := v.MustIntSlice() + replaced := make([]int, len(arr)) + + v.EachInt(func(index int, val int) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectInt uses the specified collector function to collect a value +// for each of the ints in the slice. The data returned will be a +// []interface{}. 
+func (v *Value) CollectInt(collector func(int, int) interface{}) *Value { + + arr := v.MustIntSlice() + collected := make([]interface{}, len(arr)) + + v.EachInt(func(index int, val int) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Int8 (int8 and []int8) + -------------------------------------------------- +*/ + +// Int8 gets the value as a int8, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Int8(optionalDefault ...int8) int8 { + if s, ok := v.data.(int8); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustInt8 gets the value as a int8. +// +// Panics if the object is not a int8. +func (v *Value) MustInt8() int8 { + return v.data.(int8) +} + +// Int8Slice gets the value as a []int8, returns the optionalDefault +// value or nil if the value is not a []int8. +func (v *Value) Int8Slice(optionalDefault ...[]int8) []int8 { + if s, ok := v.data.([]int8); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustInt8Slice gets the value as a []int8. +// +// Panics if the object is not a []int8. +func (v *Value) MustInt8Slice() []int8 { + return v.data.([]int8) +} + +// IsInt8 gets whether the object contained is a int8 or not. +func (v *Value) IsInt8() bool { + _, ok := v.data.(int8) + return ok +} + +// IsInt8Slice gets whether the object contained is a []int8 or not. +func (v *Value) IsInt8Slice() bool { + _, ok := v.data.([]int8) + return ok +} + +// EachInt8 calls the specified callback for each object +// in the []int8. +// +// Panics if the object is the wrong type. 
+func (v *Value) EachInt8(callback func(int, int8) bool) *Value { + + for index, val := range v.MustInt8Slice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereInt8 uses the specified decider function to select items +// from the []int8. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereInt8(decider func(int, int8) bool) *Value { + + var selected []int8 + + v.EachInt8(func(index int, val int8) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupInt8 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]int8. +func (v *Value) GroupInt8(grouper func(int, int8) string) *Value { + + groups := make(map[string][]int8) + + v.EachInt8(func(index int, val int8) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]int8, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceInt8 uses the specified function to replace each int8s +// by iterating each item. The data in the returned result will be a +// []int8 containing the replaced items. +func (v *Value) ReplaceInt8(replacer func(int, int8) int8) *Value { + + arr := v.MustInt8Slice() + replaced := make([]int8, len(arr)) + + v.EachInt8(func(index int, val int8) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectInt8 uses the specified collector function to collect a value +// for each of the int8s in the slice. The data returned will be a +// []interface{}. 
+func (v *Value) CollectInt8(collector func(int, int8) interface{}) *Value { + + arr := v.MustInt8Slice() + collected := make([]interface{}, len(arr)) + + v.EachInt8(func(index int, val int8) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Int16 (int16 and []int16) + -------------------------------------------------- +*/ + +// Int16 gets the value as a int16, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Int16(optionalDefault ...int16) int16 { + if s, ok := v.data.(int16); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustInt16 gets the value as a int16. +// +// Panics if the object is not a int16. +func (v *Value) MustInt16() int16 { + return v.data.(int16) +} + +// Int16Slice gets the value as a []int16, returns the optionalDefault +// value or nil if the value is not a []int16. +func (v *Value) Int16Slice(optionalDefault ...[]int16) []int16 { + if s, ok := v.data.([]int16); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustInt16Slice gets the value as a []int16. +// +// Panics if the object is not a []int16. +func (v *Value) MustInt16Slice() []int16 { + return v.data.([]int16) +} + +// IsInt16 gets whether the object contained is a int16 or not. +func (v *Value) IsInt16() bool { + _, ok := v.data.(int16) + return ok +} + +// IsInt16Slice gets whether the object contained is a []int16 or not. +func (v *Value) IsInt16Slice() bool { + _, ok := v.data.([]int16) + return ok +} + +// EachInt16 calls the specified callback for each object +// in the []int16. +// +// Panics if the object is the wrong type. 
+func (v *Value) EachInt16(callback func(int, int16) bool) *Value { + + for index, val := range v.MustInt16Slice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereInt16 uses the specified decider function to select items +// from the []int16. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereInt16(decider func(int, int16) bool) *Value { + + var selected []int16 + + v.EachInt16(func(index int, val int16) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupInt16 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]int16. +func (v *Value) GroupInt16(grouper func(int, int16) string) *Value { + + groups := make(map[string][]int16) + + v.EachInt16(func(index int, val int16) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]int16, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceInt16 uses the specified function to replace each int16s +// by iterating each item. The data in the returned result will be a +// []int16 containing the replaced items. +func (v *Value) ReplaceInt16(replacer func(int, int16) int16) *Value { + + arr := v.MustInt16Slice() + replaced := make([]int16, len(arr)) + + v.EachInt16(func(index int, val int16) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectInt16 uses the specified collector function to collect a value +// for each of the int16s in the slice. The data returned will be a +// []interface{}. 
+func (v *Value) CollectInt16(collector func(int, int16) interface{}) *Value { + + arr := v.MustInt16Slice() + collected := make([]interface{}, len(arr)) + + v.EachInt16(func(index int, val int16) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Int32 (int32 and []int32) + -------------------------------------------------- +*/ + +// Int32 gets the value as a int32, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Int32(optionalDefault ...int32) int32 { + if s, ok := v.data.(int32); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustInt32 gets the value as a int32. +// +// Panics if the object is not a int32. +func (v *Value) MustInt32() int32 { + return v.data.(int32) +} + +// Int32Slice gets the value as a []int32, returns the optionalDefault +// value or nil if the value is not a []int32. +func (v *Value) Int32Slice(optionalDefault ...[]int32) []int32 { + if s, ok := v.data.([]int32); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustInt32Slice gets the value as a []int32. +// +// Panics if the object is not a []int32. +func (v *Value) MustInt32Slice() []int32 { + return v.data.([]int32) +} + +// IsInt32 gets whether the object contained is a int32 or not. +func (v *Value) IsInt32() bool { + _, ok := v.data.(int32) + return ok +} + +// IsInt32Slice gets whether the object contained is a []int32 or not. +func (v *Value) IsInt32Slice() bool { + _, ok := v.data.([]int32) + return ok +} + +// EachInt32 calls the specified callback for each object +// in the []int32. +// +// Panics if the object is the wrong type. 
+func (v *Value) EachInt32(callback func(int, int32) bool) *Value { + + for index, val := range v.MustInt32Slice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereInt32 uses the specified decider function to select items +// from the []int32. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereInt32(decider func(int, int32) bool) *Value { + + var selected []int32 + + v.EachInt32(func(index int, val int32) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupInt32 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]int32. +func (v *Value) GroupInt32(grouper func(int, int32) string) *Value { + + groups := make(map[string][]int32) + + v.EachInt32(func(index int, val int32) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]int32, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceInt32 uses the specified function to replace each int32s +// by iterating each item. The data in the returned result will be a +// []int32 containing the replaced items. +func (v *Value) ReplaceInt32(replacer func(int, int32) int32) *Value { + + arr := v.MustInt32Slice() + replaced := make([]int32, len(arr)) + + v.EachInt32(func(index int, val int32) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectInt32 uses the specified collector function to collect a value +// for each of the int32s in the slice. The data returned will be a +// []interface{}. 
+func (v *Value) CollectInt32(collector func(int, int32) interface{}) *Value { + + arr := v.MustInt32Slice() + collected := make([]interface{}, len(arr)) + + v.EachInt32(func(index int, val int32) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Int64 (int64 and []int64) + -------------------------------------------------- +*/ + +// Int64 gets the value as a int64, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Int64(optionalDefault ...int64) int64 { + if s, ok := v.data.(int64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustInt64 gets the value as a int64. +// +// Panics if the object is not a int64. +func (v *Value) MustInt64() int64 { + return v.data.(int64) +} + +// Int64Slice gets the value as a []int64, returns the optionalDefault +// value or nil if the value is not a []int64. +func (v *Value) Int64Slice(optionalDefault ...[]int64) []int64 { + if s, ok := v.data.([]int64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustInt64Slice gets the value as a []int64. +// +// Panics if the object is not a []int64. +func (v *Value) MustInt64Slice() []int64 { + return v.data.([]int64) +} + +// IsInt64 gets whether the object contained is a int64 or not. +func (v *Value) IsInt64() bool { + _, ok := v.data.(int64) + return ok +} + +// IsInt64Slice gets whether the object contained is a []int64 or not. +func (v *Value) IsInt64Slice() bool { + _, ok := v.data.([]int64) + return ok +} + +// EachInt64 calls the specified callback for each object +// in the []int64. +// +// Panics if the object is the wrong type. 
+func (v *Value) EachInt64(callback func(int, int64) bool) *Value { + + for index, val := range v.MustInt64Slice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereInt64 uses the specified decider function to select items +// from the []int64. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereInt64(decider func(int, int64) bool) *Value { + + var selected []int64 + + v.EachInt64(func(index int, val int64) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupInt64 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]int64. +func (v *Value) GroupInt64(grouper func(int, int64) string) *Value { + + groups := make(map[string][]int64) + + v.EachInt64(func(index int, val int64) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]int64, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceInt64 uses the specified function to replace each int64s +// by iterating each item. The data in the returned result will be a +// []int64 containing the replaced items. +func (v *Value) ReplaceInt64(replacer func(int, int64) int64) *Value { + + arr := v.MustInt64Slice() + replaced := make([]int64, len(arr)) + + v.EachInt64(func(index int, val int64) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectInt64 uses the specified collector function to collect a value +// for each of the int64s in the slice. The data returned will be a +// []interface{}. 
+func (v *Value) CollectInt64(collector func(int, int64) interface{}) *Value { + + arr := v.MustInt64Slice() + collected := make([]interface{}, len(arr)) + + v.EachInt64(func(index int, val int64) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Uint (uint and []uint) + -------------------------------------------------- +*/ + +// Uint gets the value as a uint, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Uint(optionalDefault ...uint) uint { + if s, ok := v.data.(uint); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustUint gets the value as a uint. +// +// Panics if the object is not a uint. +func (v *Value) MustUint() uint { + return v.data.(uint) +} + +// UintSlice gets the value as a []uint, returns the optionalDefault +// value or nil if the value is not a []uint. +func (v *Value) UintSlice(optionalDefault ...[]uint) []uint { + if s, ok := v.data.([]uint); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustUintSlice gets the value as a []uint. +// +// Panics if the object is not a []uint. +func (v *Value) MustUintSlice() []uint { + return v.data.([]uint) +} + +// IsUint gets whether the object contained is a uint or not. +func (v *Value) IsUint() bool { + _, ok := v.data.(uint) + return ok +} + +// IsUintSlice gets whether the object contained is a []uint or not. +func (v *Value) IsUintSlice() bool { + _, ok := v.data.([]uint) + return ok +} + +// EachUint calls the specified callback for each object +// in the []uint. +// +// Panics if the object is the wrong type. 
+func (v *Value) EachUint(callback func(int, uint) bool) *Value { + + for index, val := range v.MustUintSlice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereUint uses the specified decider function to select items +// from the []uint. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereUint(decider func(int, uint) bool) *Value { + + var selected []uint + + v.EachUint(func(index int, val uint) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupUint uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]uint. +func (v *Value) GroupUint(grouper func(int, uint) string) *Value { + + groups := make(map[string][]uint) + + v.EachUint(func(index int, val uint) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]uint, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceUint uses the specified function to replace each uints +// by iterating each item. The data in the returned result will be a +// []uint containing the replaced items. +func (v *Value) ReplaceUint(replacer func(int, uint) uint) *Value { + + arr := v.MustUintSlice() + replaced := make([]uint, len(arr)) + + v.EachUint(func(index int, val uint) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectUint uses the specified collector function to collect a value +// for each of the uints in the slice. The data returned will be a +// []interface{}. 
+func (v *Value) CollectUint(collector func(int, uint) interface{}) *Value { + + arr := v.MustUintSlice() + collected := make([]interface{}, len(arr)) + + v.EachUint(func(index int, val uint) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Uint8 (uint8 and []uint8) + -------------------------------------------------- +*/ + +// Uint8 gets the value as a uint8, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Uint8(optionalDefault ...uint8) uint8 { + if s, ok := v.data.(uint8); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustUint8 gets the value as a uint8. +// +// Panics if the object is not a uint8. +func (v *Value) MustUint8() uint8 { + return v.data.(uint8) +} + +// Uint8Slice gets the value as a []uint8, returns the optionalDefault +// value or nil if the value is not a []uint8. +func (v *Value) Uint8Slice(optionalDefault ...[]uint8) []uint8 { + if s, ok := v.data.([]uint8); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustUint8Slice gets the value as a []uint8. +// +// Panics if the object is not a []uint8. +func (v *Value) MustUint8Slice() []uint8 { + return v.data.([]uint8) +} + +// IsUint8 gets whether the object contained is a uint8 or not. +func (v *Value) IsUint8() bool { + _, ok := v.data.(uint8) + return ok +} + +// IsUint8Slice gets whether the object contained is a []uint8 or not. +func (v *Value) IsUint8Slice() bool { + _, ok := v.data.([]uint8) + return ok +} + +// EachUint8 calls the specified callback for each object +// in the []uint8. +// +// Panics if the object is the wrong type. 
+func (v *Value) EachUint8(callback func(int, uint8) bool) *Value { + + for index, val := range v.MustUint8Slice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereUint8 uses the specified decider function to select items +// from the []uint8. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereUint8(decider func(int, uint8) bool) *Value { + + var selected []uint8 + + v.EachUint8(func(index int, val uint8) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupUint8 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]uint8. +func (v *Value) GroupUint8(grouper func(int, uint8) string) *Value { + + groups := make(map[string][]uint8) + + v.EachUint8(func(index int, val uint8) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]uint8, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceUint8 uses the specified function to replace each uint8s +// by iterating each item. The data in the returned result will be a +// []uint8 containing the replaced items. +func (v *Value) ReplaceUint8(replacer func(int, uint8) uint8) *Value { + + arr := v.MustUint8Slice() + replaced := make([]uint8, len(arr)) + + v.EachUint8(func(index int, val uint8) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectUint8 uses the specified collector function to collect a value +// for each of the uint8s in the slice. The data returned will be a +// []interface{}. 
+func (v *Value) CollectUint8(collector func(int, uint8) interface{}) *Value { + + arr := v.MustUint8Slice() + collected := make([]interface{}, len(arr)) + + v.EachUint8(func(index int, val uint8) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Uint16 (uint16 and []uint16) + -------------------------------------------------- +*/ + +// Uint16 gets the value as a uint16, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Uint16(optionalDefault ...uint16) uint16 { + if s, ok := v.data.(uint16); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustUint16 gets the value as a uint16. +// +// Panics if the object is not a uint16. +func (v *Value) MustUint16() uint16 { + return v.data.(uint16) +} + +// Uint16Slice gets the value as a []uint16, returns the optionalDefault +// value or nil if the value is not a []uint16. +func (v *Value) Uint16Slice(optionalDefault ...[]uint16) []uint16 { + if s, ok := v.data.([]uint16); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustUint16Slice gets the value as a []uint16. +// +// Panics if the object is not a []uint16. +func (v *Value) MustUint16Slice() []uint16 { + return v.data.([]uint16) +} + +// IsUint16 gets whether the object contained is a uint16 or not. +func (v *Value) IsUint16() bool { + _, ok := v.data.(uint16) + return ok +} + +// IsUint16Slice gets whether the object contained is a []uint16 or not. +func (v *Value) IsUint16Slice() bool { + _, ok := v.data.([]uint16) + return ok +} + +// EachUint16 calls the specified callback for each object +// in the []uint16. +// +// Panics if the object is the wrong type. 
+func (v *Value) EachUint16(callback func(int, uint16) bool) *Value { + + for index, val := range v.MustUint16Slice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereUint16 uses the specified decider function to select items +// from the []uint16. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereUint16(decider func(int, uint16) bool) *Value { + + var selected []uint16 + + v.EachUint16(func(index int, val uint16) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupUint16 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]uint16. +func (v *Value) GroupUint16(grouper func(int, uint16) string) *Value { + + groups := make(map[string][]uint16) + + v.EachUint16(func(index int, val uint16) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]uint16, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceUint16 uses the specified function to replace each uint16s +// by iterating each item. The data in the returned result will be a +// []uint16 containing the replaced items. +func (v *Value) ReplaceUint16(replacer func(int, uint16) uint16) *Value { + + arr := v.MustUint16Slice() + replaced := make([]uint16, len(arr)) + + v.EachUint16(func(index int, val uint16) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectUint16 uses the specified collector function to collect a value +// for each of the uint16s in the slice. The data returned will be a +// []interface{}. 
+func (v *Value) CollectUint16(collector func(int, uint16) interface{}) *Value { + + arr := v.MustUint16Slice() + collected := make([]interface{}, len(arr)) + + v.EachUint16(func(index int, val uint16) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Uint32 (uint32 and []uint32) + -------------------------------------------------- +*/ + +// Uint32 gets the value as a uint32, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Uint32(optionalDefault ...uint32) uint32 { + if s, ok := v.data.(uint32); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustUint32 gets the value as a uint32. +// +// Panics if the object is not a uint32. +func (v *Value) MustUint32() uint32 { + return v.data.(uint32) +} + +// Uint32Slice gets the value as a []uint32, returns the optionalDefault +// value or nil if the value is not a []uint32. +func (v *Value) Uint32Slice(optionalDefault ...[]uint32) []uint32 { + if s, ok := v.data.([]uint32); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustUint32Slice gets the value as a []uint32. +// +// Panics if the object is not a []uint32. +func (v *Value) MustUint32Slice() []uint32 { + return v.data.([]uint32) +} + +// IsUint32 gets whether the object contained is a uint32 or not. +func (v *Value) IsUint32() bool { + _, ok := v.data.(uint32) + return ok +} + +// IsUint32Slice gets whether the object contained is a []uint32 or not. +func (v *Value) IsUint32Slice() bool { + _, ok := v.data.([]uint32) + return ok +} + +// EachUint32 calls the specified callback for each object +// in the []uint32. +// +// Panics if the object is the wrong type. 
+func (v *Value) EachUint32(callback func(int, uint32) bool) *Value { + + for index, val := range v.MustUint32Slice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereUint32 uses the specified decider function to select items +// from the []uint32. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereUint32(decider func(int, uint32) bool) *Value { + + var selected []uint32 + + v.EachUint32(func(index int, val uint32) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupUint32 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]uint32. +func (v *Value) GroupUint32(grouper func(int, uint32) string) *Value { + + groups := make(map[string][]uint32) + + v.EachUint32(func(index int, val uint32) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]uint32, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceUint32 uses the specified function to replace each uint32s +// by iterating each item. The data in the returned result will be a +// []uint32 containing the replaced items. +func (v *Value) ReplaceUint32(replacer func(int, uint32) uint32) *Value { + + arr := v.MustUint32Slice() + replaced := make([]uint32, len(arr)) + + v.EachUint32(func(index int, val uint32) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectUint32 uses the specified collector function to collect a value +// for each of the uint32s in the slice. The data returned will be a +// []interface{}. 
+func (v *Value) CollectUint32(collector func(int, uint32) interface{}) *Value { + + arr := v.MustUint32Slice() + collected := make([]interface{}, len(arr)) + + v.EachUint32(func(index int, val uint32) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Uint64 (uint64 and []uint64) + -------------------------------------------------- +*/ + +// Uint64 gets the value as a uint64, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Uint64(optionalDefault ...uint64) uint64 { + if s, ok := v.data.(uint64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustUint64 gets the value as a uint64. +// +// Panics if the object is not a uint64. +func (v *Value) MustUint64() uint64 { + return v.data.(uint64) +} + +// Uint64Slice gets the value as a []uint64, returns the optionalDefault +// value or nil if the value is not a []uint64. +func (v *Value) Uint64Slice(optionalDefault ...[]uint64) []uint64 { + if s, ok := v.data.([]uint64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustUint64Slice gets the value as a []uint64. +// +// Panics if the object is not a []uint64. +func (v *Value) MustUint64Slice() []uint64 { + return v.data.([]uint64) +} + +// IsUint64 gets whether the object contained is a uint64 or not. +func (v *Value) IsUint64() bool { + _, ok := v.data.(uint64) + return ok +} + +// IsUint64Slice gets whether the object contained is a []uint64 or not. +func (v *Value) IsUint64Slice() bool { + _, ok := v.data.([]uint64) + return ok +} + +// EachUint64 calls the specified callback for each object +// in the []uint64. +// +// Panics if the object is the wrong type. 
+func (v *Value) EachUint64(callback func(int, uint64) bool) *Value { + + for index, val := range v.MustUint64Slice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereUint64 uses the specified decider function to select items +// from the []uint64. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereUint64(decider func(int, uint64) bool) *Value { + + var selected []uint64 + + v.EachUint64(func(index int, val uint64) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupUint64 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]uint64. +func (v *Value) GroupUint64(grouper func(int, uint64) string) *Value { + + groups := make(map[string][]uint64) + + v.EachUint64(func(index int, val uint64) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]uint64, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceUint64 uses the specified function to replace each uint64s +// by iterating each item. The data in the returned result will be a +// []uint64 containing the replaced items. +func (v *Value) ReplaceUint64(replacer func(int, uint64) uint64) *Value { + + arr := v.MustUint64Slice() + replaced := make([]uint64, len(arr)) + + v.EachUint64(func(index int, val uint64) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectUint64 uses the specified collector function to collect a value +// for each of the uint64s in the slice. The data returned will be a +// []interface{}. 
+func (v *Value) CollectUint64(collector func(int, uint64) interface{}) *Value { + + arr := v.MustUint64Slice() + collected := make([]interface{}, len(arr)) + + v.EachUint64(func(index int, val uint64) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Uintptr (uintptr and []uintptr) + -------------------------------------------------- +*/ + +// Uintptr gets the value as a uintptr, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Uintptr(optionalDefault ...uintptr) uintptr { + if s, ok := v.data.(uintptr); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustUintptr gets the value as a uintptr. +// +// Panics if the object is not a uintptr. +func (v *Value) MustUintptr() uintptr { + return v.data.(uintptr) +} + +// UintptrSlice gets the value as a []uintptr, returns the optionalDefault +// value or nil if the value is not a []uintptr. +func (v *Value) UintptrSlice(optionalDefault ...[]uintptr) []uintptr { + if s, ok := v.data.([]uintptr); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustUintptrSlice gets the value as a []uintptr. +// +// Panics if the object is not a []uintptr. +func (v *Value) MustUintptrSlice() []uintptr { + return v.data.([]uintptr) +} + +// IsUintptr gets whether the object contained is a uintptr or not. +func (v *Value) IsUintptr() bool { + _, ok := v.data.(uintptr) + return ok +} + +// IsUintptrSlice gets whether the object contained is a []uintptr or not. +func (v *Value) IsUintptrSlice() bool { + _, ok := v.data.([]uintptr) + return ok +} + +// EachUintptr calls the specified callback for each object +// in the []uintptr. +// +// Panics if the object is the wrong type. 
+func (v *Value) EachUintptr(callback func(int, uintptr) bool) *Value { + + for index, val := range v.MustUintptrSlice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereUintptr uses the specified decider function to select items +// from the []uintptr. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereUintptr(decider func(int, uintptr) bool) *Value { + + var selected []uintptr + + v.EachUintptr(func(index int, val uintptr) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupUintptr uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]uintptr. +func (v *Value) GroupUintptr(grouper func(int, uintptr) string) *Value { + + groups := make(map[string][]uintptr) + + v.EachUintptr(func(index int, val uintptr) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]uintptr, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceUintptr uses the specified function to replace each uintptrs +// by iterating each item. The data in the returned result will be a +// []uintptr containing the replaced items. +func (v *Value) ReplaceUintptr(replacer func(int, uintptr) uintptr) *Value { + + arr := v.MustUintptrSlice() + replaced := make([]uintptr, len(arr)) + + v.EachUintptr(func(index int, val uintptr) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectUintptr uses the specified collector function to collect a value +// for each of the uintptrs in the slice. The data returned will be a +// []interface{}. 
+func (v *Value) CollectUintptr(collector func(int, uintptr) interface{}) *Value { + + arr := v.MustUintptrSlice() + collected := make([]interface{}, len(arr)) + + v.EachUintptr(func(index int, val uintptr) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Float32 (float32 and []float32) + -------------------------------------------------- +*/ + +// Float32 gets the value as a float32, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Float32(optionalDefault ...float32) float32 { + if s, ok := v.data.(float32); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustFloat32 gets the value as a float32. +// +// Panics if the object is not a float32. +func (v *Value) MustFloat32() float32 { + return v.data.(float32) +} + +// Float32Slice gets the value as a []float32, returns the optionalDefault +// value or nil if the value is not a []float32. +func (v *Value) Float32Slice(optionalDefault ...[]float32) []float32 { + if s, ok := v.data.([]float32); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustFloat32Slice gets the value as a []float32. +// +// Panics if the object is not a []float32. +func (v *Value) MustFloat32Slice() []float32 { + return v.data.([]float32) +} + +// IsFloat32 gets whether the object contained is a float32 or not. +func (v *Value) IsFloat32() bool { + _, ok := v.data.(float32) + return ok +} + +// IsFloat32Slice gets whether the object contained is a []float32 or not. +func (v *Value) IsFloat32Slice() bool { + _, ok := v.data.([]float32) + return ok +} + +// EachFloat32 calls the specified callback for each object +// in the []float32. +// +// Panics if the object is the wrong type. 
+func (v *Value) EachFloat32(callback func(int, float32) bool) *Value { + + for index, val := range v.MustFloat32Slice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereFloat32 uses the specified decider function to select items +// from the []float32. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereFloat32(decider func(int, float32) bool) *Value { + + var selected []float32 + + v.EachFloat32(func(index int, val float32) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupFloat32 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]float32. +func (v *Value) GroupFloat32(grouper func(int, float32) string) *Value { + + groups := make(map[string][]float32) + + v.EachFloat32(func(index int, val float32) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]float32, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceFloat32 uses the specified function to replace each float32s +// by iterating each item. The data in the returned result will be a +// []float32 containing the replaced items. +func (v *Value) ReplaceFloat32(replacer func(int, float32) float32) *Value { + + arr := v.MustFloat32Slice() + replaced := make([]float32, len(arr)) + + v.EachFloat32(func(index int, val float32) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectFloat32 uses the specified collector function to collect a value +// for each of the float32s in the slice. The data returned will be a +// []interface{}. 
+func (v *Value) CollectFloat32(collector func(int, float32) interface{}) *Value { + + arr := v.MustFloat32Slice() + collected := make([]interface{}, len(arr)) + + v.EachFloat32(func(index int, val float32) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Float64 (float64 and []float64) + -------------------------------------------------- +*/ + +// Float64 gets the value as a float64, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Float64(optionalDefault ...float64) float64 { + if s, ok := v.data.(float64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustFloat64 gets the value as a float64. +// +// Panics if the object is not a float64. +func (v *Value) MustFloat64() float64 { + return v.data.(float64) +} + +// Float64Slice gets the value as a []float64, returns the optionalDefault +// value or nil if the value is not a []float64. +func (v *Value) Float64Slice(optionalDefault ...[]float64) []float64 { + if s, ok := v.data.([]float64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustFloat64Slice gets the value as a []float64. +// +// Panics if the object is not a []float64. +func (v *Value) MustFloat64Slice() []float64 { + return v.data.([]float64) +} + +// IsFloat64 gets whether the object contained is a float64 or not. +func (v *Value) IsFloat64() bool { + _, ok := v.data.(float64) + return ok +} + +// IsFloat64Slice gets whether the object contained is a []float64 or not. +func (v *Value) IsFloat64Slice() bool { + _, ok := v.data.([]float64) + return ok +} + +// EachFloat64 calls the specified callback for each object +// in the []float64. +// +// Panics if the object is the wrong type. 
+func (v *Value) EachFloat64(callback func(int, float64) bool) *Value { + + for index, val := range v.MustFloat64Slice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereFloat64 uses the specified decider function to select items +// from the []float64. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereFloat64(decider func(int, float64) bool) *Value { + + var selected []float64 + + v.EachFloat64(func(index int, val float64) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupFloat64 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]float64. +func (v *Value) GroupFloat64(grouper func(int, float64) string) *Value { + + groups := make(map[string][]float64) + + v.EachFloat64(func(index int, val float64) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]float64, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceFloat64 uses the specified function to replace each float64s +// by iterating each item. The data in the returned result will be a +// []float64 containing the replaced items. +func (v *Value) ReplaceFloat64(replacer func(int, float64) float64) *Value { + + arr := v.MustFloat64Slice() + replaced := make([]float64, len(arr)) + + v.EachFloat64(func(index int, val float64) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectFloat64 uses the specified collector function to collect a value +// for each of the float64s in the slice. The data returned will be a +// []interface{}. 
+func (v *Value) CollectFloat64(collector func(int, float64) interface{}) *Value { + + arr := v.MustFloat64Slice() + collected := make([]interface{}, len(arr)) + + v.EachFloat64(func(index int, val float64) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Complex64 (complex64 and []complex64) + -------------------------------------------------- +*/ + +// Complex64 gets the value as a complex64, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Complex64(optionalDefault ...complex64) complex64 { + if s, ok := v.data.(complex64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustComplex64 gets the value as a complex64. +// +// Panics if the object is not a complex64. +func (v *Value) MustComplex64() complex64 { + return v.data.(complex64) +} + +// Complex64Slice gets the value as a []complex64, returns the optionalDefault +// value or nil if the value is not a []complex64. +func (v *Value) Complex64Slice(optionalDefault ...[]complex64) []complex64 { + if s, ok := v.data.([]complex64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustComplex64Slice gets the value as a []complex64. +// +// Panics if the object is not a []complex64. +func (v *Value) MustComplex64Slice() []complex64 { + return v.data.([]complex64) +} + +// IsComplex64 gets whether the object contained is a complex64 or not. +func (v *Value) IsComplex64() bool { + _, ok := v.data.(complex64) + return ok +} + +// IsComplex64Slice gets whether the object contained is a []complex64 or not. +func (v *Value) IsComplex64Slice() bool { + _, ok := v.data.([]complex64) + return ok +} + +// EachComplex64 calls the specified callback for each object +// in the []complex64. +// +// Panics if the object is the wrong type. 
+func (v *Value) EachComplex64(callback func(int, complex64) bool) *Value { + + for index, val := range v.MustComplex64Slice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereComplex64 uses the specified decider function to select items +// from the []complex64. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereComplex64(decider func(int, complex64) bool) *Value { + + var selected []complex64 + + v.EachComplex64(func(index int, val complex64) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupComplex64 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]complex64. +func (v *Value) GroupComplex64(grouper func(int, complex64) string) *Value { + + groups := make(map[string][]complex64) + + v.EachComplex64(func(index int, val complex64) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]complex64, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceComplex64 uses the specified function to replace each complex64s +// by iterating each item. The data in the returned result will be a +// []complex64 containing the replaced items. +func (v *Value) ReplaceComplex64(replacer func(int, complex64) complex64) *Value { + + arr := v.MustComplex64Slice() + replaced := make([]complex64, len(arr)) + + v.EachComplex64(func(index int, val complex64) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectComplex64 uses the specified collector function to collect a value +// for each of the complex64s in the slice. The data returned will be a +// []interface{}. 
+func (v *Value) CollectComplex64(collector func(int, complex64) interface{}) *Value { + + arr := v.MustComplex64Slice() + collected := make([]interface{}, len(arr)) + + v.EachComplex64(func(index int, val complex64) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Complex128 (complex128 and []complex128) + -------------------------------------------------- +*/ + +// Complex128 gets the value as a complex128, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Complex128(optionalDefault ...complex128) complex128 { + if s, ok := v.data.(complex128); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustComplex128 gets the value as a complex128. +// +// Panics if the object is not a complex128. +func (v *Value) MustComplex128() complex128 { + return v.data.(complex128) +} + +// Complex128Slice gets the value as a []complex128, returns the optionalDefault +// value or nil if the value is not a []complex128. +func (v *Value) Complex128Slice(optionalDefault ...[]complex128) []complex128 { + if s, ok := v.data.([]complex128); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustComplex128Slice gets the value as a []complex128. +// +// Panics if the object is not a []complex128. +func (v *Value) MustComplex128Slice() []complex128 { + return v.data.([]complex128) +} + +// IsComplex128 gets whether the object contained is a complex128 or not. +func (v *Value) IsComplex128() bool { + _, ok := v.data.(complex128) + return ok +} + +// IsComplex128Slice gets whether the object contained is a []complex128 or not. +func (v *Value) IsComplex128Slice() bool { + _, ok := v.data.([]complex128) + return ok +} + +// EachComplex128 calls the specified callback for each object +// in the []complex128. +// +// Panics if the object is the wrong type. 
+func (v *Value) EachComplex128(callback func(int, complex128) bool) *Value { + + for index, val := range v.MustComplex128Slice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereComplex128 uses the specified decider function to select items +// from the []complex128. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereComplex128(decider func(int, complex128) bool) *Value { + + var selected []complex128 + + v.EachComplex128(func(index int, val complex128) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupComplex128 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]complex128. +func (v *Value) GroupComplex128(grouper func(int, complex128) string) *Value { + + groups := make(map[string][]complex128) + + v.EachComplex128(func(index int, val complex128) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]complex128, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceComplex128 uses the specified function to replace each complex128s +// by iterating each item. The data in the returned result will be a +// []complex128 containing the replaced items. +func (v *Value) ReplaceComplex128(replacer func(int, complex128) complex128) *Value { + + arr := v.MustComplex128Slice() + replaced := make([]complex128, len(arr)) + + v.EachComplex128(func(index int, val complex128) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectComplex128 uses the specified collector function to collect a value +// for each of the complex128s in the slice. 
The data returned will be a +// []interface{}. +func (v *Value) CollectComplex128(collector func(int, complex128) interface{}) *Value { + + arr := v.MustComplex128Slice() + collected := make([]interface{}, len(arr)) + + v.EachComplex128(func(index int, val complex128) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} diff --git a/src/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/value.go b/src/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/value.go new file mode 100644 index 00000000..7aaef06b --- /dev/null +++ b/src/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/value.go @@ -0,0 +1,13 @@ +package objx + +// Value provides methods for extracting interface{} data in various +// types. +type Value struct { + // data contains the raw data being managed by this Value + data interface{} +} + +// Data returns the raw data contained by this Value +func (v *Value) Data() interface{} { + return v.data +} diff --git a/src/vendor/github.com/xelabs/go-mydumper/.gitignore b/src/vendor/github.com/xelabs/go-mydumper/.gitignore new file mode 100644 index 00000000..860ba155 --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mydumper/.gitignore @@ -0,0 +1,6 @@ +bin/* +pkg/* +src/github.com/ + +tags +coverage.out diff --git a/src/vendor/github.com/xelabs/go-mydumper/.travis.yml b/src/vendor/github.com/xelabs/go-mydumper/.travis.yml new file mode 100644 index 00000000..ea706b6d --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mydumper/.travis.yml @@ -0,0 +1,16 @@ +language: go +sudo: required +go: + - 1.8 + +before_install: + - go get github.com/pierrre/gotestcover + - go get github.com/stretchr/testify/assert + +script: + - make + - make coverage + +after_success: + # send coverage reports to Codecov + - bash <(curl -s https://codecov.io/bash) -f "!mock.go" diff --git a/src/vendor/github.com/xelabs/go-mydumper/LICENSE 
b/src/vendor/github.com/xelabs/go-mydumper/LICENSE new file mode 100644 index 00000000..9cecc1d4 --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mydumper/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. 
And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. 
+ + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. 
+ + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. 
Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. 
+ + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. 
This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. 
+ + {one line to give the program's name and a brief idea of what it does.} + Copyright (C) {year} {name of author} + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + {project} Copyright (C) {year} {fullname} + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. 
diff --git a/src/vendor/github.com/xelabs/go-mydumper/README.md b/src/vendor/github.com/xelabs/go-mydumper/README.md new file mode 100644 index 00000000..0e24837c --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mydumper/README.md @@ -0,0 +1,302 @@ +[![Build Status](https://travis-ci.org/XeLabs/go-mydumper.png)](https://travis-ci.org/XeLabs/go-mydumper) [![Go Report Card](https://goreportcard.com/badge/github.com/XeLabs/go-mydumper)](https://goreportcard.com/report/github.com/XeLabs/go-mydumper) [![codecov.io](https://codecov.io/gh/XeLabs/go-mydumper/graphs/badge.svg)](https://codecov.io/gh/XeLabs/go-mydumper/branch/master) + +# go-mydumper + +***go-mydumper*** is a multi-threaded MySQL backup and restore tool, and it is compatible with [maxbube/mydumper](https://github.com/maxbube/mydumper) in the layout. + + +## Build + +``` +$git clone https://github.com/XeLabs/go-mydumper +$cd go-mydumper +$make +$./bin/mydumper --help +$./bin/myloader --help +$./bin/mystreamer --help +``` + +## Test + +``` +$make test +``` + +## Usage + +### mydumper + +``` +./bin/mydumper --help +Usage: ./bin/mydumper -h [HOST] -P [PORT] -u [USER] -p [PASSWORD] -db [DATABASE] -o [OUTDIR] + -F int + Split tables into chunks of this output file size. This value is in MB (default 128) + -P int + TCP/IP port to connect to (default 3306) + -db string + Database to dump + -h string + The host to connect to + -o string + Directory to output files to + -p string + User password + -s int + Attempted size of INSERT statement in bytes (default 1000000) + -t int + Number of threads to use (default 16) + -table string + Table to dump + -u string + Username with privileges to run the dump + +Examples: +$./bin/mydumper -h 192.168.0.1 -P 3306 -u mock -p mock -db sbtest -o sbtest.sql + 2017/10/25 13:12:52.933391 dumper.go:35: [INFO] dumping.database[sbtest].schema... + 2017/10/25 13:12:52.937743 dumper.go:45: [INFO] dumping.table[sbtest.benchyou0].schema... 
+ 2017/10/25 13:12:52.937791 dumper.go:168: [INFO] dumping.table[sbtest.benchyou0].datas.thread[1]... + 2017/10/25 13:12:52.939008 dumper.go:45: [INFO] dumping.table[sbtest.benchyou1].schema... + 2017/10/25 13:12:52.939055 dumper.go:168: [INFO] dumping.table[sbtest.benchyou1].datas.thread[2]... + 2017/10/25 13:12:55.611905 dumper.go:105: [INFO] dumping.table[sbtest.benchyou0].rows[633987].bytes[128MB].part[1].thread[1] + 2017/10/25 13:12:55.765127 dumper.go:105: [INFO] dumping.table[sbtest.benchyou1].rows[633987].bytes[128MB].part[1].thread[2] + 2017/10/25 13:12:58.146093 dumper.go:105: [INFO] dumping.table[sbtest.benchyou0].rows[1266050].bytes[256MB].part[2].thread[1] + 2017/10/25 13:12:58.253219 dumper.go:105: [INFO] dumping.table[sbtest.benchyou1].rows[1266054].bytes[256MB].part[2].thread[2] + 2017/10/25 13:13:00.545536 dumper.go:105: [INFO] dumping.table[sbtest.benchyou0].rows[1896681].bytes[384MB].part[3].thread[1] + 2017/10/25 13:13:00.669499 dumper.go:105: [INFO] dumping.table[sbtest.benchyou1].rows[1896682].bytes[384MB].part[3].thread[2] + 2017/10/25 13:13:02.939278 dumper.go:182: [INFO] dumping.allbytes[1024MB].allrows[5054337].time[10.01sec].rates[102.34MB/sec]... 
+ 2017/10/25 13:13:03.012645 dumper.go:105: [INFO] dumping.table[sbtest.benchyou0].rows[2527168].bytes[512MB].part[4].thread[1] + 2017/10/25 13:13:03.098937 dumper.go:105: [INFO] dumping.table[sbtest.benchyou1].rows[2527169].bytes[512MB].part[4].thread[2] + 2017/10/25 13:13:05.344051 dumper.go:105: [INFO] dumping.table[sbtest.benchyou0].rows[3157657].bytes[640MB].part[5].thread[1] + 2017/10/25 13:13:05.429411 dumper.go:105: [INFO] dumping.table[sbtest.benchyou1].rows[3157655].bytes[640MB].part[5].thread[2] + 2017/10/25 13:13:07.649915 dumper.go:105: [INFO] dumping.table[sbtest.benchyou1].rows[3788144].bytes[768MB].part[6].thread[2] + 2017/10/25 13:13:07.709657 dumper.go:105: [INFO] dumping.table[sbtest.benchyou0].rows[3788144].bytes[768MB].part[6].thread[1] + 2017/10/25 13:13:09.839178 dumper.go:105: [INFO] dumping.table[sbtest.benchyou1].rows[4418631].bytes[896MB].part[7].thread[2] + 2017/10/25 13:13:09.899665 dumper.go:105: [INFO] dumping.table[sbtest.benchyou0].rows[4418633].bytes[896MB].part[7].thread[1] + 2017/10/25 13:13:12.046545 dumper.go:105: [INFO] dumping.table[sbtest.benchyou1].rows[5049118].bytes[1024MB].part[8].thread[2] + 2017/10/25 13:13:12.109284 dumper.go:105: [INFO] dumping.table[sbtest.benchyou0].rows[5049121].bytes[1024MB].part[8].thread[1] + 2017/10/25 13:13:12.939307 dumper.go:182: [INFO] dumping.allbytes[2150MB].allrows[10604068].time[20.01sec].rates[107.47MB/sec]... 
+ 2017/10/25 13:13:14.404759 dumper.go:105: [INFO] dumping.table[sbtest.benchyou1].rows[5679606].bytes[1152MB].part[9].thread[2] + 2017/10/25 13:13:14.467313 dumper.go:105: [INFO] dumping.table[sbtest.benchyou0].rows[5679607].bytes[1152MB].part[9].thread[1] + 2017/10/25 13:13:16.788106 dumper.go:105: [INFO] dumping.table[sbtest.benchyou1].rows[6310093].bytes[1280MB].part[10].thread[2] + 2017/10/25 13:13:16.894142 dumper.go:105: [INFO] dumping.table[sbtest.benchyou0].rows[6310093].bytes[1280MB].part[10].thread[1] + 2017/10/25 13:13:19.125115 dumper.go:105: [INFO] dumping.table[sbtest.benchyou1].rows[6940580].bytes[1408MB].part[11].thread[2] + 2017/10/25 13:13:19.196091 dumper.go:105: [INFO] dumping.table[sbtest.benchyou0].rows[6940582].bytes[1408MB].part[11].thread[1] + 2017/10/25 13:13:21.401179 dumper.go:105: [INFO] dumping.table[sbtest.benchyou1].rows[7571067].bytes[1536MB].part[12].thread[2] + 2017/10/25 13:13:21.489994 dumper.go:105: [INFO] dumping.table[sbtest.benchyou0].rows[7571070].bytes[1536MB].part[12].thread[1] + 2017/10/25 13:13:22.939262 dumper.go:182: [INFO] dumping.allbytes[3256MB].allrows[16050014].time[30.01sec].rates[108.51MB/sec]... 
+ 2017/10/25 13:13:23.705833 dumper.go:105: [INFO] dumping.table[sbtest.benchyou1].rows[8201555].bytes[1664MB].part[13].thread[2] + 2017/10/25 13:13:23.796722 dumper.go:105: [INFO] dumping.table[sbtest.benchyou0].rows[8201556].bytes[1664MB].part[13].thread[1] + 2017/10/25 13:13:26.088441 dumper.go:105: [INFO] dumping.table[sbtest.benchyou1].rows[8832041].bytes[1792MB].part[14].thread[2] + 2017/10/25 13:13:26.185251 dumper.go:105: [INFO] dumping.table[sbtest.benchyou0].rows[8832044].bytes[1792MB].part[14].thread[1] + 2017/10/25 13:13:28.338164 dumper.go:105: [INFO] dumping.table[sbtest.benchyou1].rows[9462159].bytes[1920MB].part[15].thread[2] + 2017/10/25 13:13:28.392105 dumper.go:105: [INFO] dumping.table[sbtest.benchyou0].rows[9462164].bytes[1920MB].part[15].thread[1] + 2017/10/25 13:13:30.658287 dumper.go:105: [INFO] dumping.table[sbtest.benchyou1].rows[10089992].bytes[2048MB].part[16].thread[2] + 2017/10/25 13:13:30.708556 dumper.go:105: [INFO] dumping.table[sbtest.benchyou0].rows[10089998].bytes[2048MB].part[16].thread[1] + 2017/10/25 13:13:32.939424 dumper.go:182: [INFO] dumping.allbytes[4352MB].allrows[21435655].time[40.01sec].rates[108.78MB/sec]... 
+ 2017/10/25 13:13:32.964288 dumper.go:105: [INFO] dumping.table[sbtest.benchyou1].rows[10717825].bytes[2176MB].part[17].thread[2] + 2017/10/25 13:13:33.110688 dumper.go:105: [INFO] dumping.table[sbtest.benchyou0].rows[10717830].bytes[2176MB].part[17].thread[1] + 2017/10/25 13:13:35.408958 dumper.go:105: [INFO] dumping.table[sbtest.benchyou0].rows[11345661].bytes[2304MB].part[18].thread[1] + 2017/10/25 13:13:35.496439 dumper.go:105: [INFO] dumping.table[sbtest.benchyou1].rows[11345659].bytes[2304MB].part[18].thread[2] + 2017/10/25 13:13:37.627178 dumper.go:105: [INFO] dumping.table[sbtest.benchyou0].rows[11974624].bytes[2432MB].part[19].thread[1] + 2017/10/25 13:13:37.753966 dumper.go:105: [INFO] dumping.table[sbtest.benchyou1].rows[11974630].bytes[2432MB].part[19].thread[2] + 2017/10/25 13:13:39.453430 dumper.go:122: [INFO] dumping.table[sbtest.benchyou0].done.allrows[12486842].allbytes[2536MB].thread[1]... + 2017/10/25 13:13:39.453462 dumper.go:170: [INFO] dumping.table[sbtest.benchyou0].datas.thread[1].done... + 2017/10/25 13:13:39.622390 dumper.go:122: [INFO] dumping.table[sbtest.benchyou1].done.allrows[12484135].allbytes[2535MB].thread[2]... + 2017/10/25 13:13:39.622423 dumper.go:170: [INFO] dumping.table[sbtest.benchyou1].datas.thread[2].done... 
+ 2017/10/25 13:13:39.622454 dumper.go:188: [INFO] dumping.all.done.cost[46.69sec].allrows[24970977].allbytes[5318557708].rate[108.63MB/s] +``` + +The dump files: +``` +$ ls sbtest.sql/ +metadata sbtest.benchyou0.00009.sql sbtest.benchyou0.00018.sql sbtest.benchyou1.00006.sql sbtest.benchyou1.00015.sql +sbtest.benchyou0.00001.sql sbtest.benchyou0.00010.sql sbtest.benchyou0.00019.sql sbtest.benchyou1.00007.sql sbtest.benchyou1.00016.sql +sbtest.benchyou0.00002.sql sbtest.benchyou0.00011.sql sbtest.benchyou0.00020.sql sbtest.benchyou1.00008.sql sbtest.benchyou1.00017.sql +sbtest.benchyou0.00003.sql sbtest.benchyou0.00012.sql sbtest.benchyou0-schema.sql sbtest.benchyou1.00009.sql sbtest.benchyou1.00018.sql +sbtest.benchyou0.00004.sql sbtest.benchyou0.00013.sql sbtest.benchyou1.00001.sql sbtest.benchyou1.00010.sql sbtest.benchyou1.00019.sql +sbtest.benchyou0.00005.sql sbtest.benchyou0.00014.sql sbtest.benchyou1.00002.sql sbtest.benchyou1.00011.sql sbtest.benchyou1.00020.sql +sbtest.benchyou0.00006.sql sbtest.benchyou0.00015.sql sbtest.benchyou1.00003.sql sbtest.benchyou1.00012.sql sbtest.benchyou1-schema.sql +sbtest.benchyou0.00007.sql sbtest.benchyou0.00016.sql sbtest.benchyou1.00004.sql sbtest.benchyou1.00013.sql sbtest-schema-create.sql +sbtest.benchyou0.00008.sql sbtest.benchyou0.00017.sql sbtest.benchyou1.00005.sql sbtest.benchyou1.00014.sql +``` + +### myloader + +``` +$ ./bin/myloader --help +Usage: ./bin/myloader -h [HOST] -P [PORT] -u [USER] -p [PASSWORD] -d [DIR] + -P int + TCP/IP port to connect to (default 3306) + -d string + Directory of the dump to import + -h string + The host to connect to + -p string + User password + -t int + Number of threads to use (default 16) + -u string + Username with privileges to run the loader + +Examples: +$./bin/myloader -h 192.168.0.2 -P 3306 -u mock -p mock -d sbtest.sql + 2017/10/25 13:04:17.396002 loader.go:75: [INFO] restoring.database[sbtest] + 2017/10/25 13:04:17.458076 loader.go:99: [INFO] 
restoring.schema[sbtest.benchyou0] + 2017/10/25 13:04:17.516236 loader.go:99: [INFO] restoring.schema[sbtest.benchyou1] + 2017/10/25 13:04:17.516389 loader.go:115: [INFO] restoring.tables[benchyou0].parts[00015].thread[1] + 2017/10/25 13:04:17.516456 loader.go:115: [INFO] restoring.tables[benchyou0].parts[00005].thread[2] + 2017/10/25 13:04:17.516486 loader.go:115: [INFO] restoring.tables[benchyou0].parts[00020].thread[3] + 2017/10/25 13:04:17.516523 loader.go:115: [INFO] restoring.tables[benchyou1].parts[00009].thread[4] + 2017/10/25 13:04:17.516550 loader.go:115: [INFO] restoring.tables[benchyou1].parts[00018].thread[5] + 2017/10/25 13:04:17.516572 loader.go:115: [INFO] restoring.tables[benchyou1].parts[00020].thread[6] + 2017/10/25 13:04:17.516606 loader.go:115: [INFO] restoring.tables[benchyou1].parts[00019].thread[7] + 2017/10/25 13:04:17.516655 loader.go:115: [INFO] restoring.tables[benchyou0].parts[00002].thread[8] + 2017/10/25 13:04:17.516692 loader.go:115: [INFO] restoring.tables[benchyou1].parts[00011].thread[9] + 2017/10/25 13:04:17.516718 loader.go:115: [INFO] restoring.tables[benchyou0].parts[00009].thread[10] + 2017/10/25 13:04:17.516739 loader.go:115: [INFO] restoring.tables[benchyou1].parts[00017].thread[11] + 2017/10/25 13:04:17.516772 loader.go:115: [INFO] restoring.tables[benchyou1].parts[00010].thread[12] + 2017/10/25 13:04:17.516797 loader.go:115: [INFO] restoring.tables[benchyou1].parts[00008].thread[13] + 2017/10/25 13:04:17.516818 loader.go:115: [INFO] restoring.tables[benchyou1].parts[00002].thread[14] + 2017/10/25 13:04:17.516854 loader.go:115: [INFO] restoring.tables[benchyou1].parts[00003].thread[15] + 2017/10/25 13:04:17.516893 loader.go:115: [INFO] restoring.tables[benchyou1].parts[00013].thread[0] + 2017/10/25 13:04:39.820177 loader.go:131: [INFO] restoring.tables[benchyou0].parts[00020].thread[3].done... 
+ 2017/10/25 13:04:39.820255 loader.go:115: [INFO] restoring.tables[benchyou1].parts[00012].thread[3] + 2017/10/25 13:04:41.483128 loader.go:131: [INFO] restoring.tables[benchyou1].parts[00020].thread[6].done... + 2017/10/25 13:04:41.483198 loader.go:115: [INFO] restoring.tables[benchyou0].parts[00010].thread[6] + 2017/10/25 13:04:46.865340 loader.go:131: [INFO] restoring.tables[benchyou0].parts[00015].thread[1].done... + 2017/10/25 13:04:46.865421 loader.go:115: [INFO] restoring.tables[benchyou1].parts[00014].thread[1] + 2017/10/25 13:04:47.211336 loader.go:131: [INFO] restoring.tables[benchyou0].parts[00005].thread[2].done... + 2017/10/25 13:04:47.211415 loader.go:115: [INFO] restoring.tables[benchyou0].parts[00007].thread[2] + 2017/10/25 13:04:47.399552 loader.go:131: [INFO] restoring.tables[benchyou0].parts[00009].thread[10].done... + 2017/10/25 13:04:47.399638 loader.go:115: [INFO] restoring.tables[benchyou1].parts[00007].thread[10] + 2017/10/25 13:04:47.835980 loader.go:131: [INFO] restoring.tables[benchyou0].parts[00002].thread[8].done... + 2017/10/25 13:04:47.836068 loader.go:115: [INFO] restoring.tables[benchyou0].parts[00003].thread[8] + 2017/10/25 13:04:49.079171 loader.go:131: [INFO] restoring.tables[benchyou1].parts[00009].thread[4].done... + 2017/10/25 13:04:49.079249 loader.go:115: [INFO] restoring.tables[benchyou1].parts[00015].thread[4] + 2017/10/25 13:04:49.279380 loader.go:131: [INFO] restoring.tables[benchyou1].parts[00017].thread[11].done... + 2017/10/25 13:04:49.279457 loader.go:115: [INFO] restoring.tables[benchyou0].parts[00018].thread[11] + 2017/10/25 13:04:49.539250 loader.go:131: [INFO] restoring.tables[benchyou1].parts[00008].thread[13].done... + 2017/10/25 13:04:49.539329 loader.go:115: [INFO] restoring.tables[benchyou0].parts[00012].thread[13] + 2017/10/25 13:04:49.574229 loader.go:131: [INFO] restoring.tables[benchyou1].parts[00018].thread[5].done... 
+ 2017/10/25 13:04:49.574314 loader.go:115: [INFO] restoring.tables[benchyou0].parts[00016].thread[5]
 + 2017/10/25 13:04:49.834601 loader.go:131: [INFO] restoring.tables[benchyou1].parts[00010].thread[12].done...
 + 2017/10/25 13:04:49.834682 loader.go:115: [INFO] restoring.tables[benchyou0].parts[00006].thread[12]
 + 2017/10/25 13:04:49.851738 loader.go:131: [INFO] restoring.tables[benchyou1].parts[00002].thread[14].done...
 + 2017/10/25 13:04:49.851815 loader.go:115: [INFO] restoring.tables[benchyou0].parts[00017].thread[14]
 + 2017/10/25 13:04:50.101755 loader.go:131: [INFO] restoring.tables[benchyou1].parts[00019].thread[7].done...
 + 2017/10/25 13:04:50.101831 loader.go:115: [INFO] restoring.tables[benchyou0].parts[00004].thread[7]
 + 2017/10/25 13:04:50.476413 loader.go:131: [INFO] restoring.tables[benchyou1].parts[00013].thread[0].done...
 + 2017/10/25 13:04:50.476499 loader.go:115: [INFO] restoring.tables[benchyou1].parts[00001].thread[0]
 + 2017/10/25 13:04:50.667836 loader.go:131: [INFO] restoring.tables[benchyou1].parts[00003].thread[15].done...
 + 2017/10/25 13:04:50.667916 loader.go:115: [INFO] restoring.tables[benchyou0].parts[00013].thread[15]
 + 2017/10/25 13:04:50.702259 loader.go:131: [INFO] restoring.tables[benchyou1].parts[00011].thread[9].done...
 + 2017/10/25 13:04:50.702397 loader.go:115: [INFO] restoring.tables[benchyou1].parts[00005].thread[9]
 + 2017/10/25 13:05:17.111718 loader.go:131: [INFO] restoring.tables[benchyou1].parts[00012].thread[3].done...
 + 2017/10/25 13:05:17.111804 loader.go:115: [INFO] restoring.tables[benchyou0].parts[00001].thread[3]
 + 2017/10/25 13:05:17.374860 loader.go:131: [INFO] restoring.tables[benchyou0].parts[00010].thread[6].done... 
+ 2017/10/25 13:05:17.374944 loader.go:115: [INFO] restoring.tables[benchyou1].parts[00016].thread[6] + 2017/10/25 13:05:21.331044 loader.go:131: [INFO] restoring.tables[benchyou0].parts[00007].thread[2].done... + 2017/10/25 13:05:21.331130 loader.go:115: [INFO] restoring.tables[benchyou0].parts[00014].thread[2] + 2017/10/25 13:05:23.707516 loader.go:131: [INFO] restoring.tables[benchyou1].parts[00014].thread[1].done... + 2017/10/25 13:05:23.707602 loader.go:115: [INFO] restoring.tables[benchyou0].parts[00011].thread[1] + 2017/10/25 13:05:23.857451 loader.go:131: [INFO] restoring.tables[benchyou1].parts[00007].thread[10].done... + 2017/10/25 13:05:23.857561 loader.go:115: [INFO] restoring.tables[benchyou0].parts[00008].thread[10] + 2017/10/25 13:05:24.975366 loader.go:131: [INFO] restoring.tables[benchyou1].parts[00001].thread[0].done... + 2017/10/25 13:05:24.975446 loader.go:115: [INFO] restoring.tables[benchyou1].parts[00004].thread[0] + 2017/10/25 13:05:25.026926 loader.go:131: [INFO] restoring.tables[benchyou0].parts[00003].thread[8].done... + 2017/10/25 13:05:25.027012 loader.go:115: [INFO] restoring.tables[benchyou0].parts[00019].thread[8] + 2017/10/25 13:05:26.133469 loader.go:131: [INFO] restoring.tables[benchyou0].parts[00018].thread[11].done... + 2017/10/25 13:05:26.133612 loader.go:115: [INFO] restoring.tables[benchyou1].parts[00006].thread[11] + 2017/10/25 13:05:26.696523 loader.go:131: [INFO] restoring.tables[benchyou1].parts[00015].thread[4].done... + 2017/10/25 13:05:26.855668 loader.go:131: [INFO] restoring.tables[benchyou0].parts[00012].thread[13].done... + 2017/10/25 13:05:26.878910 loader.go:131: [INFO] restoring.tables[benchyou0].parts[00006].thread[12].done... + 2017/10/25 13:05:26.979248 loader.go:131: [INFO] restoring.tables[benchyou0].parts[00016].thread[5].done... + 2017/10/25 13:05:26.995089 loader.go:131: [INFO] restoring.tables[benchyou0].parts[00017].thread[14].done... 
+ 2017/10/25 13:05:27.107230 loader.go:131: [INFO] restoring.tables[benchyou0].parts[00004].thread[7].done... + 2017/10/25 13:05:27.160083 loader.go:131: [INFO] restoring.tables[benchyou0].parts[00013].thread[15].done... + 2017/10/25 13:05:27.783560 loader.go:131: [INFO] restoring.tables[benchyou1].parts[00005].thread[9].done... + 2017/10/25 13:05:36.133758 loader.go:181: [INFO] restoring.allbytes[4087MB].time[78.62sec].rates[51.99MB/sec]... + 2017/10/25 13:05:44.759183 loader.go:131: [INFO] restoring.tables[benchyou0].parts[00001].thread[3].done... + 2017/10/25 13:05:46.133728 loader.go:181: [INFO] restoring.allbytes[4216MB].time[88.62sec].rates[47.58MB/sec]... + 2017/10/25 13:05:46.567156 loader.go:131: [INFO] restoring.tables[benchyou1].parts[00016].thread[6].done... + 2017/10/25 13:05:50.612200 loader.go:131: [INFO] restoring.tables[benchyou0].parts[00008].thread[10].done... + 2017/10/25 13:05:51.131155 loader.go:131: [INFO] restoring.tables[benchyou0].parts[00014].thread[2].done... + 2017/10/25 13:05:51.185629 loader.go:131: [INFO] restoring.tables[benchyou0].parts[00011].thread[1].done... + 2017/10/25 13:05:51.836354 loader.go:131: [INFO] restoring.tables[benchyou1].parts[00004].thread[0].done... + 2017/10/25 13:05:52.286931 loader.go:131: [INFO] restoring.tables[benchyou1].parts[00006].thread[11].done... + 2017/10/25 13:05:52.602444 loader.go:131: [INFO] restoring.tables[benchyou0].parts[00019].thread[8].done... + 2017/10/25 13:05:52.602573 loader.go:187: [INFO] restoring.all.done.cost[95.09sec].allbytes[5120.00MB].rate[53.85MB/s] +``` + +### mystreamer + +Streaming mode, dumps datas from upstream to downstream in parallel instead of dumping to the out directory. 
+``` +$./bin/mystreamer +Usage: ./bin/mystreamer -h [HOST] -P [PORT] -u [USER] -p [PASSWORD] -db [DATABASE] -2h [DOWNSTREAM-HOST] -2P [DOWNSTREAM-PORT] -2u [DOWNSTREAM-USER] -2p [DOWNSTREAM-PASSWORD] [-2db DOWNSTREAM-DATABASE] [-o] + -2P int + Downstream TCP/IP port to connect to (default 3306) + -2db string + Downstream database, default is same as upstream db + -2h string + The downstream host to connect to + -2p string + Downstream user password + -2u string + Downstream username with privileges to run the streamer + -P int + Upstream TCP/IP port to connect to (default 3306) + -db string + Database to stream + -h string + The upstream host to connect to + -o Drop tables if they already exist + -p string + Upstream user password + -s int + Attempted size of INSERT statement in bytes (default 1000000) + -t int + Number of threads to use (default 16) + -table string + Table to stream + -u string + Upstream username with privileges to run the streamer + +Examples: +$./bin/mystreamer -h 192.168.0.2 -P 3306 -u mock -p mock -2h 192.168.0.3 -2P 3306 -2u mock -2p mock -db sbtest + 2017/11/17 23:52:46.424107 streamer.go:30: [INFO] streaming.database[sbtest].schema... + 2017/11/17 23:52:46.458807 streamer.go:50: [INFO] streaming.table[sbtest.benchyou0].schema... + 2017/11/17 23:52:46.458892 streamer.go:165: [INFO] streaming.table[sbtest.benchyou0].datas.thread[1]... + 2017/11/17 23:52:46.509911 streamer.go:50: [INFO] streaming.table[sbtest.benchyou1].schema... + 2017/11/17 23:52:46.510008 streamer.go:165: [INFO] streaming.table[sbtest.benchyou1].datas.thread[2]... + 2017/11/17 23:52:46.557443 streamer.go:50: [INFO] streaming.table[sbtest.benchyou2].schema... + 2017/11/17 23:52:46.557499 streamer.go:165: [INFO] streaming.table[sbtest.benchyou2].datas.thread[3]... + 2017/11/17 23:52:46.608464 streamer.go:50: [INFO] streaming.table[sbtest.benchyou3].schema... + 2017/11/17 23:52:46.608526 streamer.go:165: [INFO] streaming.table[sbtest.benchyou3].datas.thread[4]... 
+ 2017/11/17 23:52:46.667519 streamer.go:50: [INFO] streaming.table[sbtest.benchyou4].schema... + 2017/11/17 23:52:46.667584 streamer.go:165: [INFO] streaming.table[sbtest.benchyou4].datas.thread[5]... + 2017/11/17 23:52:46.807810 streamer.go:50: [INFO] streaming.table[sbtest.benchyou5].schema... + 2017/11/17 23:52:46.808157 streamer.go:165: [INFO] streaming.table[sbtest.benchyou5].datas.thread[6]... + 2017/11/17 23:52:46.946838 streamer.go:50: [INFO] streaming.table[sbtest.benchyou6].schema... + 2017/11/17 23:52:46.946902 streamer.go:165: [INFO] streaming.table[sbtest.benchyou6].datas.thread[7]... + 2017/11/17 23:52:47.114519 streamer.go:50: [INFO] streaming.table[sbtest.benchyou7].schema... + 2017/11/17 23:52:47.114609 streamer.go:165: [INFO] streaming.table[sbtest.benchyou7].datas.thread[8]... + 2017/11/17 23:52:52.198910 streamer.go:121: [INFO] streaming.table[sbtest.benchyou0].done.allrows[133342].allbytes[28MB].thread[1]... + 2017/11/17 23:52:52.198965 streamer.go:167: [INFO] streaming.table[sbtest.benchyou0].datas.thread[1].done... + 2017/11/17 23:52:52.728184 streamer.go:121: [INFO] streaming.table[sbtest.benchyou1].done.allrows[133875].allbytes[28MB].thread[2]... + 2017/11/17 23:52:52.728221 streamer.go:167: [INFO] streaming.table[sbtest.benchyou1].datas.thread[2].done... + 2017/11/17 23:52:52.774523 streamer.go:121: [INFO] streaming.table[sbtest.benchyou3].done.allrows[133459].allbytes[28MB].thread[4]... + 2017/11/17 23:52:52.774792 streamer.go:167: [INFO] streaming.table[sbtest.benchyou3].datas.thread[4].done... + 2017/11/17 23:52:52.887324 streamer.go:121: [INFO] streaming.table[sbtest.benchyou2].done.allrows[133849].allbytes[28MB].thread[3]... + 2017/11/17 23:52:52.887564 streamer.go:167: [INFO] streaming.table[sbtest.benchyou2].datas.thread[3].done... + 2017/11/17 23:52:53.056884 streamer.go:121: [INFO] streaming.table[sbtest.benchyou4].done.allrows[133995].allbytes[28MB].thread[5]... 
+ 2017/11/17 23:52:53.056923 streamer.go:167: [INFO] streaming.table[sbtest.benchyou4].datas.thread[5].done... + 2017/11/17 23:52:53.319846 streamer.go:121: [INFO] streaming.table[sbtest.benchyou5].done.allrows[134490].allbytes[28MB].thread[6]... + 2017/11/17 23:52:53.319881 streamer.go:167: [INFO] streaming.table[sbtest.benchyou5].datas.thread[6].done... + 2017/11/17 23:52:53.338505 streamer.go:121: [INFO] streaming.table[sbtest.benchyou6].done.allrows[134035].allbytes[28MB].thread[7]... + 2017/11/17 23:52:53.338553 streamer.go:167: [INFO] streaming.table[sbtest.benchyou6].datas.thread[7].done... + 2017/11/17 23:52:53.465916 streamer.go:121: [INFO] streaming.table[sbtest.benchyou7].done.allrows[133240].allbytes[28MB].thread[8]... + 2017/11/17 23:52:53.465958 streamer.go:167: [INFO] streaming.table[sbtest.benchyou7].datas.thread[8].done... + 2017/11/17 23:52:53.465999 streamer.go:185: [INFO] streaming.all.done.cost[7.04sec].allrows[1070285].allbytes[240555976].rate[32.52MB/s] +``` diff --git a/src/vendor/github.com/xelabs/go-mydumper/makefile b/src/vendor/github.com/xelabs/go-mydumper/makefile new file mode 100644 index 00000000..db5ed986 --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mydumper/makefile @@ -0,0 +1,42 @@ +export GOPATH := $(shell pwd) +export PATH := $(GOPATH)/bin:$(PATH) + +all: get build test + +get: + @echo "--> go get..." + go get github.com/XeLabs/go-mysqlstack/driver + go get github.com/stretchr/testify/assert + go get github.com/pierrre/gotestcover + +build: + @echo "--> Building..." + go build -v -o bin/mydumper src/mydumper/main.go + go build -v -o bin/myloader src/myloader/main.go + go build -v -o bin/mystreamer src/mystreamer/main.go + @chmod 755 bin/* + +clean: + @echo "--> Cleaning..." + @go clean + @rm -f bin/* + +fmt: + go fmt ./... + go vet ./... + +test: + @echo "--> Testing..." 
+	@$(MAKE) testcommon
+
+testcommon:
+	go test -race -v common
+
+# code coverage
+COVPKGS =	common
+coverage:
+	go build -v -o bin/gotestcover \
+	src/github.com/pierrre/gotestcover/*.go;
+	gotestcover -coverprofile=coverage.out -v $(COVPKGS)
+	go tool cover -html=coverage.out
+.PHONY: get build clean fmt test coverage
diff --git a/src/vendor/github.com/xelabs/go-mydumper/src/common/common.go b/src/vendor/github.com/xelabs/go-mydumper/src/common/common.go
new file mode 100644
index 00000000..1db96995
--- /dev/null
+++ b/src/vendor/github.com/xelabs/go-mydumper/src/common/common.go
@@ -0,0 +1,108 @@
+/*
+ * go-mydumper
+ * xelabs.org
+ *
+ * Copyright (c) XeLabs
+ * GPL License
+ *
+ */
+
+package common
+
+import (
+	"io"
+	"io/ioutil"
+	"os"
+
+	"github.com/xelabs/go-mysqlstack/common"
+)
+
+// Args is the option bag shared by the dumper/loader/streamer workers.
+// Allbytes/Allrows are mutated concurrently by worker goroutines and
+// must be accessed with sync/atomic.
+type Args struct {
+	User            string
+	Password        string
+	Address         string
+	ToUser          string
+	ToPassword      string
+	ToAddress       string
+	ToDatabase      string
+	ToEngine        string
+	Database        string
+	Table           string
+	Outdir          string
+	Threads         int
+	ChunksizeInMB   int
+	StmtSize        int
+	Allbytes        uint64
+	Allrows         uint64
+	OverwriteTables bool
+
+	// Interval in millisecond.
+	IntervalMs int
+}
+
+// WriteFile writes data to file, creating the file if it does not
+// exist and truncating any previous content otherwise.
+func WriteFile(file string, data string) error {
+	// O_CREATE is a no-op on an existing file, so a single OpenFile call
+	// replaces the original racy Stat-then-OpenFile sequence (TOCTOU).
+	f, err := os.OpenFile(file, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	n, err := f.Write(common.StringToBytes(data))
+	if err != nil {
+		return err
+	}
+	if n != len(data) {
+		return io.ErrShortWrite
+	}
+	return nil
+}
+
+// ReadFile reads the whole content of file.
+func ReadFile(file string) ([]byte, error) {
+	return ioutil.ReadFile(file)
+}
+
+// AssertNil panics if err is not nil; the tools treat any such error
+// as unrecoverable.
+func AssertNil(err error) {
+	if err != nil {
+		panic(err)
+	}
+}
+
+// EscapeBytes escapes a raw byte string so it can be embedded in a
+// quoted MySQL string literal.
+func EscapeBytes(bytes []byte) []byte {
+	buffer := common.NewBuffer(128)
+	for _, b := range bytes {
+		// See https://dev.mysql.com/doc/refman/5.7/en/string-literals.html
+		// for more information on how to escape string literals in MySQL.
+		switch b {
+		case 0:
+			buffer.WriteString(`\0`)
+		case '\'':
+			buffer.WriteString(`\'`)
+		case '"':
+			buffer.WriteString(`\"`)
+		case '\b':
+			buffer.WriteString(`\b`)
+		case '\n':
+			buffer.WriteString(`\n`)
+		case '\r':
+			buffer.WriteString(`\r`)
+		case '\t':
+			buffer.WriteString(`\t`)
+		case 0x1A:
+			buffer.WriteString(`\Z`)
+		case '\\':
+			buffer.WriteString(`\\`)
+		default:
+			buffer.WriteU8(b)
+		}
+	}
+	return buffer.Datas()
+}
diff --git a/src/vendor/github.com/xelabs/go-mydumper/src/common/common_test.go b/src/vendor/github.com/xelabs/go-mydumper/src/common/common_test.go
new file mode 100644
index 00000000..62e53734
--- /dev/null
+++ b/src/vendor/github.com/xelabs/go-mydumper/src/common/common_test.go
@@ -0,0 +1,56 @@
+/*
+ * go-mydumper
+ * xelabs.org
+ *
+ * Copyright (c) XeLabs
+ * GPL License
+ *
+ */
+
+package common
+
+import (
+	"os"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestWriteReadFile(t *testing.T) {
+	file := "/tmp/xx.txt"
+	defer os.Remove(file)
+
+	{
+		err := WriteFile(file, "fake")
+		assert.Nil(t, err)
+	}
+
+	{
+		got, err := ReadFile(file)
+		assert.Nil(t, err)
+		want := []byte("fake")
+		assert.Equal(t, want, got)
+	}
+
+	{
+		// Writing into a directory that does not exist must fail.
+		err := WriteFile("/xxu01/xx.txt", "fake")
+		assert.NotNil(t, err)
+	}
+}
+
+func TestEscapeBytes(t *testing.T) {
+	tests := []struct {
+		v   []byte
+		exp []byte
+	}{
+		{[]byte("simple"), []byte("simple")},
+		{[]byte(`simplers's "world"`), []byte(`simplers\'s \"world\"`)},
+		{[]byte("\x00'\"\b\n\r"), []byte(`\0\'\"\b\n\r`)},
+		{[]byte("\t\x1A\\"), []byte(`\t\Z\\`)},
+	}
+	for _, tt := range tests {
+		got := EscapeBytes(tt.v)
+		want := tt.exp
+		assert.Equal(t, want, got)
+	}
+}
diff --git a/src/vendor/github.com/xelabs/go-mydumper/src/common/dumper.go
b/src/vendor/github.com/xelabs/go-mydumper/src/common/dumper.go
new file mode 100644
index 00000000..2041e47e
--- /dev/null
+++ b/src/vendor/github.com/xelabs/go-mydumper/src/common/dumper.go
@@ -0,0 +1,192 @@
+/*
+ * go-mydumper
+ * xelabs.org
+ *
+ * Copyright (c) XeLabs
+ * GPL License
+ *
+ */
+
+package common
+
+import (
+	"fmt"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	querypb "github.com/xelabs/go-mysqlstack/sqlparser/depends/query"
+	"github.com/xelabs/go-mysqlstack/xlog"
+)
+
+// writeMetaData drops an empty metadata marker file into the output dir.
+func writeMetaData(args *Args) {
+	file := fmt.Sprintf("%s/metadata", args.Outdir)
+	// A failed write would silently produce an unusable dump; fail hard.
+	AssertNil(WriteFile(file, ""))
+}
+
+// dumpDatabaseSchema writes the CREATE DATABASE statement of args.Database.
+func dumpDatabaseSchema(log *xlog.Log, conn *Connection, args *Args) {
+	err := conn.Execute(fmt.Sprintf("USE `%s`", args.Database))
+	AssertNil(err)
+
+	schema := fmt.Sprintf("CREATE DATABASE IF NOT EXISTS `%s`;", args.Database)
+	file := fmt.Sprintf("%s/%s-schema-create.sql", args.Outdir, args.Database)
+	AssertNil(WriteFile(file, schema))
+	log.Info("dumping.database[%s].schema...", args.Database)
+}
+
+// dumpTableSchema writes the SHOW CREATE TABLE output of one table.
+func dumpTableSchema(log *xlog.Log, conn *Connection, args *Args, table string) {
+	qr, err := conn.Fetch(fmt.Sprintf("SHOW CREATE TABLE `%s`.`%s`", args.Database, table))
+	AssertNil(err)
+	schema := qr.Rows[0][1].String() + ";\n"
+
+	file := fmt.Sprintf("%s/%s.%s-schema.sql", args.Outdir, args.Database, table)
+	AssertNil(WriteFile(file, schema))
+	log.Info("dumping.table[%s.%s].schema...", args.Database, table)
+}
+
+// dumpTable streams all rows of one table and writes them out as part
+// files of roughly args.ChunksizeInMB each, every file holding INSERT
+// statements of at most ~args.StmtSize bytes of values.
+func dumpTable(log *xlog.Log, conn *Connection, args *Args, table string) {
+	var allBytes uint64
+	var allRows uint64
+
+	cursor, err := conn.StreamFetch(fmt.Sprintf("SELECT /*backup*/ * FROM `%s`.`%s`", args.Database, table))
+	AssertNil(err)
+
+	fields := make([]string, 0, 16)
+	flds := cursor.Fields()
+	for _, fld := range flds {
+		fields = append(fields, fmt.Sprintf("`%s`", fld.Name))
+	}
+
+	fileNo := 1
+	stmtsize := 0
+	chunkbytes := 0
+	rows := make([]string, 0, 256)
+	inserts := make([]string, 0, 256)
+	for cursor.Next() {
+		row, err := cursor.RowValues()
+		AssertNil(err)
+
+		values := make([]string, 0, 16)
+		for _, v := range row {
+			if v.Raw() == nil {
+				values = append(values, "NULL")
+			} else {
+				str := v.String()
+				switch {
+				// Numeric types go out unquoted; everything else is escaped
+				// and double-quoted.
+				case v.IsSigned(), v.IsUnsigned(), v.IsFloat(), v.IsIntegral(), v.Type() == querypb.Type_DECIMAL:
+					values = append(values, str)
+				default:
+					values = append(values, fmt.Sprintf("\"%s\"", EscapeBytes(v.Raw())))
+				}
+			}
+		}
+		r := "(" + strings.Join(values, ",") + ")"
+		rows = append(rows, r)
+
+		allRows++
+		stmtsize += len(r)
+		chunkbytes += len(r)
+		allBytes += uint64(len(r))
+		atomic.AddUint64(&args.Allbytes, uint64(len(r)))
+		atomic.AddUint64(&args.Allrows, 1)
+
+		// Fold the accumulated rows into one INSERT once the
+		// statement-size budget is reached.
+		if stmtsize >= args.StmtSize {
+			insertone := fmt.Sprintf("INSERT INTO `%s`(%s) VALUES\n%s", table, strings.Join(fields, ","), strings.Join(rows, ",\n"))
+			inserts = append(inserts, insertone)
+			rows = rows[:0]
+			stmtsize = 0
+		}
+
+		// Flush the completed INSERTs to a new part file once the
+		// chunk-size budget is reached.
+		if (chunkbytes / 1024 / 1024) >= args.ChunksizeInMB {
+			query := strings.Join(inserts, ";\n") + ";\n"
+			file := fmt.Sprintf("%s/%s.%s.%05d.sql", args.Outdir, args.Database, table, fileNo)
+			AssertNil(WriteFile(file, query))
+
+			log.Info("dumping.table[%s.%s].rows[%v].bytes[%vMB].part[%v].thread[%d]", args.Database, table, allRows, (allBytes / 1024 / 1024), fileNo, conn.ID)
+			inserts = inserts[:0]
+			chunkbytes = 0
+			fileNo++
+		}
+	}
+	// Final flush. Key it on the pending buffers, not on chunkbytes: a
+	// chunk flush resets chunkbytes to 0 while leaving not-yet-folded
+	// rows behind, so `chunkbytes > 0` could both lose trailing rows and
+	// emit an INSERT with an empty VALUES list.
+	if len(rows) > 0 {
+		insertone := fmt.Sprintf("INSERT INTO `%s`(%s) VALUES\n%s", table, strings.Join(fields, ","), strings.Join(rows, ",\n"))
+		inserts = append(inserts, insertone)
+	}
+	if len(inserts) > 0 {
+		query := strings.Join(inserts, ";\n") + ";\n"
+		file := fmt.Sprintf("%s/%s.%s.%05d.sql", args.Outdir, args.Database, table, fileNo)
+		AssertNil(WriteFile(file, query))
+	}
+	err = cursor.Close()
+	AssertNil(err)
+
+	log.Info("dumping.table[%s.%s].done.allrows[%v].allbytes[%vMB].thread[%d]...", args.Database, table, allRows, (allBytes / 1024 / 1024), conn.ID)
+}
+
+// allTables returns every table name of args.Database.
+func allTables(log *xlog.Log, conn *Connection, args *Args) []string {
+	qr, err := conn.Fetch(fmt.Sprintf("SHOW TABLES FROM `%s`", args.Database))
+	AssertNil(err)
+
+	tables := make([]string, 0, 128)
+	for _, t := range qr.Rows {
+		tables = append(tables, t[0].String())
+	}
+	return tables
+}
+
+// Dumper used to start the dumper worker.
+func Dumper(log *xlog.Log, args *Args) {
+	pool, err := NewPool(log, args.Threads, args.Address, args.User, args.Password)
+	AssertNil(err)
+	defer pool.Close()
+
+	// Meta data.
+	writeMetaData(args)
+
+	// database.
+	conn := pool.Get()
+	dumpDatabaseSchema(log, conn, args)
+
+	// tables.
+	var wg sync.WaitGroup
+	var tables []string
+	t := time.Now()
+	if args.Table != "" {
+		tables = strings.Split(args.Table, ",")
+	} else {
+		tables = allTables(log, conn, args)
+	}
+	pool.Put(conn)
+
+	for _, table := range tables {
+		conn := pool.Get()
+		dumpTableSchema(log, conn, args, table)
+
+		wg.Add(1)
+		go func(conn *Connection, table string) {
+			defer func() {
+				wg.Done()
+				pool.Put(conn)
+			}()
+			log.Info("dumping.table[%s.%s].datas.thread[%d]...", args.Database, table, conn.ID)
+			dumpTable(log, conn, args, table)
+			log.Info("dumping.table[%s.%s].datas.thread[%d].done...", args.Database, table, conn.ID)
+		}(conn, table)
+	}
+
+	// Progress reporter. NOTE(review): the goroutine ranging over tick.C
+	// is never joined and lives until process exit; acceptable for a
+	// short-lived command-line tool.
+	tick := time.NewTicker(time.Millisecond * time.Duration(args.IntervalMs))
+	defer tick.Stop()
+	go func() {
+		for range tick.C {
+			diff := time.Since(t).Seconds()
+			allbytes := atomic.LoadUint64(&args.Allbytes)
+			allrows := atomic.LoadUint64(&args.Allrows)
+			allbytesMB := allbytes / 1024 / 1024
+			// Convert to float BEFORE dividing, otherwise the rate is
+			// truncated to whole MB/sec.
+			rates := float64(allbytes) / 1024 / 1024 / diff
+			log.Info("dumping.allbytes[%vMB].allrows[%v].time[%.2fsec].rates[%.2fMB/sec]...", allbytesMB, allrows, diff, rates)
+		}
+	}()
+
+	wg.Wait()
+	elapsed := time.Since(t).Seconds()
+	finalBytes := atomic.LoadUint64(&args.Allbytes)
+	finalRows := atomic.LoadUint64(&args.Allrows)
+	log.Info("dumping.all.done.cost[%.2fsec].allrows[%v].allbytes[%v].rate[%.2fMB/s]", elapsed, finalRows, finalBytes, float64(finalBytes)/1024/1024/elapsed)
+}
diff --git a/src/vendor/github.com/xelabs/go-mydumper/src/common/dumper_test.go b/src/vendor/github.com/xelabs/go-mydumper/src/common/dumper_test.go
new file mode 100644
index 00000000..f73e7ec5
---
/dev/null +++ b/src/vendor/github.com/xelabs/go-mydumper/src/common/dumper_test.go @@ -0,0 +1,142 @@ +/* + * go-mydumper + * xelabs.org + * + * Copyright (c) XeLabs + * GPL License + * + */ + +package common + +import ( + "io/ioutil" + "os" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/xelabs/go-mysqlstack/driver" + querypb "github.com/xelabs/go-mysqlstack/sqlparser/depends/query" + "github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes" + "github.com/xelabs/go-mysqlstack/xlog" +) + +func TestDumper(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.INFO)) + fakedbs := driver.NewTestHandler(log) + server, err := driver.MockMysqlServer(log, fakedbs) + assert.Nil(t, err) + defer server.Close() + address := server.Addr() + + selectResult := &sqltypes.Result{ + Fields: []*querypb.Field{ + { + Name: "id", + Type: querypb.Type_INT32, + }, + { + Name: "name", + Type: querypb.Type_VARCHAR, + }, + { + Name: "namei1", + Type: querypb.Type_VARCHAR, + }, + { + Name: "null", + Type: querypb.Type_NULL_TYPE, + }, + { + Name: "decimal", + Type: querypb.Type_DECIMAL, + }, + { + Name: "datetime", + Type: querypb.Type_DATETIME, + }, + }, + Rows: make([][]sqltypes.Value, 0, 256)} + + for i := 0; i < 201710; i++ { + row := []sqltypes.Value{ + sqltypes.MakeTrusted(querypb.Type_INT32, []byte("11")), + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte("11\"xx\"")), + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte("")), + sqltypes.MakeTrusted(querypb.Type_NULL_TYPE, nil), + sqltypes.MakeTrusted(querypb.Type_DECIMAL, []byte("210.01")), + sqltypes.NULL, + } + selectResult.Rows = append(selectResult.Rows, row) + } + + schemaResult := &sqltypes.Result{ + Fields: []*querypb.Field{ + { + Name: "Table", + Type: querypb.Type_VARCHAR, + }, + { + Name: "Create Table", + Type: querypb.Type_VARCHAR, + }, + }, + Rows: [][]sqltypes.Value{ + { + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte("t1")), + sqltypes.MakeTrusted(querypb.Type_VARCHAR, 
[]byte("CREATE TABLE `t1` (`a` int(11) DEFAULT NULL,`b` varchar(100) DEFAULT NULL) ENGINE=InnoDB")), + }, + }} + + tablesResult := &sqltypes.Result{ + Fields: []*querypb.Field{ + { + Name: "Tables_in_test", + Type: querypb.Type_VARCHAR, + }, + }, + Rows: [][]sqltypes.Value{ + { + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte("t1")), + }, + { + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte("t2")), + }, + }} + + // fakedbs. + { + fakedbs.AddQueryPattern("use .*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("show create table .*", schemaResult) + fakedbs.AddQueryPattern("show tables from .*", tablesResult) + fakedbs.AddQueryPattern("select .*", selectResult) + } + + args := &Args{ + Database: "test", + Outdir: "/tmp/dumpertest", + User: "mock", + Password: "mock", + Address: address, + ChunksizeInMB: 1, + Threads: 16, + StmtSize: 10000, + IntervalMs: 500, + } + + os.RemoveAll(args.Outdir) + if _, err := os.Stat(args.Outdir); os.IsNotExist(err) { + x := os.MkdirAll(args.Outdir, 0777) + AssertNil(x) + } + + // Dumper. + { + Dumper(log, args) + } + dat, err := ioutil.ReadFile(args.Outdir + "/test.t1.00001.sql") + assert.Nil(t, err) + want := strings.Contains(string(dat), `(11,"11\"xx\"","",NULL,210.01,NULL)`) + assert.True(t, want) +} diff --git a/src/vendor/github.com/xelabs/go-mydumper/src/common/loader.go b/src/vendor/github.com/xelabs/go-mydumper/src/common/loader.go new file mode 100644 index 00000000..5626400a --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mydumper/src/common/loader.go @@ -0,0 +1,194 @@ +/* + * go-mydumper + * xelabs.org + * + * Copyright (c) XeLabs + * GPL License + * + */ + +package common + +import ( + "fmt" + "math/rand" + "os" + "path/filepath" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/xelabs/go-mysqlstack/common" + "github.com/xelabs/go-mysqlstack/xlog" +) + +// Files tuple. 
+type Files struct { + databases []string + schemas []string + tables []string +} + +var ( + dbSuffix = "-schema-create.sql" + schemaSuffix = "-schema.sql" + tableSuffix = ".sql" +) + +func loadFiles(log *xlog.Log, dir string) *Files { + files := &Files{} + if err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { + if err != nil { + log.Panicf("loader.file.walk.error:%+v", err) + } + + if !info.IsDir() { + switch { + case strings.HasSuffix(path, dbSuffix): + files.databases = append(files.databases, path) + case strings.HasSuffix(path, schemaSuffix): + files.schemas = append(files.schemas, path) + default: + if strings.HasSuffix(path, tableSuffix) { + files.tables = append(files.tables, path) + } + } + } + return nil + }); err != nil { + log.Panicf("loader.file.walk.error:%+v", err) + } + return files +} + +func restoreDatabaseSchema(log *xlog.Log, dbs []string, conn *Connection) { + for _, db := range dbs { + base := filepath.Base(db) + name := strings.TrimSuffix(base, dbSuffix) + + data, err := ReadFile(db) + AssertNil(err) + sql := common.BytesToString(data) + + err = conn.Execute(sql) + AssertNil(err) + log.Info("restoring.database[%s]", name) + } +} + +func restoreTableSchema(log *xlog.Log, overwrite bool, tables []string, conn *Connection) { + for _, table := range tables { + // use + base := filepath.Base(table) + name := strings.TrimSuffix(base, schemaSuffix) + db := strings.Split(name, ".")[0] + + err := conn.Execute(fmt.Sprintf("USE `%s`", db)) + AssertNil(err) + + data, err := ReadFile(table) + AssertNil(err) + query1 := common.BytesToString(data) + querys := strings.Split(query1, ";\n") + for _, query := range querys { + if !strings.HasPrefix(query, "/*") && query != "" { + if overwrite { + dropQuery := fmt.Sprintf("DROP TABLE IF EXISTS `%s`.`%s`", db, name) + err = conn.Execute(dropQuery) + AssertNil(err) + } + err = conn.Execute(query) + AssertNil(err) + } + } + log.Info("restoring.schema[%s]", name) + } +} + +func 
restoreTable(log *xlog.Log, table string, conn *Connection) int { + bytes := 0 + part := "0" + base := filepath.Base(table) + name := strings.TrimSuffix(base, tableSuffix) + splits := strings.Split(name, ".") + db := splits[0] + tbl := splits[1] + if len(splits) > 2 { + part = splits[2] + } + + log.Info("restoring.tables[%s].parts[%s].thread[%d]", tbl, part, conn.ID) + err := conn.Execute(fmt.Sprintf("USE `%s`", db)) + AssertNil(err) + + data, err := ReadFile(table) + AssertNil(err) + query1 := common.BytesToString(data) + querys := strings.Split(query1, ";\n") + bytes = len(query1) + for _, query := range querys { + if !strings.HasPrefix(query, "/*") && query != "" { + err = conn.Execute(query) + AssertNil(err) + } + } + log.Info("restoring.tables[%s].parts[%s].thread[%d].done...", tbl, part, conn.ID) + return bytes +} + +// Loader used to start the loader worker. +func Loader(log *xlog.Log, args *Args) { + pool, err := NewPool(log, args.Threads, args.Address, args.User, args.Password) + AssertNil(err) + defer pool.Close() + + files := loadFiles(log, args.Outdir) + + // database. + conn := pool.Get() + restoreDatabaseSchema(log, files.databases, conn) + pool.Put(conn) + + // tables. 
+ conn = pool.Get() + restoreTableSchema(log, args.OverwriteTables, files.schemas, conn) + pool.Put(conn) + + // Shuffle the tables + for i := range files.tables { + j := rand.Intn(i + 1) + files.tables[i], files.tables[j] = files.tables[j], files.tables[i] + } + + var wg sync.WaitGroup + var bytes uint64 + t := time.Now() + for _, table := range files.tables { + conn := pool.Get() + wg.Add(1) + go func(conn *Connection, table string) { + defer func() { + wg.Done() + pool.Put(conn) + }() + r := restoreTable(log, table, conn) + atomic.AddUint64(&bytes, uint64(r)) + }(conn, table) + } + + tick := time.NewTicker(time.Millisecond * time.Duration(args.IntervalMs)) + defer tick.Stop() + go func() { + for range tick.C { + diff := time.Since(t).Seconds() + bytes := float64(atomic.LoadUint64(&bytes) / 1024 / 1024) + rates := bytes / diff + log.Info("restoring.allbytes[%vMB].time[%.2fsec].rates[%.2fMB/sec]...", bytes, diff, rates) + } + }() + + wg.Wait() + elapsed := time.Since(t).Seconds() + log.Info("restoring.all.done.cost[%.2fsec].allbytes[%.2fMB].rate[%.2fMB/s]", elapsed, float64(bytes/1024/1024), (float64(bytes/1024/1024) / elapsed)) +} diff --git a/src/vendor/github.com/xelabs/go-mydumper/src/common/loader_test.go b/src/vendor/github.com/xelabs/go-mydumper/src/common/loader_test.go new file mode 100644 index 00000000..a37aba12 --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mydumper/src/common/loader_test.go @@ -0,0 +1,51 @@ +/* + * go-mydumper + * xelabs.org + * + * Copyright (c) XeLabs + * GPL License + * + */ + +package common + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/xelabs/go-mysqlstack/driver" + "github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes" + "github.com/xelabs/go-mysqlstack/xlog" +) + +func TestLoader(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.DEBUG)) + fakedbs := driver.NewTestHandler(log) + server, err := driver.MockMysqlServer(log, fakedbs) + assert.Nil(t, err) + defer server.Close() + 
address := server.Addr() + + // fakedbs. + { + fakedbs.AddQuery("create database if not exists `test`", &sqltypes.Result{}) + fakedbs.AddQuery("create table `t1` (`a` int(11) default null,`b` varchar(100) default null) engine=innodb", &sqltypes.Result{}) + fakedbs.AddQueryPattern("use .*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("insert into .*", &sqltypes.Result{}) + fakedbs.AddQueryPattern("drop table .*", &sqltypes.Result{}) + } + + args := &Args{ + Outdir: "/tmp/dumpertest", + User: "mock", + Password: "mock", + Threads: 16, + Address: address, + IntervalMs: 500, + OverwriteTables: true, + } + // Loader. + { + Loader(log, args) + } +} diff --git a/src/vendor/github.com/xelabs/go-mydumper/src/common/pool.go b/src/vendor/github.com/xelabs/go-mydumper/src/common/pool.go new file mode 100644 index 00000000..ef0a1bd4 --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mydumper/src/common/pool.go @@ -0,0 +1,103 @@ +/* + * go-mydumper + * xelabs.org + * + * Copyright (c) XeLabs + * GPL License + * + */ + +package common + +import ( + "sync" + + "github.com/xelabs/go-mysqlstack/driver" + "github.com/xelabs/go-mysqlstack/xlog" + + "github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes" +) + +// Pool tuple. +type Pool struct { + mu sync.RWMutex + log *xlog.Log + conns chan *Connection +} + +// Connection tuple. +type Connection struct { + ID int + client driver.Conn +} + +// Execute used to executes the query. +func (conn *Connection) Execute(query string) error { + return conn.client.Exec(query) +} + +// Fetch used to fetch the results. +func (conn *Connection) Fetch(query string) (*sqltypes.Result, error) { + return conn.client.FetchAll(query, -1) +} + +// StreamFetch used to the results with streaming. +func (conn *Connection) StreamFetch(query string) (driver.Rows, error) { + return conn.client.Query(query) +} + +// NewPool creates the new pool. 
+func NewPool(log *xlog.Log, cap int, address string, user string, password string) (*Pool, error) { + conns := make(chan *Connection, cap) + for i := 0; i < cap; i++ { + client, err := driver.NewConn(user, password, address, "", "utf8") + if err != nil { + return nil, err + } + conns <- &Connection{ID: i, client: client} + } + + return &Pool{ + log: log, + conns: conns, + }, nil +} + +// Get used to get one connection from the pool. +func (p *Pool) Get() *Connection { + conns := p.getConns() + if conns == nil { + return nil + } + conn := <-conns + return conn +} + +// Put used to put one connection to the pool. +func (p *Pool) Put(conn *Connection) { + p.mu.RLock() + defer p.mu.RUnlock() + + if p.conns == nil { + return + } + p.conns <- conn +} + +// Close used to close the pool and the connections. +func (p *Pool) Close() { + p.mu.Lock() + defer p.mu.Unlock() + + close(p.conns) + for conn := range p.conns { + conn.client.Close() + } + p.conns = nil +} + +func (p *Pool) getConns() chan *Connection { + p.mu.Lock() + defer p.mu.Unlock() + return p.conns +} diff --git a/src/vendor/github.com/xelabs/go-mydumper/src/common/pool_test.go b/src/vendor/github.com/xelabs/go-mydumper/src/common/pool_test.go new file mode 100644 index 00000000..b6fba510 --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mydumper/src/common/pool_test.go @@ -0,0 +1,98 @@ +/* + * go-mydumper + * xelabs.org + * + * Copyright (c) XeLabs + * GPL License + * + */ + +package common + +import ( + "sync" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/xelabs/go-mysqlstack/driver" + "github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes" + "github.com/xelabs/go-mysqlstack/xlog" +) + +func TestPool(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.INFO)) + fakedbs := driver.NewTestHandler(log) + server, err := driver.MockMysqlServer(log, fakedbs) + assert.Nil(t, err) + defer server.Close() + address := server.Addr() + + // fakedbs. 
+ { + fakedbs.AddQueryPattern("select .*", &sqltypes.Result{}) + } + + pool, err := NewPool(log, 8, address, "mock", "mock") + assert.Nil(t, err) + + var wg sync.WaitGroup + ch1 := make(chan struct{}) + ch2 := make(chan struct{}) + { + wg.Add(1) + go func() { + defer wg.Done() + for { + select { + case <-ch1: + return + default: + conn := pool.Get() + err := conn.Execute("select 1") + assert.Nil(t, err) + + _, err = conn.Fetch("select 1") + assert.Nil(t, err) + + _, err = conn.StreamFetch("select 1") + assert.Nil(t, err) + + pool.Put(conn) + } + } + }() + } + + { + wg.Add(1) + go func() { + defer wg.Done() + for { + select { + case <-ch2: + return + default: + conn := pool.Get() + conn.Execute("select 2") + assert.Nil(t, err) + + conn.Fetch("select 2") + assert.Nil(t, err) + + _, err = conn.StreamFetch("select 1") + assert.Nil(t, err) + + pool.Put(conn) + } + } + }() + } + + time.Sleep(time.Second) + close(ch1) + close(ch2) + pool.Close() + + wg.Wait() +} diff --git a/src/vendor/github.com/xelabs/go-mydumper/src/common/streamer.go b/src/vendor/github.com/xelabs/go-mydumper/src/common/streamer.go new file mode 100644 index 00000000..5d7e73a9 --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mydumper/src/common/streamer.go @@ -0,0 +1,198 @@ +/* + * go-mydumper + * xelabs.org + * + * Copyright (c) XeLabs + * GPL License + * + */ + +package common + +import ( + "fmt" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/xelabs/go-mysqlstack/sqlparser" + querypb "github.com/xelabs/go-mysqlstack/sqlparser/depends/query" + "github.com/xelabs/go-mysqlstack/xlog" +) + +func streamDatabaseSchema(log *xlog.Log, db string, todb string, from *Connection, to *Connection) { + err := from.Execute(fmt.Sprintf("USE `%s`", db)) + AssertNil(err) + + query := fmt.Sprintf("CREATE DATABASE IF NOT EXISTS `%s`;", todb) + err = to.Execute(query) + AssertNil(err) + log.Info("streaming.database[%s].schema...", todb) +} + +func streamTableSchema(log *xlog.Log, db string, todb string, 
toengine string, tbl string, overwrite bool, from *Connection, to *Connection) { + qr, err := from.Fetch(fmt.Sprintf("SHOW CREATE TABLE `%s`.`%s`", db, tbl)) + AssertNil(err) + query := qr.Rows[0][1].String() + + if overwrite { + dropQuery := fmt.Sprintf("DROP TABLE IF EXISTS `%s`.`%s`", todb, tbl) + err = to.Execute(dropQuery) + AssertNil(err) + } + + // Rewrite the table engine. + if toengine != "" { + node, err := sqlparser.Parse(query) + AssertNil(err) + if ddl, ok := node.(*sqlparser.DDL); ok { + ddl.TableSpec.Options.Engine = toengine + query = sqlparser.String(ddl) + log.Warning("streaming.schema.engine.rewritten:%v", query) + } + } + err = to.Execute(fmt.Sprintf("USE `%s`", todb)) + AssertNil(err) + + err = to.Execute(query) + AssertNil(err) + log.Info("streaming.table[%s.%s].schema...", todb, tbl) +} + +func streamTable(log *xlog.Log, db string, todb string, tbl string, from *Connection, to *Connection, args *Args) { + var allRows uint64 + var allBytes uint64 + + cursor, err := from.StreamFetch(fmt.Sprintf("SELECT /*backup*/ * FROM `%s`.`%s`", db, tbl)) + AssertNil(err) + + fields := make([]string, 0, 16) + flds := cursor.Fields() + for _, fld := range flds { + fields = append(fields, fmt.Sprintf("`%s`", fld.Name)) + } + + err = to.Execute(fmt.Sprintf("USE `%s`", todb)) + AssertNil(err) + + stmtsize := 0 + rows := make([]string, 0, 256) + for cursor.Next() { + row, err := cursor.RowValues() + AssertNil(err) + + values := make([]string, 0, 16) + for _, v := range row { + if v.Raw() == nil { + values = append(values, "NULL") + } else { + str := v.String() + switch { + case v.IsSigned(), v.IsUnsigned(), v.IsFloat(), v.IsIntegral(), v.Type() == querypb.Type_DECIMAL: + values = append(values, str) + default: + values = append(values, fmt.Sprintf("\"%s\"", EscapeBytes(v.Raw()))) + } + } + } + r := "(" + strings.Join(values, ",") + ")" + rows = append(rows, r) + + allRows++ + stmtsize += len(r) + allBytes += uint64(len(r)) + atomic.AddUint64(&args.Allbytes, 
uint64(len(r))) + atomic.AddUint64(&args.Allrows, 1) + + if stmtsize >= args.StmtSize { + query := fmt.Sprintf("INSERT INTO `%s`(%s) VALUES\n%s", tbl, strings.Join(fields, ","), strings.Join(rows, ",\n")) + err = to.Execute(query) + AssertNil(err) + + rows = rows[:0] + stmtsize = 0 + } + } + + if stmtsize > 0 { + query := fmt.Sprintf("INSERT INTO `%s`(%s) VALUES\n%s", tbl, strings.Join(fields, ","), strings.Join(rows, ",\n")) + err = to.Execute(query) + AssertNil(err) + } + + err = cursor.Close() + AssertNil(err) + log.Info("streaming.table[%s.%s].done.allrows[%v].allbytes[%vMB].thread[%d]...", todb, tbl, allRows, (allBytes / 1024 / 1024), from.ID) +} + +// Streamer used to start the streamer worker. +func Streamer(log *xlog.Log, args *Args) { + var tables []string + var wg sync.WaitGroup + + fromPool, err := NewPool(log, args.Threads, args.Address, args.User, args.Password) + AssertNil(err) + defer fromPool.Close() + + toPool, err := NewPool(log, args.Threads, args.ToAddress, args.ToUser, args.ToPassword) + AssertNil(err) + defer toPool.Close() + + // database. + db := args.Database + todb := args.ToDatabase + if todb == "" { + todb = db + } + toengine := args.ToEngine + + from := fromPool.Get() + to := toPool.Get() + streamDatabaseSchema(log, db, todb, from, to) + + // tables. + t := time.Now() + if args.Table != "" { + tables = strings.Split(args.Table, ",") + } else { + tables = allTables(log, from, args) + } + fromPool.Put(from) + toPool.Put(to) + + // datas. 
+ for _, tbl := range tables { + from := fromPool.Get() + to := toPool.Get() + streamTableSchema(log, db, todb, toengine, tbl, args.OverwriteTables, from, to) + + wg.Add(1) + go func(db string, tbl string, from *Connection, to *Connection, args *Args) { + defer func() { + wg.Done() + fromPool.Put(from) + toPool.Put(to) + }() + log.Info("streaming.table[%s.%s].datas.thread[%d]...", db, tbl, from.ID) + streamTable(log, db, todb, tbl, from, to, args) + log.Info("streaming.table[%s.%s].datas.thread[%d].done...", db, tbl, from.ID) + }(db, tbl, from, to, args) + } + + tick := time.NewTicker(time.Millisecond * time.Duration(args.IntervalMs)) + defer tick.Stop() + go func() { + for range tick.C { + diff := time.Since(t).Seconds() + allbytesMB := float64(atomic.LoadUint64(&args.Allbytes) / 1024 / 1024) + allrows := atomic.LoadUint64(&args.Allrows) + rates := allbytesMB / diff + log.Info("streaming.allbytes[%vMB].allrows[%v].time[%.2fsec].rates[%.2fMB/sec]...", allbytesMB, allrows, diff, rates) + } + }() + + wg.Wait() + elapsed := time.Since(t).Seconds() + log.Info("streaming.all.done.cost[%.2fsec].allrows[%v].allbytes[%v].rate[%.2fMB/s]", elapsed, args.Allrows, args.Allbytes, (float64(args.Allbytes/1024/1024) / elapsed)) +} diff --git a/src/vendor/github.com/xelabs/go-mydumper/src/common/streamer_test.go b/src/vendor/github.com/xelabs/go-mydumper/src/common/streamer_test.go new file mode 100644 index 00000000..81abb0dd --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mydumper/src/common/streamer_test.go @@ -0,0 +1,189 @@ +/* + * go-mydumper + * xelabs.org + * + * Copyright (c) XeLabs + * GPL License + * + */ + +package common + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/xelabs/go-mysqlstack/driver" + querypb "github.com/xelabs/go-mysqlstack/sqlparser/depends/query" + "github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes" + "github.com/xelabs/go-mysqlstack/xlog" +) + +func TestStreamer(t *testing.T) { + log := 
xlog.NewStdLog(xlog.Level(xlog.INFO)) + fromFakedbs := driver.NewTestHandler(log) + toFakedbs := driver.NewTestHandler(log) + + fromSvr, err := driver.MockMysqlServer(log, fromFakedbs) + assert.Nil(t, err) + defer fromSvr.Close() + fromAddr := fromSvr.Addr() + + toSvr, err := driver.MockMysqlServer(log, toFakedbs) + assert.Nil(t, err) + defer toSvr.Close() + toAddr := toSvr.Addr() + + selectResult := &sqltypes.Result{ + Fields: []*querypb.Field{ + { + Name: "id", + Type: querypb.Type_INT32, + }, + { + Name: "name", + Type: querypb.Type_VARCHAR, + }, + { + Name: "namei1", + Type: querypb.Type_VARCHAR, + }, + { + Name: "null", + Type: querypb.Type_NULL_TYPE, + }, + { + Name: "decimal", + Type: querypb.Type_DECIMAL, + }, + { + Name: "datetime", + Type: querypb.Type_DATETIME, + }, + }, + Rows: make([][]sqltypes.Value, 0, 256)} + + for i := 0; i < 201710; i++ { + row := []sqltypes.Value{ + sqltypes.MakeTrusted(querypb.Type_INT32, []byte("11")), + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte("11\"xx\"")), + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte("")), + sqltypes.MakeTrusted(querypb.Type_NULL_TYPE, nil), + sqltypes.MakeTrusted(querypb.Type_DECIMAL, []byte("210.01")), + sqltypes.NULL, + } + selectResult.Rows = append(selectResult.Rows, row) + } + + schemaResult := &sqltypes.Result{ + Fields: []*querypb.Field{ + { + Name: "Table", + Type: querypb.Type_VARCHAR, + }, + { + Name: "Create Table", + Type: querypb.Type_VARCHAR, + }, + }, + Rows: [][]sqltypes.Value{ + { + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte("t1")), + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte("CREATE TABLE `t1` (`a` int(11) DEFAULT NULL,`b` varchar(100) DEFAULT NULL) ENGINE=InnoDB")), + }, + }} + + tablesResult := &sqltypes.Result{ + Fields: []*querypb.Field{ + { + Name: "Tables_in_test", + Type: querypb.Type_VARCHAR, + }, + }, + Rows: [][]sqltypes.Value{ + { + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte("t1")), + }, + { + sqltypes.MakeTrusted(querypb.Type_VARCHAR, 
[]byte("t2")), + }, + }} + + // fakedbs. + { + fromFakedbs.AddQueryPattern("USE `test`", &sqltypes.Result{}) + fromFakedbs.AddQueryPattern("SHOW CREATE TABLE `test`..*", schemaResult) + fromFakedbs.AddQueryPattern("SHOW TABLES FROM `test`", tablesResult) + fromFakedbs.AddQueryPattern("SELECT .*", selectResult) + + toFakedbs.AddQueryPattern("USE `test`", &sqltypes.Result{}) + toFakedbs.AddQueryPattern("CREATE DATABASE IF NOT EXISTS `test`", &sqltypes.Result{}) + toFakedbs.AddQueryPattern("CREATE TABLE .*", &sqltypes.Result{}) + toFakedbs.AddQueryPattern("INSERT INTO .*", &sqltypes.Result{}) + toFakedbs.AddQueryPattern("DROP TABLE .*", &sqltypes.Result{}) + + // To Database. + toFakedbs.AddQueryPattern("USE `totest`", &sqltypes.Result{}) + toFakedbs.AddQueryPattern("CREATE DATABASE IF NOT EXISTS `totest`", &sqltypes.Result{}) + } + + // Streamer. + { + args := &Args{ + Database: "test", + User: "mock", + Password: "mock", + Address: fromAddr, + ToUser: "mock", + ToPassword: "mock", + ToAddress: toAddr, + ChunksizeInMB: 1, + Threads: 16, + StmtSize: 10000, + IntervalMs: 500, + OverwriteTables: true, + } + Streamer(log, args) + } + + // Streamer with 2db. + { + args := &Args{ + Database: "test", + User: "mock", + Password: "mock", + Address: fromAddr, + ToDatabase: "totest", + ToUser: "mock", + ToPassword: "mock", + ToAddress: toAddr, + ChunksizeInMB: 1, + Threads: 16, + StmtSize: 10000, + IntervalMs: 500, + OverwriteTables: true, + } + Streamer(log, args) + } + + // Streamer with toengine. 
+ { + args := &Args{ + Database: "test", + User: "mock", + Password: "mock", + Address: fromAddr, + ToDatabase: "totest", + ToEngine: "tokudb", + ToUser: "mock", + ToPassword: "mock", + ToAddress: toAddr, + ChunksizeInMB: 1, + Threads: 16, + StmtSize: 10000, + IntervalMs: 500, + OverwriteTables: true, + } + Streamer(log, args) + } +} diff --git a/src/vendor/github.com/xelabs/go-mydumper/src/mydumper/main.go b/src/vendor/github.com/xelabs/go-mydumper/src/mydumper/main.go new file mode 100644 index 00000000..fed46c28 --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mydumper/src/mydumper/main.go @@ -0,0 +1,74 @@ +/* + * go-mydumper + * xelabs.org + * + * Copyright (c) XeLabs + * GPL License + * + */ + +package main + +import ( + "common" + "flag" + "fmt" + "os" + + "github.com/xelabs/go-mysqlstack/xlog" +) + +var ( + flagChunksize, flagThreads, flagPort, flagStmtSize int + flagUser, flagPasswd, flagHost, flagDb, flagTable, flagDir string + + log = xlog.NewStdLog(xlog.Level(xlog.INFO)) +) + +func init() { + flag.StringVar(&flagUser, "u", "", "Username with privileges to run the dump") + flag.StringVar(&flagPasswd, "p", "", "User password") + flag.StringVar(&flagHost, "h", "", "The host to connect to") + flag.IntVar(&flagPort, "P", 3306, "TCP/IP port to connect to") + flag.StringVar(&flagDb, "db", "", "Database to dump") + flag.StringVar(&flagTable, "table", "", "Table to dump") + flag.StringVar(&flagDir, "o", "", "Directory to output files to") + flag.IntVar(&flagChunksize, "F", 128, "Split tables into chunks of this output file size. 
This value is in MB") + flag.IntVar(&flagThreads, "t", 16, "Number of threads to use") + flag.IntVar(&flagStmtSize, "s", 1000000, "Attempted size of INSERT statement in bytes") +} + +func usage() { + fmt.Println("Usage: " + os.Args[0] + " -h [HOST] -P [PORT] -u [USER] -p [PASSWORD] -db [DATABASE] -o [OUTDIR]") + flag.PrintDefaults() +} + +func main() { + flag.Usage = func() { usage() } + flag.Parse() + + if flagHost == "" || flagUser == "" || flagPasswd == "" || flagDb == "" { + usage() + os.Exit(0) + } + + if _, err := os.Stat(flagDir); os.IsNotExist(err) { + x := os.MkdirAll(flagDir, 0777) + common.AssertNil(x) + } + + args := &common.Args{ + User: flagUser, + Password: flagPasswd, + Address: fmt.Sprintf("%s:%d", flagHost, flagPort), + Database: flagDb, + Table: flagTable, + Outdir: flagDir, + ChunksizeInMB: flagChunksize, + Threads: flagThreads, + StmtSize: flagStmtSize, + IntervalMs: 10 * 1000, + } + + common.Dumper(log, args) +} diff --git a/src/vendor/github.com/xelabs/go-mydumper/src/myloader/main.go b/src/vendor/github.com/xelabs/go-mydumper/src/myloader/main.go new file mode 100644 index 00000000..ce5cde11 --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mydumper/src/myloader/main.go @@ -0,0 +1,63 @@ +/* + * go-mydumper + * xelabs.org + * + * Copyright (c) XeLabs + * GPL License + * + */ + +package main + +import ( + "common" + "flag" + "fmt" + "os" + + "github.com/xelabs/go-mysqlstack/xlog" +) + +var ( + flagOverwriteTables bool + flagPort, flagThreads int + flagUser, flagPasswd, flagHost, flagDir string + + log = xlog.NewStdLog(xlog.Level(xlog.INFO)) +) + +func init() { + flag.StringVar(&flagUser, "u", "", "Username with privileges to run the loader") + flag.StringVar(&flagPasswd, "p", "", "User password") + flag.StringVar(&flagHost, "h", "", "The host to connect to") + flag.IntVar(&flagPort, "P", 3306, "TCP/IP port to connect to") + flag.StringVar(&flagDir, "d", "", "Directory of the dump to import") + flag.IntVar(&flagThreads, "t", 16, "Number of 
threads to use") + flag.BoolVar(&flagOverwriteTables, "o", false, "Drop tables if they already exist") +} + +func usage() { + fmt.Println("Usage: " + os.Args[0] + " -h [HOST] -P [PORT] -u [USER] -p [PASSWORD] -d [DIR] [-o]") + flag.PrintDefaults() +} + +func main() { + flag.Usage = func() { usage() } + flag.Parse() + + if flagHost == "" || flagUser == "" || flagPasswd == "" || flagDir == "" { + usage() + os.Exit(0) + } + + args := &common.Args{ + User: flagUser, + Password: flagPasswd, + Address: fmt.Sprintf("%s:%d", flagHost, flagPort), + Outdir: flagDir, + Threads: flagThreads, + IntervalMs: 10 * 1000, + OverwriteTables: flagOverwriteTables, + } + common.Loader(log, args) +} diff --git a/src/vendor/github.com/xelabs/go-mydumper/src/mystreamer/main.go b/src/vendor/github.com/xelabs/go-mydumper/src/mystreamer/main.go new file mode 100644 index 00000000..9b285a5b --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mydumper/src/mystreamer/main.go @@ -0,0 +1,78 @@ +/* + * go-mydumper + * xelabs.org + * + * Copyright (c) XeLabs + * GPL License + * + */ + +package main + +import ( + "common" + "flag" + "fmt" + "os" + + "github.com/xelabs/go-mysqlstack/xlog" +) + +var ( + flagOverwriteTables bool + flagThreads, flagPort, flag2port, flagStmtSize int + flagUser, flagPasswd, flagHost, flag2user, flag2passwd, flag2host, flagDb, flag2Db, flag2Engine, flagTable string + + log = xlog.NewStdLog(xlog.Level(xlog.INFO)) +) + +func init() { + flag.StringVar(&flagUser, "u", "", "Upstream username with privileges to run the streamer") + flag.StringVar(&flagPasswd, "p", "", "Upstream user password") + flag.StringVar(&flagHost, "h", "", "The upstream host to connect to") + flag.IntVar(&flagPort, "P", 3306, "Upstream TCP/IP port to connect to") + flag.StringVar(&flag2user, "2u", "", "Downstream username with privileges to run the streamer") + flag.StringVar(&flag2passwd, "2p", "", "Downstream user password") + flag.StringVar(&flag2host, "2h", "", "The downstream host to connect to") + 
flag.IntVar(&flag2port, "2P", 3306, "Downstream TCP/IP port to connect to") + flag.StringVar(&flag2Db, "2db", "", "Downstream database, default is same as upstream db") + flag.StringVar(&flag2Engine, "2engine", "", "Downstream table engine") + flag.StringVar(&flagDb, "db", "", "Database to stream") + flag.StringVar(&flagTable, "table", "", "Table to stream") + flag.IntVar(&flagThreads, "t", 16, "Number of threads to use") + flag.IntVar(&flagStmtSize, "s", 1000000, "Attempted size of INSERT statement in bytes") + flag.BoolVar(&flagOverwriteTables, "o", false, "Drop tables if they already exist") +} + +func usage() { + fmt.Println("Usage: " + os.Args[0] + " -h [HOST] -P [PORT] -u [USER] -p [PASSWORD] -db [DATABASE] -2h [DOWNSTREAM-HOST] -2P [DOWNSTREAM-PORT] -2u [DOWNSTREAM-USER] -2p [DOWNSTREAM-PASSWORD] [-2db DOWNSTREAM-DATABASE] [-o]") + flag.PrintDefaults() +} + +func main() { + flag.Usage = func() { usage() } + flag.Parse() + + if flagHost == "" || flagUser == "" || flagPasswd == "" || flagDb == "" || flag2host == "" || flag2user == "" || flag2passwd == "" { + usage() + os.Exit(0) + } + + args := &common.Args{ + User: flagUser, + Password: flagPasswd, + Address: fmt.Sprintf("%s:%d", flagHost, flagPort), + ToUser: flag2user, + ToPassword: flag2passwd, + ToAddress: fmt.Sprintf("%s:%d", flag2host, flag2port), + Database: flagDb, + ToDatabase: flag2Db, + ToEngine: flag2Engine, + Table: flagTable, + Threads: flagThreads, + StmtSize: flagStmtSize, + IntervalMs: 10 * 1000, + OverwriteTables: flagOverwriteTables, + } + common.Streamer(log, args) +} diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/.gitignore b/src/vendor/github.com/xelabs/go-mysqlstack/.gitignore new file mode 100644 index 00000000..e660fd93 --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/.gitignore @@ -0,0 +1 @@ +bin/ diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/.travis.yml b/src/vendor/github.com/xelabs/go-mysqlstack/.travis.yml new file mode 100644 index 
00000000..159ce7f4 --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/.travis.yml @@ -0,0 +1,16 @@ +language: go +sudo: required +go: + - 1.6 + +before_install: + - go get github.com/pierrre/gotestcover + - go get github.com/stretchr/testify/assert + +script: + - make test + - make coverage + +after_success: + # send coverage reports to Codecov + - bash <(curl -s https://codecov.io/bash) diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/LICENSE b/src/vendor/github.com/xelabs/go-mysqlstack/LICENSE new file mode 100644 index 00000000..94a9ed02 --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. 
+ + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. 
+States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. 
+ + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. 
However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. 
+ + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. 
+ + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. 
+ + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (C) + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. 
diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/README.md b/src/vendor/github.com/xelabs/go-mysqlstack/README.md new file mode 100644 index 00000000..bcd3aed0 --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/README.md @@ -0,0 +1,41 @@ +[![Build Status](https://travis-ci.org/xelabs/go-mysqlstack.png)](https://travis-ci.org/xelabs/go-mysqlstack) [![Go Report Card](https://goreportcard.com/badge/github.com/xelabs/go-mysqlstack)](https://goreportcard.com/report/github.com/xelabs/go-mysqlstack) [![codecov.io](https://codecov.io/gh/xelabs/go-mysqlstack/graphs/badge.svg)](https://codecov.io/gh/xelabs/go-mysqlstack/branch/master) + +# go-mysqlstack + +***go-mysqlstack*** is an MySQL protocol library implementing in Go (golang). + +Protocol is based on [mysqlproto-go](https://github.com/pubnative/mysqlproto-go) and [go-sql-driver](https://github.com/go-sql-driver/mysql) + +## Running Tests + +``` +$ mkdir src +$ export GOPATH=`pwd` +$ go get -u github.com/xelabs/go-mysqlstack/driver +$ cd src/github.com/xelabs/go-mysqlstack/ +$ make test +``` + +## Examples + +1. ***examples/mysqld.go*** mocks a MySQL server by running: + +``` +$ go run example/mysqld.go + 2018/01/26 16:02:02.304376 mysqld.go:52: [INFO] mysqld.server.start.address[:4407] +``` + +2. ***examples/client.go*** mocks a client and query from the mock MySQL server: + +``` +$ go run example/client.go + 2018/01/26 16:06:10.779340 client.go:32: [INFO] results:[[[10 nice name]]] +``` + +## Status + +go-mysqlstack is production ready. + +## License + +go-mysqlstack is released under the GPLv3. 
See LICENSE diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/common/buffer.go b/src/vendor/github.com/xelabs/go-mysqlstack/common/buffer.go new file mode 100644 index 00000000..f2b60e1a --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/common/buffer.go @@ -0,0 +1,442 @@ +/* + * go-mysqlstack + * xelabs.org + * + * Copyright (c) XeLabs + * GPL License + * + */ + +package common + +import ( + "bytes" + "io" +) + +var ( + // ErrIOEOF used for io.EOF. + ErrIOEOF = io.EOF +) + +// Buffer represents the buffer tuple. +type Buffer struct { + pos int + seek int + cap int + buf []byte +} + +// NewBuffer creates a new buffer. +func NewBuffer(cap int) *Buffer { + return &Buffer{pos: 0, + cap: cap, + buf: make([]byte, cap), + } +} + +// ReadBuffer used to read buffer from datas. +func ReadBuffer(b []byte) *Buffer { + return &Buffer{ + buf: b, + pos: len(b), + } +} + +// Reset used to reset a buffer. +func (b *Buffer) Reset(data []byte) { + b.buf = data + b.pos = len(data) + b.seek = 0 +} + +// Datas returns the datas of the buffer. +func (b *Buffer) Datas() []byte { + return b.buf[:b.pos] +} + +// Length returns the last position of the buffer. +func (b *Buffer) Length() int { + return b.pos +} + +// Seek returns the seek position of the buffer. +func (b *Buffer) Seek() int { + return b.seek +} + +func (b *Buffer) extend(n int) { + if (b.pos + n) > b.cap { + // allocate double what's needed, for future growth + b.cap = (b.pos + n) * 2 + t := make([]byte, b.cap) + copy(t, b.buf) + b.buf = t + } +} + +// WriteU8 used to write uint8. +func (b *Buffer) WriteU8(v uint8) { + b.extend(1) + b.buf[b.pos] = v + b.pos++ +} + +// ReadU8 used read uint8. +func (b *Buffer) ReadU8() (v uint8, err error) { + if (b.seek + 1) > b.pos { + err = ErrIOEOF + return + } + + v = uint8(b.buf[b.seek]) + b.seek++ + return +} + +// WriteU16 used to write uint16. 
+func (b *Buffer) WriteU16(v uint16) { + b.extend(2) + b.buf[b.pos] = byte(v) + b.buf[b.pos+1] = byte(v >> 8) + b.pos += 2 +} + +// ReadU16 used to read uint16. +func (b *Buffer) ReadU16() (v uint16, err error) { + if (b.seek + 2) > b.pos { + err = ErrIOEOF + return + } + + v = uint16(b.buf[b.seek]) | + uint16(b.buf[b.seek+1])<<8 + b.seek += 2 + return +} + +// WriteU24 used to write uint24. +func (b *Buffer) WriteU24(v uint32) { + b.extend(3) + b.buf[b.pos] = byte(v) + b.buf[b.pos+1] = byte(v >> 8) + b.buf[b.pos+2] = byte(v >> 16) + b.pos += 3 +} + +// ReadU24 used to read uint24. +func (b *Buffer) ReadU24() (v uint32, err error) { + if (b.seek + 3) > b.pos { + err = ErrIOEOF + return + } + + v = uint32(b.buf[b.seek]) | + uint32(b.buf[b.seek+1])<<8 | + uint32(b.buf[b.seek+2])<<16 + b.seek += 3 + return +} + +// WriteU32 used to write uint32. +func (b *Buffer) WriteU32(v uint32) { + b.extend(4) + b.buf[b.pos] = byte(v) + b.buf[b.pos+1] = byte(v >> 8) + b.buf[b.pos+2] = byte(v >> 16) + b.buf[b.pos+3] = byte(v >> 24) + b.pos += 4 +} + +// ReadU32 used to read uint32. +func (b *Buffer) ReadU32() (v uint32, err error) { + if (b.seek + 4) > b.pos { + err = ErrIOEOF + return + } + + v = uint32(b.buf[b.seek]) | + uint32(b.buf[b.seek+1])<<8 | + uint32(b.buf[b.seek+2])<<16 | + uint32(b.buf[b.seek+3])<<24 + b.seek += 4 + return +} + +// WriteU64 used to write uint64. +func (b *Buffer) WriteU64(v uint64) { + b.extend(8) + b.buf[b.pos] = byte(v) + b.buf[b.pos+1] = byte(v >> 8) + b.buf[b.pos+2] = byte(v >> 16) + b.buf[b.pos+3] = byte(v >> 24) + b.buf[b.pos+4] = byte(v >> 32) + b.buf[b.pos+5] = byte(v >> 40) + b.buf[b.pos+6] = byte(v >> 48) + b.buf[b.pos+7] = byte(v >> 56) + b.pos += 8 +} + +// ReadU64 used to read uint64. 
+func (b *Buffer) ReadU64() (v uint64, err error) { + if (b.seek + 8) > b.pos { + err = ErrIOEOF + return + } + + v = uint64(b.buf[b.seek]) | + uint64(b.buf[b.seek+1])<<8 | + uint64(b.buf[b.seek+2])<<16 | + uint64(b.buf[b.seek+3])<<24 | + uint64(b.buf[b.seek+4])<<32 | + uint64(b.buf[b.seek+5])<<40 | + uint64(b.buf[b.seek+6])<<48 | + uint64(b.buf[b.seek+7])<<56 + b.seek += 8 + return +} + +// WriteLenEncode used to write variable length. +// https://dev.mysql.com/doc/internals/en/integer.html#length-encoded-integer +func (b *Buffer) WriteLenEncode(v uint64) { + switch { + case v < 251: + b.WriteU8(uint8(v)) + + case v >= 251 && v < (1<<16): + b.WriteU8(0xfc) + b.WriteU16(uint16(v)) + + case v >= (1<<16) && v < (1<<24): + b.WriteU8(0xfd) + b.WriteU24(uint32(v)) + + default: + b.WriteU8(0xfe) + b.WriteU64(v) + } +} + +// WriteLenEncodeNUL used to write NUL> +// 0xfb is represents a NULL in a ProtocolText::ResultsetRow +func (b *Buffer) WriteLenEncodeNUL() { + b.WriteU8(0xfb) +} + +// ReadLenEncode used to read variable length. +func (b *Buffer) ReadLenEncode() (v uint64, err error) { + var u8 uint8 + var u16 uint16 + var u24 uint32 + + if u8, err = b.ReadU8(); err != nil { + return + } + + switch u8 { + case 0xfb: + // nil value + // we set the length to maxuint64. + v = ^uint64(0) + return + + case 0xfc: + if u16, err = b.ReadU16(); err != nil { + return + } + v = uint64(u16) + return + + case 0xfd: + if u24, err = b.ReadU24(); err != nil { + return + } + v = uint64(u24) + return + + case 0xfe: + if v, err = b.ReadU64(); err != nil { + return + } + return + + default: + return uint64(u8), nil + } +} + +// WriteLenEncodeString used to write variable string. +func (b *Buffer) WriteLenEncodeString(s string) { + l := len(s) + b.WriteLenEncode(uint64(l)) + b.WriteString(s) +} + +// ReadLenEncodeString used to read variable string. 
+func (b *Buffer) ReadLenEncodeString() (s string, err error) { + var l uint64 + + if l, err = b.ReadLenEncode(); err != nil { + return + } + + if s, err = b.ReadString(int(l)); err != nil { + return + } + return +} + +// WriteLenEncodeBytes used to write variable bytes. +func (b *Buffer) WriteLenEncodeBytes(v []byte) { + l := len(v) + b.WriteLenEncode(uint64(l)) + b.WriteBytes(v) +} + +// ReadLenEncodeBytes used to read variable bytes. +func (b *Buffer) ReadLenEncodeBytes() (v []byte, err error) { + var l uint64 + + if l, err = b.ReadLenEncode(); err != nil { + return + } + + // nil value. + if l == ^uint64(0) { + return + } + + if l == 0 { + return []byte{}, nil + } + if v, err = b.ReadBytes(int(l)); err != nil { + return + } + return +} + +// WriteEOF used to write EOF. +func (b *Buffer) WriteEOF(n int) { + b.extend(n) + for i := 0; i < n; i++ { + b.buf[b.pos] = 0xfe + b.pos++ + } +} + +// ReadEOF used to read EOF. +func (b *Buffer) ReadEOF(n int) (err error) { + return b.ReadZero(n) +} + +// WriteZero used to write zero. +func (b *Buffer) WriteZero(n int) { + b.extend(n) + for i := 0; i < n; i++ { + b.buf[b.pos] = 0 + b.pos++ + } +} + +// ReadZero used to read zero. +func (b *Buffer) ReadZero(n int) (err error) { + if (b.seek + n) > b.pos { + err = ErrIOEOF + return + } + b.seek += n + return +} + +// WriteString used to write string. +func (b *Buffer) WriteString(s string) { + n := len(s) + b.extend(n) + copy(b.buf[b.pos:], s) + b.pos += n +} + +// ReadString used to read string. 
+func (b *Buffer) ReadString(n int) (s string, err error) { + if (b.seek + n) > b.pos { + err = ErrIOEOF + return + } + + s = string(b.buf[b.seek:(b.seek + n)]) + b.seek += n + return +} + +// ReadStringNUL reads until the first NUL in the buffer +// returning a string containing the data up to and not including the NUL +func (b *Buffer) ReadStringNUL() (s string, err error) { + var v []byte + + if v, err = b.readBytesWithToken(0x00); err != nil { + return + } + s = string(v) + return +} + +// ReadStringEOF reads until the first EOF in the buffer +// returning a string containing the data up to and not including the EOF +func (b *Buffer) ReadStringEOF() (s string, err error) { + var v []byte + + if v, err = b.readBytesWithToken(0xfe); err != nil { + return + } + s = string(v) + return +} + +// ReadBytesNUL reads until the first NUL in the buffer +// returning a byte slice containing the data up to and not including the NUL +func (b *Buffer) ReadBytesNUL() (v []byte, err error) { + return b.readBytesWithToken(0x00) +} + +// ReadBytesEOF reads until the first EOF in the buffer +// returning a byte slice containing the data up to and not including the EOF +func (b *Buffer) ReadBytesEOF() (v []byte, err error) { + return b.readBytesWithToken(0xfe) +} + +func (b *Buffer) readBytesWithToken(token uint8) (v []byte, err error) { + i := bytes.IndexByte(b.buf[b.seek:], token) + end := b.seek + i + 1 + if i < 0 { + b.seek = len(b.buf) + err = ErrIOEOF + return + } + v = b.buf[b.seek : end-1] + b.seek = end + return +} + +// WriteBytes used to write bytes. +func (b *Buffer) WriteBytes(bs []byte) { + n := len(bs) + b.extend(n) + copy(b.buf[b.pos:], bs) + b.pos += n +} + +// ReadBytes used to read bytes. 
+func (b *Buffer) ReadBytes(n int) (v []byte, err error) { + if n == 0 { + return nil, nil + } + + if (b.seek + n) > b.pos { + err = ErrIOEOF + return + } + + v = b.buf[b.seek:(b.seek + n)] + b.seek += n + return +} diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/common/buffer_test.go b/src/vendor/github.com/xelabs/go-mysqlstack/common/buffer_test.go new file mode 100644 index 00000000..4717884a --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/common/buffer_test.go @@ -0,0 +1,503 @@ +/* + * go-mysqlstack + * xelabs.org + * + * Copyright (c) XeLabs + * GPL License + * + */ + +package common + +import ( + "github.com/stretchr/testify/assert" + "io" + "testing" +) + +func TestBuffer(t *testing.T) { + writer := NewBuffer(6) + writer.WriteU32(22222232) + writer.WriteU32(31) + writer.WriteU32(30) + writer.WriteU8(208) + writer.WriteU16(65535) + writer.WriteBytes([]byte{1, 2, 3, 4, 5}) + writer.WriteZero(3) + writer.WriteString("abc") + writer.WriteEOF(1) + writer.WriteString("xyz") + writer.WriteEOF(2) + writer.WriteU24(1024) + + { + want := (4 + 4 + 4 + 1 + 2 + 5 + 3 + 3 + 1 + 3 + 2 + 3) + got := writer.Length() + assert.Equal(t, want, got) + } + + { + want := uint32(22222232) + got, _ := writer.ReadU32() + assert.Equal(t, want, got) + } + + { + want := uint32(31) + got, _ := writer.ReadU32() + assert.Equal(t, want, got) + } + + { + want := uint32(30) + got, _ := writer.ReadU32() + assert.Equal(t, want, got) + } + + { + want := uint8(208) + got, _ := writer.ReadU8() + assert.Equal(t, want, got) + } + + { + want := uint16(65535) + got, _ := writer.ReadU16() + assert.Equal(t, want, got) + } + + { + want := []byte{1, 2, 3, 4, 5} + got, _ := writer.ReadBytes(5) + assert.Equal(t, want, got) + } + + { + writer.ReadZero(3) + } + + { + want := "abc" + got, _ := writer.ReadString(3) + assert.Equal(t, want, got) + } + + { + writer.ReadEOF(1) + } + + { + want := "xyz" + got, _ := writer.ReadStringEOF() + assert.Equal(t, want, got) + } + + { + 
writer.ReadEOF(1) + } + + { + want := uint32(1024) + got, _ := writer.ReadU24() + assert.Equal(t, want, got) + } + + { + want := writer.Length() + got := writer.Seek() + assert.Equal(t, want, got) + } + +} + +func TestBufferDatas(t *testing.T) { + writer := NewBuffer(100) + writer.WriteU32(22222232) + writer.WriteString("abc") + writer.WriteZero(2) + + { + want := len(writer.Datas()) + got := writer.Length() + assert.Equal(t, want, got) + } + + { + want := []byte{152, 21, 83, 1, 97, 98, 99, 0, 0} + got := writer.Datas() + assert.Equal(t, want, got) + } +} + +func TestBufferRead(t *testing.T) { + data := []byte{152, 21, 83, 1, 97, 98, 99, 0, 0} + writer := ReadBuffer(data) + { + want := uint32(22222232) + got, _ := writer.ReadU32() + assert.Equal(t, want, got) + } + + { + want := "abc" + got, _ := writer.ReadString(3) + assert.Equal(t, want, got) + } +} + +func TestBufferReadError(t *testing.T) { + { + data := []byte{152} + writer := ReadBuffer(data) + _, err := writer.ReadU8() + assert.Nil(t, err) + } + + { + data := []byte{152} + writer := ReadBuffer(data) + want := io.EOF + _, got := writer.ReadU16() + assert.Equal(t, want.Error(), got.Error()) + } + + { + data := []byte{152, 154} + writer := ReadBuffer(data) + want := io.EOF + _, got := writer.ReadU24() + assert.Equal(t, want.Error(), got.Error()) + } + + { + data := []byte{152, 154, 155} + writer := ReadBuffer(data) + want := io.EOF + _, got := writer.ReadU32() + assert.Equal(t, want.Error(), got.Error()) + } + + { + data := []byte{152, 154, 155} + writer := ReadBuffer(data) + want := io.EOF + got := writer.ReadZero(4) + assert.Equal(t, want.Error(), got.Error()) + } + + { + data := []byte{152, 154, 155} + writer := ReadBuffer(data) + want := io.EOF + _, got := writer.ReadString(4) + assert.Equal(t, want.Error(), got.Error()) + } + + { + data := []byte{152, 154, 155} + writer := ReadBuffer(data) + want := io.EOF + _, got := writer.ReadStringNUL() + assert.Equal(t, want.Error(), got.Error()) + } + + { + data := 
[]byte{152, 154, 155} + writer := ReadBuffer(data) + want := io.EOF + _, got := writer.ReadBytes(4) + assert.Equal(t, want.Error(), got.Error()) + } +} + +func TestBufferReadString(t *testing.T) { + data := []byte{ + 0x98, 0x15, 0x53, 0x01, 0x61, 0x62, 0x63, 0xff, + 0xff, 0x61, 0x62, 0x63, 0x00, 0x00, 0xff, 0xff} + writer := ReadBuffer(data) + + { + want := 0 + got := writer.seek + assert.Equal(t, want, got) + } + + { + want := 16 + got := writer.pos + assert.Equal(t, want, got) + } + + { + want := 16 + got := writer.pos + assert.Equal(t, want, got) + } + + { + want := uint32(22222232) + got, _ := writer.ReadU32() + assert.Equal(t, want, got) + } + + { + want := "abc" + got, _ := writer.ReadString(3) + assert.Equal(t, want, got) + } + + { + want := uint16(65535) + got, _ := writer.ReadU16() + assert.Equal(t, want, got) + } + + { + want := "abc" + got, _ := writer.ReadStringNUL() + assert.Equal(t, want, got) + } + + { + want := 13 + got := writer.seek + assert.Equal(t, want, got) + writer.ReadZero(1) + } + + // here, we inject a ReadStringWithNUL + // we never got it since here is ReadU16() + { + + want := "EOF" + _, err := writer.ReadStringNUL() + got := err.Error() + assert.Equal(t, want, got) + } + + { + want := 16 + got := writer.seek + assert.Equal(t, want, got) + } +} + +func TestBufferLenEncode(t *testing.T) { + writer := NewBuffer(6) + + { + v := uint64(250) + writer.WriteLenEncode(v) + } + + { + v := uint64(252) + writer.WriteLenEncode(v) + } + + { + v := uint64(1 << 16) + writer.WriteLenEncode(v) + } + + { + writer.WriteLenEncodeNUL() + } + + { + v := uint64(1 << 24) + writer.WriteLenEncode(v) + } + + { + v := uint64(1<<24 + 1) + writer.WriteLenEncode(v) + } + + { + v := uint64(0) + writer.WriteLenEncode(v) + } + + read := ReadBuffer(writer.Datas()) + + { + v, err := read.ReadLenEncode() + assert.Nil(t, err) + assert.Equal(t, v, uint64(250)) + } + + { + v, err := read.ReadLenEncode() + assert.Nil(t, err) + assert.Equal(t, v, uint64(252)) + } + + { + v, err 
:= read.ReadLenEncode() + assert.Nil(t, err) + assert.Equal(t, v, uint64(1<<16)) + } + + { + v, err := read.ReadLenEncode() + assert.Nil(t, err) + assert.Equal(t, v, ^uint64(0)) + } + + { + v, err := read.ReadLenEncode() + assert.Nil(t, err) + assert.Equal(t, v, uint64(1<<24)) + } + + { + v, err := read.ReadLenEncode() + assert.Nil(t, err) + assert.Equal(t, v, uint64(1<<24+1)) + } + { + v, err := read.ReadLenEncode() + assert.Nil(t, err) + assert.Equal(t, v, uint64(0)) + } + +} + +func TestBufferLenEncodeString(t *testing.T) { + writer := NewBuffer(6) + reader := NewBuffer(6) + + s1 := "BohuTANG" + b1 := []byte{0x01, 0x02} + b11 := []byte{} + b12 := []byte(nil) + { + v := uint64(len(s1)) + writer.WriteLenEncode(v) + writer.WriteString(s1) + writer.WriteLenEncodeString(s1) + writer.WriteLenEncodeBytes(b1) + writer.WriteLenEncodeNUL() + writer.WriteLenEncodeBytes(b11) + reader.Reset(writer.Datas()) + } + + { + got, err := reader.ReadLenEncodeString() + assert.Nil(t, err) + assert.Equal(t, s1, got) + } + + { + got, err := reader.ReadLenEncodeString() + assert.Nil(t, err) + assert.Equal(t, s1, got) + } + + { + got, err := reader.ReadLenEncodeBytes() + assert.Nil(t, err) + assert.Equal(t, b1, got) + } + + { + got, err := reader.ReadLenEncodeBytes() + assert.Nil(t, err) + assert.Equal(t, b12, got) + } + + { + got, err := reader.ReadLenEncodeBytes() + assert.Nil(t, err) + assert.Equal(t, b11, got) + } +} + +func TestBufferNULEOF(t *testing.T) { + writer := NewBuffer(16) + data1 := "BohuTANG" + data2 := "radon" + + { + writer.WriteString(data1) + writer.WriteZero(1) + } + + { + writer.WriteString(data2) + writer.WriteZero(1) + } + + { + writer.WriteString(data1) + writer.WriteEOF(1) + } + + { + writer.WriteString(data2) + writer.WriteEOF(1) + } + + reader := ReadBuffer(writer.Datas()) + { + got, _ := reader.ReadStringNUL() + assert.Equal(t, data1, got) + } + + { + got, _ := reader.ReadBytesNUL() + assert.Equal(t, StringToBytes(data2), got) + } + + { + got, _ := 
reader.ReadStringEOF() + assert.Equal(t, data1, got) + } + + { + got, _ := reader.ReadBytesEOF() + assert.Equal(t, StringToBytes(data2), got) + } +} + +func TestBufferReset(t *testing.T) { + writer := NewBuffer(6) + writer.WriteU32(31) + writer.WriteU32(30) + + { + want := uint32(31) + got, _ := writer.ReadU32() + assert.Equal(t, want, got) + assert.Equal(t, writer.seek, 4) + } + + { + data := []byte{0x00, 0x00, 0x00, 0x01} + writer.Reset(data) + assert.Equal(t, writer.pos, 4) + assert.Equal(t, writer.seek, 0) + } +} + +func TestBufferNUL(t *testing.T) { + writer := NewBuffer(6) + + { + writer.WriteLenEncodeNUL() + got, _ := writer.ReadLenEncodeBytes() + assert.Nil(t, got) + } +} + +func TestWriteBytesNil(t *testing.T) { + writer := NewBuffer(6) + + { + writer.WriteBytes(nil) + reader := ReadBuffer(writer.Datas()) + got, _ := reader.ReadBytes(0) + assert.Nil(t, got) + } +} diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/common/unsafe.go b/src/vendor/github.com/xelabs/go-mysqlstack/common/unsafe.go new file mode 100644 index 00000000..e6b51f2f --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/common/unsafe.go @@ -0,0 +1,39 @@ +/* + * go-mysqlstack + * xelabs.org + * + * Copyright (c) XeLabs + * GPL License + * + */ + +package common + +import ( + "reflect" + "unsafe" +) + +// BytesToString casts slice to string without copy +func BytesToString(b []byte) (s string) { + if len(b) == 0 { + return "" + } + + bh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + sh := reflect.StringHeader{Data: bh.Data, Len: bh.Len} + + return *(*string)(unsafe.Pointer(&sh)) +} + +// StringToBytes casts string to slice without copy +func StringToBytes(s string) []byte { + if len(s) == 0 { + return []byte{} + } + + sh := (*reflect.StringHeader)(unsafe.Pointer(&s)) + bh := reflect.SliceHeader{Data: sh.Data, Len: sh.Len, Cap: sh.Len} + + return *(*[]byte)(unsafe.Pointer(&bh)) +} diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/common/unsafe_test.go 
b/src/vendor/github.com/xelabs/go-mysqlstack/common/unsafe_test.go new file mode 100644 index 00000000..7f1d5b78 --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/common/unsafe_test.go @@ -0,0 +1,53 @@ +/* + * go-mysqlstack + * xelabs.org + * + * Copyright (c) XeLabs + * GPL License + * + */ + +package common + +import ( + "github.com/stretchr/testify/assert" + "testing" +) + +func TestBytesToString(t *testing.T) { + { + bs := []byte{0x61, 0x62} + want := "ab" + got := BytesToString(bs) + assert.Equal(t, want, got) + } + + { + bs := []byte{} + want := "" + got := BytesToString(bs) + assert.Equal(t, want, got) + } +} + +func TestSting(t *testing.T) { + { + want := []byte{0x61, 0x62} + got := StringToBytes("ab") + assert.Equal(t, want, got) + } + + { + want := []byte{} + got := StringToBytes("") + assert.Equal(t, want, got) + } +} + +func TestStingToBytes(t *testing.T) { + { + want := []byte{0x53, 0x45, 0x4c, 0x45, 0x43, 0x54, 0x20, 0x2a, 0x20, 0x46, 0x52, 0x4f, 0x4d, 0x20, 0x74, 0x32} + got := StringToBytes("SELECT * FROM t2") + assert.Equal(t, want, got) + } +} diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/driver/client.go b/src/vendor/github.com/xelabs/go-mysqlstack/driver/client.go new file mode 100644 index 00000000..b6c592b7 --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/driver/client.go @@ -0,0 +1,365 @@ +/* + * go-mysqlstack + * xelabs.org + * + * Copyright (c) XeLabs + * GPL License + * + */ + +package driver + +import ( + "context" + "net" + "strings" + "time" + + "github.com/xelabs/go-mysqlstack/common" + "github.com/xelabs/go-mysqlstack/packet" + "github.com/xelabs/go-mysqlstack/proto" + "github.com/xelabs/go-mysqlstack/sqldb" + + querypb "github.com/xelabs/go-mysqlstack/sqlparser/depends/query" + "github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes" +) + +var _ Conn = &conn{} + +// Conn interface. 
+type Conn interface { + Ping() error + Quit() + Close() error + Closed() bool + Cleanup() + NextPacket() ([]byte, error) + + // ConnectionID is the connection id at greeting. + ConnectionID() uint32 + + InitDB(db string) error + Command(command byte) error + // Query get the row cursor. + Query(sql string) (Rows, error) + Exec(sql string) error + + // FetchAll fetchs all results. + FetchAll(sql string, maxrows int) (*sqltypes.Result, error) + + // FetchAllWithFunc fetchs all results but the row cursor can be interrupted by the fn. + FetchAllWithFunc(sql string, maxrows int, fn Func) (*sqltypes.Result, error) +} + +type conn struct { + netConn net.Conn + auth *proto.Auth + greeting *proto.Greeting + packets *packet.Packets +} + +func (c *conn) handleErrorPacket(data []byte) error { + if data[0] == proto.ERR_PACKET { + return c.packets.ParseERR(data) + } + return nil +} + +func (c *conn) handShake(username, password, database, charset string) error { + var err error + var data []byte + + //Parses the initial handshake from the server. + { + // greeting read + if data, err = c.packets.Next(); err != nil { + return err + } + + // check greeting packet + if err = c.handleErrorPacket(data); err != nil { + return err + } + + // unpack greeting packet + if err = c.greeting.UnPack(data); err != nil { + return err + } + + // check greating Capability + if c.greeting.Capability&sqldb.CLIENT_PROTOCOL_41 == 0 { + err = sqldb.NewSQLError(sqldb.CR_VERSION_ERROR, "cannot connect to servers earlier than 4.1") + return err + } + } + + { + cs, ok := sqldb.CharacterSetMap[strings.ToLower(charset)] + if !ok { + cs = sqldb.CharacterSetUtf8 + } + // auth pack + data := c.auth.Pack( + proto.DefaultClientCapability, + cs, + username, + password, + c.greeting.Salt, + database, + ) + + // auth write + if err = c.packets.Write(data); err != nil { + return err + } + + // clean the authreponse bytes to improve the gc pause. 
+ c.auth.CleanAuthResponse() + } + + { + // read + if data, err = c.packets.Next(); err != nil { + return err + } + + if err = c.handleErrorPacket(data); err != nil { + return err + } + } + return nil +} + +// NewConn used to create a new client connection. +// The timeout is 30 seconds. +func NewConn(username, password, address, database, charset string) (Conn, error) { + var err error + c := &conn{} + timeout := time.Duration(30) * time.Second + if c.netConn, err = net.DialTimeout("tcp", address, timeout); err != nil { + return nil, err + } + + // Set KeepAlive to True and period to 180s. + if tcpConn, ok := c.netConn.(*net.TCPConn); ok { + tcpConn.SetKeepAlive(true) + tcpConn.SetKeepAlivePeriod(time.Second * 180) + c.netConn = tcpConn + } + + defer func() { + if err != nil { + c.Cleanup() + } + }() + // Set timeouts, make the handshake timeout if the underflying connection blocked. + // This timeout only used in handshake, we will disable(set zero time) it at last. + c.netConn.SetReadDeadline(time.Now().Add(timeout)) + defer c.netConn.SetReadDeadline(time.Time{}) + + c.auth = proto.NewAuth() + c.greeting = proto.NewGreeting(0) + c.packets = packet.NewPackets(c.netConn) + if err = c.handShake(username, password, database, charset); err != nil { + return nil, err + } + return c, nil +} + +func (c *conn) query(command byte, sql string) (Rows, error) { + var ok *proto.OK + var myerr, err error + var columns []*querypb.Field + var colNumber int + + // if err != nil means the connection is broken(packet error) + defer func() { + if err != nil { + c.Cleanup() + } + }() + + // Query. + if err = c.packets.WriteCommand(command, common.StringToBytes(sql)); err != nil { + return nil, err + } + + // Read column number. 
+ ok, colNumber, myerr, err = c.packets.ReadComQueryResponse() + if err != nil { + return nil, err + } + if myerr != nil { + return nil, myerr + } + + if colNumber > 0 { + if columns, err = c.packets.ReadColumns(colNumber); err != nil { + return nil, err + } + + // Read EOF. + if (c.greeting.Capability & sqldb.CLIENT_DEPRECATE_EOF) == 0 { + if err = c.packets.ReadEOF(); err != nil { + return nil, err + } + } + } + rows := NewTextRows(c) + rows.rowsAffected = ok.AffectedRows + rows.insertID = ok.LastInsertID + rows.fields = columns + return rows, nil +} + +// ConnectionID is the connection id at greeting +func (c *conn) ConnectionID() uint32 { + return c.greeting.ConnectionID +} + +// Query execute the query and return the row iterator +func (c *conn) Query(sql string) (Rows, error) { + return c.query(sqldb.COM_QUERY, sql) +} + +func (c *conn) Ping() error { + rows, err := c.query(sqldb.COM_PING, "") + if err != nil { + return err + } + return rows.Close() +} + +func (c *conn) InitDB(db string) error { + rows, err := c.query(sqldb.COM_INIT_DB, db) + if err != nil { + return err + } + return rows.Close() +} + +// Exec executes the query and drain the results +func (c *conn) Exec(sql string) error { + rows, err := c.query(sqldb.COM_QUERY, sql) + if err != nil { + return err + } + + if err := rows.Close(); err != nil { + c.Cleanup() + } + return nil +} + +func (c *conn) FetchAll(sql string, maxrows int) (*sqltypes.Result, error) { + return c.FetchAllWithFunc(sql, maxrows, func(rows Rows) error { return nil }) +} + +// Func calls on every rows.Next. +// If func returns error, the row.Next() is interrupted and the error is return. 
+type Func func(rows Rows) error + +func (c *conn) FetchAllWithFunc(sql string, maxrows int, fn Func) (*sqltypes.Result, error) { + var err error + var iRows Rows + var qrRow []sqltypes.Value + var qrRows [][]sqltypes.Value + + if iRows, err = c.query(sqldb.COM_QUERY, sql); err != nil { + return nil, err + } + + for iRows.Next() { + // callback check. + if err = fn(iRows); err != nil { + break + } + + // Max rows check. + if len(qrRows) == maxrows { + break + } + if qrRow, err = iRows.RowValues(); err != nil { + c.Cleanup() + return nil, err + } + if qrRow != nil { + qrRows = append(qrRows, qrRow) + } + } + + // Drain the results and check last error. + if err := iRows.Close(); err != nil { + c.Cleanup() + return nil, err + } + + rowsAffected := iRows.RowsAffected() + if rowsAffected == 0 { + rowsAffected = uint64(len(qrRows)) + } + qr := &sqltypes.Result{ + Fields: iRows.Fields(), + RowsAffected: rowsAffected, + InsertID: iRows.LastInsertID(), + Rows: qrRows, + } + return qr, err +} + +// NextPacket used to get the next packet +func (c *conn) NextPacket() ([]byte, error) { + return c.packets.Next() +} + +func (c *conn) Command(command byte) error { + rows, err := c.query(command, "") + if err != nil { + return err + } + + if err := rows.Close(); err != nil { + c.Cleanup() + } + return nil +} + +func (c *conn) Quit() { + c.packets.WriteCommand(sqldb.COM_QUIT, nil) +} + +func (c *conn) Cleanup() { + if c.netConn != nil { + c.netConn.Close() + c.netConn = nil + } +} + +// Close closes the connection +func (c *conn) Close() error { + if c != nil && c.netConn != nil { + quitCh := make(chan struct{}) + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(5)*time.Second) + defer cancel() + + // First to send quit, if quit timeout force to do cleanup. 
+ go func(c *conn) { + c.Quit() + close(quitCh) + }(c) + + select { + case <-ctx.Done(): + c.Cleanup() + close(quitCh) + case <-quitCh: + c.Cleanup() + } + } + return nil +} + +// Closed checks the connection broken or not +func (c *conn) Closed() bool { + return c.netConn == nil +} diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/driver/client_test.go b/src/vendor/github.com/xelabs/go-mysqlstack/driver/client_test.go new file mode 100644 index 00000000..1622b084 --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/driver/client_test.go @@ -0,0 +1,282 @@ +/* + * go-mysqlstack + * xelabs.org + * + * Copyright (c) XeLabs + * GPL License + * + */ + +package driver + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/assert" + + querypb "github.com/xelabs/go-mysqlstack/sqlparser/depends/query" + "github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes" + "github.com/xelabs/go-mysqlstack/xlog" +) + +func TestClient(t *testing.T) { + result2 := &sqltypes.Result{ + RowsAffected: 123, + InsertID: 123456789, + } + + log := xlog.NewStdLog(xlog.Level(xlog.ERROR)) + th := NewTestHandler(log) + svr, err := MockMysqlServer(log, th) + assert.Nil(t, err) + defer svr.Close() + address := svr.Addr() + + // query + { + + client, err := NewConn("mock", "mock", address, "test", "") + assert.Nil(t, err) + defer client.Close() + + // connection ID + assert.Equal(t, uint32(1), client.ConnectionID()) + + th.AddQuery("SELECT2", result2) + rows, err := client.Query("SELECT2") + assert.Nil(t, err) + + assert.Equal(t, uint64(123), rows.RowsAffected()) + assert.Equal(t, uint64(123456789), rows.LastInsertID()) + } +} + +func TestClientClosed(t *testing.T) { + result2 := &sqltypes.Result{} + + log := xlog.NewStdLog(xlog.Level(xlog.ERROR)) + th := NewTestHandler(log) + svr, err := MockMysqlServer(log, th) + assert.Nil(t, err) + defer svr.Close() + address := svr.Addr() + + { + // create session 1 + client1, err := NewConn("mock", "mock", address, "test", "") + 
assert.Nil(t, err) + + th.AddQuery("SELECT2", result2) + r, err := client1.FetchAll("SELECT2", -1) + assert.Nil(t, err) + assert.Equal(t, result2, r) + + // kill session 1 + client2, err := NewConn("mock", "mock", address, "test", "") + assert.Nil(t, err) + _, err = client2.Query("KILL 1") + assert.Nil(t, err) + + // check client1 connection + err = client1.Ping() + assert.NotNil(t, err) + want := true + got := client1.Closed() + assert.Equal(t, want, got) + } +} + +func TestClientFetchAllWithFunc(t *testing.T) { + result1 := &sqltypes.Result{ + Fields: []*querypb.Field{ + { + Name: "id", + Type: querypb.Type_INT32, + }, + { + Name: "name", + Type: querypb.Type_VARCHAR, + }, + }, + Rows: [][]sqltypes.Value{ + { + sqltypes.MakeTrusted(querypb.Type_INT32, []byte("10")), + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte("nice name")), + }, + { + sqltypes.MakeTrusted(querypb.Type_INT32, []byte("20")), + sqltypes.NULL, + }, + }, + } + + log := xlog.NewStdLog(xlog.Level(xlog.ERROR)) + th := NewTestHandler(log) + svr, err := MockMysqlServer(log, th) + assert.Nil(t, err) + defer svr.Close() + address := svr.Addr() + + // query + { + + client, err := NewConn("mock", "mock", address, "test", "") + assert.Nil(t, err) + defer client.Close() + + th.AddQuery("SELECT2", result1) + checkFunc := func(rows Rows) error { + if rows.Bytes() > 2 { + return errors.New("client.checkFunc.error") + } + return nil + } + _, err = client.FetchAllWithFunc("SELECT2", -1, checkFunc) + want := "client.checkFunc.error" + got := err.Error() + assert.Equal(t, want, got) + } +} + +func TestClientStream(t *testing.T) { + want := &sqltypes.Result{ + Fields: []*querypb.Field{ + { + Name: "id", + Type: querypb.Type_INT32, + }, + { + Name: "name", + Type: querypb.Type_VARCHAR, + }, + }, + Rows: make([][]sqltypes.Value, 0, 256)} + + for i := 0; i < 2017; i++ { + row := []sqltypes.Value{ + sqltypes.MakeTrusted(querypb.Type_INT32, []byte("11")), + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte("1nice 
name")), + } + want.Rows = append(want.Rows, row) + } + + log := xlog.NewStdLog(xlog.Level(xlog.DEBUG)) + th := NewTestHandler(log) + svr, err := MockMysqlServer(log, th) + assert.Nil(t, err) + defer svr.Close() + address := svr.Addr() + + // query + { + client, err := NewConn("mock", "mock", address, "test", "") + assert.Nil(t, err) + defer client.Close() + + th.AddQueryStream("SELECT2", want) + rows, err := client.Query("SELECT2") + assert.Nil(t, err) + + got := &sqltypes.Result{ + Fields: rows.Fields(), + Rows: make([][]sqltypes.Value, 0, 256)} + + for rows.Next() { + row, err := rows.RowValues() + assert.Nil(t, err) + got.Rows = append(got.Rows, row) + } + assert.Equal(t, want, got) + } +} + +func TestMock(t *testing.T) { + result1 := &sqltypes.Result{ + RowsAffected: 123, + InsertID: 123456789, + } + result2 := &sqltypes.Result{ + RowsAffected: 123, + InsertID: 123456789, + } + + log := xlog.NewStdLog(xlog.Level(xlog.DEBUG)) + th := NewTestHandler(log) + svr, err := MockMysqlServer(log, th) + assert.Nil(t, err) + defer svr.Close() + address := svr.Addr() + + { + th.AddQuery("SELECT2", result2) + + client, err := NewConn("mock", "mock", address, "test", "") + assert.Nil(t, err) + defer client.Close() + + // connection ID + assert.Equal(t, uint32(1), client.ConnectionID()) + + rows, err := client.Query("SELECT2") + assert.Nil(t, err) + + assert.Equal(t, uint64(123), rows.RowsAffected()) + assert.Equal(t, uint64(123456789), rows.LastInsertID()) + } + + { + th.AddQueryPattern("SELECT3 .*", result2) + + client, err := NewConn("mock", "mock", address, "test", "") + assert.Nil(t, err) + defer client.Close() + + _, err = client.Query("SELECT3 * from t1") + assert.Nil(t, err) + } + + { + th.AddQueryErrorPattern("SELECT4 .*", errors.New("select4.mock.error")) + + client, err := NewConn("mock", "mock", address, "test", "") + assert.Nil(t, err) + defer client.Close() + + _, err = client.Query("SELECT4 * from t1") + assert.NotNil(t, err) + } + + { + 
th.AddQueryDelay("SELECT5", result2, 10) + + client, err := NewConn("mock", "mock", address, "test", "") + assert.Nil(t, err) + defer client.Close() + + _, err = client.Query("SELECT5") + assert.Nil(t, err) + } + + { + th.AddQuerys("s6", result1, result2) + + client, err := NewConn("mock", "mock", address, "test", "") + assert.Nil(t, err) + defer client.Close() + + _, err = client.Query("s6") + assert.Nil(t, err) + } + + // Query num. + { + got := th.GetQueryCalledNum("SELECT2") + want := 1 + assert.Equal(t, want, got) + } + + th.ResetPatternErrors() + th.ResetErrors() + th.ResetAll() +} diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/driver/mock.go b/src/vendor/github.com/xelabs/go-mysqlstack/driver/mock.go new file mode 100644 index 00000000..26a25b77 --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/driver/mock.go @@ -0,0 +1,399 @@ +/* + * go-mysqlstack + * xelabs.org + * + * Copyright (c) XeLabs + * GPL License + * + */ + +package driver + +import ( + "fmt" + "math/rand" + "regexp" + "strconv" + "strings" + "sync" + "time" + + "github.com/xelabs/go-mysqlstack/sqldb" + "github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes" + "github.com/xelabs/go-mysqlstack/xlog" +) + +func randomPort(min int, max int) int { + rand := rand.New(rand.NewSource(time.Now().UnixNano())) + d, delta := min, (max - min) + if delta > 0 { + d += rand.Intn(int(delta)) + } + return d +} + +type exprResult struct { + expr *regexp.Regexp + result *sqltypes.Result + err error +} + +// CondType used for Condition type. +type CondType int + +const ( + // COND_NORMAL enum. + COND_NORMAL CondType = iota + // COND_DELAY enum. + COND_DELAY + // COND_ERROR enum. + COND_ERROR + // COND_PANIC enum. + COND_PANIC + // COND_STREAM enum. + COND_STREAM +) + +// Cond presents a condition tuple. +type Cond struct { + // Cond type. 
+ Type CondType + + // Query string + Query string + + // Query results + Result *sqltypes.Result + + // Panic or Not + Panic bool + + // Return Error if Error is not nil + Error error + + // Delay(ms) for results return + Delay int +} + +// CondList presents a list of Cond. +type CondList struct { + len int + idx int + conds []Cond +} + +// SessionTuple presents a session tuple. +type SessionTuple struct { + session *Session + closed bool + killed chan bool +} + +// TestHandler is the handler for testing. +type TestHandler struct { + log *xlog.Log + mu sync.RWMutex + conds map[string]*Cond + condList map[string]*CondList + ss map[uint32]*SessionTuple + + // patterns is a list of regexp to results. + patterns []exprResult + patternErrors []exprResult + + // How many times a query was called. + queryCalled map[string]int +} + +// NewTestHandler creates new Handler. +func NewTestHandler(log *xlog.Log) *TestHandler { + return &TestHandler{ + log: log, + ss: make(map[uint32]*SessionTuple), + conds: make(map[string]*Cond), + queryCalled: make(map[string]int), + condList: make(map[string]*CondList), + } +} + +func (th *TestHandler) setCond(cond *Cond) { + th.mu.Lock() + defer th.mu.Unlock() + th.conds[strings.ToLower(cond.Query)] = cond + th.queryCalled[strings.ToLower(cond.Query)] = 0 +} + +// ResetAll resets all querys. +func (th *TestHandler) ResetAll() { + th.mu.Lock() + defer th.mu.Unlock() + for k := range th.conds { + delete(th.conds, k) + } + th.patterns = make([]exprResult, 0, 4) + th.patternErrors = make([]exprResult, 0, 4) +} + +// ResetPatternErrors used to reset all the errors pattern. +func (th *TestHandler) ResetPatternErrors() { + th.patternErrors = make([]exprResult, 0, 4) +} + +// ResetErrors used to reset all the errors. +func (th *TestHandler) ResetErrors() { + for k, v := range th.conds { + if v.Type == COND_ERROR { + delete(th.conds, k) + } + } +} + +// SessionCheck implements the interface. 
+func (th *TestHandler) SessionCheck(s *Session) error { + //th.log.Debug("[%s].coming.db[%s].salt[%v].scramble[%v]", s.Addr(), s.Schema(), s.Salt(), s.Scramble()) + return nil +} + +// AuthCheck implements the interface. +func (th *TestHandler) AuthCheck(s *Session) error { + user := s.User() + if user != "mock" { + return sqldb.NewSQLError(sqldb.ER_ACCESS_DENIED_ERROR, "Access denied for user '%v'", user) + } + return nil +} + +// NewSession implements the interface. +func (th *TestHandler) NewSession(s *Session) { + th.mu.Lock() + defer th.mu.Unlock() + st := &SessionTuple{ + session: s, + killed: make(chan bool, 2), + } + th.ss[s.ID()] = st +} + +// SessionClosed implements the interface. +func (th *TestHandler) SessionClosed(s *Session) { + th.mu.Lock() + defer th.mu.Unlock() + delete(th.ss, s.ID()) +} + +// ComInitDB implements the interface. +func (th *TestHandler) ComInitDB(s *Session, db string) error { + if strings.HasPrefix(db, "xx") { + return fmt.Errorf("mock.cominit.db.error: unkonw database[%s]", db) + } + return nil +} + +// ComQuery implements the interface. 
+func (th *TestHandler) ComQuery(s *Session, query string, callback func(qr *sqltypes.Result) error) error { + log := th.log + query = strings.ToLower(query) + + th.mu.Lock() + th.queryCalled[query]++ + cond := th.conds[query] + sessTuple := th.ss[s.ID()] + th.mu.Unlock() + + if cond != nil { + switch cond.Type { + case COND_DELAY: + log.Debug("test.handler.delay:%s,time:%dms", query, cond.Delay) + select { + case <-sessTuple.killed: + sessTuple.closed = true + return fmt.Errorf("mock.session[%v].query[%s].was.killed", s.ID(), query) + case <-time.After(time.Millisecond * time.Duration(cond.Delay)): + log.Debug("mock.handler.delay.done...") + } + callback(cond.Result) + return nil + case COND_ERROR: + return cond.Error + case COND_PANIC: + log.Panic("mock.handler.panic....") + case COND_NORMAL: + callback(cond.Result) + return nil + case COND_STREAM: + flds := cond.Result.Fields + // Send Fields for stream. + qr := &sqltypes.Result{Fields: flds, State: sqltypes.RStateFields} + if err := callback(qr); err != nil { + return fmt.Errorf("mock.handler.send.stream.error:%+v", err) + } + + // Send Row by row for stream. + for _, row := range cond.Result.Rows { + qr := &sqltypes.Result{Fields: flds, State: sqltypes.RStateRows} + qr.Rows = append(qr.Rows, row) + if err := callback(qr); err != nil { + return fmt.Errorf("mock.handler.send.stream.error:%+v", err) + } + } + + // Send EOF for stream. + qr = &sqltypes.Result{Fields: flds, State: sqltypes.RStateFinished} + if err := callback(qr); err != nil { + return fmt.Errorf("mock.handler.send.stream.error:%+v", err) + } + return nil + } + } + + // kill filter. 
+ if strings.HasPrefix(query, "kill") { + if id, err := strconv.ParseUint(strings.Split(query, " ")[1], 10, 32); err == nil { + th.mu.Lock() + if sessTuple, ok := th.ss[uint32(id)]; ok { + log.Debug("mock.session[%v].to.kill.the.session[%v]...", s.ID(), id) + if !sessTuple.closed { + sessTuple.killed <- true + } + delete(th.ss, uint32(id)) + sessTuple.session.Close() + } + th.mu.Unlock() + } + callback(&sqltypes.Result{}) + return nil + } + + th.mu.Lock() + defer th.mu.Unlock() + // Check query patterns from AddQueryPattern(). + for _, pat := range th.patternErrors { + if pat.expr.MatchString(query) { + return pat.err + } + } + for _, pat := range th.patterns { + if pat.expr.MatchString(query) { + callback(pat.result) + return nil + } + } + + if v, ok := th.condList[query]; ok { + idx := 0 + if v.idx >= v.len { + v.idx = 0 + } else { + idx = v.idx + v.idx++ + } + callback(v.conds[idx].Result) + return nil + } + return fmt.Errorf("mock.handler.query[%v].error[can.not.found.the.cond.please.set.first]", query) +} + +// AddQuery used to add a query and its expected result. +func (th *TestHandler) AddQuery(query string, result *sqltypes.Result) { + th.setCond(&Cond{Type: COND_NORMAL, Query: query, Result: result}) +} + +// AddQuerys used to add new query rule. +func (th *TestHandler) AddQuerys(query string, results ...*sqltypes.Result) { + cl := &CondList{} + for _, r := range results { + cond := Cond{Type: COND_NORMAL, Query: query, Result: r} + cl.conds = append(cl.conds, cond) + cl.len++ + } + th.condList[query] = cl +} + +// AddQueryDelay used to add a query and returns the expected result after delay_ms. +func (th *TestHandler) AddQueryDelay(query string, result *sqltypes.Result, delayMs int) { + th.setCond(&Cond{Type: COND_DELAY, Query: query, Result: result, Delay: delayMs}) +} + +// AddQueryStream used to add a stream query. 
+func (th *TestHandler) AddQueryStream(query string, result *sqltypes.Result) { + th.setCond(&Cond{Type: COND_STREAM, Query: query, Result: result}) +} + +// AddQueryError used to add a query which will be rejected by a error. +func (th *TestHandler) AddQueryError(query string, err error) { + th.setCond(&Cond{Type: COND_ERROR, Query: query, Error: err}) +} + +// AddQueryPanic used to add query but underflying blackhearted. +func (th *TestHandler) AddQueryPanic(query string) { + th.setCond(&Cond{Type: COND_PANIC, Query: query}) +} + +// AddQueryPattern adds an expected result for a set of queries. +// These patterns are checked if no exact matches from AddQuery() are found. +// This function forces the addition of begin/end anchors (^$) and turns on +// case-insensitive matching mode. +// This code was derived from https://github.com/youtube/vitess. +func (th *TestHandler) AddQueryPattern(queryPattern string, expectedResult *sqltypes.Result) { + if len(expectedResult.Rows) > 0 && len(expectedResult.Fields) == 0 { + panic(fmt.Errorf("Please add Fields to this Result so it's valid: %v", queryPattern)) + } + expr := regexp.MustCompile("(?is)^" + queryPattern + "$") + result := *expectedResult + th.mu.Lock() + defer th.mu.Unlock() + th.patterns = append(th.patterns, exprResult{expr, &result, nil}) +} + +// AddQueryErrorPattern used to add an query pattern with errors. +func (th *TestHandler) AddQueryErrorPattern(queryPattern string, err error) { + expr := regexp.MustCompile("(?is)^" + queryPattern + "$") + th.mu.Lock() + defer th.mu.Unlock() + th.patternErrors = append(th.patternErrors, exprResult{expr, nil, err}) +} + +// GetQueryCalledNum returns how many times db executes a certain query. +// This code was derived from https://github.com/youtube/vitess. 
+func (th *TestHandler) GetQueryCalledNum(query string) int { + th.mu.Lock() + defer th.mu.Unlock() + num, ok := th.queryCalled[strings.ToLower(query)] + if !ok { + return 0 + } + return num +} + +// MockMysqlServer creates a new mock mysql server. +func MockMysqlServer(log *xlog.Log, h Handler) (svr *Listener, err error) { + port := randomPort(10000, 20000) + return mockMysqlServer(log, port, h) +} + +// MockMysqlServerWithPort creates a new mock mysql server with port. +func MockMysqlServerWithPort(log *xlog.Log, port int, h Handler) (svr *Listener, err error) { + return mockMysqlServer(log, port, h) +} + +func mockMysqlServer(log *xlog.Log, port int, h Handler) (svr *Listener, err error) { + addr := fmt.Sprintf(":%d", port) + for i := 0; i < 5; i++ { + if svr, err = NewListener(log, addr, h); err != nil { + port = randomPort(5000, 20000) + addr = fmt.Sprintf("127.0.0.1:%d", port) + } else { + break + } + } + if err != nil { + return nil, err + } + + go func() { + svr.Accept() + }() + time.Sleep(100 * time.Millisecond) + log.Debug("mock.server[%v].start...", addr) + return +} diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/driver/rows.go b/src/vendor/github.com/xelabs/go-mysqlstack/driver/rows.go new file mode 100644 index 00000000..707ab229 --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/driver/rows.go @@ -0,0 +1,165 @@ +/* + * go-mysqlstack + * xelabs.org + * + * Copyright (c) XeLabs + * GPL License + * + */ + +package driver + +import ( + "errors" + + "github.com/xelabs/go-mysqlstack/common" + "github.com/xelabs/go-mysqlstack/proto" + + querypb "github.com/xelabs/go-mysqlstack/sqlparser/depends/query" + "github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes" +) + +var _ Rows = &TextRows{} + +// Rows presents row cursor interface. 
+type Rows interface { + Next() bool + Close() error + Datas() []byte + Bytes() int + RowsAffected() uint64 + LastInsertID() uint64 + LastError() error + Fields() []*querypb.Field + RowValues() ([]sqltypes.Value, error) +} + +// TextRows presents row tuple. +type TextRows struct { + c Conn + end bool + err error + data []byte + bytes int + rowsAffected uint64 + insertID uint64 + buffer *common.Buffer + fields []*querypb.Field +} + +// NewTextRows creates TextRows. +func NewTextRows(c Conn) *TextRows { + return &TextRows{ + c: c, + buffer: common.NewBuffer(8), + } +} + +// Next implements the Rows interface. +// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-ProtocolText::ResultsetRow +func (r *TextRows) Next() bool { + defer func() { + if r.err != nil { + r.c.Cleanup() + } + }() + + if r.end { + return false + } + + // if fields count is 0 + // the packet is OK-Packet without Resultset. + if len(r.fields) == 0 { + r.end = true + return false + } + + if r.data, r.err = r.c.NextPacket(); r.err != nil { + r.end = true + return false + } + + switch r.data[0] { + case proto.EOF_PACKET: + // This packet may be one of two kinds: + // - an EOF packet, + // - an OK packet with an EOF header if + // sqldb.CLIENT_DEPRECATE_EOF is set. + r.end = true + return false + + case proto.ERR_PACKET: + r.err = proto.UnPackERR(r.data) + r.end = true + return false + } + r.buffer.Reset(r.data) + return true +} + +// Close drain the rest packets and check the error. +func (r *TextRows) Close() error { + for r.Next() { + } + return r.LastError() +} + +// RowValues implements the Rows interface. 
+// https://dev.mysql.com/doc/internals/en/com-query-response.html#packet-ProtocolText::ResultsetRow +func (r *TextRows) RowValues() ([]sqltypes.Value, error) { + if r.fields == nil { + return nil, errors.New("rows.fields is NIL") + } + + empty := true + colNumber := len(r.fields) + result := make([]sqltypes.Value, colNumber) + for i := 0; i < colNumber; i++ { + v, err := r.buffer.ReadLenEncodeBytes() + if err != nil { + r.c.Cleanup() + return nil, err + } + + if v != nil { + r.bytes += len(v) + result[i] = sqltypes.MakeTrusted(r.fields[i].Type, v) + empty = false + } + } + if empty { + return nil, nil + } + return result, nil +} + +// Datas implements the Rows interface. +func (r *TextRows) Datas() []byte { + return r.buffer.Datas() +} + +// Fields implements the Rows interface. +func (r *TextRows) Fields() []*querypb.Field { + return r.fields +} + +// Bytes returns all the memory usage which read by this row cursor. +func (r *TextRows) Bytes() int { + return r.bytes +} + +// RowsAffected implements the Rows interface. +func (r *TextRows) RowsAffected() uint64 { + return r.rowsAffected +} + +// LastInsertID implements the Rows interface. +func (r *TextRows) LastInsertID() uint64 { + return r.insertID +} + +// LastError implements the Rows interface. 
+func (r *TextRows) LastError() error { + return r.err +} diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/driver/rows_test.go b/src/vendor/github.com/xelabs/go-mysqlstack/driver/rows_test.go new file mode 100644 index 00000000..9f0c8918 --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/driver/rows_test.go @@ -0,0 +1,90 @@ +/* + * go-mysqlstack + * xelabs.org + * + * Copyright (c) XeLabs + * GPL License + * + */ + +package driver + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + querypb "github.com/xelabs/go-mysqlstack/sqlparser/depends/query" + "github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes" + "github.com/xelabs/go-mysqlstack/xlog" +) + +func TestRows(t *testing.T) { + result1 := &sqltypes.Result{ + Fields: []*querypb.Field{ + { + Name: "id", + Type: querypb.Type_INT32, + }, + { + Name: "name", + Type: querypb.Type_VARCHAR, + }, + }, + Rows: [][]sqltypes.Value{ + { + sqltypes.MakeTrusted(querypb.Type_INT32, []byte("10")), + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte("nice name")), + }, + { + sqltypes.MakeTrusted(querypb.Type_INT32, []byte("20")), + sqltypes.NULL, + }, + }, + } + result2 := &sqltypes.Result{ + RowsAffected: 123, + InsertID: 123456789, + } + + log := xlog.NewStdLog(xlog.Level(xlog.ERROR)) + th := NewTestHandler(log) + svr, err := MockMysqlServer(log, th) + assert.Nil(t, err) + defer svr.Close() + address := svr.Addr() + + // query + { + client, err := NewConn("mock", "mock", address, "test", "") + assert.Nil(t, err) + defer client.Close() + + th.AddQuery("SELECT2", result2) + rows, err := client.Query("SELECT2") + assert.Nil(t, err) + + assert.Equal(t, uint64(123), rows.RowsAffected()) + assert.Equal(t, uint64(123456789), rows.LastInsertID()) + } + + // query + { + client, err := NewConn("mock", "mock", address, "test", "") + assert.Nil(t, err) + defer client.Close() + + th.AddQuery("SELECT1", result1) + rows, err := client.Query("SELECT1") + assert.Nil(t, err) + assert.Equal(t, 
result1.Fields, rows.Fields()) + for rows.Next() { + _ = rows.Datas() + _, _ = rows.RowValues() + } + + want := 13 + got := int(rows.Bytes()) + assert.Equal(t, want, got) + } +} diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/driver/server.go b/src/vendor/github.com/xelabs/go-mysqlstack/driver/server.go new file mode 100644 index 00000000..346aec6d --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/driver/server.go @@ -0,0 +1,231 @@ +/* + * go-mysqlstack + * xelabs.org + * + * Copyright (c) XeLabs + * GPL License + * + */ + +package driver + +import ( + "net" + "runtime" + "runtime/debug" + + "github.com/xelabs/go-mysqlstack/common" + "github.com/xelabs/go-mysqlstack/sqldb" + "github.com/xelabs/go-mysqlstack/xlog" + + "github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes" +) + +// Handler interface. +type Handler interface { + // NewSession is called when a session is coming. + NewSession(session *Session) + + // SessionClosed is called when a session exit. + SessionClosed(session *Session) + + // Check the session. + SessionCheck(session *Session) error + + // Check the Auth request. + AuthCheck(session *Session) error + + // Handle the cominitdb. + ComInitDB(session *Session, database string) error + + // Handle the queries. + ComQuery(session *Session, query string, callback func(*sqltypes.Result) error) error +} + +// Listener is a connection handler. +type Listener struct { + // Logger. + log *xlog.Log + + address string + + // Query handler. + handler Handler + + // This is the main listener socket. + listener net.Listener + + // Incrementing ID for connection id. + connectionID uint32 +} + +// NewListener creates a new Listener. 
+func NewListener(log *xlog.Log, address string, handler Handler) (*Listener, error) { + listener, err := net.Listen("tcp", address) + if err != nil { + return nil, err + } + + return &Listener{ + log: log, + address: address, + handler: handler, + listener: listener, + connectionID: 1, + }, nil +} + +// Accept runs an accept loop until the listener is closed. +func (l *Listener) Accept() { + runtime.GOMAXPROCS(runtime.NumCPU()) + for { + conn, err := l.listener.Accept() + if err != nil { + // Close() was probably called. + return + } + ID := l.connectionID + l.connectionID++ + go l.handle(conn, ID) + } +} + +func (l *Listener) parserComInitDB(data []byte) string { + return string(data[1:]) +} + +func (l *Listener) parserComQuery(data []byte) string { + // Trim the right. + data = data[1:] + last := len(data) - 1 + if data[last] == ';' { + data = data[:last] + } + return common.BytesToString(data) +} + +// handle is called in a go routine for each client connection. +func (l *Listener) handle(conn net.Conn, ID uint32) { + var err error + var data []byte + var authPkt []byte + var greetingPkt []byte + log := l.log + + // Catch panics, and close the connection in any case. + defer func() { + conn.Close() + if x := recover(); x != nil { + log.Error("server.handle.panic:\n%v\n%s", x, debug.Stack()) + } + }() + session := newSession(log, ID, conn) + // Session check. + if err = l.handler.SessionCheck(session); err != nil { + log.Warning("session[%v].check.failed.error:%+v", ID, err) + session.writeErrFromError(err) + return + } + + // Session register. + l.handler.NewSession(session) + defer l.handler.SessionClosed(session) + + // Greeting packet. + greetingPkt = session.greeting.Pack() + if err = session.packets.Write(greetingPkt); err != nil { + log.Error("server.write.greeting.packet.error: %v", err) + return + } + + // Auth packet. 
+ if authPkt, err = session.packets.Next(); err != nil { + log.Error("server.read.auth.packet.error: %v", err) + return + } + if err = session.auth.UnPack(authPkt); err != nil { + log.Error("server.unpack.auth.error: %v", err) + return + } + + // Auth check. + if err = l.handler.AuthCheck(session); err != nil { + log.Warning("server.user[%+v].auth.check.failed", session.User()) + session.writeErrFromError(err) + return + } + + // Check the database. + db := session.auth.Database() + if db != "" { + if err = l.handler.ComInitDB(session, db); err != nil { + log.Error("server.cominitdb[%s].error:%+v", db, err) + session.writeErrFromError(err) + return + } + session.SetSchema(db) + } + + if err = session.packets.WriteOK(0, 0, session.greeting.Status(), 0); err != nil { + return + } + + for { + // Reset packet sequence ID. + session.packets.ResetSeq() + if data, err = session.packets.Next(); err != nil { + return + } + + switch data[0] { + case sqldb.COM_QUIT: + return + case sqldb.COM_INIT_DB: + db := l.parserComInitDB(data) + if err = l.handler.ComInitDB(session, db); err != nil { + if werr := session.writeErrFromError(err); werr != nil { + return + } + } else { + session.SetSchema(db) + if err = session.packets.WriteOK(0, 0, session.greeting.Status(), 0); err != nil { + return + } + } + case sqldb.COM_PING: + if err = session.packets.WriteOK(0, 0, session.greeting.Status(), 0); err != nil { + return + } + case sqldb.COM_QUERY: + query := l.parserComQuery(data) + if err = l.handler.ComQuery(session, query, func(qr *sqltypes.Result) error { + return session.writeResult(qr) + }); err != nil { + log.Error("server.handle.query.from.session[%v].error:%+v.query[%s]", ID, err, query) + if werr := session.writeErrFromError(err); werr != nil { + return + } + continue + } + default: + cmd := sqldb.CommandString(data[0]) + log.Error("session.command:%s.not.implemented", cmd) + sqlErr := sqldb.NewSQLError(sqldb.ER_UNKNOWN_ERROR, "command handling not implemented yet: %s", cmd) + 
if err := session.writeErrFromError(sqlErr); err != nil { + return + } + } + // Reset packet sequence ID. + session.packets.ResetSeq() + } +} + +// Addr returns the client address. +func (l *Listener) Addr() string { + return l.address +} + +// Close close the listener and all connections. +func (l *Listener) Close() { + l.listener.Close() +} diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/driver/server_test.go b/src/vendor/github.com/xelabs/go-mysqlstack/driver/server_test.go new file mode 100644 index 00000000..da92e0e2 --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/driver/server_test.go @@ -0,0 +1,245 @@ +/* + * go-mysqlstack + * xelabs.org + * + * Copyright (c) XeLabs + * GPL License + * + */ + +package driver + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/xelabs/go-mysqlstack/sqldb" + "github.com/xelabs/go-mysqlstack/xlog" + + querypb "github.com/xelabs/go-mysqlstack/sqlparser/depends/query" + "github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes" +) + +func TestServer(t *testing.T) { + result1 := &sqltypes.Result{ + RowsAffected: 3, + Fields: []*querypb.Field{ + { + Name: "id", + Type: querypb.Type_INT32, + }, + { + Name: "name", + Type: querypb.Type_VARCHAR, + }, + { + Name: "extra", + Type: querypb.Type_NULL_TYPE, + }, + }, + Rows: [][]sqltypes.Value{ + { + sqltypes.MakeTrusted(querypb.Type_INT32, []byte("10")), + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte("nice name")), + sqltypes.NULL, + }, + { + sqltypes.MakeTrusted(querypb.Type_INT32, []byte("20")), + sqltypes.NULL, + sqltypes.NULL, + }, + { + sqltypes.MakeTrusted(querypb.Type_INT32, []byte("30")), + sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte("")), + sqltypes.NULL, + }, + }, + } + result2 := &sqltypes.Result{} + + log := xlog.NewStdLog(xlog.Level(xlog.ERROR)) + th := NewTestHandler(log) + svr, err := MockMysqlServer(log, th) + assert.Nil(t, err) + defer svr.Close() + address := svr.Addr() + + // query + { + client, err := 
NewConn("mock", "mock", address, "test", "") + assert.Nil(t, err) + defer client.Close() + + th.AddQuery("SELECT1", result1) + _, err = client.Query("SELECT1") + assert.Nil(t, err) + } + + // query1 + { + client, err := NewConn("mock", "mock", address, "test", "") + assert.Nil(t, err) + + th.AddQuery("SELECT2", result2) + _, err = client.Query("SELECT2") + assert.Nil(t, err) + client.Close() + } + + // exec + { + client, err := NewConn("mock", "mock", address, "test", "") + assert.Nil(t, err) + defer client.Close() + + th.AddQuery("SELECT1", result1) + err = client.Exec("SELECT1") + assert.Nil(t, err) + } + + // fetch all + { + client, err := NewConn("mock", "mock", address, "test", "") + assert.Nil(t, err) + defer client.Close() + + th.AddQuery("SELECT1", result1) + r, err := client.FetchAll("SELECT1", -1) + assert.Nil(t, err) + want := result1.Copy() + got := r + assert.Equal(t, want.Rows, got.Rows) + } + + // fetch one + { + client, err := NewConn("mock", "mock", address, "test", "") + assert.Nil(t, err) + + th.AddQuery("SELECT1", result1) + r, err := client.FetchAll("SELECT1", 1) + assert.Nil(t, err) + defer client.Close() + + want := 1 + got := len(r.Rows) + assert.Equal(t, want, got) + } + + // error + { + client, err := NewConn("mock", "mock", address, "test", "") + assert.Nil(t, err) + defer client.Close() + + sqlErr := sqldb.NewSQLError(sqldb.ER_UNKNOWN_ERROR, "query.error") + th.AddQueryError("ERROR1", sqlErr) + err = client.Exec("ERROR1") + assert.NotNil(t, err) + want := "query.error (errno 1105) (sqlstate HY000)" + got := err.Error() + assert.Equal(t, want, got) + } + + // panic + { + client, err := NewConn("mock", "mock", address, "test", "") + assert.Nil(t, err) + defer client.Close() + + th.AddQueryPanic("PANIC") + client.Exec("PANIC") + + want := true + got := client.Closed() + assert.Equal(t, want, got) + } + + // ping + { + client, err := NewConn("mock", "mock", address, "test", "") + assert.Nil(t, err) + err = client.Ping() + assert.Nil(t, err) 
+ } + + // init db + { + client, err := NewConn("mock", "mock", address, "test", "") + assert.Nil(t, err) + err = client.InitDB("test") + assert.Nil(t, err) + } + + // auth denied + { + _, err := NewConn("mockx", "mock", address, "test", "") + want := "Access denied for user 'mockx' (errno 1045) (sqlstate 28000)" + got := err.Error() + assert.Equal(t, want, got) + } +} + +func TestServerSessionClose(t *testing.T) { + result2 := &sqltypes.Result{} + + log := xlog.NewStdLog(xlog.Level(xlog.ERROR)) + th := NewTestHandler(log) + svr, err := MockMysqlServer(log, th) + assert.Nil(t, err) + address := svr.Addr() + + { + // create session 1 + client1, err := NewConn("mock", "mock", address, "test", "") + assert.Nil(t, err) + + th.AddQuery("SELECT2", result2) + r, err := client1.FetchAll("SELECT2", -1) + assert.Nil(t, err) + assert.Equal(t, result2, r) + + // kill session 1 + client2, err := NewConn("mock", "mock", address, "test", "") + assert.Nil(t, err) + _, err = client2.Query("KILL 1") + assert.Nil(t, err) + } +} + +func TestServerComInitDB(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.INFO)) + th := NewTestHandler(log) + svr, err := MockMysqlServer(log, th) + assert.Nil(t, err) + defer svr.Close() + address := svr.Addr() + + // query + { + _, err := NewConn("mock", "mock", address, "xxtest", "") + want := "mock.cominit.db.error: unkonw database[xxtest] (errno 1105) (sqlstate HY000)" + got := err.Error() + assert.Equal(t, want, got) + } +} + +func TestServerUnsupportedCommand(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.ERROR)) + th := NewTestHandler(log) + svr, err := MockMysqlServer(log, th) + assert.Nil(t, err) + defer svr.Close() + address := svr.Addr() + + // query + { + client, err := NewConn("mock", "mock", address, "", "") + assert.Nil(t, err) + defer client.Close() + err = client.Command(sqldb.COM_SLEEP) + want := "command handling not implemented yet: COM_SLEEP (errno 1105) (sqlstate HY000)" + got := err.Error() + assert.Equal(t, want, 
got) + } +} diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/driver/session.go b/src/vendor/github.com/xelabs/go-mysqlstack/driver/session.go new file mode 100644 index 00000000..b0c59250 --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/driver/session.go @@ -0,0 +1,211 @@ +/* + * go-mysqlstack + * xelabs.org + * + * Copyright (c) XeLabs + * GPL License + * + */ + +package driver + +import ( + "fmt" + "net" + "sync" + + "github.com/xelabs/go-mysqlstack/common" + "github.com/xelabs/go-mysqlstack/packet" + "github.com/xelabs/go-mysqlstack/proto" + "github.com/xelabs/go-mysqlstack/sqldb" + "github.com/xelabs/go-mysqlstack/xlog" + + "github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes" +) + +// Session is a client connection with greeting and auth. +type Session struct { + id uint32 + mu sync.RWMutex + log *xlog.Log + conn net.Conn + schema string + auth *proto.Auth + packets *packet.Packets + greeting *proto.Greeting +} + +func newSession(log *xlog.Log, ID uint32, conn net.Conn) *Session { + return &Session{ + id: ID, + log: log, + conn: conn, + auth: proto.NewAuth(), + greeting: proto.NewGreeting(ID), + packets: packet.NewPackets(conn), + } +} + +func (s *Session) writeErrFromError(err error) error { + if se, ok := err.(*sqldb.SQLError); ok { + return s.packets.WriteERR(se.Num, se.State, "%v", se.Message) + } + unknow := sqldb.NewSQLError(sqldb.ER_UNKNOWN_ERROR, "%v", err) + return s.packets.WriteERR(unknow.Num, unknow.State, unknow.Message) +} + +func (s *Session) writeFields(result *sqltypes.Result) error { + // 1. Write columns. + if err := s.packets.AppendColumns(result.Fields); err != nil { + return err + } + + if (s.auth.ClientFlags() & sqldb.CLIENT_DEPRECATE_EOF) == 0 { + if err := s.packets.AppendEOF(); err != nil { + return err + } + } + return nil +} + +func (s *Session) writeRows(result *sqltypes.Result) error { + // 2. Append rows. 
+ for _, row := range result.Rows { + rowBuf := common.NewBuffer(16) + for _, val := range row { + if val.IsNull() { + rowBuf.WriteLenEncodeNUL() + } else { + rowBuf.WriteLenEncodeBytes(val.Raw()) + } + } + if err := s.packets.Append(rowBuf.Datas()); err != nil { + return err + } + } + return nil +} + +func (s *Session) writeFinish(result *sqltypes.Result) error { + // 3. Write EOF. + if (s.auth.ClientFlags() & sqldb.CLIENT_DEPRECATE_EOF) == 0 { + if err := s.packets.AppendEOF(); err != nil { + return err + } + } else { + if err := s.packets.AppendOKWithEOFHeader(result.RowsAffected, result.InsertID, s.greeting.Status(), result.Warnings); err != nil { + return err + } + } + return nil +} + +func (s *Session) flush() error { + // 4. Write to stream. + return s.packets.Flush() +} + +func (s *Session) writeResult(result *sqltypes.Result) error { + if len(result.Fields) == 0 { + if result.State == sqltypes.RStateNone { + // This is just an INSERT result, send an OK packet. + return s.packets.WriteOK(result.RowsAffected, result.InsertID, s.greeting.Status(), result.Warnings) + } + return fmt.Errorf("unexpected: result.without.no.fields.but.has.rows.result:%+v", result) + } + + switch result.State { + case sqltypes.RStateNone: + if err := s.writeFields(result); err != nil { + return err + } + if err := s.writeRows(result); err != nil { + return err + } + if err := s.writeFinish(result); err != nil { + return err + } + case sqltypes.RStateFields: + if err := s.writeFields(result); err != nil { + return err + } + case sqltypes.RStateRows: + if err := s.writeRows(result); err != nil { + return err + } + case sqltypes.RStateFinished: + if err := s.writeFinish(result); err != nil { + return err + } + } + return s.flush() +} + +// Close used to close the connection. +func (s *Session) Close() { + s.mu.RLock() + defer s.mu.RUnlock() + if s.conn != nil { + s.conn.Close() + s.conn = nil + } +} + +// ID returns the connection ID. 
+func (s *Session) ID() uint32 { + s.mu.RLock() + defer s.mu.RUnlock() + return s.id +} + +// Addr returns the remote address. +func (s *Session) Addr() string { + s.mu.RLock() + defer s.mu.RUnlock() + if s.conn != nil { + return s.conn.RemoteAddr().String() + } + return "unknow" +} + +// SetSchema used to set the schema. +func (s *Session) SetSchema(schema string) { + s.mu.Lock() + defer s.mu.Unlock() + s.schema = schema +} + +// Schema returns the schema. +func (s *Session) Schema() string { + s.mu.RLock() + defer s.mu.RUnlock() + return s.schema +} + +// User returns the user of auth. +func (s *Session) User() string { + s.mu.RLock() + defer s.mu.RUnlock() + return s.auth.User() +} + +// Salt returns the salt of greeting. +func (s *Session) Salt() []byte { + s.mu.RLock() + defer s.mu.RUnlock() + return s.greeting.Salt +} + +// Scramble returns the scramble of auth. +func (s *Session) Scramble() []byte { + s.mu.RLock() + defer s.mu.RUnlock() + return s.auth.AuthResponse() +} + +// Charset returns the charset of auth. 
+func (s *Session) Charset() uint8 { + s.mu.RLock() + defer s.mu.RUnlock() + return s.auth.Charset() +} diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/driver/session_test.go b/src/vendor/github.com/xelabs/go-mysqlstack/driver/session_test.go new file mode 100644 index 00000000..1fe9a678 --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/driver/session_test.go @@ -0,0 +1,62 @@ +/* + * go-mysqlstack + * xelabs.org + * + * Copyright (c) XeLabs + * GPL License + * + */ + +package driver + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/xelabs/go-mysqlstack/xlog" +) + +func TestSession(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.DEBUG)) + th := NewTestHandler(log) + svr, err := MockMysqlServer(log, th) + assert.Nil(t, err) + address := svr.Addr() + + // create session 1 + client, err := NewConn("mock", "mock", address, "test", "") + assert.Nil(t, err) + defer client.Close() + + var sessions []*Session + for _, s := range th.ss { + sessions = append(sessions, s.session) + } + + { + session1 := sessions[0] + + // Session ID. + { + log.Debug("--id:%v", session1.ID()) + log.Debug("--addr:%v", session1.Addr()) + log.Debug("--salt:%v", session1.Salt()) + log.Debug("--scramble:%v", session1.Scramble()) + } + + // schema. + { + want := "xx" + session1.SetSchema(want) + got := session1.Schema() + assert.Equal(t, want, got) + } + + // charset. 
+ { + want := uint8(0x21) + got := session1.Charset() + assert.Equal(t, want, got) + } + } +} diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/examples/client.go b/src/vendor/github.com/xelabs/go-mysqlstack/examples/client.go new file mode 100644 index 00000000..27ec8e44 --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/examples/client.go @@ -0,0 +1,33 @@ +/* + * go-mysqlstack + * xelabs.org + * + * Copyright (c) XeLabs + * GPL License + * + */ + +package main + +import ( + "fmt" + + "github.com/xelabs/go-mysqlstack/driver" + "github.com/xelabs/go-mysqlstack/xlog" +) + +func main() { + log := xlog.NewStdLog(xlog.Level(xlog.INFO)) + address := fmt.Sprintf(":4407") + client, err := driver.NewConn("mock", "mock", address, "", "") + if err != nil { + log.Panic("client.new.connection.error:%+v", err) + } + defer client.Close() + + qr, err := client.FetchAll("SELECT * FROM MOCK", -1) + if err != nil { + log.Panic("client.query.error:%+v", err) + } + log.Info("results:[%+v]", qr.Rows) +} diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/examples/mysqld.go b/src/vendor/github.com/xelabs/go-mysqlstack/examples/mysqld.go new file mode 100644 index 00000000..e8cb2012 --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/examples/mysqld.go @@ -0,0 +1,58 @@ +/* + * go-mysqlstack + * xelabs.org + * + * Copyright (c) XeLabs + * GPL License + * + */ + +package main + +import ( + "os" + "os/signal" + "syscall" + + "github.com/xelabs/go-mysqlstack/driver" + querypb "github.com/xelabs/go-mysqlstack/sqlparser/depends/query" + "github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes" + "github.com/xelabs/go-mysqlstack/xlog" +) + +func main() { + result1 := &sqltypes.Result{ + Fields: []*querypb.Field{ + { + Name: "id", + Type: querypb.Type_INT32, + }, + { + Name: "name", + Type: querypb.Type_VARCHAR, + }, + }, + Rows: [][]sqltypes.Value{ + { + sqltypes.MakeTrusted(querypb.Type_INT32, []byte("10")), + sqltypes.MakeTrusted(querypb.Type_VARCHAR, 
[]byte("nice name")), + }, + }, + } + + log := xlog.NewStdLog(xlog.Level(xlog.INFO)) + th := driver.NewTestHandler(log) + th.AddQuery("SELECT * FROM MOCK", result1) + + mysqld, err := driver.MockMysqlServerWithPort(log, 4407, th) + if err != nil { + log.Panic("mysqld.start.error:%+v", err) + } + defer mysqld.Close() + log.Info("mysqld.server.start.address[%v]", mysqld.Addr()) + + // Handle SIGINT and SIGTERM. + ch := make(chan os.Signal) + signal.Notify(ch, syscall.SIGINT, syscall.SIGTERM) + <-ch +} diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/makefile b/src/vendor/github.com/xelabs/go-mysqlstack/makefile new file mode 100644 index 00000000..0daeaf3c --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/makefile @@ -0,0 +1,39 @@ +export PATH := $(GOPATH)/bin:$(PATH) + +fmt: + go fmt ./... + go vet ./... + +test: + go get github.com/stretchr/testify/assert + @echo "--> Testing..." + @$(MAKE) testxlog + @$(MAKE) testsqlparser + @$(MAKE) testcommon + @$(MAKE) testsqldb + @$(MAKE) testproto + @$(MAKE) testpacket + @$(MAKE) testdriver + +testxlog: + go test -v ./xlog +testsqlparser: + go test -v ./sqlparser/... 
+testcommon: + go test -v ./common +testsqldb: + go test -v ./sqldb +testproto: + go test -v ./proto +testpacket: + go test -v ./packet +testdriver: + go test -v ./driver + +COVPKGS = ./sqlparser ./common ./sqldb ./proto ./packet ./driver ./sqlparser/depends/sqltypes +coverage: + go get github.com/pierrre/gotestcover + gotestcover -coverprofile=coverage.out -v $(COVPKGS) + go tool cover -html=coverage.out + +.PHONY: fmt testcommon testproto testpacket testdriver coverage diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/packet/error.go b/src/vendor/github.com/xelabs/go-mysqlstack/packet/error.go new file mode 100644 index 00000000..a1a124bb --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/packet/error.go @@ -0,0 +1,21 @@ +/* + * go-mysqlstack + * xelabs.org + * + * Copyright (c) XeLabs + * GPL License + * + */ + +package packet + +import ( + "errors" +) + +var ( + // ErrBadConn used for the error of bad connection. + ErrBadConn = errors.New("connection.was.bad") + // ErrMalformPacket used for the bad packet. + ErrMalformPacket = errors.New("Malform.packet.error") +) diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/packet/mock.go b/src/vendor/github.com/xelabs/go-mysqlstack/packet/mock.go new file mode 100644 index 00000000..89dab5de --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/packet/mock.go @@ -0,0 +1,89 @@ +/* + * go-mysqlstack + * xelabs.org + * + * Copyright (c) XeLabs + * Copyright 2016 The Go-MySQL-Driver Authors. All rights reserved. + * GPL License + * + */ + +package packet + +import ( + "io" + "net" + "time" +) + +var _ net.Conn = &MockConn{} + +// MockConn used to mock a net.Conn for testing purposes. +type MockConn struct { + laddr net.Addr + raddr net.Addr + data []byte + closed bool + read int +} + +// NewMockConn creates new mock connection. +func NewMockConn() *MockConn { + return &MockConn{} +} + +// Read implements the net.Conn interface. 
+func (m *MockConn) Read(b []byte) (n int, err error) { + // handle the EOF + if len(m.data) == 0 { + err = io.EOF + return + } + + n = copy(b, m.data) + m.read += n + m.data = m.data[n:] + return +} + +// Write implements the net.Conn interface. +func (m *MockConn) Write(b []byte) (n int, err error) { + m.data = append(m.data, b...) + return len(b), nil +} + +// Datas implements the net.Conn interface. +func (m *MockConn) Datas() []byte { + return m.data +} + +// Close implements the net.Conn interface. +func (m *MockConn) Close() error { + m.closed = true + return nil +} + +// LocalAddr implements the net.Conn interface. +func (m *MockConn) LocalAddr() net.Addr { + return m.laddr +} + +// RemoteAddr implements the net.Conn interface. +func (m *MockConn) RemoteAddr() net.Addr { + return m.raddr +} + +// SetDeadline implements the net.Conn interface. +func (m *MockConn) SetDeadline(t time.Time) error { + return nil +} + +// SetReadDeadline implements the net.Conn interface. +func (m *MockConn) SetReadDeadline(t time.Time) error { + return nil +} + +// SetWriteDeadline implements the net.Conn interface. +func (m *MockConn) SetWriteDeadline(t time.Time) error { + return nil +} diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/packet/packets.go b/src/vendor/github.com/xelabs/go-mysqlstack/packet/packets.go new file mode 100644 index 00000000..4e3a8898 --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/packet/packets.go @@ -0,0 +1,284 @@ +/* + * go-mysqlstack + * xelabs.org + * + * Copyright (c) XeLabs + * GPL License + * + */ + +package packet + +import ( + "fmt" + "net" + + "github.com/xelabs/go-mysqlstack/common" + "github.com/xelabs/go-mysqlstack/proto" + "github.com/xelabs/go-mysqlstack/sqldb" + + querypb "github.com/xelabs/go-mysqlstack/sqlparser/depends/query" +) + +const ( + // PACKET_MAX_SIZE used for the max packet size. + PACKET_MAX_SIZE = (1<<24 - 1) // (16MB - 1) +) + +// Packet presents the packet tuple. 
+type Packet struct { + SequenceID byte + Datas []byte +} + +// Packets presents the stream tuple. +type Packets struct { + seq uint8 + stream *Stream +} + +// NewPackets creates the new packets. +func NewPackets(c net.Conn) *Packets { + return &Packets{ + stream: NewStream(c, PACKET_MAX_SIZE), + } +} + +// Next used to read the next packet. +func (p *Packets) Next() ([]byte, error) { + pkt, err := p.stream.Read() + if err != nil { + return nil, err + } + + if pkt.SequenceID != p.seq { + return nil, sqldb.NewSQLError(sqldb.ER_MALFORMED_PACKET, "pkt.read.seq[%v]!=pkt.actual.seq[%v]", pkt.SequenceID, p.seq) + } + p.seq++ + return pkt.Datas, nil +} + +// Write writes the packet to the wire. +// It packed as: +// [header] +// [payload] +func (p *Packets) Write(payload []byte) error { + payLen := len(payload) + pkt := common.NewBuffer(64) + + // body length(24bits) + pkt.WriteU24(uint32(payLen)) + + // SequenceID + pkt.WriteU8(p.seq) + + // body + pkt.WriteBytes(payload) + if err := p.stream.Write(pkt.Datas()); err != nil { + return err + } + p.seq++ + return nil +} + +// WriteCommand writes a command packet to the wire. +func (p *Packets) WriteCommand(command byte, payload []byte) error { + // reset packet sequence + p.seq = 0 + pkt := common.NewBuffer(64) + + // body length(24bits): + // command length + payload length + payLen := len(payload) + pkt.WriteU24(uint32(1 + payLen)) + + // SequenceID + pkt.WriteU8(p.seq) + + // command + pkt.WriteU8(command) + + // body + pkt.WriteBytes(payload) + if err := p.stream.Write(pkt.Datas()); err != nil { + return err + } + p.seq++ + return nil +} + +// ResetSeq reset sequence to zero. +func (p *Packets) ResetSeq() { + p.seq = 0 +} + +// ParseOK used to parse the OK packet. +func (p *Packets) ParseOK(data []byte) (*proto.OK, error) { + return proto.UnPackOK(data) +} + +// WriteOK writes OK packet to the wire. 
+func (p *Packets) WriteOK(affectedRows, lastInsertID uint64, flags uint16, warnings uint16) error { + ok := &proto.OK{ + AffectedRows: affectedRows, + LastInsertID: lastInsertID, + StatusFlags: flags, + Warnings: warnings, + } + return p.Write(proto.PackOK(ok)) +} + +// ParseERR used to parse the ERR packet. +func (p *Packets) ParseERR(data []byte) error { + return proto.UnPackERR(data) +} + +// WriteERR writes ERR packet to the wire. +func (p *Packets) WriteERR(errorCode uint16, sqlState string, format string, args ...interface{}) error { + e := &proto.ERR{ + ErrorCode: errorCode, + SQLState: sqlState, + ErrorMessage: fmt.Sprintf(format, args...), + } + return p.Write(proto.PackERR(e)) +} + +// Append appends packets to buffer but not write to stream +// NOTICE: SequenceID++ +func (p *Packets) Append(rawdata []byte) error { + pkt := common.NewBuffer(64) + + // body length(24bits): + // payload length + pkt.WriteU24(uint32(len(rawdata))) + + // SequenceID + pkt.WriteU8(p.seq) + + // body + pkt.WriteBytes(rawdata) + if err := p.stream.Append(pkt.Datas()); err != nil { + return err + } + p.seq++ + return nil +} + +// ReadEOF used to read the EOF packet. +func (p *Packets) ReadEOF() error { + // EOF packet + data, err := p.Next() + if err != nil { + return err + } + switch data[0] { + case proto.EOF_PACKET: + return nil + case proto.ERR_PACKET: + return p.ParseERR(data) + default: + return sqldb.NewSQLError(sqldb.ER_MALFORMED_PACKET, "unexpected.eof.packet[%+v]", data) + } +} + +// AppendEOF appends EOF packet to the stream buffer. +func (p *Packets) AppendEOF() error { + return p.Append([]byte{proto.EOF_PACKET}) +} + +// AppendOKWithEOFHeader appends OK packet to the stream buffer with EOF header. 
+func (p *Packets) AppendOKWithEOFHeader(affectedRows, lastInsertID uint64, flags uint16, warnings uint16) error { + ok := &proto.OK{ + AffectedRows: affectedRows, + LastInsertID: lastInsertID, + StatusFlags: flags, + Warnings: warnings, + } + buf := common.NewBuffer(64) + buf.WriteU8(proto.EOF_PACKET) + buf.WriteBytes(proto.PackOK(ok)) + return p.Append(buf.Datas()) +} + +// AppendColumns used to append column to columns. +func (p *Packets) AppendColumns(columns []*querypb.Field) error { + // column count + count := len(columns) + buf := common.NewBuffer(64) + buf.WriteLenEncode(uint64(count)) + if err := p.Append(buf.Datas()); err != nil { + return err + } + + // columns info + for i := 0; i < count; i++ { + buf := common.NewBuffer(64) + buf.WriteBytes(proto.PackColumn(columns[i])) + if err := p.Append(buf.Datas()); err != nil { + return err + } + } + return nil +} + +// Flush writes all append-packets to the wire. +func (p *Packets) Flush() error { + return p.stream.Flush() +} + +// ReadComQueryResponse used to read query command response and parse the column count. +// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-ProtocolText::Resultset +// Returns: +// ok, colNumbs, myerr, err +// +// myerr is the error who was send by MySQL server, the client does not close the connection. +// if err is not nil, we(the client) will close the connection. +func (p *Packets) ReadComQueryResponse() (*proto.OK, int, error, error) { + var err error + var data []byte + var numbers uint64 + + if data, err = p.Next(); err != nil { + return nil, 0, nil, err + } + + ok := &proto.OK{} + switch data[0] { + case proto.OK_PACKET: + // OK. 
+ if ok, err = p.ParseOK(data); err != nil { + return nil, 0, nil, err + } + return ok, 0, nil, nil + case proto.ERR_PACKET: + return nil, 0, p.ParseERR(data), nil + case 0xfb: + // Local infile + return nil, 0, sqldb.NewSQLError(sqldb.ER_UNKNOWN_ERROR, "Local.infile.not.implemented"), nil + } + // column count + if numbers, err = proto.ColumnCount(data); err != nil { + return nil, 0, nil, err + } + return ok, int(numbers), nil, nil +} + +// ReadColumns used to read all columns from the stream buffer. +func (p *Packets) ReadColumns(colNumber int) ([]*querypb.Field, error) { + var err error + var data []byte + + // column info + columns := make([]*querypb.Field, 0, colNumber) + for i := 0; i < colNumber; i++ { + if data, err = p.Next(); err != nil { + return nil, err + } + column, err := proto.UnpackColumn(data) + if err != nil { + return nil, err + } + columns = append(columns, column) + } + return columns, nil +} diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/packet/packets_test.go b/src/vendor/github.com/xelabs/go-mysqlstack/packet/packets_test.go new file mode 100644 index 00000000..eff7d63c --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/packet/packets_test.go @@ -0,0 +1,353 @@ +/* + * go-mysqlstack + * xelabs.org + * + * Copyright (c) XeLabs + * GPL License + * + */ + +package packet + +import ( + "io" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/xelabs/go-mysqlstack/common" + "github.com/xelabs/go-mysqlstack/proto" + "github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes" + + querypb "github.com/xelabs/go-mysqlstack/sqlparser/depends/query" +) + +func TestPacketsNext(t *testing.T) { + conn := NewMockConn() + defer conn.Close() + + packets := NewPackets(conn) + data := []byte{0x01, 0x02, 0x03} + + { + // header + buff := common.NewBuffer(64) + buff.WriteU24(3) + buff.WriteU8(0) + buff.WriteBytes(data) + + conn.Write(buff.Datas()) + body, err := packets.Next() + assert.Nil(t, err) + assert.Equal(t, 
body, data) + } + + { + // header + buff := common.NewBuffer(64) + buff.WriteU24(3) + buff.WriteU8(1) + buff.WriteBytes(data) + + conn.Write(buff.Datas()) + body, err := packets.Next() + assert.Nil(t, err) + assert.Equal(t, body, data) + } + + // seq error test + { + // header + buff := common.NewBuffer(64) + buff.WriteU24(3) + buff.WriteU8(1) + buff.WriteBytes(data) + + conn.Write(buff.Datas()) + _, err := packets.Next() + want := "pkt.read.seq[1]!=pkt.actual.seq[2] (errno 1835) (sqlstate HY000)" + got := err.Error() + assert.Equal(t, want, got) + } + + // reset seq + { + assert.Equal(t, packets.seq, uint8(2)) + packets.ResetSeq() + assert.Equal(t, packets.seq, uint8(0)) + } +} + +func TestPacketsNextFail(t *testing.T) { + conn := NewMockConn() + defer conn.Close() + + packets := NewPackets(conn) + data1 := []byte{0x00, 0x00, 0x00} + data2 := []byte{0x00, 0x00, 0x00, 0x00} + data3 := []byte{0x01, 0x10, 0x00, 0x00} + + { + conn.Write(data1) + _, err := packets.Next() + assert.NotNil(t, err) + } + + { + conn.Write(data2) + _, err := packets.Next() + assert.Nil(t, err) + } + + { + conn.Write(data3) + _, err := packets.Next() + assert.NotNil(t, err) + } +} + +func TestPacketsWrite(t *testing.T) { + conn := NewMockConn() + defer conn.Close() + + buff := common.NewBuffer(64) + packets := NewPackets(conn) + data := []byte{0x01, 0x02, 0x03} + + { + buff.WriteU24(3) + buff.WriteU8(0) + buff.WriteBytes(data) + want := buff.Datas() + + err := packets.Write(data) + assert.Nil(t, err) + got := conn.Datas() + assert.Equal(t, want, got) + } + + { + buff.WriteU24(3) + buff.WriteU8(1) + buff.WriteBytes(data) + want := buff.Datas() + + err := packets.Write(data) + assert.Nil(t, err) + got := conn.Datas() + assert.Equal(t, want, got) + } +} + +func TestPacketsWriteCommand(t *testing.T) { + conn := NewMockConn() + defer conn.Close() + + buff := common.NewBuffer(64) + packets := NewPackets(conn) + cmd := 0x03 + data := []byte{0x01, 0x02, 0x03} + + { + buff.WriteU24(3 + 1) + 
buff.WriteU8(0) + buff.WriteU8(uint8(cmd)) + buff.WriteBytes(data) + want := buff.Datas() + + err := packets.WriteCommand(byte(cmd), data) + assert.Nil(t, err) + got := conn.Datas() + assert.Equal(t, want, got) + } +} + +func TestPacketsColumns(t *testing.T) { + conn := NewMockConn() + defer conn.Close() + + wPackets := NewPackets(conn) + rPackets := NewPackets(conn) + columns := []*querypb.Field{ + &querypb.Field{ + Database: "test", + Table: "t1", + OrgTable: "t1", + Name: "a", + OrgName: "a", + Charset: 11, + ColumnLength: 11, + Type: sqltypes.Int32, + Flags: 11, + }, + &querypb.Field{ + Database: "test", + Table: "t1", + OrgTable: "t1", + Name: "b", + OrgName: "b", + Charset: 12, + ColumnLength: 12, + Type: sqltypes.Int8, + Flags: 12, + }, + } + + { + err := wPackets.AppendColumns(columns) + assert.Nil(t, err) + wPackets.Flush() + } + + { + _, nums, _, err := rPackets.ReadComQueryResponse() + assert.Nil(t, err) + got, err := rPackets.ReadColumns(nums) + assert.Nil(t, err) + assert.Equal(t, columns, got) + } +} + +func TestPacketsColumnsOK(t *testing.T) { + conn := NewMockConn() + defer conn.Close() + + wPackets := NewPackets(conn) + rPackets := NewPackets(conn) + { + buff := common.NewBuffer(32) + + // header + buff.WriteU8(0x00) + // affected_rows + buff.WriteLenEncode(uint64(3)) + // last_insert_id + buff.WriteLenEncode(uint64(40000000000)) + + // status_flags + buff.WriteU16(0x01) + // warnings + buff.WriteU16(0x02) + wPackets.Write(buff.Datas()) + } + + { + want := &proto.OK{} + want.AffectedRows = 3 + want.LastInsertID = 40000000000 + want.StatusFlags = 1 + want.Warnings = 2 + + got, nums, _, err := rPackets.ReadComQueryResponse() + assert.Nil(t, err) + assert.Equal(t, 0, nums) + assert.Equal(t, want, got) + } +} + +func TestPacketsColumnsERR(t *testing.T) { + conn := NewMockConn() + defer conn.Close() + + wPackets := NewPackets(conn) + rPackets := NewPackets(conn) + { + buff := common.NewBuffer(32) + + // header + buff.WriteU8(0xff) + // error_code + 
buff.WriteU16(0x01) + // sql_state_marker + buff.WriteString("a") + // sql_state + buff.WriteString("ABCDE") + buff.WriteString("ERROR") + wPackets.Write(buff.Datas()) + } + + { + want := "ERROR (errno 1) (sqlstate ABCDE)" + _, _, myerr, _ := rPackets.ReadComQueryResponse() + got := myerr.Error() + assert.Equal(t, want, got) + } +} + +func TestPacketsColumnsError(t *testing.T) { + conn := NewMockConn() + defer conn.Close() + + wPackets := NewPackets(conn) + rPackets := NewPackets(conn) + { + buff := common.NewBuffer(32) + + // random datas + buff.WriteU8(0xf0) + buff.WriteU16(0x11) + wPackets.Write(buff.Datas()) + } + + { + want := io.EOF + _, nums, _, err := rPackets.ReadComQueryResponse() + assert.Nil(t, err) + _, err = rPackets.ReadColumns(nums) + got := err + assert.Equal(t, want, got) + } +} + +func TestPacketsWriteOK(t *testing.T) { + conn := NewMockConn() + defer conn.Close() + + wPackets := NewPackets(conn) + err := wPackets.WriteOK(1, 1, 1, 1) + assert.Nil(t, err) + + conn.Datas() + conn.LocalAddr() + conn.RemoteAddr() + conn.SetDeadline(time.Now()) + conn.SetReadDeadline(time.Now()) + conn.SetWriteDeadline(time.Now()) + +} + +func TestPacketsWriteError(t *testing.T) { + conn := NewMockConn() + defer conn.Close() + + wPackets := NewPackets(conn) + err := wPackets.WriteERR(1, "YH000", "err:%v", "unknow") + assert.Nil(t, err) +} + +func TestPacketsEOF(t *testing.T) { + conn := NewMockConn() + defer conn.Close() + + wPackets := NewPackets(conn) + rPackets := NewPackets(conn) + // EOF + { + err := wPackets.AppendEOF() + assert.Nil(t, err) + wPackets.Flush() + + err = rPackets.ReadEOF() + assert.Nil(t, err) + } + + // OK with EOF header. 
+ { + err := wPackets.AppendOKWithEOFHeader(1, 1, 1, 1) + assert.Nil(t, err) + wPackets.Flush() + + err = rPackets.ReadEOF() + assert.Nil(t, err) + } +} diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/packet/stream.go b/src/vendor/github.com/xelabs/go-mysqlstack/packet/stream.go new file mode 100644 index 00000000..e87ef3c8 --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/packet/stream.go @@ -0,0 +1,120 @@ +/* + * go-mysqlstack + * xelabs.org + * + * Copyright (c) XeLabs + * GPL License + * + */ + +package packet + +import ( + "bufio" + "io" + "net" +) + +const ( + // PACKET_BUFFER_SIZE is how much we buffer for reading. + PACKET_BUFFER_SIZE = 32 * 1024 +) + +// Stream represents the stream tuple. +type Stream struct { + pktMaxSize int + header []byte + reader *bufio.Reader + writer *bufio.Writer +} + +// NewStream creates a new stream. +func NewStream(conn net.Conn, pktMaxSize int) *Stream { + return &Stream{ + pktMaxSize: pktMaxSize, + header: []byte{0, 0, 0, 0}, + reader: bufio.NewReaderSize(conn, PACKET_BUFFER_SIZE), + writer: bufio.NewWriterSize(conn, PACKET_BUFFER_SIZE), + } +} + +// Read reads the next packet from the reader +// The returned pkt.Datas is only guaranteed to be valid until the next read +func (s *Stream) Read() (*Packet, error) { + // Header. + if _, err := io.ReadFull(s.reader, s.header); err != nil { + return nil, err + } + + // Length. + pkt := &Packet{} + pkt.SequenceID = s.header[3] + length := int(uint32(s.header[0]) | uint32(s.header[1])<<8 | uint32(s.header[2])<<16) + if length == 0 { + return pkt, nil + } + + // Datas. + data := make([]byte, length) + if _, err := io.ReadFull(s.reader, data); err != nil { + return nil, err + } + pkt.Datas = data + + // Single packet. + if length < s.pktMaxSize { + return pkt, nil + } + + // There is more than one packet, read them all. + next, err := s.Read() + if err != nil { + return nil, err + } + pkt.SequenceID = next.SequenceID + pkt.Datas = append(pkt.Datas, next.Datas...) 
+ return pkt, nil +} + +// Write writes the packet to writer +func (s *Stream) Write(data []byte) error { + if err := s.Append(data); err != nil { + return err + } + return s.Flush() +} + +// Append used to append data to write buffer. +func (s *Stream) Append(data []byte) error { + payLen := len(data) - 4 + sequence := data[3] + + for { + var size int + if payLen < s.pktMaxSize { + size = payLen + } else { + size = s.pktMaxSize + } + data[0] = byte(size) + data[1] = byte(size >> 8) + data[2] = byte(size >> 16) + data[3] = sequence + + // append to buffer + s.writer.Write(data[:4+size]) + if size < s.pktMaxSize { + break + } + + payLen -= size + data = data[size:] + sequence++ + } + return nil +} + +// Flush used to flush the writer. +func (s *Stream) Flush() error { + return s.writer.Flush() +} diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/packet/stream_test.go b/src/vendor/github.com/xelabs/go-mysqlstack/packet/stream_test.go new file mode 100644 index 00000000..6878c370 --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/packet/stream_test.go @@ -0,0 +1,215 @@ +/* + * go-mysqlstack + * xelabs.org + * + * Copyright (c) XeLabs + * GPL License + * + */ + +package packet + +import ( + "github.com/stretchr/testify/assert" + "github.com/xelabs/go-mysqlstack/common" + "testing" +) + +// TEST EFFECTS: +// writes normal packet +// +// TEST PROCESSES: +// 1. write datas more than PACKET_BUFFER_SIZE +// 2. write checks +// 3. 
read checks +func TestStream(t *testing.T) { + rBuf := NewMockConn() + defer rBuf.Close() + + wBuf := NewMockConn() + defer wBuf.Close() + + rStream := NewStream(rBuf, PACKET_MAX_SIZE) + wStream := NewStream(wBuf, PACKET_MAX_SIZE) + + packet := common.NewBuffer(PACKET_BUFFER_SIZE) + payload := common.NewBuffer(PACKET_BUFFER_SIZE) + + for i := 0; i < 1234; i++ { + payload.WriteU8(byte(i)) + } + + packet.WriteU24(uint32(payload.Length())) + packet.WriteU8(1) + packet.WriteBytes(payload.Datas()) + + // write checks + { + err := wStream.Write(packet.Datas()) + assert.Nil(t, err) + + want := packet.Datas() + got := wBuf.Datas() + assert.Equal(t, want, got) + } + + // read checks + { + rBuf.Write(wBuf.Datas()) + ptk, err := rStream.Read() + assert.Nil(t, err) + + assert.Equal(t, byte(0x01), ptk.SequenceID) + assert.Equal(t, payload.Datas(), ptk.Datas) + } +} + +// TEST EFFECTS: +// write packet whoes payload length equals pktMaxSize +// +// TEST PROCESSES: +// 1. write payload whoes length equals pktMaxSize +// 2. read checks +// 3. 
write checks +func TestStreamWriteMax(t *testing.T) { + rBuf := NewMockConn() + defer rBuf.Close() + + wBuf := NewMockConn() + defer wBuf.Close() + + pktMaxSize := 64 + rStream := NewStream(rBuf, pktMaxSize) + wStream := NewStream(wBuf, pktMaxSize) + + packet := common.NewBuffer(PACKET_BUFFER_SIZE) + expect := common.NewBuffer(PACKET_BUFFER_SIZE) + payload := common.NewBuffer(PACKET_BUFFER_SIZE) + + { + for i := 0; i < (pktMaxSize+1)/4; i++ { + payload.WriteU32(uint32(i)) + } + } + packet.WriteU24(uint32(payload.Length())) + packet.WriteU8(1) + packet.WriteBytes(payload.Datas()) + + // write checks + { + err := wStream.Write(packet.Datas()) + assert.Nil(t, err) + + // check length + { + want := packet.Length() + 4 + got := len(wBuf.Datas()) + assert.Equal(t, want, got) + } + + // check chunks + { + // first chunk + expect.WriteU24(uint32(pktMaxSize)) + expect.WriteU8(1) + expect.WriteBytes(payload.Datas()[:pktMaxSize]) + + // second chunk + expect.WriteU24(0) + expect.WriteU8(2) + + want := expect.Datas() + got := wBuf.Datas() + assert.Equal(t, want, got) + } + } + + // read checks + { + rBuf.Write(wBuf.Datas()) + ptk, err := rStream.Read() + assert.Nil(t, err) + + assert.Equal(t, byte(0x02), ptk.SequenceID) + assert.Equal(t, payload.Datas(), ptk.Datas) + } +} + +// TEST EFFECTS: +// write packet whoes payload length more than pktMaxSizie +// +// TEST PROCESSES: +// 1. write payload whoes length (pktMaxSizie + 8) +// 2. read checks +// 3. 
write checks +func TestStreamWriteOverMax(t *testing.T) { + rBuf := NewMockConn() + defer rBuf.Close() + + wBuf := NewMockConn() + defer wBuf.Close() + + pktMaxSize := 63 + rStream := NewStream(rBuf, pktMaxSize) + wStream := NewStream(wBuf, pktMaxSize) + + packet := common.NewBuffer(PACKET_BUFFER_SIZE) + expect := common.NewBuffer(PACKET_BUFFER_SIZE) + payload := common.NewBuffer(PACKET_BUFFER_SIZE) + + { + for i := 0; i < pktMaxSize/4; i++ { + payload.WriteU32(uint32(i)) + } + } + // fill with 8bytes + payload.WriteU32(32) + payload.WriteU32(32) + + packet.WriteU24(uint32(payload.Length())) + packet.WriteU8(1) + packet.WriteBytes(payload.Datas()) + + // write checks + { + err := wStream.Write(packet.Datas()) + assert.Nil(t, err) + + // check length + { + want := packet.Length() + 4 + got := len(wBuf.Datas()) + assert.Equal(t, want, got) + } + + // check chunks + { + // first chunk + expect.WriteU24(uint32(pktMaxSize)) + expect.WriteU8(1) + expect.WriteBytes(payload.Datas()[:pktMaxSize]) + + // second chunk + left := (packet.Length() - 4) - pktMaxSize + expect.WriteU24(uint32(left)) + expect.WriteU8(2) + expect.WriteBytes(payload.Datas()[pktMaxSize:]) + + want := expect.Datas() + got := wBuf.Datas() + assert.Equal(t, want, got) + } + } + + // read checks + { + rBuf.Write(wBuf.Datas()) + ptk, err := rStream.Read() + assert.Nil(t, err) + + assert.Equal(t, byte(0x02), ptk.SequenceID) + assert.Equal(t, payload.Datas(), ptk.Datas) + _, err = rStream.Read() + assert.NotNil(t, err) + } +} diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/proto/auth.go b/src/vendor/github.com/xelabs/go-mysqlstack/proto/auth.go new file mode 100644 index 00000000..46b137f0 --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/proto/auth.go @@ -0,0 +1,203 @@ +/* + * go-mysqlstack + * xelabs.org + * + * Copyright (c) XeLabs + * GPL License + * + */ + +package proto + +import ( + "crypto/sha1" + "fmt" + + "github.com/xelabs/go-mysqlstack/common" + 
"github.com/xelabs/go-mysqlstack/sqldb" +) + +// Auth packet. +type Auth struct { + charset uint8 + maxPacketSize uint32 + authResponseLen uint8 + clientFlags uint32 + authResponse []byte + pluginName string + database string + user string +} + +// NewAuth creates new Auth. +func NewAuth() *Auth { + return &Auth{} +} + +// Database returns the database. +func (a *Auth) Database() string { + return a.database +} + +// ClientFlags returns the client flags. +func (a *Auth) ClientFlags() uint32 { + return a.clientFlags +} + +// Charset returns the charset. +func (a *Auth) Charset() uint8 { + return a.charset +} + +// User returns the user. +func (a *Auth) User() string { + return a.user +} + +// AuthResponse returns the auth response. +func (a *Auth) AuthResponse() []byte { + return a.authResponse +} + +// CleanAuthResponse used to set the authResponse to nil. +// To improve the heap gc cost. +func (a *Auth) CleanAuthResponse() { + a.authResponse = nil +} + +// UnPack parses the handshake sent by the client. 
+// https://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::HandshakeResponse41 +func (a *Auth) UnPack(payload []byte) error { + var err error + buf := common.ReadBuffer(payload) + + if a.clientFlags, err = buf.ReadU32(); err != nil { + return fmt.Errorf("auth.unpack: can't read client flags") + } + if a.clientFlags&sqldb.CLIENT_PROTOCOL_41 == 0 { + return fmt.Errorf("auth.unpack: only support protocol 4.1") + } + if a.maxPacketSize, err = buf.ReadU32(); err != nil { + return fmt.Errorf("auth.unpack: can't read maxPacketSize") + } + if a.charset, err = buf.ReadU8(); err != nil { + return fmt.Errorf("auth.unpack: can't read charset") + } + if err = buf.ReadZero(23); err != nil { + return fmt.Errorf("auth.unpack: can't read 23zeros") + } + if a.user, err = buf.ReadStringNUL(); err != nil { + return fmt.Errorf("auth.unpack: can't read user") + } + if (a.clientFlags & sqldb.CLIENT_SECURE_CONNECTION) > 0 { + if a.authResponseLen, err = buf.ReadU8(); err != nil { + return fmt.Errorf("auth.unpack: can't read authResponse length") + } + if a.authResponse, err = buf.ReadBytes(int(a.authResponseLen)); err != nil { + return fmt.Errorf("auth.unpack: can't read authResponse") + } + } else { + if a.authResponse, err = buf.ReadBytesNUL(); err != nil { + return fmt.Errorf("auth.unpack: can't read authResponse") + } + } + if (a.clientFlags & sqldb.CLIENT_CONNECT_WITH_DB) > 0 { + if a.database, err = buf.ReadStringNUL(); err != nil { + return fmt.Errorf("auth.unpack: can't read dbname") + } + } + if (a.clientFlags & sqldb.CLIENT_PLUGIN_AUTH) > 0 { + if a.pluginName, err = buf.ReadStringNUL(); err != nil { + return fmt.Errorf("auth.unpack: can't read pluginName") + } + } + if a.pluginName != DefaultAuthPluginName { + return fmt.Errorf("invalid authPluginName, got %v but only support %v", a.pluginName, DefaultAuthPluginName) + } + return nil +} + +// Pack used to pack a HandshakeResponse41 packet. 
+func (a *Auth) Pack(capabilityFlags uint32, charset uint8, username string, password string, salt []byte, database string) []byte { + buf := common.NewBuffer(256) + authResponse := nativePassword(password, salt) + if len(database) > 0 { + capabilityFlags |= sqldb.CLIENT_CONNECT_WITH_DB + } else { + capabilityFlags &= ^sqldb.CLIENT_CONNECT_WITH_DB + } + + // 4 capability flags, CLIENT_PROTOCOL_41 always set + buf.WriteU32(capabilityFlags) + + // 4 max-packet size (none) + buf.WriteU32(0) + + // 1 character set + buf.WriteU8(charset) + + // string[23] reserved (all [0]) + buf.WriteZero(23) + + // string[NUL] username + buf.WriteString(username) + buf.WriteZero(1) + + if (capabilityFlags & sqldb.CLIENT_SECURE_CONNECTION) > 0 { + // 1 length of auth-response + // string[n] auth-response + buf.WriteU8(uint8(len(authResponse))) + buf.WriteBytes(authResponse) + } else { + buf.WriteBytes(authResponse) + buf.WriteZero(1) + } + capabilityFlags &= ^sqldb.CLIENT_PLUGIN_AUTH_LENENC_CLIENT_DATA + + // string[NUL] database + if capabilityFlags&sqldb.CLIENT_CONNECT_WITH_DB > 0 { + buf.WriteString(database) + buf.WriteZero(1) + } + + // string[NUL] auth plugin name + buf.WriteString(DefaultAuthPluginName) + buf.WriteZero(1) + + // CLIENT_CONNECT_ATTRS none + // + return buf.Datas() +} + +// https://dev.mysql.com/doc/internals/en/secure-password-authentication.html#packet-Authentication::Native41 +// SHA1( password ) XOR SHA1( "20-bytes random data from server" SHA1( SHA1( password ) ) ) +// Encrypt password using 4.1+ method +func nativePassword(password string, salt []byte) []byte { + if len(password) == 0 { + return nil + } + + // stage1Hash = SHA1(password) + crypt := sha1.New() + crypt.Write([]byte(password)) + stage1 := crypt.Sum(nil) + + // scrambleHash = SHA1(scramble + SHA1(stage1Hash)) + // inner Hash + crypt.Reset() + crypt.Write(stage1) + stage1SHA1 := crypt.Sum(nil) + + // stage2Hash = SHA1(salt SHA1(SHA1(password))) + crypt.Reset() + crypt.Write(salt) + 
crypt.Write(stage1SHA1) + stage2 := crypt.Sum(nil) + + // srambleHash = stage1Hash ^ stage2Hash + scramble := make([]byte, len(stage2)) + for i := range stage2 { + scramble[i] = stage1[i] ^ stage2[i] + } + return scramble +} diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/proto/auth_test.go b/src/vendor/github.com/xelabs/go-mysqlstack/proto/auth_test.go new file mode 100644 index 00000000..b8d4230e --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/proto/auth_test.go @@ -0,0 +1,263 @@ +/* + * go-mysqlstack + * xelabs.org + * + * Copyright (c) XeLabs + * GPL License + * + */ + +package proto + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/xelabs/go-mysqlstack/common" + "github.com/xelabs/go-mysqlstack/sqldb" +) + +func TestAuth(t *testing.T) { + auth := NewAuth() + { + data := []byte{ + 0x8d, 0xa6, 0xff, 0x01, 0x00, 0x00, 0x00, 0x01, + 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x72, 0x6f, 0x6f, 0x74, 0x00, 0x14, 0x0e, 0xb4, + 0xdd, 0xb5, 0x5b, 0x64, 0xf8, 0x54, 0x40, 0xfd, + 0xf3, 0x45, 0xfa, 0x37, 0x12, 0x20, 0x20, 0xda, + 0x38, 0xaa, 0x61, 0x62, 0x63, 0x00, 0x6d, 0x79, + 0x73, 0x71, 0x6c, 0x5f, 0x6e, 0x61, 0x74, 0x69, + 0x76, 0x65, 0x5f, 0x70, 0x61, 0x73, 0x73, 0x77, + 0x6f, 0x72, 0x64, 0x00} + + auth.UnPack(data) + want := &Auth{ + charset: 33, + maxPacketSize: 16777216, + authResponseLen: 20, + authResponse: []byte{ + 0x0e, 0xb4, 0xdd, 0xb5, 0x5b, 0x64, 0xf8, 0x54, + 0x40, 0xfd, 0xf3, 0x45, 0xfa, 0x37, 0x12, 0x20, + 0x20, 0xda, 0x38, 0xaa}, + pluginName: "mysql_native_password", + database: "abc", + user: "root", + clientFlags: 33531533, + } + got := auth + assert.Equal(t, want, got) + } + + { + want := "abc" + got := auth.Database() + assert.Equal(t, want, got) + } + + { + want := uint32(33531533) + got := auth.ClientFlags() + assert.Equal(t, want, got) + } + + { + want := uint8(33) + got := 
auth.Charset() + assert.Equal(t, want, got) + } + + // User. + { + want := "root" + got := auth.User() + assert.Equal(t, want, got) + } + + // Resp. + { + want := []byte{ + 0x0e, 0xb4, 0xdd, 0xb5, 0x5b, 0x64, 0xf8, 0x54, + 0x40, 0xfd, 0xf3, 0x45, 0xfa, 0x37, 0x12, 0x20, + 0x20, 0xda, 0x38, 0xaa} + got := auth.AuthResponse() + assert.Equal(t, want, got) + + auth.CleanAuthResponse() + assert.Nil(t, auth.AuthResponse()) + } +} + +func TestAuthUnpackError(t *testing.T) { + auth := NewAuth() + { + data := []byte{ + 0x8d, 0xa6, 0xff, + } + err := auth.UnPack(data) + want := "auth.unpack: can't read client flags" + got := err.Error() + assert.Equal(t, want, got) + } +} + +func TestAuthUnPack(t *testing.T) { + want := NewAuth() + want.charset = 0x02 + want.authResponseLen = 20 + want.clientFlags = DefaultClientCapability + want.clientFlags |= sqldb.CLIENT_CONNECT_WITH_DB + want.authResponse = nativePassword("sbtest", DefaultSalt) + want.database = "sbtest" + want.user = "sbtest" + want.pluginName = DefaultAuthPluginName + + got := NewAuth() + err := got.UnPack(want.Pack( + DefaultClientCapability, + 0x02, + "sbtest", + "sbtest", + DefaultSalt, + "sbtest", + )) + assert.Nil(t, err) + assert.Equal(t, want, got) +} + +func TestAuthWithoutPWD(t *testing.T) { + want := NewAuth() + want.charset = 0x02 + want.authResponseLen = 0 + want.clientFlags = DefaultClientCapability + want.clientFlags |= sqldb.CLIENT_CONNECT_WITH_DB + want.authResponse = nativePassword("", DefaultSalt) + want.database = "sbtest" + want.user = "sbtest" + want.pluginName = DefaultAuthPluginName + + got := NewAuth() + err := got.UnPack(want.Pack( + DefaultClientCapability, + 0x02, + "sbtest", + "", + DefaultSalt, + "sbtest", + )) + assert.Nil(t, err) + assert.Equal(t, want, got) +} + +func TestAuthWithoutDB(t *testing.T) { + want := NewAuth() + want.charset = 0x02 + want.authResponseLen = 20 + want.clientFlags = DefaultClientCapability + want.authResponse = nativePassword("sbtest", DefaultSalt) + want.user = 
"sbtest" + want.pluginName = DefaultAuthPluginName + + got := NewAuth() + err := got.UnPack(want.Pack( + DefaultClientCapability, + 0x02, + "sbtest", + "sbtest", + DefaultSalt, + "", + )) + assert.Nil(t, err) + assert.Equal(t, want, got) +} + +func TestAuthWithoutSecure(t *testing.T) { + want := NewAuth() + want.charset = 0x02 + want.authResponseLen = 20 + want.clientFlags = DefaultClientCapability &^ sqldb.CLIENT_SECURE_CONNECTION &^ sqldb.CLIENT_PLUGIN_AUTH_LENENC_CLIENT_DATA + want.clientFlags |= sqldb.CLIENT_CONNECT_WITH_DB + want.authResponse = nativePassword("sbtest", DefaultSalt) + want.user = "sbtest" + want.database = "sbtest" + want.pluginName = DefaultAuthPluginName + + got := NewAuth() + err := got.UnPack(want.Pack( + DefaultClientCapability&^sqldb.CLIENT_SECURE_CONNECTION, + 0x02, + "sbtest", + "sbtest", + DefaultSalt, + "sbtest", + )) + got.authResponseLen = 20 + assert.Nil(t, err) + assert.Equal(t, want, got) +} + +func TestAuthUnPackError(t *testing.T) { + capabilityFlags := DefaultClientCapability + capabilityFlags |= sqldb.CLIENT_PROTOCOL_41 + capabilityFlags |= sqldb.CLIENT_CONNECT_WITH_DB + + // NULL + f0 := func(buff *common.Buffer) { + } + + // Write clientFlags. + f1 := func(buff *common.Buffer) { + buff.WriteU32(capabilityFlags) + } + + // Write maxPacketSize. + f2 := func(buff *common.Buffer) { + buff.WriteU32(uint32(16777216)) + } + + // Write charset. + f3 := func(buff *common.Buffer) { + buff.WriteU8(0x01) + } + + // Write 23 NULLs. + f4 := func(buff *common.Buffer) { + buff.WriteZero(23) + } + + // Write username. + f5 := func(buff *common.Buffer) { + buff.WriteString("mock") + buff.WriteZero(1) + } + + // Write auth-response. + f6 := func(buff *common.Buffer) { + authRsp := make([]byte, 8) + buff.WriteU8(8) + buff.WriteBytes(authRsp) + } + + // Write database. 
+ f7 := func(buff *common.Buffer) { + buff.WriteString("db1") + buff.WriteZero(1) + } + + buff := common.NewBuffer(32) + fs := []func(buff *common.Buffer){f0, f1, f2, f3, f4, f5, f6, f7} + for i := 0; i < len(fs); i++ { + auth := NewAuth() + err := auth.UnPack(buff.Datas()) + assert.NotNil(t, err) + fs[i](buff) + } + + { + auth := NewAuth() + err := auth.UnPack(buff.Datas()) + assert.NotNil(t, err) + } +} diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/proto/column.go b/src/vendor/github.com/xelabs/go-mysqlstack/proto/column.go new file mode 100644 index 00000000..4c3c387e --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/proto/column.go @@ -0,0 +1,160 @@ +/* + * go-mysqlstack + * xelabs.org + * + * Copyright (c) XeLabs + * GPL License + * + */ + +package proto + +import ( + "github.com/xelabs/go-mysqlstack/common" + "github.com/xelabs/go-mysqlstack/sqldb" + + querypb "github.com/xelabs/go-mysqlstack/sqlparser/depends/query" + "github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes" +) + +// ColumnCount returns the column count. +func ColumnCount(payload []byte) (count uint64, err error) { + buff := common.ReadBuffer(payload) + if count, err = buff.ReadLenEncode(); err != nil { + return 0, sqldb.NewSQLError(sqldb.ER_MALFORMED_PACKET, "extracting column count failed") + } + return +} + +// UnpackColumn used to unpack the column packet. 
+// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-Protocol::ColumnDefinition41 +func UnpackColumn(payload []byte) (*querypb.Field, error) { + var err error + field := &querypb.Field{} + buff := common.ReadBuffer(payload) + // Catalog is ignored, always set to "def" + if _, err = buff.ReadLenEncodeString(); err != nil { + return nil, sqldb.NewSQLError(sqldb.ER_MALFORMED_PACKET, "skipping col catalog failed") + } + + // lenenc_str Schema + if field.Database, err = buff.ReadLenEncodeString(); err != nil { + return nil, sqldb.NewSQLError(sqldb.ER_MALFORMED_PACKET, "extracting col schema failed") + } + + // lenenc_str Table + if field.Table, err = buff.ReadLenEncodeString(); err != nil { + return nil, sqldb.NewSQLError(sqldb.ER_MALFORMED_PACKET, "extracting col table failed") + } + + // lenenc_str Org_Table + if field.OrgTable, err = buff.ReadLenEncodeString(); err != nil { + return nil, sqldb.NewSQLError(sqldb.ER_MALFORMED_PACKET, "extracting col org_table failed") + } + + // lenenc_str Name + if field.Name, err = buff.ReadLenEncodeString(); err != nil { + return nil, sqldb.NewSQLError(sqldb.ER_MALFORMED_PACKET, "extracting col name failed") + } + + // lenenc_str Org_Name + if field.OrgName, err = buff.ReadLenEncodeString(); err != nil { + return nil, sqldb.NewSQLError(sqldb.ER_MALFORMED_PACKET, "extracting col org_name failed") + } + + // lenenc_int length of fixed-length fields [0c], skip + if _, err = buff.ReadLenEncode(); err != nil { + return nil, sqldb.NewSQLError(sqldb.ER_MALFORMED_PACKET, "extracting col 0c failed") + } + + // 2 character set + charset, err := buff.ReadU16() + if err != nil { + return nil, sqldb.NewSQLError(sqldb.ER_MALFORMED_PACKET, "extracting col charset failed") + } + field.Charset = uint32(charset) + + // 4 column length + if field.ColumnLength, err = buff.ReadU32(); err != nil { + return nil, sqldb.NewSQLError(sqldb.ER_MALFORMED_PACKET, "extracting col columnlength failed") + } + + // 1 type + t, err := 
buff.ReadU8() + if err != nil { + return nil, sqldb.NewSQLError(sqldb.ER_MALFORMED_PACKET, "extracting col type failed") + } + + // 2 flags + flags, err := buff.ReadU16() + if err != nil { + return nil, sqldb.NewSQLError(sqldb.ER_MALFORMED_PACKET, "extracting col flags failed") + } + field.Flags = uint32(flags) + + // Convert MySQL type + if field.Type, err = sqltypes.MySQLToType(int64(t), int64(field.Flags)); err != nil { + return nil, sqldb.NewSQLError(sqldb.ER_MALFORMED_PACKET, "MySQLToType(%v,%v) failed: %v", t, field.Flags, err) + } + + // 1 Decimals + decimals, err := buff.ReadU8() + if err != nil { + return nil, sqldb.NewSQLError(sqldb.ER_MALFORMED_PACKET, "extracting col type failed") + } + field.Decimals = uint32(decimals) + + // 2 Filler and Default Values is ignored + // + return field, nil +} + +// PackColumn used to pack the column packet. +func PackColumn(field *querypb.Field) []byte { + typ, flags := sqltypes.TypeToMySQL(field.Type) + if field.Flags != 0 { + flags = int64(field.Flags) + } + + buf := common.NewBuffer(256) + + // lenenc_str Catalog, always 'def' + buf.WriteLenEncodeString("def") + + // lenenc_str Schema + buf.WriteLenEncodeString(field.Database) + + // lenenc_str Table + buf.WriteLenEncodeString(field.Table) + + // lenenc_str Org_Table + buf.WriteLenEncodeString(field.OrgTable) + + // lenenc_str Name + buf.WriteLenEncodeString(field.Name) + + // lenenc_str Org_Name + buf.WriteLenEncodeString(field.OrgName) + + // lenenc_int length of fixed-length fields [0c] + buf.WriteLenEncode(uint64(0x0c)) + + // 2 character set + buf.WriteU16(uint16(field.Charset)) + + // 4 column length + buf.WriteU32(field.ColumnLength) + + // 1 type + buf.WriteU8(byte(typ)) + + // 2 flags + buf.WriteU16(uint16(flags)) + + //1 Decimals + buf.WriteU8(uint8(field.Decimals)) + + // 2 filler [00] [00] + buf.WriteU16(uint16(0)) + return buf.Datas() +} diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/proto/column_test.go 
b/src/vendor/github.com/xelabs/go-mysqlstack/proto/column_test.go new file mode 100644 index 00000000..7a27cf23 --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/proto/column_test.go @@ -0,0 +1,124 @@ +/* + * go-mysqlstack + * xelabs.org + * + * Copyright (c) XeLabs + * GPL License + * + */ + +package proto + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/xelabs/go-mysqlstack/common" + "github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes" + + querypb "github.com/xelabs/go-mysqlstack/sqlparser/depends/query" +) + +func TestColumnCount(t *testing.T) { + payload := []byte{ + 0x02, + } + + want := uint64(2) + got, err := ColumnCount(payload) + assert.Nil(t, err) + assert.Equal(t, want, got) +} + +func TestColumn(t *testing.T) { + want := &querypb.Field{ + Database: "test", + Table: "t1", + OrgTable: "t1", + Name: "a", + OrgName: "a", + Charset: 11, + ColumnLength: 11, + Type: sqltypes.Int32, + Flags: 11, + } + + datas := PackColumn(want) + got, err := UnpackColumn(datas) + assert.Nil(t, err) + assert.Equal(t, want, got) +} + +func TestColumnUnPackError(t *testing.T) { + // NULL + f0 := func(buff *common.Buffer) { + } + + // Write catalog. + f1 := func(buff *common.Buffer) { + buff.WriteLenEncodeString("def") + } + + // Write schema. + f2 := func(buff *common.Buffer) { + buff.WriteLenEncodeString("sbtest") + } + + // Write table. + f3 := func(buff *common.Buffer) { + buff.WriteLenEncodeString("table1") + } + + // Write org table. + f4 := func(buff *common.Buffer) { + buff.WriteLenEncodeString("orgtable1") + } + + // Write Name. + f5 := func(buff *common.Buffer) { + buff.WriteLenEncodeString("name") + } + + // Write Org Name. + f6 := func(buff *common.Buffer) { + buff.WriteLenEncodeString("name") + } + + // Write length. + f7 := func(buff *common.Buffer) { + buff.WriteLenEncode(0x0c) + } + + // Write Charset. + f8 := func(buff *common.Buffer) { + buff.WriteU16(uint16(1)) + } + + // Write Column length. 
+ f9 := func(buff *common.Buffer) { + buff.WriteU32(uint32(1)) + } + + // Write type. + f10 := func(buff *common.Buffer) { + buff.WriteU8(0x01) + } + + // Write flags + f11 := func(buff *common.Buffer) { + buff.WriteU16(uint16(1)) + } + + buff := common.NewBuffer(32) + fs := []func(buff *common.Buffer){f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11} + for i := 0; i < len(fs); i++ { + _, err := UnpackColumn(buff.Datas()) + assert.NotNil(t, err) + fs[i](buff) + } + + { + _, err := UnpackColumn(buff.Datas()) + assert.NotNil(t, err) + } +} diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/proto/const.go b/src/vendor/github.com/xelabs/go-mysqlstack/proto/const.go new file mode 100644 index 00000000..400475bc --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/proto/const.go @@ -0,0 +1,47 @@ +/* + * go-mysqlstack + * xelabs.org + * + * Copyright (c) XeLabs + * GPL License + * + */ + +package proto + +import ( + "github.com/xelabs/go-mysqlstack/sqldb" +) + +const ( + // DefaultAuthPluginName is the default plugin name. + DefaultAuthPluginName = "mysql_native_password" + + // DefaultServerCapability is the default server capability. + DefaultServerCapability = sqldb.CLIENT_LONG_PASSWORD | + sqldb.CLIENT_LONG_FLAG | + sqldb.CLIENT_CONNECT_WITH_DB | + sqldb.CLIENT_PROTOCOL_41 | + sqldb.CLIENT_TRANSACTIONS | + sqldb.CLIENT_MULTI_STATEMENTS | + sqldb.CLIENT_PLUGIN_AUTH | + sqldb.CLIENT_DEPRECATE_EOF | + sqldb.CLIENT_SECURE_CONNECTION + + // DefaultClientCapability is the default client capability. + DefaultClientCapability = sqldb.CLIENT_LONG_PASSWORD | + sqldb.CLIENT_LONG_FLAG | + sqldb.CLIENT_PROTOCOL_41 | + sqldb.CLIENT_TRANSACTIONS | + sqldb.CLIENT_MULTI_STATEMENTS | + sqldb.CLIENT_PLUGIN_AUTH | + sqldb.CLIENT_DEPRECATE_EOF | + sqldb.CLIENT_SECURE_CONNECTION +) + +var ( + // DefaultSalt is the default salt bytes. 
+ DefaultSalt = []byte{ + 0x77, 0x63, 0x6a, 0x6d, 0x61, 0x22, 0x23, 0x27, // first part + 0x38, 0x26, 0x55, 0x58, 0x3b, 0x5d, 0x44, 0x78, 0x53, 0x73, 0x6b, 0x41} +) diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/proto/eof.go b/src/vendor/github.com/xelabs/go-mysqlstack/proto/eof.go new file mode 100644 index 00000000..5c963f83 --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/proto/eof.go @@ -0,0 +1,15 @@ +/* + * go-mysqlstack + * xelabs.org + * + * Copyright (c) XeLabs + * GPL License + * + */ + +package proto + +const ( + // EOF_PACKET is the EOF packet. + EOF_PACKET byte = 0xfe +) diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/proto/err.go b/src/vendor/github.com/xelabs/go-mysqlstack/proto/err.go new file mode 100644 index 00000000..ab8fd15d --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/proto/err.go @@ -0,0 +1,84 @@ +/* + * go-mysqlstack + * xelabs.org + * + * Copyright (c) XeLabs + * GPL License + * + */ + +package proto + +import ( + "github.com/xelabs/go-mysqlstack/common" + "github.com/xelabs/go-mysqlstack/sqldb" +) + +const ( + // ERR_PACKET is the error packet byte. + ERR_PACKET byte = 0xff +) + +// ERR is the error packet. +type ERR struct { + Header byte // always 0xff + ErrorCode uint16 + SQLState string + ErrorMessage string +} + +// UnPackERR parses the error packet and returns a sqldb.SQLError. 
+// https://dev.mysql.com/doc/internals/en/packet-ERR_Packet.html +func UnPackERR(data []byte) error { + var err error + e := &ERR{} + buf := common.ReadBuffer(data) + if e.Header, err = buf.ReadU8(); err != nil { + return sqldb.NewSQLError(sqldb.ER_MALFORMED_PACKET, "invalid error packet header: %v", data) + } + if e.Header != ERR_PACKET { + return sqldb.NewSQLError(sqldb.ER_MALFORMED_PACKET, "invalid error packet header: %v", e.Header) + } + if e.ErrorCode, err = buf.ReadU16(); err != nil { + return sqldb.NewSQLError(sqldb.ER_MALFORMED_PACKET, "invalid error packet code: %v", data) + } + + // Skip SQLStateMarker + if _, err = buf.ReadString(1); err != nil { + return sqldb.NewSQLError(sqldb.ER_MALFORMED_PACKET, "invalid error packet marker: %v", data) + } + if e.SQLState, err = buf.ReadString(5); err != nil { + return sqldb.NewSQLError(sqldb.ER_MALFORMED_PACKET, "invalid error packet sqlstate: %v", data) + } + msgLen := len(data) - buf.Seek() + if e.ErrorMessage, err = buf.ReadString(msgLen); err != nil { + return sqldb.NewSQLError(sqldb.ER_MALFORMED_PACKET, "invalid error packet message: %v", data) + } + return sqldb.NewSQLError1(e.ErrorCode, e.SQLState, "%s", e.ErrorMessage) +} + +// PackERR used to pack the error packet. +func PackERR(e *ERR) []byte { + buf := common.NewBuffer(64) + + buf.WriteU8(ERR_PACKET) + + // error code + buf.WriteU16(e.ErrorCode) + + // sql-state marker # + buf.WriteU8('#') + + // sql-state (?) 
5 ascii bytes + if e.SQLState == "" { + e.SQLState = "HY000" + } + if len(e.SQLState) != 5 { + panic("sqlState has to be 5 characters long") + } + buf.WriteString(e.SQLState) + + // error msg + buf.WriteString(e.ErrorMessage) + return buf.Datas() +} diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/proto/err_test.go b/src/vendor/github.com/xelabs/go-mysqlstack/proto/err_test.go new file mode 100644 index 00000000..80285dfa --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/proto/err_test.go @@ -0,0 +1,99 @@ +/* + * go-mysqlstack + * xelabs.org + * + * Copyright (c) XeLabs + * GPL License + * + */ + +package proto + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/xelabs/go-mysqlstack/common" + "github.com/xelabs/go-mysqlstack/sqldb" +) + +func TestERR(t *testing.T) { + { + buff := common.NewBuffer(32) + + // header + buff.WriteU8(0xff) + // error_code + buff.WriteU16(0x01) + // sql_state_marker + buff.WriteString("#") + // sql_state + buff.WriteString("ABCDE") + buff.WriteString("ERROR") + + e := &ERR{} + e.Header = 0xff + e.ErrorCode = 0x1 + e.SQLState = "ABCDE" + e.ErrorMessage = "ERROR" + want := sqldb.NewSQLError1(e.ErrorCode, e.SQLState, "%s", e.ErrorMessage) + got := UnPackERR(buff.Datas()) + assert.Equal(t, want, got) + } + + { + e := &ERR{} + e.Header = 0xff + e.ErrorCode = 0x1 + e.ErrorMessage = "ERROR" + datas := PackERR(e) + want := sqldb.NewSQLError1(e.ErrorCode, e.SQLState, "%s", e.ErrorMessage) + got := UnPackERR(datas) + assert.Equal(t, want, got) + } +} + +func TestERRUnPackError(t *testing.T) { + // header error + { + buff := common.NewBuffer(32) + + // header + buff.WriteU8(0x01) + + err := UnPackERR(buff.Datas()) + assert.NotNil(t, err) + } + + // NULL + f0 := func(buff *common.Buffer) { + } + + // Write error header. + f1 := func(buff *common.Buffer) { + buff.WriteU8(0xff) + } + + // Write error code. + f2 := func(buff *common.Buffer) { + buff.WriteU16(0x01) + } + + // Write SQLStateMarker. 
+ f3 := func(buff *common.Buffer) { + buff.WriteU8('#') + } + + // Write SQLState. + f4 := func(buff *common.Buffer) { + buff.WriteString("xxxxx") + } + + buff := common.NewBuffer(32) + fs := []func(buff *common.Buffer){f0, f1, f2, f3, f4} + for i := 0; i < len(fs); i++ { + err := UnPackERR(buff.Datas()) + assert.NotNil(t, err) + fs[i](buff) + } +} diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/proto/greeting.go b/src/vendor/github.com/xelabs/go-mysqlstack/proto/greeting.go new file mode 100644 index 00000000..7cec2c21 --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/proto/greeting.go @@ -0,0 +1,221 @@ +/* + * go-mysqlstack + * xelabs.org + * + * Copyright (c) XeLabs + * GPL License + * + */ + +package proto + +import ( + "math/rand" + "time" + + "github.com/xelabs/go-mysqlstack/common" + "github.com/xelabs/go-mysqlstack/sqldb" +) + +// Greeting used for greeting packet. +type Greeting struct { + protocolVersion uint8 + Charset uint8 + + // StatusFlags are the status flags we will base our returned flags on. + // It is only used by the server. + // SERVER_STATUS_AUTOCOMMIT is default. + status uint16 + + // Capabilities is the current set of features this connection + // is using. It is the features that are both supported by + // the client and the server, and currently in use. + // It is set after the initial handshake. + Capability uint32 + ConnectionID uint32 + serverVersion string + authPluginName string + Salt []byte +} + +// NewGreeting creates a new Greeting. +func NewGreeting(connectionID uint32) *Greeting { + greeting := &Greeting{ + protocolVersion: 10, + serverVersion: "5.7-Radon-1.0", + ConnectionID: connectionID, + Capability: DefaultServerCapability, + Charset: sqldb.CharacterSetUtf8, + status: sqldb.SERVER_STATUS_AUTOCOMMIT, + Salt: make([]byte, 20), + } + + // Generate the rand salts, range [1, 123]. 
+ for i := 0; i < len(greeting.Salt); i++ { + greeting.Salt[i] = byteRand(1, 123) + } + return greeting +} + +func byteRand(min int, max int) byte { + rand.Seed(time.Now().UTC().UnixNano()) + return byte(min + rand.Intn(max-min)) +} + +// Status returns status of greeting. +func (g *Greeting) Status() uint16 { + return g.status +} + +// Pack used to pack the greeting packet. +// https://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::HandshakeV10 +func (g *Greeting) Pack() []byte { + // greeting buffer + buf := common.NewBuffer(256) + capLower := uint16(g.Capability) + capUpper := uint16(uint32(g.Capability) >> 16) + + // 1: [0a] protocol version + buf.WriteU8(g.protocolVersion) + + // string[NUL]: server version + buf.WriteString(g.serverVersion) + buf.WriteZero(1) + + // 4: connection id + buf.WriteU32(g.ConnectionID) + + // string[8]: auth-plugin-data-part-1 + buf.WriteBytes(g.Salt[:8]) + + // 1: [00] filler + buf.WriteZero(1) + + // 2: capability flags (lower 2 bytes) + buf.WriteU16(capLower) + + // 1: character set + buf.WriteU8(sqldb.CharacterSetUtf8) + + // 2: status flags + buf.WriteU16(g.status) + + // 2: capability flags (upper 2 bytes) + buf.WriteU16(capUpper) + + // Length of auth plugin data. + // Always 21 (8 + 13). + buf.WriteU8(21) + + // string[10]: reserved (all [00]) + buf.WriteZero(10) + + // string[$len]: auth-plugin-data-part-2 ($len=MAX(13, length of auth-plugin-data - 8)) + buf.WriteBytes(g.Salt[8:]) + buf.WriteZero(1) + + // string[NUL] auth-plugin name + pluginName := "mysql_native_password" + buf.WriteString(pluginName) + buf.WriteZero(1) + return buf.Datas() +} + +// UnPack used to unpack the greeting packet. 
+func (g *Greeting) UnPack(payload []byte) error {
+	var err error
+	buf := common.ReadBuffer(payload)
+
+	// 1: [0a] protocol version
+	if g.protocolVersion, err = buf.ReadU8(); err != nil {
+		return sqldb.NewSQLError(sqldb.ER_MALFORMED_PACKET, "extracting greeting protocol-version failed")
+	}
+
+	// string[NUL]: server version
+	if g.serverVersion, err = buf.ReadStringNUL(); err != nil {
+		return sqldb.NewSQLError(sqldb.ER_MALFORMED_PACKET, "extracting greeting server-version failed")
+	}
+
+	// 4: connection id
+	if g.ConnectionID, err = buf.ReadU32(); err != nil {
+		return sqldb.NewSQLError(sqldb.ER_MALFORMED_PACKET, "extracting greeting connection-id failed")
+	}
+
+	// string[8]: auth-plugin-data-part-1
+	var salt8 []byte
+	if salt8, err = buf.ReadBytes(8); err != nil {
+		return sqldb.NewSQLError(sqldb.ER_MALFORMED_PACKET, "extracting greeting auth-plugin-data-part-1 failed")
+	}
+	copy(g.Salt, salt8)
+
+	// 1: [00] filler
+	if err = buf.ReadZero(1); err != nil {
+		return sqldb.NewSQLError(sqldb.ER_MALFORMED_PACKET, "extracting greeting filler failed")
+	}
+
+	// 2: capability flags (lower 2 bytes)
+	var capLower uint16
+	if capLower, err = buf.ReadU16(); err != nil {
+		return sqldb.NewSQLError(sqldb.ER_MALFORMED_PACKET, "extracting greeting capability-flags failed")
+	}
+
+	// 1: character set
+	if g.Charset, err = buf.ReadU8(); err != nil {
+		return sqldb.NewSQLError(sqldb.ER_MALFORMED_PACKET, "extracting greeting charset failed")
+	}
+
+	// 2: status flags
+	if g.status, err = buf.ReadU16(); err != nil {
+		return sqldb.NewSQLError(sqldb.ER_MALFORMED_PACKET, "extracting greeting status-flags failed")
+	}
+
+	// 2: capability flags (upper 2 bytes)
+	var capUpper uint16
+	if capUpper, err = buf.ReadU16(); err != nil {
+		return sqldb.NewSQLError(sqldb.ER_MALFORMED_PACKET, "extracting greeting capability-flags-upper failed")
+	}
+	g.Capability = (uint32(capUpper) << 16) | (uint32(capLower))
+
+	// 1: length of auth-plugin-data-part-1
+	var SLEN byte
+	if (g.Capability & sqldb.CLIENT_PLUGIN_AUTH) > 0 {
+		if SLEN, err = buf.ReadU8(); err != nil {
+			return sqldb.NewSQLError(sqldb.ER_MALFORMED_PACKET, "extracting greeting auth-plugin-data length failed")
+		}
+	} else {
+		if err = buf.ReadZero(1); err != nil {
+			return sqldb.NewSQLError(sqldb.ER_MALFORMED_PACKET, "extracting greeting zero failed")
+		}
+	}
+
+	// string[10]: reserved (all [00])
+	if err = buf.ReadZero(10); err != nil {
+		return sqldb.NewSQLError(sqldb.ER_MALFORMED_PACKET, "extracting greeting reserved failed")
+	}
+
+	// string[$len]: auth-plugin-data-part-2 ($len=MAX(13, length of auth-plugin-data - 8))
+	if (g.Capability & sqldb.CLIENT_SECURE_CONNECTION) > 0 {
+		read := int(SLEN) - 8
+		if read <= 0 || read > 13 {
+			read = 13
+		}
+		var salt2 []byte
+		if salt2, err = buf.ReadBytes(read); err != nil {
+			return sqldb.NewSQLError(sqldb.ER_MALFORMED_PACKET, "extracting greeting salt2 failed")
+		}
+
+		// The last byte has to be 0, and is not part of the data.
+		if salt2[read-1] != 0 {
+			return sqldb.NewSQLError(sqldb.ER_MALFORMED_PACKET, "extracting greeting auth-plugin-data-part-2 is not 0 terminated")
+		}
+		copy(g.Salt[8:], salt2[:read-1])
+	}
+
+	// string[NUL] auth-plugin name
+	if (g.Capability & sqldb.CLIENT_PLUGIN_AUTH) > 0 {
+		if g.authPluginName, err = buf.ReadStringNUL(); err != nil {
+			return sqldb.NewSQLError(sqldb.ER_MALFORMED_PACKET, "extracting greeting auth-plugin-name failed")
+		}
+	}
+	return nil
+}
diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/proto/greeting_test.go b/src/vendor/github.com/xelabs/go-mysqlstack/proto/greeting_test.go
new file mode 100644
index 00000000..f229cf3c
--- /dev/null
+++ b/src/vendor/github.com/xelabs/go-mysqlstack/proto/greeting_test.go
@@ -0,0 +1,148 @@
+/*
+ * go-mysqlstack
+ * xelabs.org
+ *
+ * Copyright (c) XeLabs
+ * GPL License
+ *
+ */
+
+package proto
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/xelabs/go-mysqlstack/common"
+	"github.com/xelabs/go-mysqlstack/sqldb"
+)
+
+func TestGreetingUnPack(t *testing.T) {
+	want := NewGreeting(4)
+	got := NewGreeting(4)
+
+	// normal
+	{
+		want.authPluginName = "mysql_native_password"
+		err := got.UnPack(want.Pack())
+		assert.Nil(t, err)
+		assert.Equal(t, want, got)
+		assert.Equal(t, sqldb.SERVER_STATUS_AUTOCOMMIT, int(got.Status()))
+	}
+
+	// 1. off sqldb.CLIENT_PLUGIN_AUTH
+	{
+		want.Capability = want.Capability &^ sqldb.CLIENT_PLUGIN_AUTH
+		want.authPluginName = "mysql_native_password"
+		err := got.UnPack(want.Pack())
+		assert.Nil(t, err)
+		assert.Equal(t, want, got)
+	}
+
+	// 2. off sqldb.CLIENT_SECURE_CONNECTION
+	{
+		want.Capability &= ^sqldb.CLIENT_SECURE_CONNECTION
+		want.authPluginName = "mysql_native_password"
+		err := got.UnPack(want.Pack())
+		assert.Nil(t, err)
+		assert.Equal(t, want, got)
+	}
+
+	// 3. off sqldb.CLIENT_PLUGIN_AUTH && sqldb.CLIENT_SECURE_CONNECTION
+	{
+		want.Capability &= (^sqldb.CLIENT_PLUGIN_AUTH ^ sqldb.CLIENT_SECURE_CONNECTION)
+		want.authPluginName = "mysql_native_password"
+		err := got.UnPack(want.Pack())
+		assert.Nil(t, err)
+		assert.Equal(t, want, got)
+	}
+}
+
+func TestGreetingUnPackError(t *testing.T) {
+	// NULL
+	f0 := func(buff *common.Buffer) {
+	}
+
+	// Write protocol version.
+	f1 := func(buff *common.Buffer) {
+		buff.WriteU8(0x01)
+	}
+
+	// Write server version.
+	f2 := func(buff *common.Buffer) {
+		buff.WriteString("5.7.17-11")
+		buff.WriteZero(1)
+	}
+
+	// Write connection ID.
+	f3 := func(buff *common.Buffer) {
+		buff.WriteU32(uint32(1))
+	}
+
+	// Write salt[8].
+	f4 := func(buff *common.Buffer) {
+		salt8 := make([]byte, 8)
+		buff.WriteBytes(salt8)
+	}
+
+	// Write filler.
+	f5 := func(buff *common.Buffer) {
+		buff.WriteZero(1)
+	}
+
+	capability := DefaultServerCapability
+	capLower := uint16(capability)
+	capUpper := uint16(uint32(capability) >> 16)
+
+	// Write capability lower 2 bytes
+	f6 := func(buff *common.Buffer) {
+		buff.WriteU16(capLower)
+	}
+
+	// Write charset.
+ f7 := func(buff *common.Buffer) { + buff.WriteU8(0x01) + } + + // Write statu flags + f8 := func(buff *common.Buffer) { + buff.WriteU16(uint16(1)) + } + + // Write capability upper 2 bytes + f9 := func(buff *common.Buffer) { + buff.WriteU16(capUpper) + } + + // Write length of auth-plugin + f10 := func(buff *common.Buffer) { + buff.WriteU8(0x01) + } + + // Write reserved. + f11 := func(buff *common.Buffer) { + buff.WriteZero(10) + } + + // Write auth plugin data part 2 + f12 := func(buff *common.Buffer) { + data2 := make([]byte, 13) + data2[12] = 0x01 + buff.WriteBytes(data2) + } + + buff := common.NewBuffer(32) + fs := []func(buff *common.Buffer){f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, f12} + for i := 0; i < len(fs); i++ { + greeting := NewGreeting(0) + err := greeting.UnPack(buff.Datas()) + assert.NotNil(t, err) + fs[i](buff) + } + + { + greeting := NewGreeting(0) + err := greeting.UnPack(buff.Datas()) + assert.NotNil(t, err) + } +} diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/proto/ok.go b/src/vendor/github.com/xelabs/go-mysqlstack/proto/ok.go new file mode 100644 index 00000000..ce8365d8 --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/proto/ok.go @@ -0,0 +1,87 @@ +/* + * go-mysqlstack + * xelabs.org + * + * Copyright (c) XeLabs + * GPL License + * + */ + +package proto + +import ( + "github.com/xelabs/go-mysqlstack/common" + "github.com/xelabs/go-mysqlstack/sqldb" +) + +const ( + // OK_PACKET is the OK byte. + OK_PACKET byte = 0x00 +) + +// OK used for OK packet. +type OK struct { + Header byte // 0x00 + AffectedRows uint64 + LastInsertID uint64 + StatusFlags uint16 + Warnings uint16 +} + +// UnPackOK used to unpack the OK packet. 
+// https://dev.mysql.com/doc/internals/en/packet-OK_Packet.html +func UnPackOK(data []byte) (*OK, error) { + var err error + o := &OK{} + buf := common.ReadBuffer(data) + + // header + if o.Header, err = buf.ReadU8(); err != nil { + return nil, sqldb.NewSQLError(sqldb.ER_MALFORMED_PACKET, "invalid ok packet header: %v", data) + } + if o.Header != OK_PACKET { + return nil, sqldb.NewSQLError(sqldb.ER_MALFORMED_PACKET, "invalid ok packet header: %v", o.Header) + } + + // AffectedRows + if o.AffectedRows, err = buf.ReadLenEncode(); err != nil { + return nil, sqldb.NewSQLError(sqldb.ER_MALFORMED_PACKET, "invalid ok packet affectedrows: %v", data) + } + + // LastInsertID + if o.LastInsertID, err = buf.ReadLenEncode(); err != nil { + return nil, sqldb.NewSQLError(sqldb.ER_MALFORMED_PACKET, "invalid ok packet lastinsertid: %v", data) + } + + // Status + if o.StatusFlags, err = buf.ReadU16(); err != nil { + return nil, sqldb.NewSQLError(sqldb.ER_MALFORMED_PACKET, "invalid ok packet statusflags: %v", data) + } + + // Warnings + if o.Warnings, err = buf.ReadU16(); err != nil { + return nil, sqldb.NewSQLError(sqldb.ER_MALFORMED_PACKET, "invalid ok packet warnings: %v", data) + } + return o, nil +} + +// PackOK used to pack the OK packet. 
+func PackOK(o *OK) []byte { + buf := common.NewBuffer(64) + + // OK + buf.WriteU8(OK_PACKET) + + // affected rows + buf.WriteLenEncode(o.AffectedRows) + + // last insert id + buf.WriteLenEncode(o.LastInsertID) + + // status + buf.WriteU16(o.StatusFlags) + + // warnings + buf.WriteU16(o.Warnings) + return buf.Datas() +} diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/proto/ok_test.go b/src/vendor/github.com/xelabs/go-mysqlstack/proto/ok_test.go new file mode 100644 index 00000000..e3d85cce --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/proto/ok_test.go @@ -0,0 +1,106 @@ +/* + * go-mysqlstack + * xelabs.org + * + * Copyright (c) XeLabs + * GPL License + * + */ + +package proto + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/xelabs/go-mysqlstack/common" +) + +func TestOK(t *testing.T) { + { + buff := common.NewBuffer(32) + + // header + buff.WriteU8(0x00) + // affected_rows + buff.WriteLenEncode(uint64(3)) + // last_insert_id + buff.WriteLenEncode(uint64(40000000000)) + + // status_flags + buff.WriteU16(0x01) + // warnings + buff.WriteU16(0x02) + + want := &OK{} + want.AffectedRows = 3 + want.LastInsertID = 40000000000 + want.StatusFlags = 1 + want.Warnings = 2 + + got, err := UnPackOK(buff.Datas()) + assert.Nil(t, err) + assert.Equal(t, want, got) + } + + { + want := &OK{} + want.AffectedRows = 3 + want.LastInsertID = 40000000000 + want.StatusFlags = 1 + want.Warnings = 2 + datas := PackOK(want) + + got, err := UnPackOK(datas) + assert.Nil(t, err) + assert.Equal(t, want, got) + } +} + +func TestOKUnPackError(t *testing.T) { + // header error + { + buff := common.NewBuffer(32) + // header + buff.WriteU8(0x99) + _, err := UnPackOK(buff.Datas()) + assert.NotNil(t, err) + } + + // NULL + f0 := func(buff *common.Buffer) { + } + + // Write OK header. + f1 := func(buff *common.Buffer) { + buff.WriteU8(0x00) + } + + // Write AffectedRows. 
+ f2 := func(buff *common.Buffer) { + buff.WriteLenEncode(uint64(3)) + } + + // Write LastInsertID. + f3 := func(buff *common.Buffer) { + buff.WriteLenEncode(uint64(3)) + } + + // Write Status. + f4 := func(buff *common.Buffer) { + buff.WriteU16(0x01) + } + + buff := common.NewBuffer(32) + fs := []func(buff *common.Buffer){f0, f1, f2, f3, f4} + for i := 0; i < len(fs); i++ { + _, err := UnPackOK(buff.Datas()) + assert.NotNil(t, err) + fs[i](buff) + } + + { + _, err := UnPackOK(buff.Datas()) + assert.NotNil(t, err) + } +} diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/sqldb/constants.go b/src/vendor/github.com/xelabs/go-mysqlstack/sqldb/constants.go new file mode 100644 index 00000000..a9a20a18 --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/sqldb/constants.go @@ -0,0 +1,340 @@ +/* + * This code was derived from https://github.com/youtube/vitess. + * + * go-mysqlstack + * xelabs.org + * + * Copyright (c) XeLabs + * GPL License + * + */ + +package sqldb + +/***************************************************/ +// https://dev.mysql.com/doc/internals/en/command-phase.html +// include/my_command.h +const ( + COM_SLEEP byte = iota + COM_QUIT + COM_INIT_DB + COM_QUERY + COM_FIELD_LIST + COM_CREATE_DB + COM_DROP_DB + COM_REFRESH + COM_SHUTDOWN + COM_STATISTICS + COM_PROCESS_INFO + COM_CONNECT + COM_PROCESS_KILL + COM_DEBUG + COM_PING + COM_TIME + COM_DELAYED_INSERT + COM_CHANGE_USER + COM_BINLOG_DUMP + COM_TABLE_DUMP + COM_CONNECT_OUT + COM_REGISTER_SLAVE + COM_STMT_PREPARE + COM_STMT_EXECUTE + COM_STMT_SEND_LONG_DATA + COM_STMT_CLOSE + COM_STMT_RESET + COM_SET_OPTION + COM_STMT_FETCH + COM_DAEMON + COM_BINLOG_DUMP_GTID + COM_RESET_CONNECTION +) + +// CommandString used for translate cmd to string. 
+func CommandString(cmd byte) string { + switch cmd { + case COM_SLEEP: + return "COM_SLEEP" + case COM_QUIT: + return "COM_QUIT" + case COM_INIT_DB: + return "COM_INIT_DB" + case COM_QUERY: + return "COM_QUERY" + case COM_FIELD_LIST: + return "COM_FIELD_LIST" + case COM_CREATE_DB: + return "COM_CREATE_DB" + case COM_DROP_DB: + return "COM_DROP_DB" + case COM_REFRESH: + return "COM_REFRESH" + case COM_SHUTDOWN: + return "COM_SHUTDOWN" + case COM_STATISTICS: + return "COM_STATISTICS" + case COM_PROCESS_INFO: + return "COM_PROCESS_INFO" + case COM_CONNECT: + return "COM_CONNECT" + case COM_PROCESS_KILL: + return "COM_PROCESS_KILL" + case COM_DEBUG: + return "COM_DEBUG" + case COM_PING: + return "COM_PING" + case COM_TIME: + return "COM_TIME" + case COM_DELAYED_INSERT: + return "COM_DELAYED_INSERT" + case COM_CHANGE_USER: + return "COM_CHANGE_USER" + case COM_BINLOG_DUMP: + return "COM_BINLOG_DUMP" + case COM_TABLE_DUMP: + return "COM_TABLE_DUMP" + case COM_CONNECT_OUT: + return "COM_CONNECT_OUT" + case COM_REGISTER_SLAVE: + return "COM_REGISTER_SLAVE" + case COM_STMT_PREPARE: + return "COM_STMT_PREPARE" + case COM_STMT_EXECUTE: + return "COM_STMT_EXECUTE" + case COM_STMT_SEND_LONG_DATA: + return "COM_STMT_SEND_LONG_DATA" + case COM_STMT_CLOSE: + return "COM_STMT_CLOSE" + case COM_STMT_RESET: + return "COM_STMT_RESET" + case COM_SET_OPTION: + return "COM_SET_OPTION" + case COM_STMT_FETCH: + return "COM_STMT_FETCH" + case COM_DAEMON: + return "COM_DAEMON" + case COM_BINLOG_DUMP_GTID: + return "COM_BINLOG_DUMP_GTID" + case COM_RESET_CONNECTION: + return "COM_RESET_CONNECTION" + } + return "UNKNOWN" +} + +// https://dev.mysql.com/doc/internals/en/capability-flags.html +// include/mysql_com.h +const ( + // new more secure password + CLIENT_LONG_PASSWORD = 1 + + // Found instead of affected rows + CLIENT_FOUND_ROWS = uint32(1 << 1) + + // Get all column flags + CLIENT_LONG_FLAG = uint32(1 << 2) + + // One can specify db on connect + CLIENT_CONNECT_WITH_DB = uint32(1 << 3) 
+ + // Don't allow database.table.column + CLIENT_NO_SCHEMA = uint32(1 << 4) + + // Can use compression protocol + CLIENT_COMPRESS = uint32(1 << 5) + + // Odbc client + CLIENT_ODBC = uint32(1 << 6) + + // Can use LOAD DATA LOCAL + CLIENT_LOCAL_FILES = uint32(1 << 7) + + // Ignore spaces before '(' + CLIENT_IGNORE_SPACE = uint32(1 << 8) + + // New 4.1 protocol + CLIENT_PROTOCOL_41 = uint32(1 << 9) + + // This is an interactive client + CLIENT_INTERACTIVE = uint32(1 << 10) + + // Switch to SSL after handshake + CLIENT_SSL = uint32(1 << 11) + + // IGNORE sigpipes + CLIENT_IGNORE_SIGPIPE = uint32(1 << 12) + + // Client knows about transactions + CLIENT_TRANSACTIONS = uint32(1 << 13) + + // Old flag for 4.1 protocol + CLIENT_RESERVED = uint32(1 << 14) + + // Old flag for 4.1 authentication + CLIENT_SECURE_CONNECTION = uint32(1 << 15) + + // Enable/disable multi-stmt support + CLIENT_MULTI_STATEMENTS = uint32(1 << 16) + + // Enable/disable multi-results + CLIENT_MULTI_RESULTS = uint32(1 << 17) + + // Multi-results in PS-protocol + CLIENT_PS_MULTI_RESULTS = uint32(1 << 18) + + // Client supports plugin authentication + CLIENT_PLUGIN_AUTH = uint32(1 << 19) + + // Client supports connection attributes + CLIENT_CONNECT_ATTRS = uint32(1 << 20) + + // Enable authentication response packet to be larger than 255 bytes + CLIENT_PLUGIN_AUTH_LENENC_CLIENT_DATA = uint32(1 << 21) + + // Don't close the connection for a connection with expired password + CLIENT_CAN_HANDLE_EXPIRED_PASSWORDS = uint32(1 << 22) + + // Capable of handling server state change information. Its a hint to the + // server to include the state change information in Ok packet. + CLIENT_SESSION_TRACK = uint32(1 << 23) + + //Client no longer needs EOF packet + CLIENT_DEPRECATE_EOF = uint32(1 << 24) +) + +const ( + // SSUnknownSQLState is the default SQLState. + SSUnknownSQLState = "HY000" +) + +// Status flags. They are returned by the server in a few cases. 
+// Originally found in include/mysql/mysql_com.h +// See http://dev.mysql.com/doc/internals/en/status-flags.html +const ( + // SERVER_STATUS_AUTOCOMMIT is the default status of auto-commit. + SERVER_STATUS_AUTOCOMMIT = 0x0002 +) + +// A few interesting character set values. +// See http://dev.mysql.com/doc/internals/en/character-set.html#packet-Protocol::CharacterSet +const ( + // CharacterSetUtf8 is for UTF8. We use this by default. + CharacterSetUtf8 = 33 + + // CharacterSetBinary is for binary. Use by integer fields for instance. + CharacterSetBinary = 63 +) + +// CharacterSetMap maps the charset name (used in ConnParams) to the +// integer value. Interesting ones have their own constant above. +var CharacterSetMap = map[string]uint8{ + "big5": 1, + "dec8": 3, + "cp850": 4, + "hp8": 6, + "koi8r": 7, + "latin1": 8, + "latin2": 9, + "swe7": 10, + "ascii": 11, + "ujis": 12, + "sjis": 13, + "hebrew": 16, + "tis620": 18, + "euckr": 19, + "koi8u": 22, + "gb2312": 24, + "greek": 25, + "cp1250": 26, + "gbk": 28, + "latin5": 30, + "armscii8": 32, + "utf8": CharacterSetUtf8, + "ucs2": 35, + "cp866": 36, + "keybcs2": 37, + "macce": 38, + "macroman": 39, + "cp852": 40, + "latin7": 41, + "utf8mb4": 45, + "cp1251": 51, + "utf16": 54, + "utf16le": 56, + "cp1256": 57, + "cp1257": 59, + "utf32": 60, + "binary": CharacterSetBinary, + "geostd8": 92, + "cp932": 95, + "eucjpms": 97, +} + +const ( + // Error codes for server-side errors. + // Originally found in include/mysql/mysqld_error.h + + // ER_ERROR_FIRST enum. + ER_ERROR_FIRST uint16 = 1000 + + // ER_CON_COUNT_ERROR enum. + ER_CON_COUNT_ERROR uint16 = 1040 + + // ER_ACCESS_DENIED_ERROR enum. + ER_ACCESS_DENIED_ERROR = 1045 + + // ER_NO_DB_ERROR enum. + ER_NO_DB_ERROR = 1046 + + // ER_BAD_DB_ERROR enum. + ER_BAD_DB_ERROR = 1049 + + // ER_UNKNOWN_ERROR enum. + ER_UNKNOWN_ERROR = 1105 + + // ER_HOST_NOT_PRIVILEGED enum. + ER_HOST_NOT_PRIVILEGED = 1130 + + // ER_NO_SUCH_TABLE enum. 
+ ER_NO_SUCH_TABLE = 1146 + + // ER_SYNTAX_ERROR enum. + ER_SYNTAX_ERROR = 1149 + + // ER_SPECIFIC_ACCESS_DENIED_ERROR enum. + ER_SPECIFIC_ACCESS_DENIED_ERROR = 1227 + + // ER_OPTION_PREVENTS_STATEMENT enum. + ER_OPTION_PREVENTS_STATEMENT = 1290 + + // ER_MALFORMED_PACKET enum. + ER_MALFORMED_PACKET = 1835 + + // Error codes for client-side errors. + // Originally found in include/mysql/errmsg.h + // Used when: + // - the client cannot write an initial auth packet. + // - the client cannot read an initial auth packet. + // - the client cannot read a response from the server. + + // CR_SERVER_LOST enum. + CR_SERVER_LOST = 2013 + + // CR_VERSION_ERROR enum. + // This is returned if the server versions don't match what we support. + CR_VERSION_ERROR = 2007 +) + +// SQLErrors is the list of sql errors. +var SQLErrors = map[uint16]*SQLError{ + ER_CON_COUNT_ERROR: &SQLError{Num: ER_CON_COUNT_ERROR, State: "08004", Message: "Too many connections"}, + ER_ACCESS_DENIED_ERROR: &SQLError{Num: ER_ACCESS_DENIED_ERROR, State: "28000", Message: "Access denied for user '%-.48s'@'%-.64s' (using password: %s)"}, + ER_NO_DB_ERROR: &SQLError{Num: ER_NO_DB_ERROR, State: "3D000", Message: "No database selected"}, + ER_BAD_DB_ERROR: &SQLError{Num: ER_BAD_DB_ERROR, State: "42000", Message: "Unknown database '%-.192s'"}, + ER_UNKNOWN_ERROR: &SQLError{Num: ER_UNKNOWN_ERROR, State: "HY000", Message: ""}, + ER_HOST_NOT_PRIVILEGED: &SQLError{Num: ER_HOST_NOT_PRIVILEGED, State: "HY000", Message: "Host '%-.64s' is not allowed to connect to this MySQL server"}, + ER_NO_SUCH_TABLE: &SQLError{Num: ER_NO_SUCH_TABLE, State: "42S02", Message: "Table '%s' doesn't exist"}, + ER_SYNTAX_ERROR: &SQLError{Num: ER_SYNTAX_ERROR, State: "42000", Message: "You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use, %s"}, + ER_SPECIFIC_ACCESS_DENIED_ERROR: &SQLError{Num: ER_SPECIFIC_ACCESS_DENIED_ERROR, State: "42000", Message: "Access denied; 
you need (at least one of) the %-.128s privilege(s) for this operation"}, + ER_OPTION_PREVENTS_STATEMENT: &SQLError{Num: ER_OPTION_PREVENTS_STATEMENT, State: "42000", Message: "The MySQL server is running with the %s option so it cannot execute this statement"}, + ER_MALFORMED_PACKET: &SQLError{Num: ER_MALFORMED_PACKET, State: "HY000", Message: "Malformed communication packet."}, + CR_SERVER_LOST: &SQLError{Num: CR_SERVER_LOST, State: "HY000", Message: ""}, +} diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/sqldb/constants_test.go b/src/vendor/github.com/xelabs/go-mysqlstack/sqldb/constants_test.go new file mode 100644 index 00000000..5dbf7262 --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/sqldb/constants_test.go @@ -0,0 +1,21 @@ +/* + * go-mysqlstack + * xelabs.org + * + * Copyright (c) XeLabs + * GPL License + * + */ + +package sqldb + +import ( + "testing" +) + +func TestConstants(t *testing.T) { + var i byte + for i = 0; i < COM_RESET_CONNECTION+2; i++ { + CommandString(i) + } +} diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/sqldb/sql_error.go b/src/vendor/github.com/xelabs/go-mysqlstack/sqldb/sql_error.go new file mode 100644 index 00000000..f4b9f1ec --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/sqldb/sql_error.go @@ -0,0 +1,120 @@ +// Copyright 2015, Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sqldb + +import ( + "bytes" + "fmt" + "regexp" + "strconv" +) + +const ( + // SQLStateGeneral is the SQLSTATE value for "general error". + SQLStateGeneral = "HY000" +) + +// SQLError is the error structure returned from calling a db library function +type SQLError struct { + Num uint16 + State string + Message string + Query string +} + +// NewSQLError creates new sql error. 
+func NewSQLError(number uint16, format string, args ...interface{}) *SQLError {
+	sqlErr := &SQLError{}
+	err, ok := SQLErrors[number]
+	if !ok {
+		err = SQLErrors[ER_UNKNOWN_ERROR]
+		sqlErr.Num = err.Num
+		sqlErr.State = err.State
+	} else {
+		sqlErr.Num = err.Num
+		sqlErr.State = err.State
+	}
+
+	if format != "" {
+		sqlErr.Message = fmt.Sprintf(format, args...)
+	} else {
+		sqlErr.Message = fmt.Sprintf(err.Message, args...)
+	}
+	return sqlErr
+}
+
+// NewSQLError1 creates new sql error with state.
+func NewSQLError1(number uint16, state string, format string, args ...interface{}) *SQLError {
+	return &SQLError{
+		Num:     number,
+		State:   state,
+		Message: fmt.Sprintf(format, args...),
+	}
+}
+
+// Error implements the error interface
+func (se *SQLError) Error() string {
+	buf := &bytes.Buffer{}
+	buf.WriteString(se.Message)
+
+	// Add MySQL errno and SQLSTATE in a format that we can later parse.
+	// There's no avoiding string parsing because all errors
+	// are converted to strings anyway at RPC boundaries.
+	// See NewSQLErrorFromError.
+	fmt.Fprintf(buf, " (errno %v) (sqlstate %v)", se.Num, se.State)
+
+	if se.Query != "" {
+		fmt.Fprintf(buf, " during query: %s", se.Query)
+	}
+	return buf.String()
+}
+
+var errExtract = regexp.MustCompile(`.*\(errno ([0-9]*)\) \(sqlstate ([0-9a-zA-Z]{5})\).*`)
+
+// NewSQLErrorFromError returns a *SQLError from the provided error.
+// If it's not the right type, it still tries to get it from a regexp.
+func NewSQLErrorFromError(err error) error {
+	if err == nil {
+		return nil
+	}
+
+	if serr, ok := err.(*SQLError); ok {
+		return serr
+	}
+
+	msg := err.Error()
+	match := errExtract.FindStringSubmatch(msg)
+	if len(match) < 2 {
+		// Not found, build a generic SQLError.
+		// TODO(alainjobart) maybe we can also check the canonical
+		// error code, and translate that into the right error.
+
+		// FIXME(alainjobart): 1105 is unknown error. Will
+		// merge with sqlconn later.
+ unknow := SQLErrors[ER_UNKNOWN_ERROR] + return &SQLError{ + Num: unknow.Num, + State: unknow.State, + Message: msg, + } + } + + num, err := strconv.Atoi(match[1]) + if err != nil { + unknow := SQLErrors[ER_UNKNOWN_ERROR] + return &SQLError{ + Num: unknow.Num, + State: unknow.State, + Message: msg, + } + } + + serr := &SQLError{ + Num: uint16(num), + State: match[2], + Message: msg, + } + return serr +} diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/sqldb/sql_error_test.go b/src/vendor/github.com/xelabs/go-mysqlstack/sqldb/sql_error_test.go new file mode 100644 index 00000000..b0e51338 --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/sqldb/sql_error_test.go @@ -0,0 +1,62 @@ +/* + * go-mysqlstack + * xelabs.org + * + * Copyright (c) XeLabs + * GPL License + * + */ + +package sqldb + +import ( + "testing" + + "errors" + "github.com/stretchr/testify/assert" +) + +func TestSqlError(t *testing.T) { + { + sqlerr := NewSQLError(1, "i.am.error.man") + assert.Equal(t, "i.am.error.man (errno 1105) (sqlstate HY000)", sqlerr.Error()) + } + + { + sqlerr := NewSQLError(1, "i.am.error.man%s", "xx") + assert.Equal(t, "i.am.error.manxx (errno 1105) (sqlstate HY000)", sqlerr.Error()) + } + + { + sqlerr := NewSQLError(ER_NO_DB_ERROR, "") + assert.Equal(t, "No database selected (errno 1046) (sqlstate 3D000)", sqlerr.Error()) + } +} + +func TestSqlErrorFromErr(t *testing.T) { + { + err := errors.New("errorman") + sqlerr := NewSQLErrorFromError(err) + assert.NotNil(t, sqlerr) + } + + { + err := errors.New("i.am.error.man (errno 1) (sqlstate HY000)") + sqlerr := NewSQLErrorFromError(err) + assert.NotNil(t, sqlerr) + } + + { + err := errors.New("No database selected (errno 1046) (sqlstate 3D000)") + want := &SQLError{Num: 1046, State: "3D000", Message: "No database selected (errno 1046) (sqlstate 3D000)"} + got := NewSQLErrorFromError(err) + assert.Equal(t, want, got) + } + + { + err := NewSQLError1(10086, "xx", "i.am.the.error.man.%s", "xx") + want := 
&SQLError{Num: 10086, State: "xx", Message: "i.am.the.error.man.xx"} + got := NewSQLErrorFromError(err) + assert.Equal(t, want, got) + } +} diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/Makefile b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/Makefile new file mode 100644 index 00000000..28b15802 --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/Makefile @@ -0,0 +1,11 @@ +# Copyright 2012, Google Inc. All rights reserved. +# Use of this source code is governed by a BSD-style license that can +# be found in the LICENSE file. + +MAKEFLAGS = -s + +sql.go: sql.y + goyacc -o sql.go sql.y + +clean: + rm -f y.output sql.go diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/analyzer.go b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/analyzer.go new file mode 100644 index 00000000..df5833cf --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/analyzer.go @@ -0,0 +1,222 @@ +/* +Copyright 2017 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqlparser + +// analyzer.go contains utility analysis functions. + +import ( + "errors" + "fmt" + "strings" + "unicode" + + "github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes" +) + +// These constants are used to identify the SQL statement type. 
+const ( + StmtSelect = iota + StmtInsert + StmtReplace + StmtUpdate + StmtDelete + StmtDDL + StmtBegin + StmtCommit + StmtRollback + StmtSet + StmtShow + StmtUse + StmtOther + StmtUnknown +) + +// Preview analyzes the beginning of the query using a simpler and faster +// textual comparison to identify the statement type. +func Preview(sql string) int { + trimmed := StripLeadingComments(sql) + + firstWord := trimmed + if end := strings.IndexFunc(trimmed, unicode.IsSpace); end != -1 { + firstWord = trimmed[:end] + } + + // Comparison is done in order of priority. + loweredFirstWord := strings.ToLower(firstWord) + switch loweredFirstWord { + case "select": + return StmtSelect + case "insert": + return StmtInsert + case "replace": + return StmtReplace + case "update": + return StmtUpdate + case "delete": + return StmtDelete + } + switch strings.ToLower(trimmed) { + case "begin", "start transaction": + return StmtBegin + case "commit": + return StmtCommit + case "rollback": + return StmtRollback + } + switch loweredFirstWord { + case "create", "alter", "rename", "drop": + return StmtDDL + case "set": + return StmtSet + case "show": + return StmtShow + case "use": + return StmtUse + case "analyze", "describe", "desc", "explain", "repair", "optimize", "truncate": + return StmtOther + } + return StmtUnknown +} + +// IsDML returns true if the query is an INSERT, UPDATE or DELETE statement. +func IsDML(sql string) bool { + switch Preview(sql) { + case StmtInsert, StmtReplace, StmtUpdate, StmtDelete: + return true + } + return false +} + +// GetTableName returns the table name from the SimpleTableExpr +// only if it's a simple expression. Otherwise, it returns "". +func GetTableName(node SimpleTableExpr) TableIdent { + if n, ok := node.(TableName); ok && n.Qualifier.IsEmpty() { + return n.Name + } + // sub-select or '.' expression + return NewTableIdent("") +} + +// IsColName returns true if the Expr is a *ColName. 
+func IsColName(node Expr) bool { + _, ok := node.(*ColName) + return ok +} + +// IsValue returns true if the Expr is a string, integral or value arg. +// NULL is not considered to be a value. +func IsValue(node Expr) bool { + switch v := node.(type) { + case *SQLVal: + switch v.Type { + case StrVal, HexVal, IntVal, ValArg: + return true + } + case *ValuesFuncExpr: + if v.Resolved != nil { + return IsValue(v.Resolved) + } + } + return false +} + +// IsNull returns true if the Expr is SQL NULL +func IsNull(node Expr) bool { + switch node.(type) { + case *NullVal: + return true + } + return false +} + +// IsSimpleTuple returns true if the Expr is a ValTuple that +// contains simple values or if it's a list arg. +func IsSimpleTuple(node Expr) bool { + switch vals := node.(type) { + case ValTuple: + for _, n := range vals { + if !IsValue(n) { + return false + } + } + return true + case ListArg: + return true + } + // It's a subquery + return false +} + +// NewPlanValue builds a sqltypes.PlanValue from an Expr. 
+func NewPlanValue(node Expr) (sqltypes.PlanValue, error) { + switch node := node.(type) { + case *SQLVal: + switch node.Type { + case ValArg: + return sqltypes.PlanValue{Key: string(node.Val[1:])}, nil + case IntVal: + n, err := sqltypes.NewIntegral(string(node.Val)) + if err != nil { + return sqltypes.PlanValue{}, err + } + return sqltypes.PlanValue{Value: n}, nil + case StrVal: + return sqltypes.PlanValue{Value: sqltypes.MakeTrusted(sqltypes.VarBinary, node.Val)}, nil + case HexVal: + v, err := node.HexDecode() + if err != nil { + return sqltypes.PlanValue{}, err + } + return sqltypes.PlanValue{Value: sqltypes.MakeTrusted(sqltypes.VarBinary, v)}, nil + } + case ListArg: + return sqltypes.PlanValue{ListKey: string(node[2:])}, nil + case ValTuple: + pv := sqltypes.PlanValue{ + Values: make([]sqltypes.PlanValue, 0, len(node)), + } + for _, val := range node { + innerpv, err := NewPlanValue(val) + if err != nil { + return sqltypes.PlanValue{}, err + } + if innerpv.ListKey != "" || innerpv.Values != nil { + return sqltypes.PlanValue{}, errors.New("unsupported: nested lists") + } + pv.Values = append(pv.Values, innerpv) + } + return pv, nil + case *ValuesFuncExpr: + if node.Resolved != nil { + return NewPlanValue(node.Resolved) + } + case *NullVal: + return sqltypes.PlanValue{}, nil + } + return sqltypes.PlanValue{}, fmt.Errorf("expression is too complex '%v'", String(node)) +} + +// StringIn is a convenience function that returns +// true if str matches any of the values. +func StringIn(str string, values ...string) bool { + for _, val := range values { + if str == val { + return true + } + } + return false +} diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/analyzer_test.go b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/analyzer_test.go new file mode 100644 index 00000000..471d519b --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/analyzer_test.go @@ -0,0 +1,394 @@ +/* +Copyright 2017 Google Inc. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqlparser + +import ( + "reflect" + "strings" + "testing" + + "github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes" +) + +func TestPreview(t *testing.T) { + testcases := []struct { + sql string + want int + }{ + {"select ...", StmtSelect}, + {" select ...", StmtSelect}, + {"insert ...", StmtInsert}, + {"replace ....", StmtReplace}, + {" update ...", StmtUpdate}, + {"Update", StmtUpdate}, + {"UPDATE ...", StmtUpdate}, + {"\n\t delete ...", StmtDelete}, + {"", StmtUnknown}, + {" ", StmtUnknown}, + {"begin", StmtBegin}, + {" begin", StmtBegin}, + {" begin ", StmtBegin}, + {"\n\t begin ", StmtBegin}, + {"... 
begin ", StmtUnknown}, + {"begin ...", StmtUnknown}, + {"start transaction", StmtBegin}, + {"commit", StmtCommit}, + {"rollback", StmtRollback}, + {"create", StmtDDL}, + {"alter", StmtDDL}, + {"rename", StmtDDL}, + {"drop", StmtDDL}, + {"set", StmtSet}, + {"show", StmtShow}, + {"use", StmtUse}, + {"analyze", StmtOther}, + {"describe", StmtOther}, + {"desc", StmtOther}, + {"explain", StmtOther}, + {"repair", StmtOther}, + {"optimize", StmtOther}, + {"truncate", StmtOther}, + {"unknown", StmtUnknown}, + + {"/* leading comment */ select ...", StmtSelect}, + {"/* leading comment */ /* leading comment 2 */ select ...", StmtSelect}, + {"-- leading single line comment \n select ...", StmtSelect}, + {"-- leading single line comment \n -- leading single line comment 2\n select ...", StmtSelect}, + + {"/* leading comment no end select ...", StmtUnknown}, + {"-- leading single line comment no end select ...", StmtUnknown}, + } + for _, tcase := range testcases { + if got := Preview(tcase.sql); got != tcase.want { + t.Errorf("Preview(%s): %v, want %v", tcase.sql, got, tcase.want) + } + } +} + +func TestIsDML(t *testing.T) { + testcases := []struct { + sql string + want bool + }{ + {" update ...", true}, + {"Update", true}, + {"UPDATE ...", true}, + {"\n\t delete ...", true}, + {"insert ...", true}, + {"replace ...", true}, + {"select ...", false}, + {" select ...", false}, + {"", false}, + {" ", false}, + } + for _, tcase := range testcases { + if got := IsDML(tcase.sql); got != tcase.want { + t.Errorf("IsDML(%s): %v, want %v", tcase.sql, got, tcase.want) + } + } +} + +func TestGetTableName(t *testing.T) { + testcases := []struct { + in, out string + }{{ + in: "select * from t", + out: "t", + }, { + in: "select * from t.t", + out: "", + }, { + in: "select * from (select * from t) as tt", + out: "", + }} + + for _, tc := range testcases { + tree, err := Parse(tc.in) + if err != nil { + t.Error(err) + continue + } + out := 
GetTableName(tree.(*Select).From[0].(*AliasedTableExpr).Expr) + if out.String() != tc.out { + t.Errorf("GetTableName('%s'): %s, want %s", tc.in, out, tc.out) + } + } +} + +func TestIsColName(t *testing.T) { + testcases := []struct { + in Expr + out bool + }{{ + in: &ColName{}, + out: true, + }, { + in: newHexVal(""), + }} + for _, tc := range testcases { + out := IsColName(tc.in) + if out != tc.out { + t.Errorf("IsColName(%T): %v, want %v", tc.in, out, tc.out) + } + } +} + +func TestIsValue(t *testing.T) { + testcases := []struct { + in Expr + out bool + }{{ + in: newStrVal("aa"), + out: true, + }, { + in: newHexVal("3131"), + out: true, + }, { + in: newIntVal("1"), + out: true, + }, { + in: newValArg(":a"), + out: true, + }, { + in: &ValuesFuncExpr{ + Name: NewColIdent("foo"), + Resolved: newStrVal(""), + }, + out: true, + }, { + in: &ValuesFuncExpr{ + Name: NewColIdent("foo"), + }, + out: false, + }, { + in: &NullVal{}, + out: false, + }} + for _, tc := range testcases { + out := IsValue(tc.in) + if out != tc.out { + t.Errorf("IsValue(%T): %v, want %v", tc.in, out, tc.out) + } + if tc.out { + // NewPlanValue should not fail for valid values. 
+ if _, err := NewPlanValue(tc.in); err != nil { + t.Error(err) + } + } + } +} + +func TestIsNull(t *testing.T) { + testcases := []struct { + in Expr + out bool + }{{ + in: &NullVal{}, + out: true, + }, { + in: newStrVal(""), + }} + for _, tc := range testcases { + out := IsNull(tc.in) + if out != tc.out { + t.Errorf("IsNull(%T): %v, want %v", tc.in, out, tc.out) + } + } +} + +func TestIsSimpleTuple(t *testing.T) { + testcases := []struct { + in Expr + out bool + }{{ + in: ValTuple{newStrVal("aa")}, + out: true, + }, { + in: ValTuple{&ColName{}}, + }, { + in: ListArg("::a"), + out: true, + }, { + in: &ColName{}, + }} + for _, tc := range testcases { + out := IsSimpleTuple(tc.in) + if out != tc.out { + t.Errorf("IsSimpleTuple(%T): %v, want %v", tc.in, out, tc.out) + } + if tc.out { + // NewPlanValue should not fail for valid tuples. + if _, err := NewPlanValue(tc.in); err != nil { + t.Error(err) + } + } + } +} + +func TestNewPlanValue(t *testing.T) { + tcases := []struct { + in Expr + out sqltypes.PlanValue + err string + }{{ + in: &SQLVal{ + Type: ValArg, + Val: []byte(":valarg"), + }, + out: sqltypes.PlanValue{Key: "valarg"}, + }, { + in: &SQLVal{ + Type: IntVal, + Val: []byte("10"), + }, + out: sqltypes.PlanValue{Value: sqltypes.NewInt64(10)}, + }, { + in: &SQLVal{ + Type: IntVal, + Val: []byte("1111111111111111111111111111111111111111"), + }, + err: "value out of range", + }, { + in: &SQLVal{ + Type: StrVal, + Val: []byte("strval"), + }, + out: sqltypes.PlanValue{Value: sqltypes.NewVarBinary("strval")}, + }, { + in: &SQLVal{ + Type: HexVal, + Val: []byte("3131"), + }, + out: sqltypes.PlanValue{Value: sqltypes.NewVarBinary("11")}, + }, { + in: &SQLVal{ + Type: HexVal, + Val: []byte("313"), + }, + err: "odd length hex string", + }, { + in: ListArg("::list"), + out: sqltypes.PlanValue{ListKey: "list"}, + }, { + in: ValTuple{ + &SQLVal{ + Type: ValArg, + Val: []byte(":valarg"), + }, + &SQLVal{ + Type: StrVal, + Val: []byte("strval"), + }, + }, + out: 
sqltypes.PlanValue{ + Values: []sqltypes.PlanValue{{ + Key: "valarg", + }, { + Value: sqltypes.NewVarBinary("strval"), + }}, + }, + }, { + in: ValTuple{ + &ParenExpr{Expr: &SQLVal{ + Type: ValArg, + Val: []byte(":valarg"), + }}, + }, + err: "expression is too complex", + }, { + in: ValTuple{ + ListArg("::list"), + }, + err: "unsupported: nested lists", + }, { + in: &ValuesFuncExpr{ + Name: NewColIdent("valfunc"), + Resolved: &SQLVal{ + Type: ValArg, + Val: []byte(":vf"), + }, + }, + out: sqltypes.PlanValue{Key: "vf"}, + }, { + in: &ValuesFuncExpr{ + Name: NewColIdent("valfunc"), + }, + err: "expression is too complex", + }, { + in: &NullVal{}, + out: sqltypes.PlanValue{}, + }, { + in: &ParenExpr{Expr: &SQLVal{ + Type: ValArg, + Val: []byte(":valarg"), + }}, + err: "expression is too complex", + }} + for _, tc := range tcases { + got, err := NewPlanValue(tc.in) + if err != nil { + if !strings.Contains(err.Error(), tc.err) { + t.Errorf("NewPlanValue(%s) error: %v, want '%s'", String(tc.in), err, tc.err) + } + continue + } + if tc.err != "" { + t.Errorf("NewPlanValue(%s) error: nil, want '%s'", String(tc.in), tc.err) + continue + } + if !reflect.DeepEqual(got, tc.out) { + t.Errorf("NewPlanValue(%s): %v, want %v", String(tc.in), got, tc.out) + } + } +} + +func TestStringIn(t *testing.T) { + testcases := []struct { + in1 string + in2 []string + out bool + }{{ + in1: "v1", + in2: []string{"v1", "v2"}, + out: true, + }, { + in1: "v0", + in2: []string{"v1", "v2"}, + }} + for _, tc := range testcases { + out := StringIn(tc.in1, tc.in2...) 
+ if out != tc.out { + t.Errorf("StringIn(%v,%v): %#v, want %#v", tc.in1, tc.in2, out, tc.out) + } + } +} + +func newStrVal(in string) *SQLVal { + return NewStrVal([]byte(in)) +} + +func newIntVal(in string) *SQLVal { + return NewIntVal([]byte(in)) +} + +func newHexVal(in string) *SQLVal { + return NewHexVal([]byte(in)) +} + +func newValArg(in string) *SQLVal { + return NewValArg([]byte(in)) +} diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/ast.go b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/ast.go new file mode 100644 index 00000000..dadcaa13 --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/ast.go @@ -0,0 +1,2653 @@ +/* +Copyright 2017 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqlparser + +import ( + "bytes" + "encoding/hex" + "encoding/json" + "errors" + "strings" + + "github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes" +) + +// Instructions for creating new types: If a type +// needs to satisfy an interface, declare that function +// along with that interface. This will help users +// identify the list of types to which they can assert +// those interfaces. +// If the member of a type has a string with a predefined +// list of values, declare those values as const following +// the type. +// For interfaces that define dummy functions to consolidate +// a set of types, define the function as iTypeName. +// This will help avoid name collisions. 
+ +// Parse parses the sql and returns a Statement, which +// is the AST representation of the query. If a DDL statement +// is partially parsed but still contains a syntax error, the +// error is ignored and the DDL is returned anyway. +func Parse(sql string) (Statement, error) { + tokenizer := NewStringTokenizer(sql) + if yyParse(tokenizer) != 0 { + return nil, errors.New(tokenizer.LastError) + } + return tokenizer.ParseTree, nil +} + +// SQLNode defines the interface for all nodes +// generated by the parser. +type SQLNode interface { + Format(buf *TrackedBuffer) + // WalkSubtree calls visit on all underlying nodes + // of the subtree, but not the current one. Walking + // must be interrupted if visit returns an error. + WalkSubtree(visit Visit) error +} + +// Visit defines the signature of a function that +// can be used to visit all nodes of a parse tree. +type Visit func(node SQLNode) (kontinue bool, err error) + +// Walk calls visit on every node. +// If visit returns true, the underlying nodes +// are also visited. If it returns an error, walking +// is interrupted, and the error is returned. +func Walk(visit Visit, nodes ...SQLNode) error { + for _, node := range nodes { + if node == nil { + continue + } + kontinue, err := visit(node) + if err != nil { + return err + } + if kontinue { + err = node.WalkSubtree(visit) + if err != nil { + return err + } + } + } + return nil +} + +// String returns a string representation of an SQLNode. +func String(node SQLNode) string { + buf := NewTrackedBuffer(nil) + buf.Myprintf("%v", node) + return buf.String() +} + +// Append appends the SQLNode to the buffer. +func Append(buf *bytes.Buffer, node SQLNode) { + tbuf := &TrackedBuffer{ + Buffer: buf, + } + node.Format(tbuf) +} + +// Statement represents a statement. 
+type Statement interface { + iStatement() + SQLNode +} + +func (*Union) iStatement() {} +func (*Select) iStatement() {} +func (*Insert) iStatement() {} +func (*Update) iStatement() {} +func (*Delete) iStatement() {} +func (*Set) iStatement() {} +func (*DDL) iStatement() {} +func (*Show) iStatement() {} +func (*Use) iStatement() {} +func (*OtherRead) iStatement() {} +func (*OtherAdmin) iStatement() {} + +// ParenSelect can actually not be a top level statement, +// but we have to allow it because it's a requirement +// of SelectStatement. +func (*ParenSelect) iStatement() {} + +// SelectStatement any SELECT statement. +type SelectStatement interface { + iSelectStatement() + iStatement() + iInsertRows() + AddOrder(*Order) + SetLimit(*Limit) + SQLNode +} + +func (*Select) iSelectStatement() {} +func (*Union) iSelectStatement() {} +func (*ParenSelect) iSelectStatement() {} + +// Select represents a SELECT statement. +type Select struct { + Cache string + Comments Comments + Distinct string + Hints string + SelectExprs SelectExprs + From TableExprs + Where *Where + GroupBy GroupBy + Having *Where + OrderBy OrderBy + Limit *Limit + Lock string + ForBackup string +} + +// Select.Distinct +const ( + DistinctStr = "distinct " + StraightJoinHint = "straight_join " +) + +// Select.Lock +const ( + ForUpdateStr = " for update" + ShareModeStr = " lock in share mode" +) + +// Select.Cache +const ( + SQLCacheStr = "sql_cache " + SQLNoCacheStr = "sql_no_cache " +) + +// AddOrder adds an order by element +func (node *Select) AddOrder(order *Order) { + node.OrderBy = append(node.OrderBy, order) +} + +// SetLimit sets the limit clause +func (node *Select) SetLimit(limit *Limit) { + node.Limit = limit +} + +// Format formats the node. 
+func (node *Select) Format(buf *TrackedBuffer) { + buf.Myprintf("select %v%s%s%s%v from %v%v%v%v%v%v%s", + node.Comments, node.Cache, node.Distinct, node.Hints, node.SelectExprs, + node.From, node.Where, + node.GroupBy, node.Having, node.OrderBy, + node.Limit, node.Lock) +} + +// WalkSubtree walks the nodes of the subtree. +func (node *Select) WalkSubtree(visit Visit) error { + if node == nil { + return nil + } + return Walk( + visit, + node.Comments, + node.SelectExprs, + node.From, + node.Where, + node.GroupBy, + node.Having, + node.OrderBy, + node.Limit, + ) +} + +// AddWhere adds the boolean expression to the +// WHERE clause as an AND condition. If the expression +// is an OR clause, it parenthesizes it. Currently, +// the OR operator is the only one that's lower precedence +// than AND. +func (node *Select) AddWhere(expr Expr) { + if _, ok := expr.(*OrExpr); ok { + expr = &ParenExpr{Expr: expr} + } + if node.Where == nil { + node.Where = &Where{ + Type: WhereStr, + Expr: expr, + } + return + } + node.Where.Expr = &AndExpr{ + Left: node.Where.Expr, + Right: expr, + } +} + +// AddHaving adds the boolean expression to the +// HAVING clause as an AND condition. If the expression +// is an OR clause, it parenthesizes it. Currently, +// the OR operator is the only one that's lower precedence +// than AND. +func (node *Select) AddHaving(expr Expr) { + if _, ok := expr.(*OrExpr); ok { + expr = &ParenExpr{Expr: expr} + } + if node.Having == nil { + node.Having = &Where{ + Type: HavingStr, + Expr: expr, + } + return + } + node.Having.Expr = &AndExpr{ + Left: node.Having.Expr, + Right: expr, + } +} + +// ParenSelect is a parenthesized SELECT statement. +type ParenSelect struct { + Select SelectStatement +} + +// AddOrder adds an order by element +func (node *ParenSelect) AddOrder(order *Order) { + panic("unreachable") +} + +// SetLimit sets the limit clause +func (node *ParenSelect) SetLimit(limit *Limit) { + panic("unreachable") +} + +// Format formats the node. 
+func (node *ParenSelect) Format(buf *TrackedBuffer) { + buf.Myprintf("(%v)", node.Select) +} + +// WalkSubtree walks the nodes of the subtree. +func (node *ParenSelect) WalkSubtree(visit Visit) error { + if node == nil { + return nil + } + return Walk( + visit, + node.Select, + ) +} + +// Union represents a UNION statement. +type Union struct { + Type string + Left, Right SelectStatement + OrderBy OrderBy + Limit *Limit + Lock string +} + +// Union.Type +const ( + UnionStr = "union" + UnionAllStr = "union all" + UnionDistinctStr = "union distinct" +) + +// AddOrder adds an order by element +func (node *Union) AddOrder(order *Order) { + node.OrderBy = append(node.OrderBy, order) +} + +// SetLimit sets the limit clause +func (node *Union) SetLimit(limit *Limit) { + node.Limit = limit +} + +// Format formats the node. +func (node *Union) Format(buf *TrackedBuffer) { + buf.Myprintf("%v %s %v%v%v%s", node.Left, node.Type, node.Right, + node.OrderBy, node.Limit, node.Lock) +} + +// WalkSubtree walks the nodes of the subtree. +func (node *Union) WalkSubtree(visit Visit) error { + if node == nil { + return nil + } + return Walk( + visit, + node.Left, + node.Right, + ) +} + +// Insert represents an INSERT or REPLACE statement. +// Per the MySQL docs, http://dev.mysql.com/doc/refman/5.7/en/replace.html +// Replace is the counterpart to `INSERT IGNORE`, and works exactly like a +// normal INSERT except if the row exists. In that case it first deletes +// the row and re-inserts with new values. For that reason we keep it as an Insert struct. +// Replaces are currently disallowed in sharded schemas because +// of the implications the deletion part may have on vindexes. +type Insert struct { + Action string + Comments Comments + Ignore string + Table TableName + Columns Columns + Rows InsertRows + OnDup OnDup +} + +const ( + // InsertStr represents insert action. + InsertStr = "insert" + // ReplaceStr represents replace action. 
+ ReplaceStr = "replace" +) + +// Format formats the node. +func (node *Insert) Format(buf *TrackedBuffer) { + buf.Myprintf("%s %v%sinto %v%v %v%v", + node.Action, + node.Comments, node.Ignore, + node.Table, node.Columns, node.Rows, node.OnDup) +} + +// WalkSubtree walks the nodes of the subtree. +func (node *Insert) WalkSubtree(visit Visit) error { + if node == nil { + return nil + } + return Walk( + visit, + node.Comments, + node.Table, + node.Columns, + node.Rows, + node.OnDup, + ) +} + +// InsertRows represents the rows for an INSERT statement. +type InsertRows interface { + iInsertRows() + SQLNode +} + +func (*Select) iInsertRows() {} +func (*Union) iInsertRows() {} +func (Values) iInsertRows() {} +func (*ParenSelect) iInsertRows() {} + +// Update represents an UPDATE statement. +type Update struct { + Comments Comments + Table TableName + Exprs UpdateExprs + Where *Where + OrderBy OrderBy + Limit *Limit +} + +// Format formats the node. +func (node *Update) Format(buf *TrackedBuffer) { + buf.Myprintf("update %v%v set %v%v%v%v", + node.Comments, node.Table, + node.Exprs, node.Where, node.OrderBy, node.Limit) +} + +// WalkSubtree walks the nodes of the subtree. +func (node *Update) WalkSubtree(visit Visit) error { + if node == nil { + return nil + } + return Walk( + visit, + node.Comments, + node.Table, + node.Exprs, + node.Where, + node.OrderBy, + node.Limit, + ) +} + +// Delete represents a DELETE statement. +type Delete struct { + Comments Comments + Table TableName + Where *Where + OrderBy OrderBy + Limit *Limit +} + +// Format formats the node. +func (node *Delete) Format(buf *TrackedBuffer) { + buf.Myprintf("delete %vfrom %v%v%v%v", node.Comments, node.Table, node.Where, node.OrderBy, node.Limit) +} + +// WalkSubtree walks the nodes of the subtree. 
+func (node *Delete) WalkSubtree(visit Visit) error { + if node == nil { + return nil + } + return Walk( + visit, + node.Comments, + node.Table, + node.Where, + node.OrderBy, + node.Limit, + ) +} + +// Set represents a SET statement. +type Set struct { + Comments Comments + Exprs UpdateExprs +} + +// Format formats the node. +func (node *Set) Format(buf *TrackedBuffer) { + buf.Myprintf("set") +} + +// WalkSubtree walks the nodes of the subtree. +func (node *Set) WalkSubtree(visit Visit) error { + if node == nil { + return nil + } + return Walk( + visit, + node.Comments, + node.Exprs, + ) +} + +// DDL represents a CREATE, ALTER, DROP or RENAME statement. +// Table is set for AlterStr, DropStr, RenameStr. +// NewName is set for AlterStr, CreateStr, RenameStr. +type DDL struct { + Action string + Engine string + Charset string + IndexName string + PartitionName string + IfExists bool + IfNotExists bool + Table TableName + NewName TableName + Database TableIdent + TableSpec *TableSpec + + // table column operation + DropColumnName string + ModifyColumnDef *ColumnDefinition +} + +// DDL strings. +const ( + CreateDBStr = "create database" + CreateTableStr = "create table" + CreatePartitionTableStr = "create partition table" + CreateIndexStr = "create index" + DropDBStr = "drop database" + DropTableStr = "drop table" + DropIndexStr = "drop index" + AlterStr = "alter" + AlterEngineStr = "alter table" + AlterCharsetStr = "alter table charset" + AlterAddColumnStr = "alter table add column" + AlterDropColumnStr = "alter table drop column" + AlterModifyColumnStr = "alter table modify column" + RenameStr = "rename" + TruncateTableStr = "truncate table" +) + +// Format formats the node. 
+func (node *DDL) Format(buf *TrackedBuffer) { + switch node.Action { + case CreateDBStr: + ifnotexists := "" + if node.IfNotExists { + ifnotexists = " if not exists" + } + buf.Myprintf("%s%s %s", node.Action, ifnotexists, node.Database.String()) + case DropDBStr: + exists := "" + if node.IfExists { + exists = " if exists" + } + buf.Myprintf("%s%s %s", node.Action, exists, node.Database.String()) + case CreateTableStr: + ifnotexists := "" + if node.IfNotExists { + ifnotexists = " if not exists" + } + if node.TableSpec == nil { + buf.Myprintf("%s%s %v", node.Action, ifnotexists, node.NewName) + } else { + buf.Myprintf("%s%s %v %v", node.Action, ifnotexists, node.NewName, node.TableSpec) + } + case CreateIndexStr: + buf.Myprintf("%s %s on %v", node.Action, node.IndexName, node.NewName) + case DropTableStr: + exists := "" + if node.IfExists { + exists = " if exists" + } + buf.Myprintf("%s%s %v", node.Action, exists, node.Table) + case DropIndexStr: + buf.Myprintf("%s %s on %v", node.Action, node.IndexName, node.Table) + case RenameStr: + buf.Myprintf("%s %v %v", node.Action, node.Table, node.NewName) + case AlterStr: + buf.Myprintf("%s table %v", node.Action, node.NewName) + case AlterEngineStr: + buf.Myprintf("%s %v engine = %s", node.Action, node.NewName, node.Engine) + case AlterCharsetStr: + buf.Myprintf("alter table %v convert to character set %s", node.NewName, node.Charset) + case AlterAddColumnStr: + buf.Myprintf("alter table %v add column %v", node.NewName, node.TableSpec) + case AlterDropColumnStr: + buf.Myprintf("alter table %v drop column `%s`", node.NewName, node.DropColumnName) + case AlterModifyColumnStr: + buf.Myprintf("alter table %v modify column %v", node.NewName, node.ModifyColumnDef) + case TruncateTableStr: + buf.Myprintf("%s %v", node.Action, node.NewName) + } +} + +// WalkSubtree walks the nodes of the subtree. 
+func (node *DDL) WalkSubtree(visit Visit) error { + if node == nil { + return nil + } + return Walk( + visit, + node.Table, + node.NewName, + ) +} + +// TableOptions represents the table options. +type TableOptions struct { + Engine string + Charset string +} + +// Format formats the node. +func (opts TableOptions) Format(buf *TrackedBuffer) { + if opts.Engine != "" { + buf.Myprintf(" engine=%s", opts.Engine) + } + + if opts.Charset != "" { + buf.Myprintf(" default charset=%s", opts.Charset) + } +} + +// WalkSubtree walks the nodes of the subtree. +func (opts TableOptions) WalkSubtree(visit Visit) error { + return nil +} + +// TableSpec describes the structure of a table from a CREATE TABLE statement +type TableSpec struct { + Columns []*ColumnDefinition + Indexes []*IndexDefinition + Options TableOptions +} + +// Format formats the node. +func (ts *TableSpec) Format(buf *TrackedBuffer) { + buf.Myprintf("(\n") + for i, col := range ts.Columns { + if i == 0 { + buf.Myprintf("\t%v", col) + } else { + buf.Myprintf(",\n\t%v", col) + } + } + for _, idx := range ts.Indexes { + buf.Myprintf(",\n\t%v", idx) + } + buf.Myprintf("\n)%v", ts.Options) +} + +// AddColumn appends the given column to the list in the spec +func (ts *TableSpec) AddColumn(cd *ColumnDefinition) { + ts.Columns = append(ts.Columns, cd) +} + +// AddIndex appends the given index to the list in the spec +func (ts *TableSpec) AddIndex(id *IndexDefinition) { + ts.Indexes = append(ts.Indexes, id) +} + +// WalkSubtree walks the nodes of the subtree. 
+func (ts *TableSpec) WalkSubtree(visit Visit) error { + if ts == nil { + return nil + } + + for _, n := range ts.Columns { + if err := Walk(visit, n); err != nil { + return err + } + } + + for _, n := range ts.Indexes { + if err := Walk(visit, n); err != nil { + return err + } + } + + return nil +} + +// ColumnDefinition describes a column in a CREATE TABLE statement +type ColumnDefinition struct { + Name ColIdent + Type ColumnType +} + +// Format formats the node. +func (col *ColumnDefinition) Format(buf *TrackedBuffer) { + buf.Myprintf("`%s` %v", col.Name.String(), &col.Type) +} + +// WalkSubtree walks the nodes of the subtree. +func (col *ColumnDefinition) WalkSubtree(visit Visit) error { + if col == nil { + return nil + } + return Walk( + visit, + col.Name, + &col.Type, + ) +} + +// ColumnType represents a sql type in a CREATE TABLE statement +// All optional fields are nil if not specified +type ColumnType struct { + // The base type string + Type string + + // Generic field options. 
+ NotNull BoolVal + Autoincrement BoolVal + Default *SQLVal + Comment *SQLVal + + // Numeric field options + Length *SQLVal + Unsigned BoolVal + Zerofill BoolVal + Scale *SQLVal + + // Text field options + Charset string + Collate string + + // Enum values + EnumValues []string + + // Key specification + KeyOpt ColumnKeyOption +} + +// Format returns a canonical string representation of the type and all relevant options +func (ct *ColumnType) Format(buf *TrackedBuffer) { + buf.Myprintf("%s", ct.Type) + + if ct.Length != nil && ct.Scale != nil { + buf.Myprintf("(%v,%v)", ct.Length, ct.Scale) + + } else if ct.Length != nil { + buf.Myprintf("(%v)", ct.Length) + } + + if ct.EnumValues != nil { + buf.Myprintf("(%s)", strings.Join(ct.EnumValues, ", ")) + } + + opts := make([]string, 0, 16) + if ct.Unsigned { + opts = append(opts, keywordStrings[UNSIGNED]) + } + if ct.Zerofill { + opts = append(opts, keywordStrings[ZEROFILL]) + } + if ct.Charset != "" { + opts = append(opts, keywordStrings[CHARACTER], keywordStrings[SET], ct.Charset) + } + if ct.Collate != "" { + opts = append(opts, keywordStrings[COLLATE], ct.Collate) + } + if ct.NotNull { + opts = append(opts, keywordStrings[NOT], keywordStrings[NULL]) + } + if ct.Default != nil { + opts = append(opts, keywordStrings[DEFAULT], String(ct.Default)) + } + if ct.Autoincrement { + opts = append(opts, keywordStrings[AUTO_INCREMENT]) + } + if ct.Comment != nil { + opts = append(opts, keywordStrings[COMMENT_KEYWORD], String(ct.Comment)) + } + if ct.KeyOpt == ColKeyPrimary { + opts = append(opts, keywordStrings[PRIMARY], keywordStrings[KEY]) + } + if ct.KeyOpt == ColKeyUnique { + opts = append(opts, keywordStrings[UNIQUE]) + } + if ct.KeyOpt == ColKeyUniqueKey { + opts = append(opts, keywordStrings[UNIQUE], keywordStrings[KEY]) + } + if ct.KeyOpt == ColKey { + opts = append(opts, keywordStrings[KEY]) + } + + if len(opts) != 0 { + buf.Myprintf(" %s", strings.Join(opts, " ")) + } +} + +// WalkSubtree walks the nodes of the 
subtree. +func (ct *ColumnType) WalkSubtree(visit Visit) error { + return nil +} + +// IndexDefinition describes an index in a CREATE TABLE statement +type IndexDefinition struct { + Info *IndexInfo + Columns []*IndexColumn +} + +// Format formats the node. +func (idx *IndexDefinition) Format(buf *TrackedBuffer) { + buf.Myprintf("%v (", idx.Info) + for i, col := range idx.Columns { + if i != 0 { + buf.Myprintf(", `%s`", col.Column.String()) + } else { + buf.Myprintf("`%s`", col.Column.String()) + } + if col.Length != nil { + buf.Myprintf("(%v)", col.Length) + } + } + buf.Myprintf(")") +} + +// WalkSubtree walks the nodes of the subtree. +func (idx *IndexDefinition) WalkSubtree(visit Visit) error { + if idx == nil { + return nil + } + + for _, n := range idx.Columns { + if err := Walk(visit, n.Column); err != nil { + return err + } + } + + return nil +} + +// IndexInfo describes the name and type of an index in a CREATE TABLE statement +type IndexInfo struct { + Type string + Name ColIdent + Primary bool + Unique bool +} + +// Format formats the node. +func (ii *IndexInfo) Format(buf *TrackedBuffer) { + if ii.Primary { + buf.Myprintf("%s", ii.Type) + } else { + buf.Myprintf("%s `%v`", ii.Type, ii.Name) + } +} + +// WalkSubtree walks the nodes of the subtree. +func (ii *IndexInfo) WalkSubtree(visit Visit) error { + return Walk(visit, ii.Name) +} + +// IndexColumn describes a column in an index definition with optional length +type IndexColumn struct { + Column ColIdent + Length *SQLVal +} + +// LengthScaleOption is used for types that have an optional length +// and scale +type LengthScaleOption struct { + Length *SQLVal + Scale *SQLVal +} + +// ColumnKeyOption indicates whether or not the given column is defined as an +// index element and contains the type of the option +type ColumnKeyOption int + +const ( + // ColKeyNone enum. + ColKeyNone ColumnKeyOption = iota + + // ColKeyPrimary enum. + ColKeyPrimary + + // ColKeyUnique enum. 
+	ColKeyUnique
+
+	// ColKeyUniqueKey enum.
+	ColKeyUniqueKey
+
+	// ColKey enum.
+	ColKey
+)
+
+// Show represents a show statement.
+type Show struct {
+	Type     string
+	Table    TableName
+	Database TableName
+	From     string
+	Limit    *Limit
+}
+
+// The following constants represent SHOW statements.
+const (
+	ShowDatabasesStr      = "databases"
+	ShowCreateDatabaseStr = "create database"
+	ShowTablesStr         = "tables"
+	ShowTablesFromStr     = "tables from"
+	ShowCreateTableStr    = "create table"
+	ShowEnginesStr        = "engines"
+	ShowStatusStr         = "status"
+	ShowVersionsStr       = "versions"
+	ShowProcesslistStr    = "processlist"
+	ShowQueryzStr         = "queryz"
+	ShowTxnzStr           = "txnz"
+	ShowWarningsStr       = "warnings"
+	ShowVariablesStr      = "variables"
+	ShowBinlogEventsStr   = "binlog events"
+	ShowUnsupportedStr    = "unsupported"
+)
+
+// Format formats the node.
+func (node *Show) Format(buf *TrackedBuffer) {
+	switch node.Type {
+	case ShowCreateDatabaseStr:
+		buf.Myprintf("show %s %v", node.Type, node.Database)
+	case ShowCreateTableStr:
+		buf.Myprintf("show %s %v", node.Type, node.Table)
+	case ShowTablesFromStr:
+		buf.Myprintf("show %s %v", node.Type, node.Database)
+	case ShowBinlogEventsStr:
+		buf.Myprintf("show %s", node.Type)
+		if node.From != "" {
+			buf.Myprintf(" from gtid '%s'", node.From)
+		}
+		buf.Myprintf("%v", node.Limit)
+	default:
+		buf.Myprintf("show %s", node.Type)
+	}
+}
+
+// WalkSubtree walks the nodes of the subtree.
+func (node *Show) WalkSubtree(visit Visit) error {
+	return nil
+}
+
+// Use represents a use statement.
+type Use struct {
+	DBName TableIdent
+}
+
+// Format formats the node.
+func (node *Use) Format(buf *TrackedBuffer) {
+	buf.Myprintf("use %v", node.DBName)
+}
+
+// WalkSubtree walks the nodes of the subtree.
+func (node *Use) WalkSubtree(visit Visit) error {
+	return Walk(visit, node.DBName)
+}
+
+// OtherRead represents a DESCRIBE, or EXPLAIN statement.
+// It should be used only as an indicator. It does not contain
+// the full AST for the statement.
+type OtherRead struct{} + +// Format formats the node. +func (node *OtherRead) Format(buf *TrackedBuffer) { + buf.WriteString("otherread") +} + +// WalkSubtree walks the nodes of the subtree. +func (node *OtherRead) WalkSubtree(visit Visit) error { + return nil +} + +// OtherAdmin represents a misc statement that relies on ADMIN privileges, +// such as REPAIR, OPTIMIZE, or TRUNCATE statement. +// It should be used only as an indicator. It does not contain +// the full AST for the statement. +type OtherAdmin struct{} + +// Format formats the node. +func (node *OtherAdmin) Format(buf *TrackedBuffer) { + buf.WriteString("otheradmin") +} + +// WalkSubtree walks the nodes of the subtree. +func (node *OtherAdmin) WalkSubtree(visit Visit) error { + return nil +} + +// Comments represents a list of comments. +type Comments [][]byte + +// Format formats the node. +func (node Comments) Format(buf *TrackedBuffer) { + for _, c := range node { + buf.Myprintf("%s ", c) + } +} + +// WalkSubtree walks the nodes of the subtree. +func (node Comments) WalkSubtree(visit Visit) error { + return nil +} + +// SelectExprs represents SELECT expressions. +type SelectExprs []SelectExpr + +// Format formats the node. +func (node SelectExprs) Format(buf *TrackedBuffer) { + var prefix string + for _, n := range node { + buf.Myprintf("%s%v", prefix, n) + prefix = ", " + } +} + +// WalkSubtree walks the nodes of the subtree. +func (node SelectExprs) WalkSubtree(visit Visit) error { + for _, n := range node { + if err := Walk(visit, n); err != nil { + return err + } + } + return nil +} + +// SelectExpr represents a SELECT expression. +type SelectExpr interface { + iSelectExpr() + SQLNode +} + +func (*StarExpr) iSelectExpr() {} +func (*AliasedExpr) iSelectExpr() {} +func (Nextval) iSelectExpr() {} + +// StarExpr defines a '*' or 'table.*' expression. +type StarExpr struct { + TableName TableName +} + +// Format formats the node. 
+func (node *StarExpr) Format(buf *TrackedBuffer) { + if !node.TableName.IsEmpty() { + buf.Myprintf("%v.", node.TableName) + } + buf.Myprintf("*") +} + +// WalkSubtree walks the nodes of the subtree. +func (node *StarExpr) WalkSubtree(visit Visit) error { + if node == nil { + return nil + } + return Walk( + visit, + node.TableName, + ) +} + +// AliasedExpr defines an aliased SELECT expression. +type AliasedExpr struct { + Expr Expr + As ColIdent +} + +// Format formats the node. +func (node *AliasedExpr) Format(buf *TrackedBuffer) { + buf.Myprintf("%v", node.Expr) + if !node.As.IsEmpty() { + buf.Myprintf(" as %v", node.As) + } +} + +// WalkSubtree walks the nodes of the subtree. +func (node *AliasedExpr) WalkSubtree(visit Visit) error { + if node == nil { + return nil + } + return Walk( + visit, + node.Expr, + node.As, + ) +} + +// Nextval defines the NEXT VALUE expression. +type Nextval struct { + Expr Expr +} + +// Format formats the node. +func (node Nextval) Format(buf *TrackedBuffer) { + buf.Myprintf("next %v values", node.Expr) +} + +// WalkSubtree walks the nodes of the subtree. +func (node Nextval) WalkSubtree(visit Visit) error { + return Walk(visit, node.Expr) +} + +// Columns represents an insert column list. +type Columns []ColIdent + +// Format formats the node. +func (node Columns) Format(buf *TrackedBuffer) { + if node == nil { + return + } + prefix := "(" + for _, n := range node { + buf.Myprintf("%s%v", prefix, n) + prefix = ", " + } + buf.WriteString(")") +} + +// WalkSubtree walks the nodes of the subtree. 
+func (node Columns) WalkSubtree(visit Visit) error { + for _, n := range node { + if err := Walk(visit, n); err != nil { + return err + } + } + return nil +} + +// FindColumn finds a column in the column list, returning +// the index if it exists or -1 otherwise +func (node Columns) FindColumn(col ColIdent) int { + for i, colName := range node { + if colName.Equal(col) { + return i + } + } + return -1 +} + +// TableExprs represents a list of table expressions. +type TableExprs []TableExpr + +// Format formats the node. +func (node TableExprs) Format(buf *TrackedBuffer) { + var prefix string + for _, n := range node { + buf.Myprintf("%s%v", prefix, n) + prefix = ", " + } +} + +// WalkSubtree walks the nodes of the subtree. +func (node TableExprs) WalkSubtree(visit Visit) error { + for _, n := range node { + if err := Walk(visit, n); err != nil { + return err + } + } + return nil +} + +// TableExpr represents a table expression. +type TableExpr interface { + iTableExpr() + SQLNode +} + +func (*AliasedTableExpr) iTableExpr() {} +func (*ParenTableExpr) iTableExpr() {} +func (*JoinTableExpr) iTableExpr() {} + +// AliasedTableExpr represents a table expression +// coupled with an optional alias or index hint. +// If As is empty, no alias was used. +type AliasedTableExpr struct { + Expr SimpleTableExpr + As TableIdent + Hints *IndexHints +} + +// Format formats the node. +func (node *AliasedTableExpr) Format(buf *TrackedBuffer) { + buf.Myprintf("%v", node.Expr) + if !node.As.IsEmpty() { + buf.Myprintf(" as %v", node.As) + } + if node.Hints != nil { + // Hint node provides the space padding. + buf.Myprintf("%v", node.Hints) + } +} + +// WalkSubtree walks the nodes of the subtree. +func (node *AliasedTableExpr) WalkSubtree(visit Visit) error { + if node == nil { + return nil + } + return Walk( + visit, + node.Expr, + node.As, + node.Hints, + ) +} + +// SimpleTableExpr represents a simple table expression. 
type SimpleTableExpr interface {
	iSimpleTableExpr()
	SQLNode
}

func (TableName) iSimpleTableExpr() {}
func (*Subquery) iSimpleTableExpr() {}

// TableName represents a table name.
// Qualifier, if specified, represents a database or keyspace.
// TableName is a value struct whose fields are case sensitive.
// This means two TableName vars can be compared for equality
// and a TableName can also be used as key in a map.
type TableName struct {
	Name, Qualifier TableIdent
}

// Format formats the node.
func (node TableName) Format(buf *TrackedBuffer) {
	if node.IsEmpty() {
		return
	}
	if !node.Qualifier.IsEmpty() {
		buf.Myprintf("%v.", node.Qualifier)
	}
	buf.Myprintf("%v", node.Name)
}

// WalkSubtree walks the nodes of the subtree.
func (node TableName) WalkSubtree(visit Visit) error {
	return Walk(
		visit,
		node.Name,
		node.Qualifier,
	)
}

// IsEmpty returns true if TableName is nil or empty.
func (node TableName) IsEmpty() bool {
	// If Name is empty, Qualifier is also empty.
	return node.Name.IsEmpty()
}

// ParenTableExpr represents a parenthesized list of TableExpr.
type ParenTableExpr struct {
	Exprs TableExprs
}

// Format formats the node.
func (node *ParenTableExpr) Format(buf *TrackedBuffer) {
	buf.Myprintf("(%v)", node.Exprs)
}

// WalkSubtree walks the nodes of the subtree.
func (node *ParenTableExpr) WalkSubtree(visit Visit) error {
	if node == nil {
		return nil
	}
	return Walk(
		visit,
		node.Exprs,
	)
}

// JoinTableExpr represents a TableExpr that's a JOIN operation.
type JoinTableExpr struct {
	LeftExpr  TableExpr
	Join      string
	RightExpr TableExpr
	On        Expr
}

// JoinTableExpr.Join
const (
	JoinStr             = "join"
	StraightJoinStr     = "straight_join"
	LeftJoinStr         = "left join"
	RightJoinStr        = "right join"
	NaturalJoinStr      = "natural join"
	NaturalLeftJoinStr  = "natural left join"
	NaturalRightJoinStr = "natural right join"
)

// Format formats the node.
+func (node *JoinTableExpr) Format(buf *TrackedBuffer) { + buf.Myprintf("%v %s %v", node.LeftExpr, node.Join, node.RightExpr) + if node.On != nil { + buf.Myprintf(" on %v", node.On) + } +} + +// WalkSubtree walks the nodes of the subtree. +func (node *JoinTableExpr) WalkSubtree(visit Visit) error { + if node == nil { + return nil + } + return Walk( + visit, + node.LeftExpr, + node.RightExpr, + node.On, + ) +} + +// IndexHints represents a list of index hints. +type IndexHints struct { + Type string + Indexes []ColIdent +} + +// Index hints. +const ( + UseStr = "use " + IgnoreStr = "ignore " + ForceStr = "force " +) + +// Format formats the node. +func (node *IndexHints) Format(buf *TrackedBuffer) { + buf.Myprintf(" %sindex ", node.Type) + prefix := "(" + for _, n := range node.Indexes { + buf.Myprintf("%s%v", prefix, n) + prefix = ", " + } + buf.Myprintf(")") +} + +// WalkSubtree walks the nodes of the subtree. +func (node *IndexHints) WalkSubtree(visit Visit) error { + if node == nil { + return nil + } + for _, n := range node.Indexes { + if err := Walk(visit, n); err != nil { + return err + } + } + return nil +} + +// Where represents a WHERE or HAVING clause. +type Where struct { + Type string + Expr Expr +} + +// Where.Type +const ( + WhereStr = "where" + HavingStr = "having" +) + +// NewWhere creates a WHERE or HAVING clause out +// of a Expr. If the expression is nil, it returns nil. +func NewWhere(typ string, expr Expr) *Where { + if expr == nil { + return nil + } + return &Where{Type: typ, Expr: expr} +} + +// Format formats the node. +func (node *Where) Format(buf *TrackedBuffer) { + if node == nil || node.Expr == nil { + return + } + buf.Myprintf(" %s %v", node.Type, node.Expr) +} + +// WalkSubtree walks the nodes of the subtree. +func (node *Where) WalkSubtree(visit Visit) error { + if node == nil { + return nil + } + return Walk( + visit, + node.Expr, + ) +} + +// Expr represents an expression. 
+type Expr interface { + iExpr() + SQLNode +} + +func (*AndExpr) iExpr() {} +func (*OrExpr) iExpr() {} +func (*NotExpr) iExpr() {} +func (*ParenExpr) iExpr() {} +func (*ComparisonExpr) iExpr() {} +func (*RangeCond) iExpr() {} +func (*IsExpr) iExpr() {} +func (*ExistsExpr) iExpr() {} +func (*SQLVal) iExpr() {} +func (*NullVal) iExpr() {} +func (BoolVal) iExpr() {} +func (*ColName) iExpr() {} +func (ValTuple) iExpr() {} +func (*Subquery) iExpr() {} +func (ListArg) iExpr() {} +func (*BinaryExpr) iExpr() {} +func (*UnaryExpr) iExpr() {} +func (*IntervalExpr) iExpr() {} +func (*CollateExpr) iExpr() {} +func (*FuncExpr) iExpr() {} +func (*CaseExpr) iExpr() {} +func (*ValuesFuncExpr) iExpr() {} +func (*ConvertExpr) iExpr() {} +func (*ConvertUsingExpr) iExpr() {} +func (*MatchExpr) iExpr() {} +func (*GroupConcatExpr) iExpr() {} +func (*Default) iExpr() {} + +// Exprs represents a list of value expressions. +// It's not a valid expression because it's not parenthesized. +type Exprs []Expr + +// Format formats the node. +func (node Exprs) Format(buf *TrackedBuffer) { + var prefix string + for _, n := range node { + buf.Myprintf("%s%v", prefix, n) + prefix = ", " + } +} + +// WalkSubtree walks the nodes of the subtree. +func (node Exprs) WalkSubtree(visit Visit) error { + for _, n := range node { + if err := Walk(visit, n); err != nil { + return err + } + } + return nil +} + +// AndExpr represents an AND expression. +type AndExpr struct { + Left, Right Expr +} + +// Format formats the node. +func (node *AndExpr) Format(buf *TrackedBuffer) { + buf.Myprintf("%v and %v", node.Left, node.Right) +} + +// WalkSubtree walks the nodes of the subtree. +func (node *AndExpr) WalkSubtree(visit Visit) error { + if node == nil { + return nil + } + return Walk( + visit, + node.Left, + node.Right, + ) +} + +// OrExpr represents an OR expression. +type OrExpr struct { + Left, Right Expr +} + +// Format formats the node. 
+func (node *OrExpr) Format(buf *TrackedBuffer) { + buf.Myprintf("%v or %v", node.Left, node.Right) +} + +// WalkSubtree walks the nodes of the subtree. +func (node *OrExpr) WalkSubtree(visit Visit) error { + if node == nil { + return nil + } + return Walk( + visit, + node.Left, + node.Right, + ) +} + +// NotExpr represents a NOT expression. +type NotExpr struct { + Expr Expr +} + +// Format formats the node. +func (node *NotExpr) Format(buf *TrackedBuffer) { + buf.Myprintf("not %v", node.Expr) +} + +// WalkSubtree walks the nodes of the subtree. +func (node *NotExpr) WalkSubtree(visit Visit) error { + if node == nil { + return nil + } + return Walk( + visit, + node.Expr, + ) +} + +// ParenExpr represents a parenthesized boolean expression. +type ParenExpr struct { + Expr Expr +} + +// Format formats the node. +func (node *ParenExpr) Format(buf *TrackedBuffer) { + buf.Myprintf("(%v)", node.Expr) +} + +// WalkSubtree walks the nodes of the subtree. +func (node *ParenExpr) WalkSubtree(visit Visit) error { + if node == nil { + return nil + } + return Walk( + visit, + node.Expr, + ) +} + +// ComparisonExpr represents a two-value comparison expression. +type ComparisonExpr struct { + Operator string + Left, Right Expr + Escape Expr +} + +// ComparisonExpr.Operator +const ( + EqualStr = "=" + LessThanStr = "<" + GreaterThanStr = ">" + LessEqualStr = "<=" + GreaterEqualStr = ">=" + NotEqualStr = "!=" + NullSafeEqualStr = "<=>" + InStr = "in" + NotInStr = "not in" + LikeStr = "like" + NotLikeStr = "not like" + RegexpStr = "regexp" + NotRegexpStr = "not regexp" + JSONExtractOp = "->" + JSONUnquoteExtractOp = "->>" +) + +// Format formats the node. +func (node *ComparisonExpr) Format(buf *TrackedBuffer) { + buf.Myprintf("%v %s %v", node.Left, node.Operator, node.Right) + if node.Escape != nil { + buf.Myprintf(" escape %v", node.Escape) + } +} + +// WalkSubtree walks the nodes of the subtree. 
func (node *ComparisonExpr) WalkSubtree(visit Visit) error {
	if node == nil {
		return nil
	}
	return Walk(
		visit,
		node.Left,
		node.Right,
		node.Escape,
	)
}

// RangeCond represents a BETWEEN or a NOT BETWEEN expression.
type RangeCond struct {
	Operator string
	Left     Expr
	From, To Expr
}

// RangeCond.Operator
const (
	BetweenStr    = "between"
	NotBetweenStr = "not between"
)

// Format formats the node.
func (node *RangeCond) Format(buf *TrackedBuffer) {
	buf.Myprintf("%v %s %v and %v", node.Left, node.Operator, node.From, node.To)
}

// WalkSubtree walks the nodes of the subtree.
func (node *RangeCond) WalkSubtree(visit Visit) error {
	if node == nil {
		return nil
	}
	return Walk(
		visit,
		node.Left,
		node.From,
		node.To,
	)
}

// IsExpr represents an IS ... or an IS NOT ... expression.
type IsExpr struct {
	Operator string
	Expr     Expr
}

// IsExpr.Operator
const (
	IsNullStr     = "is null"
	IsNotNullStr  = "is not null"
	IsTrueStr     = "is true"
	IsNotTrueStr  = "is not true"
	IsFalseStr    = "is false"
	IsNotFalseStr = "is not false"
)

// Format formats the node.
func (node *IsExpr) Format(buf *TrackedBuffer) {
	buf.Myprintf("%v %s", node.Expr, node.Operator)
}

// WalkSubtree walks the nodes of the subtree.
func (node *IsExpr) WalkSubtree(visit Visit) error {
	if node == nil {
		return nil
	}
	return Walk(
		visit,
		node.Expr,
	)
}

// ExistsExpr represents an EXISTS expression.
type ExistsExpr struct {
	Subquery *Subquery
}

// Format formats the node.
func (node *ExistsExpr) Format(buf *TrackedBuffer) {
	buf.Myprintf("exists %v", node.Subquery)
}

// WalkSubtree walks the nodes of the subtree.
func (node *ExistsExpr) WalkSubtree(visit Visit) error {
	if node == nil {
		return nil
	}
	return Walk(
		visit,
		node.Subquery,
	)
}

// ValType specifies the type for SQLVal.
type ValType int

// These are the possible ValType values.
// HexNum represents a 0x... value.
It cannot +// be treated as a simple value because it can +// be interpreted differently depending on the +// context. +const ( + StrVal = ValType(iota) + IntVal + FloatVal + HexNum + HexVal + ValArg +) + +// SQLVal represents a single value. +type SQLVal struct { + Type ValType + Val []byte +} + +// NewStrVal builds a new StrVal. +func NewStrVal(in []byte) *SQLVal { + return &SQLVal{Type: StrVal, Val: in} +} + +// NewIntVal builds a new IntVal. +func NewIntVal(in []byte) *SQLVal { + return &SQLVal{Type: IntVal, Val: in} +} + +// NewFloatVal builds a new FloatVal. +func NewFloatVal(in []byte) *SQLVal { + return &SQLVal{Type: FloatVal, Val: in} +} + +// NewHexNum builds a new HexNum. +func NewHexNum(in []byte) *SQLVal { + return &SQLVal{Type: HexNum, Val: in} +} + +// NewHexVal builds a new HexVal. +func NewHexVal(in []byte) *SQLVal { + return &SQLVal{Type: HexVal, Val: in} +} + +// NewValArg builds a new ValArg. +func NewValArg(in []byte) *SQLVal { + return &SQLVal{Type: ValArg, Val: in} +} + +// Format formats the node. +func (node *SQLVal) Format(buf *TrackedBuffer) { + switch node.Type { + case StrVal: + sqltypes.MakeTrusted(sqltypes.VarBinary, node.Val).EncodeSQL(buf) + case IntVal, FloatVal, HexNum: + buf.Myprintf("%s", []byte(node.Val)) + case HexVal: + buf.Myprintf("X'%s'", []byte(node.Val)) + case ValArg: + buf.WriteArg(string(node.Val)) + default: + panic("unexpected") + } +} + +// WalkSubtree walks the nodes of the subtree. +func (node *SQLVal) WalkSubtree(visit Visit) error { + return nil +} + +// HexDecode decodes the hexval into bytes. +func (node *SQLVal) HexDecode() ([]byte, error) { + dst := make([]byte, hex.DecodedLen(len([]byte(node.Val)))) + _, err := hex.Decode(dst, []byte(node.Val)) + if err != nil { + return nil, err + } + return dst, err +} + +// NullVal represents a NULL value. +type NullVal struct{} + +// Format formats the node. 
+func (node *NullVal) Format(buf *TrackedBuffer) { + buf.Myprintf("null") +} + +// WalkSubtree walks the nodes of the subtree. +func (node *NullVal) WalkSubtree(visit Visit) error { + return nil +} + +// BoolVal is true or false. +type BoolVal bool + +// Format formats the node. +func (node BoolVal) Format(buf *TrackedBuffer) { + if node { + buf.Myprintf("true") + } else { + buf.Myprintf("false") + } +} + +// WalkSubtree walks the nodes of the subtree. +func (node BoolVal) WalkSubtree(visit Visit) error { + return nil +} + +// ColName represents a column name. +type ColName struct { + // Metadata is not populated by the parser. + // It's a placeholder for analyzers to store + // additional data, typically info about which + // table or column this node references. + Metadata interface{} + Name ColIdent + Qualifier TableName +} + +// Format formats the node. +func (node *ColName) Format(buf *TrackedBuffer) { + if !node.Qualifier.IsEmpty() { + buf.Myprintf("%v.", node.Qualifier) + } + buf.Myprintf("%v", node.Name) +} + +// WalkSubtree walks the nodes of the subtree. +func (node *ColName) WalkSubtree(visit Visit) error { + if node == nil { + return nil + } + return Walk( + visit, + node.Name, + node.Qualifier, + ) +} + +// Equal returns true if the column names match. +func (node *ColName) Equal(c *ColName) bool { + // Failsafe: ColName should not be empty. + if node == nil || c == nil { + return false + } + return node.Name.Equal(c.Name) && node.Qualifier == c.Qualifier +} + +// ColTuple represents a list of column values. +// It can be ValTuple, Subquery, ListArg. +type ColTuple interface { + iColTuple() + Expr +} + +func (ValTuple) iColTuple() {} +func (*Subquery) iColTuple() {} +func (ListArg) iColTuple() {} + +// ValTuple represents a tuple of actual values. +type ValTuple Exprs + +// Format formats the node. +func (node ValTuple) Format(buf *TrackedBuffer) { + buf.Myprintf("(%v)", Exprs(node)) +} + +// WalkSubtree walks the nodes of the subtree. 
+func (node ValTuple) WalkSubtree(visit Visit) error { + return Walk(visit, Exprs(node)) +} + +// Subquery represents a subquery. +type Subquery struct { + Select SelectStatement +} + +// Format formats the node. +func (node *Subquery) Format(buf *TrackedBuffer) { + buf.Myprintf("(%v)", node.Select) +} + +// WalkSubtree walks the nodes of the subtree. +func (node *Subquery) WalkSubtree(visit Visit) error { + if node == nil { + return nil + } + return Walk( + visit, + node.Select, + ) +} + +// ListArg represents a named list argument. +type ListArg []byte + +// Format formats the node. +func (node ListArg) Format(buf *TrackedBuffer) { + buf.WriteArg(string(node)) +} + +// WalkSubtree walks the nodes of the subtree. +func (node ListArg) WalkSubtree(visit Visit) error { + return nil +} + +// BinaryExpr represents a binary value expression. +type BinaryExpr struct { + Operator string + Left, Right Expr +} + +// BinaryExpr.Operator +const ( + BitAndStr = "&" + BitOrStr = "|" + BitXorStr = "^" + PlusStr = "+" + MinusStr = "-" + MultStr = "*" + DivStr = "/" + IntDivStr = "div" + ModStr = "%" + ShiftLeftStr = "<<" + ShiftRightStr = ">>" +) + +// Format formats the node. +func (node *BinaryExpr) Format(buf *TrackedBuffer) { + buf.Myprintf("%v %s %v", node.Left, node.Operator, node.Right) +} + +// WalkSubtree walks the nodes of the subtree. +func (node *BinaryExpr) WalkSubtree(visit Visit) error { + if node == nil { + return nil + } + return Walk( + visit, + node.Left, + node.Right, + ) +} + +// UnaryExpr represents a unary value expression. +type UnaryExpr struct { + Operator string + Expr Expr +} + +// UnaryExpr.Operator +const ( + UPlusStr = "+" + UMinusStr = "-" + TildaStr = "~" + BangStr = "!" + BinaryStr = "binary " +) + +// Format formats the node. 
+func (node *UnaryExpr) Format(buf *TrackedBuffer) { + if _, unary := node.Expr.(*UnaryExpr); unary { + buf.Myprintf("%s %v", node.Operator, node.Expr) + return + } + buf.Myprintf("%s%v", node.Operator, node.Expr) +} + +// WalkSubtree walks the nodes of the subtree. +func (node *UnaryExpr) WalkSubtree(visit Visit) error { + if node == nil { + return nil + } + return Walk( + visit, + node.Expr, + ) +} + +// IntervalExpr represents a date-time INTERVAL expression. +type IntervalExpr struct { + Expr Expr + Unit ColIdent +} + +// Format formats the node. +func (node *IntervalExpr) Format(buf *TrackedBuffer) { + buf.Myprintf("interval %v %v", node.Expr, node.Unit) +} + +// WalkSubtree walks the nodes of the subtree. +func (node *IntervalExpr) WalkSubtree(visit Visit) error { + if node == nil { + return nil + } + return Walk( + visit, + node.Expr, + node.Unit, + ) +} + +// CollateExpr represents dynamic collate operator. +type CollateExpr struct { + Expr Expr + Charset string +} + +// Format formats the node. +func (node *CollateExpr) Format(buf *TrackedBuffer) { + buf.Myprintf("%v collate %s", node.Expr, node.Charset) +} + +// WalkSubtree walks the nodes of the subtree. +func (node *CollateExpr) WalkSubtree(visit Visit) error { + if node == nil { + return nil + } + return Walk( + visit, + node.Expr, + ) +} + +// FuncExpr represents a function call. +type FuncExpr struct { + Qualifier TableIdent + Name ColIdent + Distinct bool + Exprs SelectExprs +} + +// Format formats the node. +func (node *FuncExpr) Format(buf *TrackedBuffer) { + var distinct string + if node.Distinct { + distinct = "distinct " + } + if !node.Qualifier.IsEmpty() { + buf.Myprintf("%v.", node.Qualifier) + } + // Function names should not be back-quoted even + // if they match a reserved word. So, print the + // name as is. + buf.Myprintf("%s(%s%v)", node.Name.String(), distinct, node.Exprs) +} + +// WalkSubtree walks the nodes of the subtree. 
+func (node *FuncExpr) WalkSubtree(visit Visit) error { + if node == nil { + return nil + } + return Walk( + visit, + node.Qualifier, + node.Name, + node.Exprs, + ) +} + +// Aggregates is a map of all aggregate functions. +var Aggregates = map[string]bool{ + "avg": true, + "bit_and": true, + "bit_or": true, + "bit_xor": true, + "count": true, + "group_concat": true, + "max": true, + "min": true, + "std": true, + "stddev_pop": true, + "stddev_samp": true, + "stddev": true, + "sum": true, + "var_pop": true, + "var_samp": true, + "variance": true, +} + +// IsAggregate returns true if the function is an aggregate. +func (node *FuncExpr) IsAggregate() bool { + return Aggregates[node.Name.Lowered()] +} + +// GroupConcatExpr represents a call to GROUP_CONCAT +type GroupConcatExpr struct { + Distinct string + Exprs SelectExprs + OrderBy OrderBy + Separator string +} + +// Format formats the node +func (node *GroupConcatExpr) Format(buf *TrackedBuffer) { + buf.Myprintf("group_concat(%s%v%v%s)", node.Distinct, node.Exprs, node.OrderBy, node.Separator) +} + +// WalkSubtree walks the nodes of the subtree. +func (node *GroupConcatExpr) WalkSubtree(visit Visit) error { + if node == nil { + return nil + } + return Walk( + visit, + node.Exprs, + node.OrderBy, + ) +} + +// ValuesFuncExpr represents a function call. +type ValuesFuncExpr struct { + Name ColIdent + Resolved Expr +} + +// Format formats the node. +func (node *ValuesFuncExpr) Format(buf *TrackedBuffer) { + // Function names should not be back-quoted even + // if they match a reserved word. So, print the + // name as is. + if node.Resolved != nil { + buf.Myprintf("%v", node.Resolved) + } else { + buf.Myprintf("values(%s)", node.Name.String()) + } +} + +// WalkSubtree walks the nodes of the subtree. 
func (node *ValuesFuncExpr) WalkSubtree(visit Visit) error {
	if node == nil {
		return nil
	}
	return Walk(
		visit,
		node.Name,
		node.Resolved,
	)
}

// ConvertExpr represents a call to CONVERT(expr, type)
// or its equivalent CAST(expr AS type). Both are rewritten to the former.
type ConvertExpr struct {
	Expr Expr
	Type *ConvertType
}

// Format formats the node.
func (node *ConvertExpr) Format(buf *TrackedBuffer) {
	buf.Myprintf("convert(%v, %v)", node.Expr, node.Type)
}

// WalkSubtree walks the nodes of the subtree.
func (node *ConvertExpr) WalkSubtree(visit Visit) error {
	if node == nil {
		return nil
	}
	return Walk(
		visit,
		node.Expr,
		node.Type,
	)
}

// ConvertUsingExpr represents a call to CONVERT(expr USING charset).
type ConvertUsingExpr struct {
	Expr Expr
	Type string
}

// Format formats the node.
func (node *ConvertUsingExpr) Format(buf *TrackedBuffer) {
	buf.Myprintf("convert(%v using %s)", node.Expr, node.Type)
}

// WalkSubtree walks the nodes of the subtree.
func (node *ConvertUsingExpr) WalkSubtree(visit Visit) error {
	if node == nil {
		return nil
	}
	return Walk(
		visit,
		node.Expr,
	)
}

// ConvertType represents the type in call to CONVERT(expr, type)
type ConvertType struct {
	Type     string
	Length   *SQLVal
	Scale    *SQLVal
	Operator string
	Charset  string
}

// CharacterSetStr is the literal " character set" (note the leading
// space); presumably assigned to ConvertType.Operator when a charset
// clause is parsed — TODO confirm at the parse site.
const (
	CharacterSetStr = " character set"
)

// Format formats the node.
func (node *ConvertType) Format(buf *TrackedBuffer) {
	buf.Myprintf("%s", node.Type)
	if node.Length != nil {
		// Scale is only meaningful when Length is present: "(len, scale)".
		buf.Myprintf("(%v", node.Length)
		if node.Scale != nil {
			buf.Myprintf(", %v", node.Scale)
		}
		buf.Myprintf(")")
	}
	if node.Charset != "" {
		buf.Myprintf("%s %s", node.Operator, node.Charset)
	}
}

// WalkSubtree walks the nodes of the subtree.
+func (node *ConvertType) WalkSubtree(visit Visit) error { + return nil +} + +// MatchExpr represents a call to the MATCH function +type MatchExpr struct { + Columns SelectExprs + Expr Expr + Option string +} + +// MatchExpr.Option +const ( + BooleanModeStr = " in boolean mode" + NaturalLanguageModeStr = " in natural language mode" + NaturalLanguageModeWithQueryExpansionStr = " in natural language mode with query expansion" + QueryExpansionStr = " with query expansion" +) + +// Format formats the node +func (node *MatchExpr) Format(buf *TrackedBuffer) { + buf.Myprintf("match(%v) against (%v%s)", node.Columns, node.Expr, node.Option) +} + +// WalkSubtree walks the nodes of the subtree. +func (node *MatchExpr) WalkSubtree(visit Visit) error { + if node == nil { + return nil + } + return Walk( + visit, + node.Columns, + node.Expr, + ) +} + +// CaseExpr represents a CASE expression. +type CaseExpr struct { + Expr Expr + Whens []*When + Else Expr +} + +// Format formats the node. +func (node *CaseExpr) Format(buf *TrackedBuffer) { + buf.Myprintf("case ") + if node.Expr != nil { + buf.Myprintf("%v ", node.Expr) + } + for _, when := range node.Whens { + buf.Myprintf("%v ", when) + } + if node.Else != nil { + buf.Myprintf("else %v ", node.Else) + } + buf.Myprintf("end") +} + +// WalkSubtree walks the nodes of the subtree. +func (node *CaseExpr) WalkSubtree(visit Visit) error { + if node == nil { + return nil + } + if err := Walk(visit, node.Expr); err != nil { + return err + } + for _, n := range node.Whens { + if err := Walk(visit, n); err != nil { + return err + } + } + return Walk(visit, node.Else) +} + +// Default represents a DEFAULT expression. +type Default struct { + ColName string +} + +// Format formats the node. +func (node *Default) Format(buf *TrackedBuffer) { + buf.Myprintf("default") + if node.ColName != "" { + buf.Myprintf("(%s)", node.ColName) + } +} + +// WalkSubtree walks the nodes of the subtree. 
+func (node *Default) WalkSubtree(visit Visit) error { + return nil +} + +// When represents a WHEN sub-expression. +type When struct { + Cond Expr + Val Expr +} + +// Format formats the node. +func (node *When) Format(buf *TrackedBuffer) { + buf.Myprintf("when %v then %v", node.Cond, node.Val) +} + +// WalkSubtree walks the nodes of the subtree. +func (node *When) WalkSubtree(visit Visit) error { + if node == nil { + return nil + } + return Walk( + visit, + node.Cond, + node.Val, + ) +} + +// GroupBy represents a GROUP BY clause. +type GroupBy []Expr + +// Format formats the node. +func (node GroupBy) Format(buf *TrackedBuffer) { + prefix := " group by " + for _, n := range node { + buf.Myprintf("%s%v", prefix, n) + prefix = ", " + } +} + +// WalkSubtree walks the nodes of the subtree. +func (node GroupBy) WalkSubtree(visit Visit) error { + for _, n := range node { + if err := Walk(visit, n); err != nil { + return err + } + } + return nil +} + +// OrderBy represents an ORDER By clause. +type OrderBy []*Order + +// Format formats the node. +func (node OrderBy) Format(buf *TrackedBuffer) { + prefix := " order by " + for _, n := range node { + buf.Myprintf("%s%v", prefix, n) + prefix = ", " + } +} + +// WalkSubtree walks the nodes of the subtree. +func (node OrderBy) WalkSubtree(visit Visit) error { + for _, n := range node { + if err := Walk(visit, n); err != nil { + return err + } + } + return nil +} + +// Order represents an ordering expression. +type Order struct { + Expr Expr + Direction string +} + +// Order.Direction +const ( + AscScr = "asc" + DescScr = "desc" +) + +// Format formats the node. +func (node *Order) Format(buf *TrackedBuffer) { + if node, ok := node.Expr.(*NullVal); ok { + buf.Myprintf("%v", node) + return + } + buf.Myprintf("%v %s", node.Expr, node.Direction) +} + +// WalkSubtree walks the nodes of the subtree. 
+func (node *Order) WalkSubtree(visit Visit) error { + if node == nil { + return nil + } + return Walk( + visit, + node.Expr, + ) +} + +// Limit represents a LIMIT clause. +type Limit struct { + Offset, Rowcount Expr +} + +// Format formats the node. +func (node *Limit) Format(buf *TrackedBuffer) { + if node == nil { + return + } + buf.Myprintf(" limit ") + if node.Offset != nil { + buf.Myprintf("%v, ", node.Offset) + } + buf.Myprintf("%v", node.Rowcount) +} + +// WalkSubtree walks the nodes of the subtree. +func (node *Limit) WalkSubtree(visit Visit) error { + if node == nil { + return nil + } + return Walk( + visit, + node.Offset, + node.Rowcount, + ) +} + +// Values represents a VALUES clause. +type Values []ValTuple + +// Format formats the node. +func (node Values) Format(buf *TrackedBuffer) { + prefix := "values " + for _, n := range node { + buf.Myprintf("%s%v", prefix, n) + prefix = ", " + } +} + +// WalkSubtree walks the nodes of the subtree. +func (node Values) WalkSubtree(visit Visit) error { + for _, n := range node { + if err := Walk(visit, n); err != nil { + return err + } + } + return nil +} + +// UpdateExprs represents a list of update expressions. +type UpdateExprs []*UpdateExpr + +// Format formats the node. +func (node UpdateExprs) Format(buf *TrackedBuffer) { + var prefix string + for _, n := range node { + buf.Myprintf("%s%v", prefix, n) + prefix = ", " + } +} + +// WalkSubtree walks the nodes of the subtree. +func (node UpdateExprs) WalkSubtree(visit Visit) error { + for _, n := range node { + if err := Walk(visit, n); err != nil { + return err + } + } + return nil +} + +// UpdateExpr represents an update expression. +type UpdateExpr struct { + Name *ColName + Expr Expr +} + +// Format formats the node. +func (node *UpdateExpr) Format(buf *TrackedBuffer) { + buf.Myprintf("%v = %v", node.Name, node.Expr) +} + +// WalkSubtree walks the nodes of the subtree. 
+func (node *UpdateExpr) WalkSubtree(visit Visit) error { + if node == nil { + return nil + } + return Walk( + visit, + node.Name, + node.Expr, + ) +} + +// OnDup represents an ON DUPLICATE KEY clause. +type OnDup UpdateExprs + +// Format formats the node. +func (node OnDup) Format(buf *TrackedBuffer) { + if node == nil { + return + } + buf.Myprintf(" on duplicate key update %v", UpdateExprs(node)) +} + +// WalkSubtree walks the nodes of the subtree. +func (node OnDup) WalkSubtree(visit Visit) error { + return Walk(visit, UpdateExprs(node)) +} + +// ColIdent is a case insensitive SQL identifier. It will be escaped with +// backquotes if necessary. +type ColIdent struct { + // This artifact prevents this struct from being compared + // with itself. It consumes no space as long as it's not the + // last field in the struct. + _ [0]struct{ _ []byte } + val, lowered string +} + +// NewColIdent makes a new ColIdent. +func NewColIdent(str string) ColIdent { + return ColIdent{ + val: str, + } +} + +// Format formats the node. +func (node ColIdent) Format(buf *TrackedBuffer) { + formatID(buf, node.val, node.Lowered()) +} + +// WalkSubtree walks the nodes of the subtree. +func (node ColIdent) WalkSubtree(visit Visit) error { + return nil +} + +// IsEmpty returns true if the name is empty. +func (node ColIdent) IsEmpty() bool { + return node.val == "" +} + +// String returns the unescaped column name. It must +// not be used for SQL generation. Use sqlparser.String +// instead. The Stringer conformance is for usage +// in templates. +func (node ColIdent) String() string { + return node.val +} + +// CompliantName returns a compliant id name +// that can be used for a bind var. +func (node ColIdent) CompliantName() string { + return compliantName(node.val) +} + +// Lowered returns a lower-cased column name. +// This function should generally be used only for optimizing +// comparisons. 
+func (node ColIdent) Lowered() string { + if node.val == "" { + return "" + } + if node.lowered == "" { + node.lowered = strings.ToLower(node.val) + } + return node.lowered +} + +// Equal performs a case-insensitive compare. +func (node ColIdent) Equal(in ColIdent) bool { + return node.Lowered() == in.Lowered() +} + +// EqualString performs a case-insensitive compare with str. +func (node ColIdent) EqualString(str string) bool { + return node.Lowered() == strings.ToLower(str) +} + +// MarshalJSON marshals into JSON. +func (node ColIdent) MarshalJSON() ([]byte, error) { + return json.Marshal(node.val) +} + +// UnmarshalJSON unmarshals from JSON. +func (node *ColIdent) UnmarshalJSON(b []byte) error { + var result string + err := json.Unmarshal(b, &result) + if err != nil { + return err + } + node.val = result + return nil +} + +// TableIdent is a case sensitive SQL identifier. It will be escaped with +// backquotes if necessary. +type TableIdent struct { + v string +} + +// NewTableIdent creates a new TableIdent. +func NewTableIdent(str string) TableIdent { + return TableIdent{v: str} +} + +// Format formats the node. +func (node TableIdent) Format(buf *TrackedBuffer) { + formatID(buf, node.v, strings.ToLower(node.v)) +} + +// WalkSubtree walks the nodes of the subtree. +func (node TableIdent) WalkSubtree(visit Visit) error { + return nil +} + +// IsEmpty returns true if TabIdent is empty. +func (node TableIdent) IsEmpty() bool { + return node.v == "" +} + +// String returns the unescaped table name. It must +// not be used for SQL generation. Use sqlparser.String +// instead. The Stringer conformance is for usage +// in templates. +func (node TableIdent) String() string { + return node.v +} + +// CompliantName returns a compliant id name +// that can be used for a bind var. +func (node TableIdent) CompliantName() string { + return compliantName(node.v) +} + +// MarshalJSON marshals into JSON. 
+func (node TableIdent) MarshalJSON() ([]byte, error) {
+	return json.Marshal(node.v)
+}
+
+// UnmarshalJSON unmarshals from JSON.
+func (node *TableIdent) UnmarshalJSON(b []byte) error {
+	var result string
+	err := json.Unmarshal(b, &result)
+	if err != nil {
+		return err
+	}
+	node.v = result
+	return nil
+}
+
+// Backtick produces a backticked literal given an input string.
+// Embedded backticks are escaped by doubling them, per MySQL quoting rules.
+func Backtick(in string) string {
+	var buf bytes.Buffer
+	buf.WriteByte('`')
+	for _, c := range in {
+		buf.WriteRune(c)
+		if c == '`' {
+			buf.WriteByte('`')
+		}
+	}
+	buf.WriteByte('`')
+	return buf.String()
+}
+
+// formatID writes the identifier `original` to buf, backtick-quoting it
+// when it is not a plain identifier (first rune a letter, the rest letters
+// or digits) or when its lowered form is a reserved keyword.
+// NOTE(review): runes are truncated to uint16 before the isLetter/isDigit
+// checks, so code points above U+FFFF may be misclassified — presumably
+// acceptable for SQL identifiers; confirm against the lexer's rune handling.
+func formatID(buf *TrackedBuffer, original, lowered string) {
+	for i, c := range original {
+		if !isLetter(uint16(c)) {
+			// Digits are fine anywhere except the first position.
+			if i == 0 || !isDigit(uint16(c)) {
+				goto mustEscape
+			}
+		}
+	}
+	if _, ok := keywords[lowered]; ok {
+		goto mustEscape
+	}
+	buf.Myprintf("%s", original)
+	return
+
+mustEscape:
+	// Same doubling scheme as Backtick, written into the tracked buffer.
+	buf.WriteByte('`')
+	for _, c := range original {
+		buf.WriteRune(c)
+		if c == '`' {
+			buf.WriteByte('`')
+		}
+	}
+	buf.WriteByte('`')
+}
+
+// compliantName returns in with every rune that is not legal in a bind-var
+// name (letters anywhere, digits after the first position) replaced by '_'.
+// NOTE(review): same uint16 truncation caveat as formatID above.
+func compliantName(in string) string {
+	var buf bytes.Buffer
+	for i, c := range in {
+		if !isLetter(uint16(c)) {
+			if i == 0 || !isDigit(uint16(c)) {
+				buf.WriteByte('_')
+				continue
+			}
+		}
+		buf.WriteRune(c)
+	}
+	return buf.String()
+}
diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/ast_test.go b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/ast_test.go
new file mode 100644
index 00000000..a7838c62
--- /dev/null
+++ b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/ast_test.go
@@ -0,0 +1,399 @@
+/*
+Copyright 2017 Google Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqlparser + +import ( + "bytes" + "encoding/json" + "reflect" + "testing" + "unsafe" +) + +func TestAppend(t *testing.T) { + query := "select * from t where a = 1" + tree, err := Parse(query) + if err != nil { + t.Error(err) + } + var b bytes.Buffer + Append(&b, tree) + got := b.String() + want := query + if got != want { + t.Errorf("Append: %s, want %s", got, want) + } + Append(&b, tree) + got = b.String() + want = query + query + if got != want { + t.Errorf("Append: %s, want %s", got, want) + } +} + +func TestSelect(t *testing.T) { + tree, err := Parse("select * from t where a = 1") + if err != nil { + t.Error(err) + } + expr := tree.(*Select).Where.Expr + + sel := &Select{} + sel.AddWhere(expr) + buf := NewTrackedBuffer(nil) + sel.Where.Format(buf) + want := " where a = 1" + if buf.String() != want { + t.Errorf("where: %q, want %s", buf.String(), want) + } + sel.AddWhere(expr) + buf = NewTrackedBuffer(nil) + sel.Where.Format(buf) + want = " where a = 1 and a = 1" + if buf.String() != want { + t.Errorf("where: %q, want %s", buf.String(), want) + } + sel = &Select{} + sel.AddHaving(expr) + buf = NewTrackedBuffer(nil) + sel.Having.Format(buf) + want = " having a = 1" + if buf.String() != want { + t.Errorf("having: %q, want %s", buf.String(), want) + } + sel.AddHaving(expr) + buf = NewTrackedBuffer(nil) + sel.Having.Format(buf) + want = " having a = 1 and a = 1" + if buf.String() != want { + t.Errorf("having: %q, want %s", buf.String(), want) + } + + // OR clauses must be parenthesized. 
+ tree, err = Parse("select * from t where a = 1 or b = 1") + if err != nil { + t.Error(err) + } + expr = tree.(*Select).Where.Expr + sel = &Select{} + sel.AddWhere(expr) + buf = NewTrackedBuffer(nil) + sel.Where.Format(buf) + want = " where (a = 1 or b = 1)" + if buf.String() != want { + t.Errorf("where: %q, want %s", buf.String(), want) + } + sel = &Select{} + sel.AddHaving(expr) + buf = NewTrackedBuffer(nil) + sel.Having.Format(buf) + want = " having (a = 1 or b = 1)" + if buf.String() != want { + t.Errorf("having: %q, want %s", buf.String(), want) + } +} + +func TestAddOrder(t *testing.T) { + src, err := Parse("select foo, bar from baz order by foo") + if err != nil { + t.Error(err) + } + order := src.(*Select).OrderBy[0] + dst, err := Parse("select * from t") + if err != nil { + t.Error(err) + } + dst.(*Select).AddOrder(order) + buf := NewTrackedBuffer(nil) + dst.Format(buf) + want := "select * from t order by foo asc" + if buf.String() != want { + t.Errorf("order: %q, want %s", buf.String(), want) + } + dst, err = Parse("select * from t union select * from s") + if err != nil { + t.Error(err) + } + dst.(*Union).AddOrder(order) + buf = NewTrackedBuffer(nil) + dst.Format(buf) + want = "select * from t union select * from s order by foo asc" + if buf.String() != want { + t.Errorf("order: %q, want %s", buf.String(), want) + } +} + +func TestSetLimit(t *testing.T) { + src, err := Parse("select foo, bar from baz limit 4") + if err != nil { + t.Error(err) + } + limit := src.(*Select).Limit + dst, err := Parse("select * from t") + if err != nil { + t.Error(err) + } + dst.(*Select).SetLimit(limit) + buf := NewTrackedBuffer(nil) + dst.Format(buf) + want := "select * from t limit 4" + if buf.String() != want { + t.Errorf("limit: %q, want %s", buf.String(), want) + } + dst, err = Parse("select * from t union select * from s") + if err != nil { + t.Error(err) + } + dst.(*Union).SetLimit(limit) + buf = NewTrackedBuffer(nil) + dst.Format(buf) + want = "select * from t union 
select * from s limit 4" + if buf.String() != want { + t.Errorf("order: %q, want %s", buf.String(), want) + } +} + +func TestWhere(t *testing.T) { + var w *Where + buf := NewTrackedBuffer(nil) + w.Format(buf) + if buf.String() != "" { + t.Errorf("w.Format(nil): %q, want \"\"", buf.String()) + } + w = NewWhere(WhereStr, nil) + buf = NewTrackedBuffer(nil) + w.Format(buf) + if buf.String() != "" { + t.Errorf("w.Format(&Where{nil}: %q, want \"\"", buf.String()) + } +} + +func TestIsAggregate(t *testing.T) { + f := FuncExpr{Name: NewColIdent("avg")} + if !f.IsAggregate() { + t.Error("IsAggregate: false, want true") + } + + f = FuncExpr{Name: NewColIdent("Avg")} + if !f.IsAggregate() { + t.Error("IsAggregate: false, want true") + } + + f = FuncExpr{Name: NewColIdent("foo")} + if f.IsAggregate() { + t.Error("IsAggregate: true, want false") + } +} + +func TestColNameEqual(t *testing.T) { + var c1, c2 *ColName + if c1.Equal(c2) { + t.Error("nil columns equal, want unequal") + } + c1 = &ColName{ + Name: NewColIdent("aa"), + } + c2 = &ColName{ + Name: NewColIdent("bb"), + } + if c1.Equal(c2) { + t.Error("columns equal, want unequal") + } + c2.Name = NewColIdent("aa") + if !c1.Equal(c2) { + t.Error("columns unequal, want equal") + } +} + +func TestColIdent(t *testing.T) { + str := NewColIdent("Ab") + if str.String() != "Ab" { + t.Errorf("String=%s, want Ab", str.String()) + } + if str.String() != "Ab" { + t.Errorf("Val=%s, want Ab", str.String()) + } + if str.Lowered() != "ab" { + t.Errorf("Val=%s, want ab", str.Lowered()) + } + if !str.Equal(NewColIdent("aB")) { + t.Error("str.Equal(NewColIdent(aB))=false, want true") + } + if !str.EqualString("ab") { + t.Error("str.EqualString(ab)=false, want true") + } + str = NewColIdent("") + if str.Lowered() != "" { + t.Errorf("Val=%s, want \"\"", str.Lowered()) + } +} + +func TestColIdentMarshal(t *testing.T) { + str := NewColIdent("Ab") + b, err := json.Marshal(str) + if err != nil { + t.Fatal(err) + } + got := string(b) + want := 
`"Ab"` + if got != want { + t.Errorf("json.Marshal()= %s, want %s", got, want) + } + var out ColIdent + err = json.Unmarshal(b, &out) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(out, str) { + t.Errorf("Unmarshal: %v, want %v", out, str) + } +} + +func TestColIdentSize(t *testing.T) { + size := unsafe.Sizeof(NewColIdent("")) + want := 2 * unsafe.Sizeof("") + if size != want { + t.Errorf("Size of ColIdent: %d, want 32", want) + } +} + +func TestTableIdentMarshal(t *testing.T) { + str := NewTableIdent("Ab") + b, err := json.Marshal(str) + if err != nil { + t.Fatal(err) + } + got := string(b) + want := `"Ab"` + if got != want { + t.Errorf("json.Marshal()= %s, want %s", got, want) + } + var out TableIdent + err = json.Unmarshal(b, &out) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(out, str) { + t.Errorf("Unmarshal: %v, want %v", out, str) + } +} + +func TestHexDecode(t *testing.T) { + testcase := []struct { + in, out string + }{{ + in: "313233", + out: "123", + }, { + in: "ag", + out: "encoding/hex: invalid byte: U+0067 'g'", + }, { + in: "777", + out: "encoding/hex: odd length hex string", + }} + for _, tc := range testcase { + out, err := newHexVal(tc.in).HexDecode() + if err != nil { + if err.Error() != tc.out { + t.Errorf("Decode(%q): %v, want %s", tc.in, err, tc.out) + } + continue + } + if !bytes.Equal(out, []byte(tc.out)) { + t.Errorf("Decode(%q): %s, want %s", tc.in, out, tc.out) + } + } +} + +func TestCompliantName(t *testing.T) { + testcases := []struct { + in, out string + }{{ + in: "aa", + out: "aa", + }, { + in: "1a", + out: "_a", + }, { + in: "a1", + out: "a1", + }, { + in: "a.b", + out: "a_b", + }, { + in: ".ab", + out: "_ab", + }} + for _, tc := range testcases { + out := NewColIdent(tc.in).CompliantName() + if out != tc.out { + t.Errorf("ColIdent(%s).CompliantNamt: %s, want %s", tc.in, out, tc.out) + } + out = NewTableIdent(tc.in).CompliantName() + if out != tc.out { + t.Errorf("TableIdent(%s).CompliantNamt: %s, want 
%s", tc.in, out, tc.out) + } + } +} + +func TestEscape(t *testing.T) { + testcases := []struct { + in, out string + }{{ + in: "aa", + out: "`aa`", + }, { + in: "a`a", + out: "`a``a`", + }} + for _, tc := range testcases { + out := Backtick(tc.in) + if out != tc.out { + t.Errorf("Escape(%s): %s, want %s", tc.in, out, tc.out) + } + } +} + +func TestColumns_FindColumn(t *testing.T) { + cols := Columns{NewColIdent("a"), NewColIdent("c"), NewColIdent("b"), NewColIdent("0")} + + testcases := []struct { + in string + out int + }{{ + in: "a", + out: 0, + }, { + in: "b", + out: 2, + }, + { + in: "0", + out: 3, + }, + { + in: "f", + out: -1, + }} + + for _, tc := range testcases { + val := cols.FindColumn(NewColIdent(tc.in)) + if val != tc.out { + t.Errorf("FindColumn(%s): %d, want %d", tc.in, val, tc.out) + } + } +} diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/comments.go b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/comments.go new file mode 100644 index 00000000..ba9cccde --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/comments.go @@ -0,0 +1,140 @@ +/* +Copyright 2017 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqlparser + +import ( + "strings" + "unicode" +) + +type matchtracker struct { + query string + index int + eof bool +} + +// SplitTrailingComments splits the query trailing comments from the query. 
+func SplitTrailingComments(sql string) (query, comments string) { + tracker := matchtracker{ + query: sql, + index: len(sql), + } + pos := tracker.matchComments() + if pos >= 0 { + return tracker.query[:pos], tracker.query[pos:] + } + return sql, "" +} + +// matchComments matches trailing comments. If no comment was found, +// it returns -1. Otherwise, it returns the position where the query ends +// before the trailing comments begin. +func (tracker *matchtracker) matchComments() (pos int) { + pos = -1 + for { + // Verify end of comment + if !tracker.match('/') { + return pos + } + if !tracker.match('*') { + return pos + } + + // find start of comment + for { + if !tracker.match('*') { + if tracker.eof { + return pos + } + continue + } + // Skip subsequent '*' + for tracker.match('*') { + } + if tracker.eof { + return pos + } + // See if the last mismatch was a '/' + if tracker.query[tracker.index] == '/' { + break + } + } + tracker.skipBlanks() + pos = tracker.index + } +} + +// match advances to the 'previous' character and returns +// true if it's a match. If it cannot advance any more, +// it returns false and sets the eof flag. tracker.index +// points to the latest position. +func (tracker *matchtracker) match(required byte) bool { + if tracker.index == 0 { + tracker.eof = true + return false + } + tracker.index-- + ret := (tracker.query[tracker.index] == required) + return ret +} + +// skipBlanks advances till a non-blank character +// or the beginning of stream is reached. It does +// not set the eof flag. tracker.index points to +// the latest position. 
+func (tracker *matchtracker) skipBlanks() { + var ch byte + for ; tracker.index != 0; tracker.index-- { + ch = tracker.query[tracker.index-1] + if ch == ' ' || ch == '\n' || ch == '\r' || ch == '\t' { + continue + } + break + } +} + +// StripLeadingComments trims the SQL string and removes any leading comments +func StripLeadingComments(sql string) string { + sql = strings.TrimFunc(sql, unicode.IsSpace) + + for hasCommentPrefix(sql) { + switch sql[0] { + case '/': + // Multi line comment + index := strings.Index(sql, "*/") + if index <= 1 { + return sql + } + sql = sql[index+2:] + case '-': + // Single line comment + index := strings.Index(sql, "\n") + if index == -1 { + return sql + } + sql = sql[index+1:] + } + + sql = strings.TrimFunc(sql, unicode.IsSpace) + } + + return sql +} + +func hasCommentPrefix(sql string) bool { + return len(sql) > 1 && ((sql[0] == '/' && sql[1] == '*') || (sql[0] == '-' && sql[1] == '-')) +} diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/comments_test.go b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/comments_test.go new file mode 100644 index 00000000..5835e69a --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/comments_test.go @@ -0,0 +1,169 @@ +/* +Copyright 2017 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package sqlparser + +import "testing" + +func TestSplitTrailingComments(t *testing.T) { + var testCases = []struct { + input, outSQL, outComments string + }{{ + input: "/", + outSQL: "/", + outComments: "", + }, { + input: "*/", + outSQL: "*/", + outComments: "", + }, { + input: "/*/", + outSQL: "/*/", + outComments: "", + }, { + input: "a*/", + outSQL: "a*/", + outComments: "", + }, { + input: "*a*/", + outSQL: "*a*/", + outComments: "", + }, { + input: "**a*/", + outSQL: "**a*/", + outComments: "", + }, { + input: "/*b**a*/", + outSQL: "", + outComments: "/*b**a*/", + }, { + input: "/*a*/", + outSQL: "", + outComments: "/*a*/", + }, { + input: "/**/", + outSQL: "", + outComments: "/**/", + }, { + input: "/*b*/ /*a*/", + outSQL: "", + outComments: "/*b*/ /*a*/", + }, { + input: "foo /* bar */", + outSQL: "foo", + outComments: " /* bar */", + }, { + input: "foo /** bar */", + outSQL: "foo", + outComments: " /** bar */", + }, { + input: "foo /*** bar */", + outSQL: "foo", + outComments: " /*** bar */", + }, { + input: "foo /** bar **/", + outSQL: "foo", + outComments: " /** bar **/", + }, { + input: "foo /*** bar ***/", + outSQL: "foo", + outComments: " /*** bar ***/", + }, { + input: "*** bar ***/", + outSQL: "*** bar ***/", + outComments: "", + }} + for _, testCase := range testCases { + gotSQL, gotComments := SplitTrailingComments(testCase.input) + + if gotSQL != testCase.outSQL { + t.Errorf("test input: '%s', got SQL\n%+v, want\n%+v", testCase.input, gotSQL, testCase.outSQL) + } + if gotComments != testCase.outComments { + t.Errorf("test input: '%s', got Comments\n%+v, want\n%+v", testCase.input, gotComments, testCase.outComments) + } + } +} + +func TestStripLeadingComments(t *testing.T) { + var testCases = []struct { + input, outSQL string + }{{ + input: "/", + outSQL: "/", + }, { + input: "*/", + outSQL: "*/", + }, { + input: "/*/", + outSQL: "/*/", + }, { + input: "/*a", + outSQL: "/*a", + }, { + input: "/*a*", + outSQL: "/*a*", + }, { + input: 
"/*a**", + outSQL: "/*a**", + }, { + input: "/*b**a*/", + outSQL: "", + }, { + input: "/*a*/", + outSQL: "", + }, { + input: "/**/", + outSQL: "", + }, { + input: "/*b*/ /*a*/", + outSQL: "", + }, { + input: `/*b*/ --foo +bar`, + outSQL: "bar", + }, { + input: "foo /* bar */", + outSQL: "foo /* bar */", + }, { + input: "/* foo */ bar", + outSQL: "bar", + }, { + input: "-- /* foo */ bar", + outSQL: "-- /* foo */ bar", + }, { + input: "foo -- bar */", + outSQL: "foo -- bar */", + }, { + input: `/* +foo */ bar`, + outSQL: "bar", + }, { + input: `-- foo bar +a`, + outSQL: "a", + }, { + input: `-- foo bar`, + outSQL: "-- foo bar", + }} + for _, testCase := range testCases { + gotSQL := StripLeadingComments(testCase.input) + + if gotSQL != testCase.outSQL { + t.Errorf("test input: '%s', got SQL\n%+v, want\n%+v", testCase.input, gotSQL, testCase.outSQL) + } + } +} diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/ddl_test.go b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/ddl_test.go new file mode 100644 index 00000000..5ef54891 --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/ddl_test.go @@ -0,0 +1,262 @@ +/* +Copyright 2017 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqlparser + +import "strings" +import "testing" + +func TestDDL1(t *testing.T) { + validSQL := []struct { + input string + output string + }{ + // Table. 
+ { + input: "create table t (\n" + + " `id` int primary key,\n" + + " `name` varchar(10)\n" + + ") partition by hash(id)", + output: "create table t (\n" + + " `id` int primary key,\n" + + " `name` varchar(10)\n" + + ")", + }, + + { + input: "create table t (\n" + + " `id` int primary key,\n" + + " `name` varchar(10)\n" + + ") partition by hash(id)", + output: "create table t (\n" + + " `id` int primary key,\n" + + " `name` varchar(10)\n" + + ")", + }, + + { + input: "create table t (\n" + + " `id` int primary key,\n" + + " `name` varchar(10)\n" + + ") partition by hash(id) partitions 6", + output: "create table t (\n" + + " `id` int primary key,\n" + + " `name` varchar(10)\n" + + ")", + }, + + { + input: "create table test.t (\n" + + " `id` int primary key,\n" + + " `name` varchar(10)\n" + + ")", + output: "create table test.t (\n" + + " `id` int primary key,\n" + + " `name` varchar(10)\n" + + ")", + }, + + { + input: "create table test.t (\n" + + " `id` int primary key,\n" + + " `name` varchar(10)\n" + + ") default charset=utf8", + output: "create table test.t (\n" + + " `id` int primary key,\n" + + " `name` varchar(10)\n" + + ") default charset=utf8", + }, + + { + input: "create table test.t (\n" + + " `id` int primary key,\n" + + " `name` varchar(10)\n" + + ") engine=tokudb", + output: "create table test.t (\n" + + " `id` int primary key,\n" + + " `name` varchar(10)\n" + + ") engine=tokudb", + }, + + { + input: "create table test.t (\n" + + " `id` int primary key,\n" + + " `name` varchar(10)\n" + + ") engine=tokudb default charset=utf8", + output: "create table test.t (\n" + + " `id` int primary key,\n" + + " `name` varchar(10)\n" + + ") engine=tokudb default charset=utf8", + }, + + { + input: "create table test.t (\n" + + " `id` int primary key,\n" + + " `name` varchar(10)\n" + + ") ENGINE=InnoDB AUTO_INCREMENT=34 DEFAULT CHARSET=utf8mb4 PARTITION BY HASH(id)", + output: "create table test.t (\n" + + " `id` int primary key,\n" + + " `name` varchar(10)\n" + + 
") engine=InnoDB default charset=utf8mb4", + }, + + { + input: "create table test.t (\n" + + " `id` int primary key,\n" + + " `name` varchar(10)\n" + + ") engine=tokudb default charset=utf8 partition by hash(id)", + output: "create table test.t (\n" + + " `id` int primary key,\n" + + " `name` varchar(10)\n" + + ") engine=tokudb default charset=utf8", + }, + // Index. + { + input: "create table test.t (\n" + + " `id` int primary key,\n" + + " `name` varchar(10),\n" + + " KEY `IDX_USER` (`user_id`)\n" + + ") engine=tokudb default charset=utf8 partition by hash(id)", + output: "create table test.t (\n" + + " `id` int primary key,\n" + + " `name` varchar(10),\n" + + " key `IDX_USER` (`user_id`)\n" + + ") engine=tokudb default charset=utf8", + }, + + { + input: "create table if not exists t1 (a int)", + output: "create table if not exists t1 (\n\t`a` int\n)", + }, + + { + input: "truncate table t1", + output: "truncate table t1", + }, + + { + input: "drop table t1", + output: "drop table t1", + }, + + { + input: "drop table if exists t1", + output: "drop table if exists t1", + }, + + // Database. + { + input: "drop database test", + output: "drop database test", + }, + + { + input: "create database test", + output: "create database test", + }, + + { + input: "drop database if exists test", + output: "drop database if exists test", + }, + { + input: "create database if not exists test", + output: "create database if not exists test", + }, + + // Alter engine. + { + input: "alter table test engine=tokudb", + output: "alter table test engine = tokudb", + }, + { + input: "alter table test.t1 engine=tokudb", + output: "alter table test.t1 engine = tokudb", + }, + + // Alter charset. + { + input: "alter table test.t1 convert to character set utf8", + output: "alter table test.t1 convert to character set utf8", + }, + + // Index. 
+ { + input: "create index idx on test(a,b)", + output: "create index idx on test", + }, + { + input: "drop index idx on test", + output: "drop index idx on test", + }, + + // Add column. + { + input: "alter table test add column(id int primary key)", + output: "alter table test add column (\n" + + " `id` int primary key\n" + + ")", + }, + { + input: "alter table test add column(id int primary key, name varchar(100))", + output: "alter table test add column (\n" + + " `id` int primary key,\n" + + " `name` varchar(100)\n" + + ")", + }, + + // Modify column. + { + input: "alter table test modify column name varchar(200)", + output: "alter table test modify column `name` varchar(200)", + }, + { + input: "alter table test modify column name varchar(200) not null", + output: "alter table test modify column `name` varchar(200) not null", + }, + + // Drop column. + { + input: "alter table test drop column name", + output: "alter table test drop column `name`", + }, + } + + for _, ddl := range validSQL { + sql := strings.TrimSpace(ddl.input) + tree, err := Parse(sql) + if err != nil { + t.Errorf("input: %s, err: %v", sql, err) + continue + } + + // Walk. + Walk(func(node SQLNode) (bool, error) { + return true, nil + }, tree) + + // Walk. + node := tree.(*DDL) + Walk(func(node SQLNode) (bool, error) { + return true, nil + }, node.TableSpec) + + got := String(tree.(*DDL)) + if ddl.output != got { + t.Errorf("want:\n%s\ngot:\n%s", ddl.output, got) + } + } +} diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/depends/bytes2/buffer.go b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/depends/bytes2/buffer.go new file mode 100644 index 00000000..72f8fc6e --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/depends/bytes2/buffer.go @@ -0,0 +1,65 @@ +/* +Copyright 2017 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package bytes2
+
+// Buffer implements a subset of the write portion of
+// bytes.Buffer, but more efficiently. This is meant to
+// be used in very high QPS operations, especially for
+// WriteByte, and without abstracting it as a Writer.
+// Function signatures contain errors for compatibility,
+// but they do not return errors.
+type Buffer struct {
+	bytes []byte
+}
+
+// NewBuffer is equivalent to bytes.NewBuffer.
+func NewBuffer(b []byte) *Buffer {
+	return &Buffer{bytes: b}
+}
+
+// Write is equivalent to bytes.Buffer.Write.
+// The returned error is always nil.
+func (buf *Buffer) Write(b []byte) (int, error) {
+	buf.bytes = append(buf.bytes, b...)
+	return len(b), nil
+}
+
+// WriteString is equivalent to bytes.Buffer.WriteString.
+// The returned error is always nil.
+func (buf *Buffer) WriteString(s string) (int, error) {
+	buf.bytes = append(buf.bytes, s...)
+	return len(s), nil
+}
+
+// WriteByte is equivalent to bytes.Buffer.WriteByte.
+// The returned error is always nil.
+func (buf *Buffer) WriteByte(b byte) error {
+	buf.bytes = append(buf.bytes, b)
+	return nil
+}
+
+// Bytes is equivalent to bytes.Buffer.Bytes.
+// The returned slice aliases the internal storage (no copy); later writes
+// to the buffer may overwrite it.
+func (buf *Buffer) Bytes() []byte {
+	return buf.bytes
+}
+
+// String is equivalent to bytes.Buffer.String.
+func (buf *Buffer) String() string {
+	return string(buf.bytes)
+}
+
+// Len is equivalent to bytes.Buffer.Len.
+func (buf *Buffer) Len() int { + return len(buf.bytes) +} diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/depends/bytes2/buffer_test.go b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/depends/bytes2/buffer_test.go new file mode 100644 index 00000000..6f3b102f --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/depends/bytes2/buffer_test.go @@ -0,0 +1,36 @@ +/* +Copyright 2017 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package bytes2 + +import "testing" + +func TestBuffer(t *testing.T) { + b := NewBuffer(nil) + b.Write([]byte("ab")) + b.WriteString("cd") + b.WriteByte('e') + want := "abcde" + if got := string(b.Bytes()); got != want { + t.Errorf("b.Bytes(): %s, want %s", got, want) + } + if got := b.String(); got != want { + t.Errorf("b.String(): %s, want %s", got, want) + } + if got := b.Len(); got != 5 { + t.Errorf("b.Len(): %d, want 5", got) + } +} diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/depends/cistring/cistring.go b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/depends/cistring/cistring.go new file mode 100644 index 00000000..83aefc9a --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/depends/cistring/cistring.go @@ -0,0 +1,92 @@ +// Copyright 2016, Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cistring implements a case-insensitive string type. 
+package cistring + +import ( + "encoding/json" + "strings" +) + +// CIString is an immutable case-insensitive string. +// It precomputes and stores the lower case version of the string +// internally. This increases the initial memory cost of the object +// but saves the CPU (and memory) cost of lowercasing as needed. +// This should generally trade off favorably because there are many +// situations where comparisons are performed in a loop against +// the same object. +type CIString struct { + // This artifact prevents this struct from being compared + // with itself. It consumes no space as long as it's not the + // last field in the struct. + _ [0]struct{} + val, lowered string +} + +// New creates a new CIString. +func New(str string) CIString { + return CIString{ + val: str, + lowered: strings.ToLower(str), + } +} + +func (s CIString) String() string { + return s.val +} + +// Original returns the case-preserved value of the string. +func (s CIString) Original() string { + return s.val +} + +// Lowered returns the lower-case value of the string. +// This function should generally be used only for optimizing +// comparisons. +func (s CIString) Lowered() string { + return s.lowered +} + +// Equal performs a case-insensitive compare. For comparing +// in a loop, it's beneficial to build a CIString outside +// the loop and using it to compare with other CIString +// variables inside the loop. +func (s CIString) Equal(in CIString) bool { + return s.lowered == in.lowered +} + +// EqualString performs a case-insensitive compare with str. +// If the input is already lower-cased, it's more efficient +// to check if s.Lowered()==in. +func (s CIString) EqualString(in string) bool { + return s.lowered == strings.ToLower(in) +} + +// MarshalJSON marshals into JSON. +func (s CIString) MarshalJSON() ([]byte, error) { + return json.Marshal(s.val) +} + +// UnmarshalJSON unmarshals from JSON. 
+func (s *CIString) UnmarshalJSON(b []byte) error { + var result string + err := json.Unmarshal(b, &result) + if err != nil { + return err + } + s.val = result + s.lowered = strings.ToLower(result) + return nil +} + +// ToStrings converts a []CIString to a case-preserved +// []string. +func ToStrings(in []CIString) []string { + s := make([]string, len(in)) + for i := 0; i < len(in); i++ { + s[i] = in[i].Original() + } + return s +} diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/depends/cistring/cistring_test.go b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/depends/cistring/cistring_test.go new file mode 100644 index 00000000..54af56ca --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/depends/cistring/cistring_test.go @@ -0,0 +1,73 @@ +// Copyright 2016, Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cistring + +import ( + "encoding/json" + "reflect" + "testing" + "unsafe" +) + +func TestCIString(t *testing.T) { + str := New("Ab") + if str.String() != "Ab" { + t.Errorf("String=%s, want Ab", str.Original()) + } + if str.Original() != "Ab" { + t.Errorf("Val=%s, want Ab", str.Original()) + } + if str.Lowered() != "ab" { + t.Errorf("Val=%s, want ab", str.Lowered()) + } + str2 := New("aB") + if !str.Equal(str2) { + t.Error("str.Equal(New(aB))=false, want true") + } + if !str.EqualString("ab") { + t.Error("str.Equal(ab)=false, want true") + } +} + +func TestCIStringMarshal(t *testing.T) { + str := New("Ab") + b, err := json.Marshal(str) + if err != nil { + t.Fatal(err) + } + got := string(b) + want := `"Ab"` + if got != want { + t.Errorf("json.Marshal()= %s, want %s", got, want) + } + var out CIString + err = json.Unmarshal(b, &out) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(out, str) { + t.Errorf("Unmarshal: %v, want %v", out, str) + } +} + +func TestToStrings(t *testing.T) { + in := []CIString{ + 
New("Ab"),
+		New("aB"),
+	}
+	want := []string{"Ab", "aB"}
+	got := ToStrings(in)
+	if !reflect.DeepEqual(got, want) {
+		t.Errorf("ToStrings(in)=%+v, want %+v", got, want)
+	}
+}
+
+// TestSize verifies that CIString stays exactly two string headers wide,
+// i.e. that the zero-width [0]struct{} comparison guard adds no space.
+func TestSize(t *testing.T) {
+	size := unsafe.Sizeof(New(""))
+	want := 2 * unsafe.Sizeof("")
+	if size != want {
+		// Report the measured size against the expected size; the
+		// previous message printed `want` twice and hard-coded "32",
+		// which is only correct on 64-bit platforms.
+		t.Errorf("Size of CIString: %d, want %d", size, want)
+	}
+}
diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/depends/hack/hack.go b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/depends/hack/hack.go new file mode 100644 index 00000000..9e452baf --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/depends/hack/hack.go @@ -0,0 +1,67 @@
+// Copyright 2012, Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package hack gives you some efficient functionality at the cost of
+// breaking some Go rules.
+package hack
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+// StringArena lets you consolidate allocations for a group of strings
+// that have similar life length
+type StringArena struct {
+	buf []byte
+	str string
+}
+
+// NewStringArena creates an arena of the specified size.
+func NewStringArena(size int) *StringArena {
+	sa := &StringArena{buf: make([]byte, 0, size)}
+	pbytes := (*reflect.SliceHeader)(unsafe.Pointer(&sa.buf))
+	pstring := (*reflect.StringHeader)(unsafe.Pointer(&sa.str))
+	pstring.Data = pbytes.Data
+	pstring.Len = pbytes.Cap
+	return sa
+}
+
+// NewString copies a byte slice into the arena and returns it as a string.
+// If the arena is full, it returns a traditional go string.
+func (sa *StringArena) NewString(b []byte) string {
+	if len(b) == 0 {
+		return ""
+	}
+	if len(sa.buf)+len(b) > cap(sa.buf) {
+		return string(b)
+	}
+	start := len(sa.buf)
+	sa.buf = append(sa.buf, b...)
+	return sa.str[start : start+len(b)]
+}
+
+// SpaceLeft returns the amount of space left in the arena.
+func (sa *StringArena) SpaceLeft() int { + return cap(sa.buf) - len(sa.buf) +} + +// String force casts a []byte to a string. +// USE AT YOUR OWN RISK +func String(b []byte) (s string) { + if len(b) == 0 { + return "" + } + pbytes := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + pstring := (*reflect.StringHeader)(unsafe.Pointer(&s)) + pstring.Data = pbytes.Data + pstring.Len = pbytes.Len + return +} + +// StringPointer returns &s[0], which is not allowed in go +func StringPointer(s string) unsafe.Pointer { + pstring := (*reflect.StringHeader)(unsafe.Pointer(&s)) + return unsafe.Pointer(pstring.Data) +} diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/depends/hack/hack_test.go b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/depends/hack/hack_test.go new file mode 100644 index 00000000..07fad452 --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/depends/hack/hack_test.go @@ -0,0 +1,79 @@ +// Copyright 2012, Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package hack + +import "testing" + +func TestStringArena(t *testing.T) { + sarena := NewStringArena(10) + + s0 := sarena.NewString(nil) + checkint(t, len(sarena.buf), 0) + checkint(t, sarena.SpaceLeft(), 10) + checkstring(t, s0, "") + + s1 := sarena.NewString([]byte("01234")) + checkint(t, len(sarena.buf), 5) + checkint(t, sarena.SpaceLeft(), 5) + checkstring(t, s1, "01234") + + s2 := sarena.NewString([]byte("5678")) + checkint(t, len(sarena.buf), 9) + checkint(t, sarena.SpaceLeft(), 1) + checkstring(t, s2, "5678") + + // s3 will be allocated outside of sarena + s3 := sarena.NewString([]byte("ab")) + checkint(t, len(sarena.buf), 9) + checkint(t, sarena.SpaceLeft(), 1) + checkstring(t, s3, "ab") + + // s4 should still fit in sarena + s4 := sarena.NewString([]byte("9")) + checkint(t, len(sarena.buf), 10) + checkint(t, sarena.SpaceLeft(), 0) + checkstring(t, s4, "9") + + sarena.buf[0] = 'A' + checkstring(t, s1, "A1234") + + sarena.buf[5] = 'B' + checkstring(t, s2, "B678") + + sarena.buf[9] = 'C' + // s3 will not change + checkstring(t, s3, "ab") + checkstring(t, s4, "C") + checkstring(t, sarena.str, "A1234B678C") +} + +func checkstring(t *testing.T, actual, expected string) { + if actual != expected { + t.Errorf("received %s, expecting %s", actual, expected) + } +} + +func checkint(t *testing.T, actual, expected int) { + if actual != expected { + t.Errorf("received %d, expecting %d", actual, expected) + } +} + +func TestByteToString(t *testing.T) { + v1 := []byte("1234") + if s := String(v1); s != "1234" { + t.Errorf("String(\"1234\"): %q, want 1234", s) + } + + v1 = []byte("") + if s := String(v1); s != "" { + t.Errorf("String(\"\"): %q, want empty", s) + } + + v1 = nil + if s := String(v1); s != "" { + t.Errorf("String(\"\"): %q, want empty", s) + } +} diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/depends/query/query.pb.go b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/depends/query/query.pb.go new file mode 100644 index 
00000000..9b4cc9b0 --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/depends/query/query.pb.go @@ -0,0 +1,366 @@ +// Code generated by protoc-gen-go. +// source: query.proto +// DO NOT EDIT! + +/* +Package query is a generated protocol buffer package. + +It is generated from these files: + query.proto +*/ +package query + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. + +// Flag allows us to qualify types by their common properties. + +// Flags sent from the MySQL C API +type MySqlFlag int32 + +const ( + MySqlFlag_EMPTY MySqlFlag = 0 + MySqlFlag_NOT_NULL_FLAG MySqlFlag = 1 + MySqlFlag_PRI_KEY_FLAG MySqlFlag = 2 + MySqlFlag_UNIQUE_KEY_FLAG MySqlFlag = 4 + MySqlFlag_MULTIPLE_KEY_FLAG MySqlFlag = 8 + MySqlFlag_BLOB_FLAG MySqlFlag = 16 + MySqlFlag_UNSIGNED_FLAG MySqlFlag = 32 + MySqlFlag_ZEROFILL_FLAG MySqlFlag = 64 + MySqlFlag_BINARY_FLAG MySqlFlag = 128 + MySqlFlag_ENUM_FLAG MySqlFlag = 256 + MySqlFlag_AUTO_INCREMENT_FLAG MySqlFlag = 512 + MySqlFlag_TIMESTAMP_FLAG MySqlFlag = 1024 + MySqlFlag_SET_FLAG MySqlFlag = 2048 + MySqlFlag_NO_DEFAULT_VALUE_FLAG MySqlFlag = 4096 + MySqlFlag_ON_UPDATE_NOW_FLAG MySqlFlag = 8192 + MySqlFlag_NUM_FLAG MySqlFlag = 32768 + MySqlFlag_PART_KEY_FLAG MySqlFlag = 16384 + MySqlFlag_GROUP_FLAG MySqlFlag = 32768 + MySqlFlag_UNIQUE_FLAG MySqlFlag = 65536 + MySqlFlag_BINCMP_FLAG MySqlFlag = 131072 +) + +var MySqlFlag_name = map[int32]string{ + 0: "EMPTY", + 1: "NOT_NULL_FLAG", + 2: "PRI_KEY_FLAG", + 4: "UNIQUE_KEY_FLAG", + 8: "MULTIPLE_KEY_FLAG", + 16: "BLOB_FLAG", + 32: "UNSIGNED_FLAG", + 64: "ZEROFILL_FLAG", + 128: "BINARY_FLAG", + 256: "ENUM_FLAG", + 512: "AUTO_INCREMENT_FLAG", + 1024: "TIMESTAMP_FLAG", + 2048: "SET_FLAG", + 4096: "NO_DEFAULT_VALUE_FLAG", + 8192: "ON_UPDATE_NOW_FLAG", + 32768: "NUM_FLAG", + 16384: 
"PART_KEY_FLAG", + // Duplicate value: 32768: "GROUP_FLAG", + 65536: "UNIQUE_FLAG", + 131072: "BINCMP_FLAG", +} +var MySqlFlag_value = map[string]int32{ + "EMPTY": 0, + "NOT_NULL_FLAG": 1, + "PRI_KEY_FLAG": 2, + "UNIQUE_KEY_FLAG": 4, + "MULTIPLE_KEY_FLAG": 8, + "BLOB_FLAG": 16, + "UNSIGNED_FLAG": 32, + "ZEROFILL_FLAG": 64, + "BINARY_FLAG": 128, + "ENUM_FLAG": 256, + "AUTO_INCREMENT_FLAG": 512, + "TIMESTAMP_FLAG": 1024, + "SET_FLAG": 2048, + "NO_DEFAULT_VALUE_FLAG": 4096, + "ON_UPDATE_NOW_FLAG": 8192, + "NUM_FLAG": 32768, + "PART_KEY_FLAG": 16384, + "GROUP_FLAG": 32768, + "UNIQUE_FLAG": 65536, + "BINCMP_FLAG": 131072, +} + +type Flag int32 + +const ( + Flag_NONE Flag = 0 + Flag_ISINTEGRAL Flag = 256 + Flag_ISUNSIGNED Flag = 512 + Flag_ISFLOAT Flag = 1024 + Flag_ISQUOTED Flag = 2048 + Flag_ISTEXT Flag = 4096 + Flag_ISBINARY Flag = 8192 +) + +var Flag_name = map[int32]string{ + 0: "NONE", + 256: "ISINTEGRAL", + 512: "ISUNSIGNED", + 1024: "ISFLOAT", + 2048: "ISQUOTED", + 4096: "ISTEXT", + 8192: "ISBINARY", +} +var Flag_value = map[string]int32{ + "NONE": 0, + "ISINTEGRAL": 256, + "ISUNSIGNED": 512, + "ISFLOAT": 1024, + "ISQUOTED": 2048, + "ISTEXT": 4096, + "ISBINARY": 8192, +} + +// Type defines the various supported data types in bind vars +// and query results. +type Type int32 + +const ( + // NULL_TYPE specifies a NULL type. + Type_NULL_TYPE Type = 0 + // INT8 specifies a TINYINT type. + // Properties: 1, IsNumber. + Type_INT8 Type = 257 + // UINT8 specifies a TINYINT UNSIGNED type. + // Properties: 2, IsNumber, IsUnsigned. + Type_UINT8 Type = 770 + // INT16 specifies a SMALLINT type. + // Properties: 3, IsNumber. + Type_INT16 Type = 259 + // UINT16 specifies a SMALLINT UNSIGNED type. + // Properties: 4, IsNumber, IsUnsigned. + Type_UINT16 Type = 772 + // INT24 specifies a MEDIUMINT type. + // Properties: 5, IsNumber. + Type_INT24 Type = 261 + // UINT24 specifies a MEDIUMINT UNSIGNED type. + // Properties: 6, IsNumber, IsUnsigned. 
+ Type_UINT24 Type = 774 + // INT32 specifies a INTEGER type. + // Properties: 7, IsNumber. + Type_INT32 Type = 263 + // UINT32 specifies a INTEGER UNSIGNED type. + // Properties: 8, IsNumber, IsUnsigned. + Type_UINT32 Type = 776 + // INT64 specifies a BIGINT type. + // Properties: 9, IsNumber. + Type_INT64 Type = 265 + // UINT64 specifies a BIGINT UNSIGNED type. + // Properties: 10, IsNumber, IsUnsigned. + Type_UINT64 Type = 778 + // FLOAT32 specifies a FLOAT type. + // Properties: 11, IsFloat. + Type_FLOAT32 Type = 1035 + // FLOAT64 specifies a DOUBLE or REAL type. + // Properties: 12, IsFloat. + Type_FLOAT64 Type = 1036 + // TIMESTAMP specifies a TIMESTAMP type. + // Properties: 13, IsQuoted. + Type_TIMESTAMP Type = 2061 + // DATE specifies a DATE type. + // Properties: 14, IsQuoted. + Type_DATE Type = 2062 + // TIME specifies a TIME type. + // Properties: 15, IsQuoted. + Type_TIME Type = 2063 + // DATETIME specifies a DATETIME type. + // Properties: 16, IsQuoted. + Type_DATETIME Type = 2064 + // YEAR specifies a YEAR type. + // Properties: 17, IsNumber, IsUnsigned. + Type_YEAR Type = 785 + // DECIMAL specifies a DECIMAL or NUMERIC type. + // Properties: 18, None. + Type_DECIMAL Type = 18 + // TEXT specifies a TEXT type. + // Properties: 19, IsQuoted, IsText. + Type_TEXT Type = 6163 + // BLOB specifies a BLOB type. + // Properties: 20, IsQuoted, IsBinary. + Type_BLOB Type = 10260 + // VARCHAR specifies a VARCHAR type. + // Properties: 21, IsQuoted, IsText. + Type_VARCHAR Type = 6165 + // VARBINARY specifies a VARBINARY type. + // Properties: 22, IsQuoted, IsBinary. + Type_VARBINARY Type = 10262 + // CHAR specifies a CHAR type. + // Properties: 23, IsQuoted, IsText. + Type_CHAR Type = 6167 + // BINARY specifies a BINARY type. + // Properties: 24, IsQuoted, IsBinary. + Type_BINARY Type = 10264 + // BIT specifies a BIT type. + // Properties: 25, IsQuoted. + Type_BIT Type = 2073 + // ENUM specifies an ENUM type. + // Properties: 26, IsQuoted. 
+ Type_ENUM Type = 2074 + // SET specifies a SET type. + // Properties: 27, IsQuoted. + Type_SET Type = 2075 + // TUPLE specifies a a tuple. This cannot + // be returned in a QueryResult, but it can + // be sent as a bind var. + // Properties: 28, None. + Type_TUPLE Type = 28 + // GEOMETRY specifies a GEOMETRY type. + // Properties: 29, IsQuoted. + Type_GEOMETRY Type = 2077 + // JSON specified a JSON type. + // Properties: 30, IsQuoted. + Type_JSON Type = 2078 +) + +var Type_name = map[int32]string{ + 0: "NULL_TYPE", + 257: "INT8", + 770: "UINT8", + 259: "INT16", + 772: "UINT16", + 261: "INT24", + 774: "UINT24", + 263: "INT32", + 776: "UINT32", + 265: "INT64", + 778: "UINT64", + 1035: "FLOAT32", + 1036: "FLOAT64", + 2061: "TIMESTAMP", + 2062: "DATE", + 2063: "TIME", + 2064: "DATETIME", + 785: "YEAR", + 18: "DECIMAL", + 6163: "TEXT", + 10260: "BLOB", + 6165: "VARCHAR", + 10262: "VARBINARY", + 6167: "CHAR", + 10264: "BINARY", + 2073: "BIT", + 2074: "ENUM", + 2075: "SET", + 28: "TUPLE", + 2077: "GEOMETRY", + 2078: "JSON", +} +var Type_value = map[string]int32{ + "NULL_TYPE": 0, + "INT8": 257, + "UINT8": 770, + "INT16": 259, + "UINT16": 772, + "INT24": 261, + "UINT24": 774, + "INT32": 263, + "UINT32": 776, + "INT64": 265, + "UINT64": 778, + "FLOAT32": 1035, + "FLOAT64": 1036, + "TIMESTAMP": 2061, + "DATE": 2062, + "TIME": 2063, + "DATETIME": 2064, + "YEAR": 785, + "DECIMAL": 18, + "TEXT": 6163, + "BLOB": 10260, + "VARCHAR": 6165, + "VARBINARY": 10262, + "CHAR": 6167, + "BINARY": 10264, + "BIT": 2073, + "ENUM": 2074, + "SET": 2075, + "TUPLE": 28, + "GEOMETRY": 2077, + "JSON": 2078, +} + +func (x Type) String() string { + return Type_name[int32(x)] +} + +// EventToken is a structure that describes a point in time in a +// replication stream on one shard. The most recent known replication +// position can be retrieved from vttablet when executing a query. It +// is also sent with the replication streams from the binlog service. 
+type EventToken struct { + // timestamp is the MySQL timestamp of the statements. Seconds since Epoch. + Timestamp int64 `protobuf:"varint,1,opt,name=timestamp" json:"timestamp,omitempty"` + // The shard name that applied the statements. Note this is not set when + // streaming from a vttablet. It is only used on the client -> vtgate link. + Shard string `protobuf:"bytes,2,opt,name=shard" json:"shard,omitempty"` + // The position on the replication stream after this statement was applied. + // It is not the transaction ID / GTID, but the position / GTIDSet. + Position string `protobuf:"bytes,3,opt,name=position" json:"position,omitempty"` +} + +// Value represents a typed value. +type Value struct { + Type Type `protobuf:"varint,1,opt,name=type,enum=query.Type" json:"type,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +// BindVariable represents a single bind variable in a Query. +type BindVariable struct { + Type Type `protobuf:"varint,1,opt,name=type,enum=query.Type" json:"type,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + // values are set if type is TUPLE. + Values []*Value `protobuf:"bytes,3,rep,name=values" json:"values,omitempty"` +} + +// Field describes a single column returned by a query +type Field struct { + // name of the field as returned by mysql C API + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // vitess-defined type. Conversion function is in sqltypes package. + Type Type `protobuf:"varint,2,opt,name=type,enum=query.Type" json:"type,omitempty"` + // Remaining fields from mysql C API. + // These fields are only populated when ExecuteOptions.included_fields + // is set to IncludedFields.ALL. 
+ Table string `protobuf:"bytes,3,opt,name=table" json:"table,omitempty"` + OrgTable string `protobuf:"bytes,4,opt,name=org_table,json=orgTable" json:"org_table,omitempty"` + Database string `protobuf:"bytes,5,opt,name=database" json:"database,omitempty"` + OrgName string `protobuf:"bytes,6,opt,name=org_name,json=orgName" json:"org_name,omitempty"` + // column_length is really a uint32. All 32 bits can be used. + ColumnLength uint32 `protobuf:"varint,7,opt,name=column_length,json=columnLength" json:"column_length,omitempty"` + // charset is actually a uint16. Only the lower 16 bits are used. + Charset uint32 `protobuf:"varint,8,opt,name=charset" json:"charset,omitempty"` + // decimals is actualy a uint8. Only the lower 8 bits are used. + Decimals uint32 `protobuf:"varint,9,opt,name=decimals" json:"decimals,omitempty"` + // flags is actually a uint16. Only the lower 16 bits are used. + Flags uint32 `protobuf:"varint,10,opt,name=flags" json:"flags,omitempty"` +} + +// Row is a database row. +type Row struct { + // lengths contains the length of each value in values. + // A length of -1 means that the field is NULL. While + // reading values, you have to accummulate the length + // to know the offset where the next value begins in values. + Lengths []int64 `protobuf:"zigzag64,1,rep,packed,name=lengths" json:"lengths,omitempty"` + // values contains a concatenation of all values in the row. + Values []byte `protobuf:"bytes,2,opt,name=values,proto3" json:"values,omitempty"` +} + +// ResultExtras contains optional out-of-band information. Usually the +// extras are requested by adding ExecuteOptions flags. +type ResultExtras struct { + // event_token is populated if the include_event_token flag is set + // in ExecuteOptions. + EventToken *EventToken `protobuf:"bytes,1,opt,name=event_token,json=eventToken" json:"event_token,omitempty"` + // If set, it means the data returned with this result is fresher + // than the compare_token passed in the ExecuteOptions. 
+ Fresher bool `protobuf:"varint,2,opt,name=fresher" json:"fresher,omitempty"` +} diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes/aggregator.go b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes/aggregator.go new file mode 100644 index 00000000..105ecb16 --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes/aggregator.go @@ -0,0 +1,125 @@ +// Copyright 2015, Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// +// Copyright (c) XeLabs +// BohuTANG + +package sqltypes + +import ( + "bytes" +) + +// Operator used to do the aggregator for sum/min/max/ etc. +func Operator(v1 Value, v2 Value, fn func(x interface{}, y interface{}) interface{}) Value { + // Sum field type is Decimal, we convert it to golang Float64. + switch v1.Type() { + case Decimal: + v1 = MakeTrusted(Float64, v1.Raw()) + } + switch v2.Type() { + case Decimal: + v2 = MakeTrusted(Float64, v2.Raw()) + } + + v1n := v1.ToNative() + v2n := v2.ToNative() + val := fn(v1n, v2n) + v, err := BuildValue(val) + if err != nil { + panic(err) + } + return v +} + +// SumFn used to do sum of two values. +func SumFn(x interface{}, y interface{}) interface{} { + var v interface{} + switch x.(type) { + case int64: + v = (x.(int64) + y.(int64)) + case uint64: + v = (x.(uint64) + y.(uint64)) + case float64: + v = (x.(float64) + y.(float64)) + case []uint8: // We only support numerical value sum. + v = 0 + } + return v +} + +// MinFn returns the min value of two. 
+func MinFn(x interface{}, y interface{}) interface{} { + v := x + switch x.(type) { + case int64: + if x.(int64) > y.(int64) { + v = y + } + case uint64: + if x.(uint64) > y.(uint64) { + v = y + } + case float64: + if x.(float64) > y.(float64) { + v = y + } + case []uint8: + if bytes.Compare(x.([]uint8), y.([]uint8)) > 0 { + v = y + } + } + return v +} + +// MaxFn returns the max value of two. +func MaxFn(x interface{}, y interface{}) interface{} { + v := x + switch x.(type) { + case int64: + if x.(int64) < y.(int64) { + v = y + } + case uint64: + if x.(uint64) < y.(uint64) { + v = y + } + case float64: + if x.(float64) < y.(float64) { + v = y + } + case []uint8: + if bytes.Compare(x.([]uint8), y.([]uint8)) < 0 { + v = y + } + } + return v +} + +// DivFn returns the div value of two. +func DivFn(x interface{}, y interface{}) interface{} { + var v1, v2 float64 + + switch x.(type) { + case int64: + v1 = float64(x.(int64)) + case uint64: + v1 = float64(x.(uint64)) + case float64: + v1 = x.(float64) + case []uint8: // We only support numerical value div. + return 0 + } + switch y.(type) { + case int64: + v2 = float64(y.(int64)) + case uint64: + v2 = float64(y.(uint64)) + case float64: + v2 = y.(float64) + case []uint8: // We only support numerical value div. + return 0 + } + return v1 / v2 +} diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes/aggregator_test.go b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes/aggregator_test.go new file mode 100644 index 00000000..e8a54893 --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes/aggregator_test.go @@ -0,0 +1,160 @@ +// Copyright 2015, Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+// +// Copyright (c) XeLabs +// BohuTANG + +package sqltypes + +import ( + "fmt" + "testing" + + querypb "github.com/xelabs/go-mysqlstack/sqlparser/depends/query" +) + +func testOperator(typ string, x []Value) func([]Value) []Value { + return func(y []Value) []Value { + c := len(x) + ret := Row(x).Copy() + for i := 0; i < c; i++ { + switch typ { + case "sum", "count": + v1, v2 := x[i], y[i] + ret[i] = Operator(v1, v2, SumFn) + case "min": + v1, v2 := x[i], y[i] + ret[i] = Operator(v1, v2, MinFn) + case "max": + v1, v2 := x[i], y[i] + ret[i] = Operator(v1, v2, MaxFn) + } + } + return ret + } +} + +func testAggregate(typ string, result *Result) { + key := "xx" + groups := make(map[string][]Value) + for _, row1 := range result.Rows { + if row2, ok := groups[key]; !ok { + groups[key] = row1 + } else { + groups[key] = testOperator(typ, row1)(row2) + } + } + + i := 0 + result.Rows = make([][]Value, len(groups)) + for _, v := range groups { + result.Rows[i] = v + i++ + } + result.OrderedByAsc(result.Fields[0].Name) + result.Sort() +} + +func TestAggregator(t *testing.T) { + rt := &Result{ + Fields: []*querypb.Field{{ + Name: "a", + Type: Int32, + }, { + Name: "b", + Type: Uint24, + }, { + Name: "c", + Type: Float32, + }, + }, + Rows: [][]Value{ + {testVal(Int32, "-5"), testVal(Uint64, "10"), testVal(Float32, "3.1415926")}, + {testVal(Int32, "-4"), testVal(Uint64, "9"), testVal(Float32, "3.1415927")}, + {testVal(Int32, "-3"), testVal(Uint64, "8"), testVal(Float32, "3.1415928")}, + {testVal(Int32, "1"), testVal(Uint64, "1"), testVal(Float32, "3.1415926")}, + {testVal(Int32, "1"), testVal(Uint64, "1"), testVal(Float32, "3.1415925")}, + }, + } + + // sum aggregator. + { + rs := rt.Copy() + testAggregate("sum", rs) + want := "[[-10 29 15.7079632]]" + got := fmt.Sprintf("%+v", rs.Rows) + if want != got { + t.Errorf("want:%s\n, got:%s", want, got) + } + } + + // count aggregator. 
+ { + rs := rt.Copy() + testAggregate("count", rs) + want := "[[-10 29 15.7079632]]" + got := fmt.Sprintf("%+v", rs.Rows) + if want != got { + t.Errorf("want:%s\n, got:%s", want, got) + } + } + + // min aggregator. + { + rs := rt.Copy() + testAggregate("min", rs) + want := "[[-5 1 3.1415925]]" + got := fmt.Sprintf("%+v", rs.Rows) + if want != got { + t.Errorf("want:%s\n, got:%s", want, got) + } + } + + // max aggregator. + { + rs := rt.Copy() + testAggregate("max", rs) + want := "[[1 10 3.1415928]]" + got := fmt.Sprintf("%+v", rs.Rows) + if want != got { + t.Errorf("want:%s\n, got:%s", want, got) + } + } + + // div aggregator. + { + v1 := testVal(Int32, "7") + v2 := testVal(Float32, "3.1415926") + ret := Operator(v1, v2, DivFn) + want := "2.2281692412950043" + got := fmt.Sprintf("%+v", ret) + if want != got { + t.Errorf("want:%s\n, got:%s", want, got) + } + } +} + +func TestOperator(t *testing.T) { + { + x := testVal(Decimal, "3.1415926") + y := testVal(Decimal, "3") + f := Operator(x, y, SumFn) + got := fmt.Sprintf("%+v", f.Raw()) + want := "[54 46 49 52 49 53 57 50 54]" + if want != got { + t.Errorf("want:%s\n, got:%s", want, got) + } + } + + { + x := testVal(Null, "") + y := testVal(Decimal, "3") + f := Operator(x, y, SumFn) + got := fmt.Sprintf("%+v", f.Raw()) + want := "[]" + if want != got { + t.Errorf("want:%s\n, got:%s", want, got) + } + } +} diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes/column.go b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes/column.go new file mode 100644 index 00000000..527b865b --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes/column.go @@ -0,0 +1,56 @@ +// Copyright 2015, Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+// +// Copyright (c) XeLabs +// BohuTANG + +package sqltypes + +import ( + querypb "github.com/xelabs/go-mysqlstack/sqlparser/depends/query" +) + +// RemoveColumns used to remove columns who in the idxs. +func (result *Result) RemoveColumns(idxs ...int) { + c := len(idxs) + if c == 0 { + return + } + + if result.Fields != nil { + var fields []*querypb.Field + for i, f := range result.Fields { + in := false + for _, idx := range idxs { + if i == idx { + in = true + break + } + } + if !in { + fields = append(fields, f) + } + } + result.Fields = fields + } + + if result.Rows != nil { + for i, r := range result.Rows { + var row []Value + for i, v := range r { + in := false + for _, idx := range idxs { + if i == idx { + in = true + break + } + } + if !in { + row = append(row, v) + } + } + result.Rows[i] = row + } + } +} diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes/column_test.go b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes/column_test.go new file mode 100644 index 00000000..11058488 --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes/column_test.go @@ -0,0 +1,120 @@ +// Copyright 2015, Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+// +// Copyright (c) XeLabs +// BohuTANG + +package sqltypes + +import ( + "fmt" + "reflect" + "testing" + + querypb "github.com/xelabs/go-mysqlstack/sqlparser/depends/query" +) + +func TestColumnRemove(t *testing.T) { + rt := &Result{ + Fields: []*querypb.Field{{ + Name: "a", + Type: Int32, + }, { + Name: "b", + Type: Uint24, + }, { + Name: "c", + Type: Float32, + }, + }, + Rows: [][]Value{ + {testVal(Int32, "-5"), testVal(Uint64, "10"), testVal(Float32, "3.1415926")}, + {testVal(Int32, "-4"), testVal(Uint64, "9"), testVal(Float32, "3.1415927")}, + {testVal(Int32, "-3"), testVal(Uint64, "8"), testVal(Float32, "3.1415928")}, + {testVal(Int32, "1"), testVal(Uint64, "1"), testVal(Float32, "3.1415926")}, + {testVal(Int32, "1"), testVal(Uint64, "1"), testVal(Float32, "3.1415925")}, + }, + } + + { + rs := rt.Copy() + rs.RemoveColumns(0) + { + want := []*querypb.Field{ + { + Name: "b", + Type: Uint24, + }, { + Name: "c", + Type: Float32, + }, + } + got := rs.Fields + if !reflect.DeepEqual(want, got) { + t.Errorf("want:%+v\n, got:%+v", want, got) + } + } + + { + want := "[[10 3.1415926] [9 3.1415927] [8 3.1415928] [1 3.1415926] [1 3.1415925]]" + got := fmt.Sprintf("%+v", rs.Rows) + if want != got { + t.Errorf("want:%s\n, got:%+s", want, got) + } + } + } + + { + rs := rt.Copy() + rs.RemoveColumns(2) + { + want := []*querypb.Field{ + { + Name: "a", + Type: Int32, + }, { + Name: "b", + Type: Uint24, + }, + } + got := rs.Fields + if !reflect.DeepEqual(want, got) { + t.Errorf("want:%+v\n, got:%+v", want, got) + } + } + + { + want := "[[-5 10] [-4 9] [-3 8] [1 1] [1 1]]" + got := fmt.Sprintf("%+v", rs.Rows) + if want != got { + t.Errorf("want:%s\n, got:%s", want, got) + } + } + } + + { + rs := rt.Copy() + rs.RemoveColumns(0, 1) + { + want := []*querypb.Field{ + { + Name: "c", + Type: Float32, + }, + } + got := rs.Fields + if !reflect.DeepEqual(want, got) { + t.Errorf("want:%+v\n, got:%+v", want, got) + } + } + + { + want := "[[3.1415926] [3.1415927] [3.1415928] [3.1415926] 
[3.1415925]]" + got := fmt.Sprintf("%+v", rs.Rows) + if want != got { + t.Errorf("want:%s\n, got:%s", want, got) + } + } + } +} diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes/limit.go b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes/limit.go new file mode 100644 index 00000000..30506bab --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes/limit.go @@ -0,0 +1,22 @@ +// Copyright 2015, Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// +// Copyright (c) XeLabs +// BohuTANG + +package sqltypes + +// Limit used to cutoff the rows based on the MySQL LIMIT and OFFSET clauses. +func (result *Result) Limit(offset, limit int) { + count := len(result.Rows) + start := offset + end := offset + limit + if start > count { + start = count + } + if end > count { + end = count + } + result.Rows = result.Rows[start:end] +} diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes/limit_test.go b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes/limit_test.go new file mode 100644 index 00000000..7e3214fb --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes/limit_test.go @@ -0,0 +1,110 @@ +// Copyright 2015, Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// +// Copyright (c) XeLabs +// BohuTANG + +package sqltypes + +import ( + "reflect" + "testing" +) + +func TestLimit(t *testing.T) { + rs := &Result{ + Rows: [][]Value{ + {testVal(VarChar, "1")}, {testVal(VarChar, "2")}, {testVal(VarChar, "3")}, {testVal(VarChar, "4")}, {testVal(VarChar, "5")}, + }, + } + + // normal: offset 0, limit 1. 
+ { + rs1 := rs.Copy() + rs1.Limit(0, 1) + want := rs.Rows[0:1] + got := rs1.Rows + + if !reflect.DeepEqual(want, got) { + t.Errorf("want:\n%#v, got\n%#v", want, got) + } + } + + // normal: offset 0, limit 5. + { + rs1 := rs.Copy() + rs1.Limit(0, 5) + want := rs.Rows + got := rs1.Rows + + if !reflect.DeepEqual(want, got) { + t.Errorf("want:\n%#v, got\n%#v", want, got) + } + } + + // normal: offset 1, limit 4. + { + rs1 := rs.Copy() + rs1.Limit(1, 4) + want := rs.Rows[1:5] + got := rs1.Rows + + if !reflect.DeepEqual(want, got) { + t.Errorf("want:\n%#v, got\n%#v", want, got) + } + } + + // limit overflow: offset 0, limit 6. + { + rs1 := rs.Copy() + rs1.Limit(0, 6) + want := rs.Rows + got := rs1.Rows + + if !reflect.DeepEqual(want, got) { + t.Errorf("want:\n%#v, got\n%#v", want, got) + } + } + + // offset overflow: offset 5, limit 0. + { + rs1 := rs.Copy() + rs1.Limit(5, 0) + want := rs.Rows[5:5] + got := rs1.Rows + + if !reflect.DeepEqual(want, got) { + t.Errorf("want:\n%#v, got\n%#v", want, got) + } + } + + // (offset+limit) overflow: offset 3, limit 6. + { + rs1 := rs.Copy() + rs1.Limit(3, 6) + want := rs.Rows[3:5] + got := rs1.Rows + + if !reflect.DeepEqual(want, got) { + t.Errorf("want:\n%#v, got\n%#v", want, got) + } + } + + // Empty test. + { + rs1 := &Result{ + Rows: [][]Value{ + {}, + }, + } + + rs1.Limit(3, 6) + want := rs.Rows[0:0] + got := rs1.Rows + + if !reflect.DeepEqual(want, got) { + t.Errorf("want:\n%#v, got\n%#v", want, got) + } + } +} diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes/plan_value.go b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes/plan_value.go new file mode 100644 index 00000000..9495be76 --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes/plan_value.go @@ -0,0 +1,58 @@ +/* +Copyright 2017 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqltypes + +import () + +// PlanValue represents a value or a list of values for +// a column that will later be resolved using bind vars and used +// to perform plan actions like generating the final query or +// deciding on a route. +// +// Plan values are typically used as a slice ([]planValue) +// where each entry is for one column. For situations where +// the required output is a list of rows (like in the case +// of multi-value inserts), the representation is pivoted. +// For example, a statement like this: +// INSERT INTO t VALUES (1, 2), (3, 4) +// will be represented as follows: +// []PlanValue{ +// Values: {1, 3}, +// Values: {2, 4}, +// } +// +// For WHERE clause items that contain a combination of +// equality expressions and IN clauses like this: +// WHERE pk1 = 1 AND pk2 IN (2, 3, 4) +// The plan values will be represented as follows: +// []PlanValue{ +// Value: 1, +// Values: {2, 3, 4}, +// } +// When converted into rows, columns with single values +// are replicated as the same for all rows: +// [][]Value{ +// {1, 2}, +// {1, 3}, +// {1, 4}, +// } +type PlanValue struct { + Key string + Value Value + ListKey string + Values []PlanValue +} diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes/result.go b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes/result.go new file mode 100644 index 00000000..84e15146 --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes/result.go @@ -0,0 +1,128 @@ +// Copyright 2015, Google Inc. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sqltypes + +import ( + querypb "github.com/xelabs/go-mysqlstack/sqlparser/depends/query" +) + +// ResultState enum. +type ResultState int + +const ( + // RStateNone enum. + RStateNone ResultState = iota + // RStateFields enum. + RStateFields + // RStateRows enum. + RStateRows + // RStateFinished enum. + RStateFinished +) + +// Result represents a query result. +type Result struct { + Fields []*querypb.Field `json:"fields"` + RowsAffected uint64 `json:"rows_affected"` + InsertID uint64 `json:"insert_id"` + Warnings uint16 `json:"warnings"` + Rows [][]Value `json:"rows"` + Extras *querypb.ResultExtras `json:"extras"` + sorters []*sorter + State ResultState +} + +// ResultStream is an interface for receiving Result. It is used for +// RPC interfaces. +type ResultStream interface { + // Recv returns the next result on the stream. + // It will return io.EOF if the stream ended. + Recv() (*Result, error) +} + +// Repair fixes the type info in the rows +// to conform to the supplied field types. +func (result *Result) Repair(fields []*querypb.Field) { + // Usage of j is intentional. + for j, f := range fields { + for _, r := range result.Rows { + if r[j].typ != Null { + r[j].typ = f.Type + } + } + } +} + +// Copy creates a deep copy of Result. 
+func (result *Result) Copy() *Result { + out := &Result{ + InsertID: result.InsertID, + RowsAffected: result.RowsAffected, + } + if result.Fields != nil { + fieldsp := make([]*querypb.Field, len(result.Fields)) + fields := make([]querypb.Field, len(result.Fields)) + for i, f := range result.Fields { + fields[i] = *f + fieldsp[i] = &fields[i] + } + out.Fields = fieldsp + } + if result.Rows != nil { + rows := make([][]Value, len(result.Rows)) + for i, r := range result.Rows { + rows[i] = make([]Value, len(r)) + totalLen := 0 + for _, c := range r { + totalLen += len(c.val) + } + arena := make([]byte, 0, totalLen) + for j, c := range r { + start := len(arena) + arena = append(arena, c.val...) + rows[i][j] = MakeTrusted(c.typ, arena[start:start+len(c.val)]) + } + } + out.Rows = rows + } + return out +} + +// StripFieldNames will return a new Result that has the same Rows, +// but the Field objects will have their Name emptied. Note we don't +// proto.Copy each Field for performance reasons, but we only copy the +// individual fields. +func (result *Result) StripFieldNames() *Result { + if len(result.Fields) == 0 { + return result + } + r := *result + r.Fields = make([]*querypb.Field, len(result.Fields)) + newFieldsArray := make([]querypb.Field, len(result.Fields)) + for i, f := range result.Fields { + r.Fields[i] = &newFieldsArray[i] + newFieldsArray[i].Type = f.Type + } + return &r +} + +// AppendResult will combine the Results Objects of one result +// to another result.Note currently it doesn't handle cases like +// if two results have different fields.We will enhance this function. +func (result *Result) AppendResult(src *Result) { + if src.RowsAffected == 0 && len(src.Fields) == 0 { + return + } + if result.Fields == nil { + result.Fields = src.Fields + } + result.RowsAffected += src.RowsAffected + if src.InsertID != 0 { + result.InsertID = src.InsertID + } + if len(src.Rows) != 0 { + result.Rows = append(result.Rows, src.Rows...) 
+ } +} diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes/result_test.go b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes/result_test.go new file mode 100644 index 00000000..ca644f3c --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes/result_test.go @@ -0,0 +1,173 @@ +// Copyright 2015, Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sqltypes + +import ( + "reflect" + "testing" + + querypb "github.com/xelabs/go-mysqlstack/sqlparser/depends/query" +) + +func TestRepair(t *testing.T) { + fields := []*querypb.Field{{ + Type: Int64, + }, { + Type: VarChar, + }} + in := Result{ + Rows: [][]Value{ + {testVal(VarBinary, "1"), testVal(VarBinary, "aa")}, + {testVal(VarBinary, "2"), testVal(VarBinary, "bb")}, + }, + } + want := Result{ + Rows: [][]Value{ + {testVal(Int64, "1"), testVal(VarChar, "aa")}, + {testVal(Int64, "2"), testVal(VarChar, "bb")}, + }, + } + in.Repair(fields) + if !reflect.DeepEqual(in, want) { + t.Errorf("Repair:\n%#v, want\n%#v", in, want) + } +} + +func TestCopy(t *testing.T) { + in := &Result{ + Fields: []*querypb.Field{{ + Type: Int64, + }, { + Type: VarChar, + }}, + InsertID: 1, + RowsAffected: 2, + Rows: [][]Value{ + {testVal(Int64, "1"), MakeTrusted(Null, nil)}, + {testVal(Int64, "2"), MakeTrusted(VarChar, nil)}, + {testVal(Int64, "3"), testVal(VarChar, "")}, + }, + } + want := &Result{ + Fields: []*querypb.Field{{ + Type: Int64, + }, { + Type: VarChar, + }}, + InsertID: 1, + RowsAffected: 2, + Rows: [][]Value{ + {testVal(Int64, "1"), MakeTrusted(Null, nil)}, + {testVal(Int64, "2"), testVal(VarChar, "")}, + {testVal(Int64, "3"), testVal(VarChar, "")}, + }, + } + out := in.Copy() + // Change in so we're sure out got actually copied + in.Fields[0].Type = VarChar + in.Rows[0][0] = testVal(VarChar, "aa") + if !reflect.DeepEqual(out, want) { + 
t.Errorf("Copy:\n%#v, want\n%#v", out, want) + } +} + +func TestStripFieldNames(t *testing.T) { + testcases := []struct { + name string + in *Result + expected *Result + }{{ + name: "no fields", + in: &Result{}, + expected: &Result{}, + }, { + name: "empty fields", + in: &Result{ + Fields: []*querypb.Field{}, + }, + expected: &Result{ + Fields: []*querypb.Field{}, + }, + }, { + name: "no name", + in: &Result{ + Fields: []*querypb.Field{{ + Type: Int64, + }, { + Type: VarChar, + }}, + }, + expected: &Result{ + Fields: []*querypb.Field{{ + Type: Int64, + }, { + Type: VarChar, + }}, + }, + }, { + name: "names", + in: &Result{ + Fields: []*querypb.Field{{ + Name: "field1", + Type: Int64, + }, { + Name: "field2", + Type: VarChar, + }}, + }, + expected: &Result{ + Fields: []*querypb.Field{{ + Type: Int64, + }, { + Type: VarChar, + }}, + }, + }} + for _, tcase := range testcases { + inCopy := tcase.in.Copy() + out := inCopy.StripFieldNames() + if !reflect.DeepEqual(out, tcase.expected) { + t.Errorf("StripFieldNames unexpected result for %v: %v", tcase.name, out) + } + if len(tcase.in.Fields) > 0 { + // check the out array is different than the in array. + if out.Fields[0] == inCopy.Fields[0] { + t.Errorf("StripFieldNames modified original Field for %v", tcase.name) + } + } + // check we didn't change the original result. 
+ if !reflect.DeepEqual(tcase.in, inCopy) { + t.Errorf("StripFieldNames modified original result") + } + } +} + +func TestAppendResult(t *testing.T) { + r1 := &Result{ + RowsAffected: 3, + Rows: [][]Value{ + {testVal(VarBinary, "1"), testVal(VarBinary, "aa")}, + }, + } + r2 := &Result{ + RowsAffected: 5, + Rows: [][]Value{ + {testVal(VarBinary, "2"), testVal(VarBinary, "aa2")}, + }, + } + r1.AppendResult(r2) + + got := r1 + want := &Result{ + RowsAffected: 8, + Rows: [][]Value{ + {testVal(VarBinary, "1"), testVal(VarBinary, "aa")}, + {testVal(VarBinary, "2"), testVal(VarBinary, "aa2")}, + }, + } + if !reflect.DeepEqual(got, want) { + t.Errorf("Append:\n%#v, want\n%#v", got, want) + } +} diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes/row.go b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes/row.go new file mode 100644 index 00000000..a3f06014 --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes/row.go @@ -0,0 +1,20 @@ +// Copyright 2015, Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// +// Copyright (c) XeLabs +// BohuTANG + +package sqltypes + +// Row operations. +type Row []Value + +// Copy used to clone the new value. +func (r Row) Copy() []Value { + ret := make([]Value, len(r)) + for i, v := range r { + ret[i] = v + } + return ret +} diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes/sorter.go b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes/sorter.go new file mode 100644 index 00000000..0a551379 --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes/sorter.go @@ -0,0 +1,142 @@ +// Copyright 2015, Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+// +// Copyright (c) XeLabs +// BohuTANG + +package sqltypes + +import ( + "bytes" + "fmt" + "sort" +) + +// Len is part of sort.Interface. +func (result *Result) Len() int { + return len(result.Rows) +} + +// Swap is part of sort.Interface. +func (result *Result) Swap(i, j int) { + result.Rows[i], result.Rows[j] = result.Rows[j], result.Rows[i] +} + +// Less is part of sort.Interface. It is implemented by looping along the +// less functions until it finds a comparison that is either Less or +// !Less. Note that it can call the less functions twice per call. We +// could change the functions to return -1, 0, 1 and reduce the +// number of calls for greater efficiency: an exercise for the reader. +func (result *Result) Less(i, j int) bool { + p, q := result.Rows[i], result.Rows[j] + // Try all but the last comparison. + var k int + for k = 0; k < len(result.sorters)-1; k++ { + ser := result.sorters[k] + switch { + case ser.less(ser.idx, p, q): + // p < q, so we have a decision. + return true + case ser.less(ser.idx, q, p): + // p > q, so we have a decision. + return false + } + // p == q; try the next comparison. + } + // All comparisons to here said "equal", so just return whatever + // the final comparison reports. + ser := result.sorters[k] + return ser.less(ser.idx, p, q) +} + +// LessFunc implements the Less function of sorter interface. +type LessFunc func(idx int, v1, v2 []Value) bool +type sorter struct { + idx int + less LessFunc +} + +func lessAscFn(idx int, v1, v2 []Value) bool { + vn1 := v1[idx].ToNative() + vn2 := v2[idx].ToNative() + switch vn1.(type) { + case int64: + return vn1.(int64) < vn2.(int64) + case uint64: + return vn1.(uint64) < vn2.(uint64) + case float64: + return vn1.(float64) < vn2.(float64) + case []byte: + return bytes.Compare(vn1.([]byte), vn2.([]byte)) < 0 + case nil: + return false + default: + panic(fmt.Sprintf("unsupported.orderby.type:%T", vn1)) + } +} + +// OrderedByAsc adds a 'order by asc' operator to the result. 
+func (result *Result) OrderedByAsc(fields ...string) error { + for _, field := range fields { + idx := -1 + for k, f := range result.Fields { + if f.Name == field { + idx = k + break + } + } + if idx == -1 { + return fmt.Errorf("can.not.find.the.orderby.field[%s].direction.asc", field) + } + ser := &sorter{idx: idx, less: lessAscFn} + result.sorters = append(result.sorters, ser) + } + return nil +} + +func lessDescFn(idx int, v1, v2 []Value) bool { + vn1 := v1[idx].ToNative() + vn2 := v2[idx].ToNative() + switch vn1.(type) { + case int64: + return vn2.(int64) < vn1.(int64) + case uint64: + return vn2.(uint64) < vn1.(uint64) + case float64: + return vn2.(float64) < vn1.(float64) + case []byte: + return bytes.Compare(vn2.([]byte), vn1.([]byte)) < 0 + case nil: + return false + default: + panic(fmt.Sprintf("unsupported.orderby.type:%T", vn1)) + } +} + +// OrderedByDesc adds a 'order by desc' operator to the result. +func (result *Result) OrderedByDesc(fields ...string) error { + for _, field := range fields { + idx := -1 + for k, f := range result.Fields { + if f.Name == field { + idx = k + break + } + } + if idx == -1 { + return fmt.Errorf("can.not.find.the.orderby.field[%s].direction.desc", field) + } + ser := &sorter{idx: idx, less: lessDescFn} + result.sorters = append(result.sorters, ser) + } + return nil +} + +// Sort sorts the argument slice according to the less functions passed to OrderedBy. +func (result *Result) Sort() { + if len(result.sorters) == 0 { + return + } + sort.Sort(result) +} diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes/sorter_test.go b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes/sorter_test.go new file mode 100644 index 00000000..92caf37c --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes/sorter_test.go @@ -0,0 +1,244 @@ +// Copyright 2015, Google Inc. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// +// Copyright (c) XeLabs +// BohuTANG + +package sqltypes + +import ( + "fmt" + "testing" + + querypb "github.com/xelabs/go-mysqlstack/sqlparser/depends/query" +) + +func TestSorter(t *testing.T) { + rs := &Result{ + Fields: []*querypb.Field{{ + Name: "user", + Type: VarChar, + }, { + Name: "language", + Type: VarChar, + }, { + Name: "lines", + Type: Int64, + }, + }, + Rows: [][]Value{ + {testVal(VarChar, "gri"), testVal(VarChar, "Go"), testVal(Int64, "100")}, + {testVal(VarChar, "ken"), testVal(VarChar, "C"), testVal(Int64, "150")}, + {testVal(VarChar, "glenda"), testVal(VarChar, "Go"), testVal(Int64, "200")}, + {testVal(VarChar, "rsc"), testVal(VarChar, "Go"), testVal(Int64, "200")}, + {testVal(VarChar, "r"), testVal(VarChar, "Go"), testVal(Int64, "200")}, + {testVal(VarChar, "ken"), testVal(VarChar, "Go"), testVal(Int64, "200")}, + {testVal(VarChar, "dmr"), testVal(VarChar, "C"), testVal(Int64, "100")}, + {testVal(VarChar, "r"), testVal(VarChar, "C"), testVal(Int64, "150")}, + {testVal(VarChar, "gri"), testVal(VarChar, "Smalltalk"), testVal(Int64, "80")}, + }, + } + + // asc + { + fields := []string{ + "user", + "language", + "lines", + } + wants := []string{ + "[[dmr C 100] [glenda Go 200] [gri Go 100] [gri Smalltalk 80] [ken C 150] [ken Go 200] [r Go 200] [r C 150] [rsc Go 200]]", + "[[dmr C 100] [ken C 150] [r C 150] [glenda Go 200] [rsc Go 200] [r Go 200] [ken Go 200] [gri Go 100] [gri Smalltalk 80]]", + "[[gri Smalltalk 80] [gri Go 100] [dmr C 100] [ken C 150] [r C 150] [rsc Go 200] [r Go 200] [ken Go 200] [glenda Go 200]]", + } + + for i, field := range fields { + rs1 := rs.Copy() + rs1.OrderedByAsc(field) + rs1.Sort() + + want := wants[i] + got := fmt.Sprintf("%+v", rs1.Rows) + if want != got { + t.Errorf("want:%s\n, got:%s", want, got) + } + } + } + + // desc + { + fields := []string{ + "user", + "language", + "lines", + } + wants := []string{ + 
"[[rsc Go 200] [r C 150] [r Go 200] [ken Go 200] [ken C 150] [gri Go 100] [gri Smalltalk 80] [glenda Go 200] [dmr C 100]]", + "[[gri Smalltalk 80] [gri Go 100] [rsc Go 200] [r Go 200] [ken Go 200] [glenda Go 200] [ken C 150] [dmr C 100] [r C 150]]", + "[[glenda Go 200] [rsc Go 200] [r Go 200] [ken Go 200] [ken C 150] [r C 150] [gri Go 100] [dmr C 100] [gri Smalltalk 80]]", + } + + for i, field := range fields { + rs1 := rs.Copy() + rs1.OrderedByDesc(field) + rs1.Sort() + + want := wants[i] + got := fmt.Sprintf("%+v", rs1.Rows) + if want != got { + t.Errorf("want:%s\n, got:%s", want, got) + } + } + } + + // user + language + lines asc + { + fields := []string{ + "user", + "language", + "lines", + } + + rs1 := rs.Copy() + rs1.OrderedByAsc(fields...) + rs1.Sort() + want := "[[dmr C 100] [glenda Go 200] [gri Go 100] [gri Smalltalk 80] [ken C 150] [ken Go 200] [r C 150] [r Go 200] [rsc Go 200]]" + got := fmt.Sprintf("%+v", rs1.Rows) + if want != got { + t.Errorf("want:%s\n, got:%s", want, got) + } + } + + // user + language + lines desc + { + fields := []string{ + "user", + "language", + "lines", + } + + rs1 := rs.Copy() + rs1.OrderedByDesc(fields...) 
+ rs1.Sort() + want := "[[rsc Go 200] [r Go 200] [r C 150] [ken Go 200] [ken C 150] [gri Smalltalk 80] [gri Go 100] [glenda Go 200] [dmr C 100]]" + got := fmt.Sprintf("%+v", rs1.Rows) + if want != got { + t.Errorf("want:%s\n, got:%s", want, got) + } + } +} + +func TestSorterType(t *testing.T) { + rs := &Result{ + Fields: []*querypb.Field{{ + Name: "ID", + Type: Uint24, + }, { + Name: "cost", + Type: Float32, + }, { + Name: "nil", + Type: Null, + }}, + Rows: [][]Value{ + {testVal(Uint24, "3"), testVal(Float32, "3.1415926"), NULL}, + {testVal(Uint24, "7"), testVal(Float32, "3.1415926"), NULL}, + {testVal(Uint24, "2"), testVal(Float32, "3.1415927"), NULL}, + }, + } + + // asc + { + fields := []string{ + "ID", + "cost", + "nil", + } + wants := []string{ + "[[2 3.1415927 ] [3 3.1415926 ] [7 3.1415926 ]]", + "[[3 3.1415926 ] [7 3.1415926 ] [2 3.1415927 ]]", + "[[3 3.1415926 ] [7 3.1415926 ] [2 3.1415927 ]]", + } + + for i, field := range fields { + rs1 := rs.Copy() + rs1.OrderedByAsc(field) + rs1.Sort() + + want := wants[i] + got := fmt.Sprintf("%+v", rs1.Rows) + if want != got { + t.Errorf("want:%s\n, got:%s", want, got) + } + } + } + + // desc + { + fields := []string{ + "ID", + "cost", + "nil", + } + wants := []string{ + "[[7 3.1415926 ] [3 3.1415926 ] [2 3.1415927 ]]", + "[[2 3.1415927 ] [3 3.1415926 ] [7 3.1415926 ]]", + "[[3 3.1415926 ] [7 3.1415926 ] [2 3.1415927 ]]", + } + + for i, field := range fields { + rs1 := rs.Copy() + rs1.OrderedByDesc(field) + rs1.Sort() + + want := wants[i] + got := fmt.Sprintf("%+v", rs1.Rows) + if want != got { + t.Errorf("want:%s\n, got:%s", want, got) + } + } + } +} + +func TestSorterError(t *testing.T) { + rs := &Result{ + Fields: []*querypb.Field{{ + Name: "ID", + Type: Uint24, + }, { + Name: "cost", + Type: Float32, + }, + }, + Rows: [][]Value{ + {testVal(Uint24, "3"), testVal(Float32, "3.1415926")}, + {testVal(Uint24, "7"), testVal(Float32, "3.1415926")}, + {testVal(Uint24, "2"), testVal(Float32, "3.1415927")}, + }, + } + + // 
Field error. + { + { + rs1 := rs.Copy() + err := rs1.OrderedByAsc("xx") + want := "can.not.find.the.orderby.field[xx].direction.asc" + got := err.Error() + if want != got { + t.Errorf("want:%s\n, got:%s", want, got) + } + } + + { + rs1 := rs.Copy() + err := rs1.OrderedByDesc("xx") + want := "can.not.find.the.orderby.field[xx].direction.desc" + got := err.Error() + if want != got { + t.Errorf("want:%s\n, got:%s", want, got) + } + } + + } +} diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes/type.go b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes/type.go new file mode 100644 index 00000000..57bda28c --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes/type.go @@ -0,0 +1,243 @@ +// Copyright 2015, Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sqltypes + +import ( + "fmt" + querypb "github.com/xelabs/go-mysqlstack/sqlparser/depends/query" +) + +// This file provides wrappers and support +// functions for querypb.Type. + +// These bit flags can be used to query on the +// common properties of types. +const ( + flagIsIntegral = int(querypb.Flag_ISINTEGRAL) + flagIsUnsigned = int(querypb.Flag_ISUNSIGNED) + flagIsFloat = int(querypb.Flag_ISFLOAT) + flagIsQuoted = int(querypb.Flag_ISQUOTED) + flagIsText = int(querypb.Flag_ISTEXT) + flagIsBinary = int(querypb.Flag_ISBINARY) +) + +// IsIntegral returns true if querypb.Type is an integral +// (signed/unsigned) that can be represented using +// up to 64 binary bits. +func IsIntegral(t querypb.Type) bool { + return int(t)&flagIsIntegral == flagIsIntegral +} + +// IsSigned returns true if querypb.Type is a signed integral. +func IsSigned(t querypb.Type) bool { + return int(t)&(flagIsIntegral|flagIsUnsigned) == flagIsIntegral +} + +// IsUnsigned returns true if querypb.Type is an unsigned integral. 
+// Caution: this is not the same as !IsSigned. +func IsUnsigned(t querypb.Type) bool { + return int(t)&(flagIsIntegral|flagIsUnsigned) == flagIsIntegral|flagIsUnsigned +} + +// IsFloat returns true is querypb.Type is a floating point. +func IsFloat(t querypb.Type) bool { + return int(t)&flagIsFloat == flagIsFloat +} + +// IsQuoted returns true if querypb.Type is a quoted text or binary. +func IsQuoted(t querypb.Type) bool { + return int(t)&flagIsQuoted == flagIsQuoted +} + +// IsText returns true if querypb.Type is a text. +func IsText(t querypb.Type) bool { + return int(t)&flagIsText == flagIsText +} + +// IsBinary returns true if querypb.Type is a binary. +func IsBinary(t querypb.Type) bool { + return int(t)&flagIsBinary == flagIsBinary +} + +// Vitess data types. These are idiomatically +// named synonyms for the querypb.Type values. +const ( + Null = querypb.Type_NULL_TYPE + Int8 = querypb.Type_INT8 + Uint8 = querypb.Type_UINT8 + Int16 = querypb.Type_INT16 + Uint16 = querypb.Type_UINT16 + Int24 = querypb.Type_INT24 + Uint24 = querypb.Type_UINT24 + Int32 = querypb.Type_INT32 + Uint32 = querypb.Type_UINT32 + Int64 = querypb.Type_INT64 + Uint64 = querypb.Type_UINT64 + Float32 = querypb.Type_FLOAT32 + Float64 = querypb.Type_FLOAT64 + Timestamp = querypb.Type_TIMESTAMP + Date = querypb.Type_DATE + Time = querypb.Type_TIME + Datetime = querypb.Type_DATETIME + Year = querypb.Type_YEAR + Decimal = querypb.Type_DECIMAL + Text = querypb.Type_TEXT + Blob = querypb.Type_BLOB + VarChar = querypb.Type_VARCHAR + VarBinary = querypb.Type_VARBINARY + Char = querypb.Type_CHAR + Binary = querypb.Type_BINARY + Bit = querypb.Type_BIT + Enum = querypb.Type_ENUM + Set = querypb.Type_SET + Tuple = querypb.Type_TUPLE + Geometry = querypb.Type_GEOMETRY + TypeJSON = querypb.Type_JSON +) + +// bit-shift the mysql flags by two byte so we +// can merge them with the mysql or vitess types. 
+const ( + mysqlUnsigned = 32 + mysqlBinary = 128 + mysqlEnum = 256 + mysqlSet = 2048 +) + +// If you add to this map, make sure you add a test case +// in tabletserver/endtoend. +var mysqlToType = map[int64]querypb.Type{ + 1: Int8, + 2: Int16, + 3: Int32, + 4: Float32, + 5: Float64, + 6: Null, + 7: Timestamp, + 8: Int64, + 9: Int24, + 10: Date, + 11: Time, + 12: Datetime, + 13: Year, + 16: Bit, + 245: TypeJSON, + 246: Decimal, + 249: Text, + 250: Text, + 251: Text, + 252: Text, + 253: VarChar, + 254: Char, + 255: Geometry, +} + +// modifyType modifies the vitess type based on the +// mysql flag. The function checks specific flags based +// on the type. This allows us to ignore stray flags +// that MySQL occasionally sets. +func modifyType(typ querypb.Type, flags int64) querypb.Type { + switch typ { + case Int8: + if flags&mysqlUnsigned != 0 { + return Uint8 + } + return Int8 + case Int16: + if flags&mysqlUnsigned != 0 { + return Uint16 + } + return Int16 + case Int32: + if flags&mysqlUnsigned != 0 { + return Uint32 + } + return Int32 + case Int64: + if flags&mysqlUnsigned != 0 { + return Uint64 + } + return Int64 + case Int24: + if flags&mysqlUnsigned != 0 { + return Uint24 + } + return Int24 + case Text: + if flags&mysqlBinary != 0 { + return Blob + } + return Text + case VarChar: + if flags&mysqlBinary != 0 { + return VarBinary + } + return VarChar + case Char: + if flags&mysqlBinary != 0 { + return Binary + } + if flags&mysqlEnum != 0 { + return Enum + } + if flags&mysqlSet != 0 { + return Set + } + return Char + } + return typ +} + +// MySQLToType computes the vitess type from mysql type and flags. +func MySQLToType(mysqlType, flags int64) (typ querypb.Type, err error) { + result, ok := mysqlToType[mysqlType] + if !ok { + return 0, fmt.Errorf("unsupported type: %d", mysqlType) + } + return modifyType(result, flags), nil +} + +// typeToMySQL is the reverse of mysqlToType. 
+var typeToMySQL = map[querypb.Type]struct { + typ int64 + flags int64 +}{ + Int8: {typ: 1}, + Uint8: {typ: 1, flags: mysqlUnsigned}, + Int16: {typ: 2}, + Uint16: {typ: 2, flags: mysqlUnsigned}, + Int32: {typ: 3}, + Uint32: {typ: 3, flags: mysqlUnsigned}, + Float32: {typ: 4}, + Float64: {typ: 5}, + Null: {typ: 6, flags: mysqlBinary}, + Timestamp: {typ: 7}, + Int64: {typ: 8}, + Uint64: {typ: 8, flags: mysqlUnsigned}, + Int24: {typ: 9}, + Uint24: {typ: 9, flags: mysqlUnsigned}, + Date: {typ: 10, flags: mysqlBinary}, + Time: {typ: 11, flags: mysqlBinary}, + Datetime: {typ: 12, flags: mysqlBinary}, + Year: {typ: 13, flags: mysqlUnsigned}, + Bit: {typ: 16, flags: mysqlUnsigned}, + TypeJSON: {typ: 245}, + Decimal: {typ: 246}, + Text: {typ: 252}, + Blob: {typ: 252, flags: mysqlBinary}, + VarChar: {typ: 253}, + VarBinary: {typ: 253, flags: mysqlBinary}, + Char: {typ: 254}, + Binary: {typ: 254, flags: mysqlBinary}, + Enum: {typ: 254, flags: mysqlEnum}, + Set: {typ: 254, flags: mysqlSet}, + Geometry: {typ: 255}, +} + +// TypeToMySQL returns the equivalent mysql type and flag for a vitess type. +func TypeToMySQL(typ querypb.Type) (mysqlType, flags int64) { + val := typeToMySQL[typ] + return val.typ, val.flags +} diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes/type_test.go b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes/type_test.go new file mode 100644 index 00000000..322a2cd5 --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes/type_test.go @@ -0,0 +1,322 @@ +// Copyright 2015| Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package sqltypes + +import ( + "testing" + + querypb "github.com/xelabs/go-mysqlstack/sqlparser/depends/query" +) + +func TestTypeValues(t *testing.T) { + testcases := []struct { + defined querypb.Type + expected int + }{{ + defined: Null, + expected: 0, + }, { + defined: Int8, + expected: 1 | flagIsIntegral, + }, { + defined: Uint8, + expected: 2 | flagIsIntegral | flagIsUnsigned, + }, { + defined: Int16, + expected: 3 | flagIsIntegral, + }, { + defined: Uint16, + expected: 4 | flagIsIntegral | flagIsUnsigned, + }, { + defined: Int24, + expected: 5 | flagIsIntegral, + }, { + defined: Uint24, + expected: 6 | flagIsIntegral | flagIsUnsigned, + }, { + defined: Int32, + expected: 7 | flagIsIntegral, + }, { + defined: Uint32, + expected: 8 | flagIsIntegral | flagIsUnsigned, + }, { + defined: Int64, + expected: 9 | flagIsIntegral, + }, { + defined: Uint64, + expected: 10 | flagIsIntegral | flagIsUnsigned, + }, { + defined: Float32, + expected: 11 | flagIsFloat, + }, { + defined: Float64, + expected: 12 | flagIsFloat, + }, { + defined: Timestamp, + expected: 13 | flagIsQuoted, + }, { + defined: Date, + expected: 14 | flagIsQuoted, + }, { + defined: Time, + expected: 15 | flagIsQuoted, + }, { + defined: Datetime, + expected: 16 | flagIsQuoted, + }, { + defined: Year, + expected: 17 | flagIsIntegral | flagIsUnsigned, + }, { + defined: Decimal, + expected: 18, + }, { + defined: Text, + expected: 19 | flagIsQuoted | flagIsText, + }, { + defined: Blob, + expected: 20 | flagIsQuoted | flagIsBinary, + }, { + defined: VarChar, + expected: 21 | flagIsQuoted | flagIsText, + }, { + defined: VarBinary, + expected: 22 | flagIsQuoted | flagIsBinary, + }, { + defined: Char, + expected: 23 | flagIsQuoted | flagIsText, + }, { + defined: Binary, + expected: 24 | flagIsQuoted | flagIsBinary, + }, { + defined: Bit, + expected: 25 | flagIsQuoted, + }, { + defined: Enum, + expected: 26 | flagIsQuoted, + }, { + defined: Set, + expected: 27 | flagIsQuoted, + }, { + defined: Tuple, + 
expected: 28, + }, { + defined: Geometry, + expected: 29 | flagIsQuoted, + }, { + defined: TypeJSON, + expected: 30 | flagIsQuoted, + }} + for _, tcase := range testcases { + if int(tcase.defined) != tcase.expected { + t.Errorf("Type %s: %d, want: %d", tcase.defined, int(tcase.defined), tcase.expected) + } + } +} + +func TestIsFunctions(t *testing.T) { + if IsIntegral(Null) { + t.Error("Null: IsIntegral, must be false") + } + if !IsIntegral(Int64) { + t.Error("Int64: !IsIntegral, must be true") + } + if IsSigned(Uint64) { + t.Error("Uint64: IsSigned, must be false") + } + if !IsSigned(Int64) { + t.Error("Int64: !IsSigned, must be true") + } + if IsUnsigned(Int64) { + t.Error("Int64: IsUnsigned, must be false") + } + if !IsUnsigned(Uint64) { + t.Error("Uint64: !IsUnsigned, must be true") + } + if IsFloat(Int64) { + t.Error("Int64: IsFloat, must be false") + } + if !IsFloat(Float64) { + t.Error("Uint64: !IsFloat, must be true") + } + if IsQuoted(Int64) { + t.Error("Int64: IsQuoted, must be false") + } + if !IsQuoted(Binary) { + t.Error("Binary: !IsQuoted, must be true") + } + if IsText(Int64) { + t.Error("Int64: IsText, must be false") + } + if !IsText(Char) { + t.Error("Char: !IsText, must be true") + } + if IsBinary(Int64) { + t.Error("Int64: IsBinary, must be false") + } + if !IsBinary(Binary) { + t.Error("Char: !IsBinary, must be true") + } +} + +func TestTypeToMySQL(t *testing.T) { + v, f := TypeToMySQL(Bit) + if v != 16 { + t.Errorf("Bit: %d, want 16", v) + } + if f != mysqlUnsigned { + t.Errorf("Bit flag: %x, want %x", f, mysqlUnsigned) + } + v, f = TypeToMySQL(Date) + if v != 10 { + t.Errorf("Bit: %d, want 10", v) + } + if f != mysqlBinary { + t.Errorf("Bit flag: %x, want %x", f, mysqlBinary) + } +} + +func TestMySQLToType(t *testing.T) { + testcases := []struct { + intype int64 + inflags int64 + outtype querypb.Type + }{{ + intype: 1, + outtype: Int8, + }, { + intype: 1, + inflags: mysqlUnsigned, + outtype: Uint8, + }, { + intype: 2, + outtype: Int16, + }, { 
+ intype: 2, + inflags: mysqlUnsigned, + outtype: Uint16, + }, { + intype: 3, + outtype: Int32, + }, { + intype: 3, + inflags: mysqlUnsigned, + outtype: Uint32, + }, { + intype: 4, + outtype: Float32, + }, { + intype: 5, + outtype: Float64, + }, { + intype: 6, + outtype: Null, + }, { + intype: 7, + outtype: Timestamp, + }, { + intype: 8, + outtype: Int64, + }, { + intype: 8, + inflags: mysqlUnsigned, + outtype: Uint64, + }, { + intype: 9, + outtype: Int24, + }, { + intype: 9, + inflags: mysqlUnsigned, + outtype: Uint24, + }, { + intype: 10, + outtype: Date, + }, { + intype: 11, + outtype: Time, + }, { + intype: 12, + outtype: Datetime, + }, { + intype: 13, + outtype: Year, + }, { + intype: 16, + outtype: Bit, + }, { + intype: 245, + outtype: TypeJSON, + }, { + intype: 246, + outtype: Decimal, + }, { + intype: 249, + outtype: Text, + }, { + intype: 250, + outtype: Text, + }, { + intype: 251, + outtype: Text, + }, { + intype: 252, + outtype: Text, + }, { + intype: 252, + inflags: mysqlBinary, + outtype: Blob, + }, { + intype: 253, + outtype: VarChar, + }, { + intype: 253, + inflags: mysqlBinary, + outtype: VarBinary, + }, { + intype: 254, + outtype: Char, + }, { + intype: 254, + inflags: mysqlBinary, + outtype: Binary, + }, { + intype: 254, + inflags: mysqlEnum, + outtype: Enum, + }, { + intype: 254, + inflags: mysqlSet, + outtype: Set, + }, { + intype: 255, + outtype: Geometry, + }, { + // Binary flag must be ignored. 
+ intype: 8, + inflags: mysqlUnsigned | mysqlBinary, + outtype: Uint64, + }, { + // Unsigned flag must be ignored + intype: 252, + inflags: mysqlUnsigned | mysqlBinary, + outtype: Blob, + }} + for _, tcase := range testcases { + got, err := MySQLToType(tcase.intype, tcase.inflags) + if err != nil { + t.Error(err) + } + if got != tcase.outtype { + t.Errorf("MySQLToType(%d, %x): %v, want %v", tcase.intype, tcase.inflags, got, tcase.outtype) + } + } +} + +func TestTypeError(t *testing.T) { + _, err := MySQLToType(15, 0) + want := "unsupported type: 15" + if err == nil || err.Error() != want { + t.Errorf("MySQLToType: %v, want %s", err, want) + } +} diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes/value.go b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes/value.go new file mode 100644 index 00000000..c5c772c5 --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes/value.go @@ -0,0 +1,434 @@ +// Copyright 2012, Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package sqltypes implements interfaces and types that represent SQL values. +package sqltypes + +import ( + "encoding/base64" + "errors" + "fmt" + "strconv" + "time" + + "github.com/xelabs/go-mysqlstack/sqlparser/depends/hack" + + querypb "github.com/xelabs/go-mysqlstack/sqlparser/depends/query" +) + +var ( + // NULL represents the NULL value. + NULL = Value{} + // DontEscape tells you if a character should not be escaped. + DontEscape = byte(255) + nullstr = []byte("null") +) + +// BinWriter interface is used for encoding values. +// Types like bytes.Buffer conform to this interface. +// We expect the writer objects to be in-memory buffers. +// So, we don't expect the write operations to fail. +type BinWriter interface { + Write([]byte) (int, error) + WriteByte(byte) error +} + +// Value can store any SQL value. 
If the value represents +// an integral type, the bytes are always stored as a canonical +// representation that matches how MySQL returns such values. +type Value struct { + typ querypb.Type + val []byte +} + +// MakeTrusted makes a new Value based on the type. +// If the value is an integral, then val must be in its canonical +// form. This function should only be used if you know the value +// and type conform to the rules. Every place this function is +// called, a comment is needed that explains why it's justified. +// Functions within this package are exempt. +func MakeTrusted(typ querypb.Type, val []byte) Value { + if typ == Null { + return NULL + } + return Value{typ: typ, val: val} +} + +// NewInt64 builds an Int64 Value. +func NewInt64(v int64) Value { + return MakeTrusted(Int64, strconv.AppendInt(nil, v, 10)) +} + +// NewInt32 builds an Int64 Value. +func NewInt32(v int32) Value { + return MakeTrusted(Int32, strconv.AppendInt(nil, int64(v), 10)) +} + +// NewUint64 builds an Uint64 Value. +func NewUint64(v uint64) Value { + return MakeTrusted(Uint64, strconv.AppendUint(nil, v, 10)) +} + +// NewFloat64 builds an Float64 Value. +func NewFloat64(v float64) Value { + return MakeTrusted(Float64, strconv.AppendFloat(nil, v, 'g', -1, 64)) +} + +// NewVarChar builds a VarChar Value. +func NewVarChar(v string) Value { + return MakeTrusted(VarChar, []byte(v)) +} + +// NewVarBinary builds a VarBinary Value. +// The input is a string because it's the most common use case. +func NewVarBinary(v string) Value { + return MakeTrusted(VarBinary, []byte(v)) +} + +// NewIntegral builds an integral type from a string representation. +// The type will be Int64 or Uint64. Int64 will be preferred where possible. 
func NewIntegral(val string) (n Value, err error) {
	// NOTE(review): this duplicates BuildIntegral below — consider
	// consolidating the two.
	// Try signed first so any value that fits in int64 becomes Int64.
	signed, err := strconv.ParseInt(val, 0, 64)
	if err == nil {
		return MakeTrusted(Int64, strconv.AppendInt(nil, signed, 10)), nil
	}
	unsigned, err := strconv.ParseUint(val, 0, 64)
	if err != nil {
		return Value{}, err
	}
	return MakeTrusted(Uint64, strconv.AppendUint(nil, unsigned, 10)), nil
}

// MakeString makes a VarBinary Value.
func MakeString(val []byte) Value {
	return MakeTrusted(VarBinary, val)
}

// BuildValue builds a value from any go type. sqltype.Value is
// also allowed.
func BuildValue(goval interface{}) (v Value, err error) {
	// Look for the most common types first.
	switch goval := goval.(type) {
	case nil:
		// no op: v stays the NULL zero value.
	case []byte:
		v = MakeTrusted(VarBinary, goval)
	case int64:
		v = MakeTrusted(Int64, strconv.AppendInt(nil, int64(goval), 10))
	case uint64:
		v = MakeTrusted(Uint64, strconv.AppendUint(nil, uint64(goval), 10))
	case float64:
		v = MakeTrusted(Float64, strconv.AppendFloat(nil, goval, 'f', -1, 64))
	case int:
		v = MakeTrusted(Int64, strconv.AppendInt(nil, int64(goval), 10))
	case int8:
		v = MakeTrusted(Int8, strconv.AppendInt(nil, int64(goval), 10))
	case int16:
		v = MakeTrusted(Int16, strconv.AppendInt(nil, int64(goval), 10))
	case int32:
		v = MakeTrusted(Int32, strconv.AppendInt(nil, int64(goval), 10))
	case uint:
		v = MakeTrusted(Uint64, strconv.AppendUint(nil, uint64(goval), 10))
	case uint8:
		v = MakeTrusted(Uint8, strconv.AppendUint(nil, uint64(goval), 10))
	case uint16:
		v = MakeTrusted(Uint16, strconv.AppendUint(nil, uint64(goval), 10))
	case uint32:
		v = MakeTrusted(Uint32, strconv.AppendUint(nil, uint64(goval), 10))
	case float32:
		v = MakeTrusted(Float32, strconv.AppendFloat(nil, float64(goval), 'f', -1, 64))
	case string:
		v = MakeTrusted(VarBinary, []byte(goval))
	case time.Time:
		// MySQL datetime text format, second precision.
		v = MakeTrusted(Datetime, []byte(goval.Format("2006-01-02 15:04:05")))
	case Value:
		v = goval
	case *querypb.BindVariable:
		// Re-validate the bind variable's bytes against its declared type.
		return ValueFromBytes(goval.Type, goval.Value)
	default:
		return v, fmt.Errorf("unexpected type %T: %v", goval, goval)
	}
	return v, nil
}

// BuildConverted is like BuildValue except that it tries to
// convert a string or []byte to an integral if the target type
// is an integral. We don't perform other implicit conversions
// because they're unsafe.
func BuildConverted(typ querypb.Type, goval interface{}) (v Value, err error) {
	if IsIntegral(typ) {
		switch goval := goval.(type) {
		case []byte:
			return ValueFromBytes(typ, goval)
		case string:
			return ValueFromBytes(typ, []byte(goval))
		case Value:
			// Only quoted (string-like) Values are re-parsed; numeric
			// Values fall through to BuildValue unchanged.
			if goval.IsQuoted() {
				return ValueFromBytes(typ, goval.Raw())
			}
		}
	}
	return BuildValue(goval)
}

// ValueFromBytes builds a Value using typ and val. It ensures that val
// matches the requested type. If type is an integral it's converted to
// a canonical form. Otherwise, the original representation is preserved.
func ValueFromBytes(typ querypb.Type, val []byte) (v Value, err error) {
	switch {
	case IsSigned(typ):
		// Base 0: accepts decimal, 0x hex and 0 octal prefixes.
		signed, err := strconv.ParseInt(string(val), 0, 64)
		if err != nil {
			return NULL, err
		}
		v = MakeTrusted(typ, strconv.AppendInt(nil, signed, 10))
	case IsUnsigned(typ):
		unsigned, err := strconv.ParseUint(string(val), 0, 64)
		if err != nil {
			return NULL, err
		}
		v = MakeTrusted(typ, strconv.AppendUint(nil, unsigned, 10))
	case typ == Tuple:
		return NULL, errors.New("tuple not allowed for ValueFromBytes")
	case IsFloat(typ) || typ == Decimal:
		_, err := strconv.ParseFloat(string(val), 64)
		if err != nil {
			return NULL, err
		}
		// After verification, we preserve the original representation.
		fallthrough
	default:
		v = MakeTrusted(typ, val)
	}
	return v, nil
}

// BuildIntegral builds an integral type from a string representation.
// The type will be Int64 or Uint64. Int64 will be preferred where possible.
+func BuildIntegral(val string) (n Value, err error) { + signed, err := strconv.ParseInt(val, 0, 64) + if err == nil { + return MakeTrusted(Int64, strconv.AppendInt(nil, signed, 10)), nil + } + unsigned, err := strconv.ParseUint(val, 0, 64) + if err != nil { + return Value{}, err + } + return MakeTrusted(Uint64, strconv.AppendUint(nil, unsigned, 10)), nil +} + +// Type returns the type of Value. +func (v Value) Type() querypb.Type { + return v.typ +} + +// Raw returns the raw bytes. All types are currently implemented as []byte. +// You should avoid using this function. If you do, you should treat the +// bytes as read-only. +func (v Value) Raw() []byte { + return v.val +} + +// Len returns the length. +func (v Value) Len() int { + return len(v.val) +} + +// Values represents the array of Value. +type Values []Value + +// Len implements the interface. +func (vs Values) Len() int { + len := 0 + for _, v := range vs { + len += v.Len() + } + return len +} + +// String returns the raw value as a string. +func (v Value) String() string { + return hack.String(v.val) +} + +// ToNative converts Value to a native go type. +// This does not work for sqltypes.Tuple. The function +// panics if there are inconsistencies. +func (v Value) ToNative() interface{} { + var out interface{} + var err error + switch { + case v.typ == Null: + // no-op + case IsSigned(v.typ): + out, err = v.ParseInt64() + case IsUnsigned(v.typ): + out, err = v.ParseUint64() + case IsFloat(v.typ): + out, err = v.ParseFloat64() + case v.typ == Tuple: + err = errors.New("unexpected tuple") + default: + out = v.val + } + if err != nil { + panic(err) + } + return out +} + +// ParseInt64 will parse a Value into an int64. It does +// not check the type. +func (v Value) ParseInt64() (val int64, err error) { + return strconv.ParseInt(v.String(), 10, 64) +} + +// ParseUint64 will parse a Value into a uint64. It does +// not check the type. 
+func (v Value) ParseUint64() (val uint64, err error) { + return strconv.ParseUint(v.String(), 10, 64) +} + +// ParseFloat64 will parse a Value into an float64. It does +// not check the type. +func (v Value) ParseFloat64() (val float64, err error) { + return strconv.ParseFloat(v.String(), 64) +} + +// EncodeSQL encodes the value into an SQL statement. Can be binary. +func (v Value) EncodeSQL(b BinWriter) { + // ToNative panics if v is invalid. + _ = v.ToNative() + switch { + case v.typ == Null: + writebytes(nullstr, b) + case IsQuoted(v.typ): + encodeBytesSQL(v.val, b) + default: + writebytes(v.val, b) + } +} + +// EncodeASCII encodes the value using 7-bit clean ascii bytes. +func (v Value) EncodeASCII(b BinWriter) { + // ToNative panics if v is invalid. + _ = v.ToNative() + switch { + case v.typ == Null: + writebytes(nullstr, b) + case IsQuoted(v.typ): + encodeBytesASCII(v.val, b) + default: + writebytes(v.val, b) + } +} + +// IsNull returns true if Value is null. +func (v Value) IsNull() bool { + return v.typ == Null +} + +// IsIntegral returns true if Value is an integral. +func (v Value) IsIntegral() bool { + return IsIntegral(v.typ) +} + +// IsSigned returns true if Value is a signed integral. +func (v Value) IsSigned() bool { + return IsSigned(v.typ) +} + +// IsUnsigned returns true if Value is an unsigned integral. +func (v Value) IsUnsigned() bool { + return IsUnsigned(v.typ) +} + +// IsFloat returns true if Value is a float. +func (v Value) IsFloat() bool { + return IsFloat(v.typ) +} + +// IsQuoted returns true if Value must be SQL-quoted. +func (v Value) IsQuoted() bool { + return IsQuoted(v.typ) +} + +// IsText returns true if Value is a collatable text. +func (v Value) IsText() bool { + return IsText(v.typ) +} + +// IsBinary returns true if Value is binary. 
+func (v Value) IsBinary() bool { + return IsBinary(v.typ) +} + +func encodeBytesSQL(val []byte, b BinWriter) { + writebyte('\'', b) + for _, ch := range val { + if encodedChar := SQLEncodeMap[ch]; encodedChar == DontEscape { + writebyte(ch, b) + } else { + writebyte('\\', b) + writebyte(encodedChar, b) + } + } + writebyte('\'', b) +} + +func encodeBytesASCII(val []byte, b BinWriter) { + writebyte('\'', b) + encoder := base64.NewEncoder(base64.StdEncoding, b) + encoder.Write(val) + encoder.Close() + writebyte('\'', b) +} + +func writebyte(c byte, b BinWriter) { + if err := b.WriteByte(c); err != nil { + panic(err) + } +} + +func writebytes(val []byte, b BinWriter) { + n, err := b.Write(val) + if err != nil { + panic(err) + } + if n != len(val) { + panic(errors.New("short write")) + } +} + +// SQLEncodeMap specifies how to escape binary data with '\'. +// Complies to http://dev.mysql.com/doc/refman/5.1/en/string-syntax.html +var SQLEncodeMap [256]byte + +// SQLDecodeMap is the reverse of SQLEncodeMap +var SQLDecodeMap [256]byte + +var encodeRef = map[byte]byte{ + '\x00': '0', + '\'': '\'', + '"': '"', + '\b': 'b', + '\n': 'n', + '\r': 'r', + '\t': 't', + 26: 'Z', // ctl-Z + '\\': '\\', +} + +func init() { + for i := range SQLEncodeMap { + SQLEncodeMap[i] = DontEscape + SQLDecodeMap[i] = DontEscape + } + for i := range SQLEncodeMap { + if to, ok := encodeRef[byte(i)]; ok { + SQLEncodeMap[byte(i)] = to + SQLDecodeMap[to] = byte(i) + } + } +} diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes/value_test.go b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes/value_test.go new file mode 100644 index 00000000..892fce98 --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes/value_test.go @@ -0,0 +1,675 @@ +// Copyright 2012, Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package sqltypes + +import ( + "bytes" + "reflect" + "strings" + "testing" + "time" + + querypb "github.com/xelabs/go-mysqlstack/sqlparser/depends/query" +) + +func TestMake(t *testing.T) { + v := MakeTrusted(Null, []byte("abcd")) + if !reflect.DeepEqual(v, NULL) { + t.Errorf("MakeTrusted(Null...) = %v, want null", makePretty(v)) + } + v = MakeTrusted(Int64, []byte("1")) + want := testVal(Int64, "1") + if !reflect.DeepEqual(v, want) { + t.Errorf("MakeTrusted(Int64, \"1\") = %v, want %v", makePretty(v), makePretty(want)) + } + v = MakeString([]byte("a")) + want = testVal(VarBinary, "a") + if !reflect.DeepEqual(v, want) { + t.Errorf("MakeString(\"a\") = %v, want %v", makePretty(v), makePretty(want)) + } +} + +func TestBuildValue(t *testing.T) { + testcases := []struct { + in interface{} + out Value + }{{ + in: nil, + out: NULL, + }, { + in: []byte("a"), + out: testVal(VarBinary, "a"), + }, { + in: int64(1), + out: testVal(Int64, "1"), + }, { + in: uint64(1), + out: testVal(Uint64, "1"), + }, { + in: float64(1.2), + out: testVal(Float64, "1.2"), + }, { + in: int(1), + out: testVal(Int64, "1"), + }, { + in: int8(1), + out: testVal(Int8, "1"), + }, { + in: int16(1), + out: testVal(Int16, "1"), + }, { + in: int32(1), + out: testVal(Int32, "1"), + }, { + in: uint(1), + out: testVal(Uint64, "1"), + }, { + in: uint8(1), + out: testVal(Uint8, "1"), + }, { + in: uint16(1), + out: testVal(Uint16, "1"), + }, { + in: uint32(1), + out: testVal(Uint32, "1"), + }, { + in: float32(1), + out: testVal(Float32, "1"), + }, { + in: "a", + out: testVal(VarBinary, "a"), + }, { + in: time.Date(2012, time.February, 24, 23, 19, 43, 10, time.UTC), + out: testVal(Datetime, "2012-02-24 23:19:43"), + }, { + in: testVal(VarBinary, "a"), + out: testVal(VarBinary, "a"), + }, { + in: NewInt64(63), + out: testVal(Int64, "63"), + }, { + in: NewUint64(63), + out: testVal(Uint64, "63"), + }, { + in: NewFloat64(63.4), + out: testVal(Float64, "63.4"), + }, { + in: NewInt32(63), + out: testVal(Int32, 
"63"), + }, { + in: NewVarChar("63"), + out: testVal(VarChar, "63"), + }, { + in: NewVarBinary("63"), + out: testVal(VarBinary, "63"), + }} + for _, tcase := range testcases { + v, err := BuildValue(tcase.in) + if err != nil { + t.Errorf("BuildValue(%#v) error: %v", tcase.in, err) + continue + } + if !reflect.DeepEqual(v, tcase.out) { + t.Errorf("BuildValue(%#v) = %v, want %v", tcase.in, makePretty(v), makePretty(tcase.out)) + } + } + + _, err := BuildValue(make(chan bool)) + want := "unexpected" + if err == nil || !strings.Contains(err.Error(), want) { + t.Errorf("BuildValue(chan): %v, want %v", err, want) + } + + // Test NewIntegral. + _, err = NewIntegral("12") + if err != nil { + t.Errorf("NewIntegral error: %v", err) + } +} + +func TestBuildConverted(t *testing.T) { + testcases := []struct { + typ querypb.Type + val interface{} + out Value + }{{ + typ: Int64, + val: 123, + out: testVal(Int64, "123"), + }, { + typ: Int64, + val: "123", + out: testVal(Int64, "123"), + }, { + typ: Uint64, + val: "123", + out: testVal(Uint64, "123"), + }, { + typ: Int64, + val: []byte("123"), + out: testVal(Int64, "123"), + }, { + typ: Int64, + val: testVal(VarBinary, "123"), + out: testVal(Int64, "123"), + }, { + typ: Int64, + val: testVal(Float32, "123"), + out: testVal(Float32, "123"), + }} + for _, tcase := range testcases { + v, err := BuildConverted(tcase.typ, tcase.val) + if err != nil { + t.Errorf("BuildValue(%v, %#v) error: %v", tcase.typ, tcase.val, err) + continue + } + if !reflect.DeepEqual(v, tcase.out) { + t.Errorf("BuildValue(%v, %#v) = %v, want %v", tcase.typ, tcase.val, makePretty(v), makePretty(tcase.out)) + } + } +} + +const ( + InvalidNeg = "-9223372036854775809" + MinNeg = "-9223372036854775808" + MinPos = "18446744073709551615" + InvalidPos = "18446744073709551616" +) + +func TestValueFromBytes(t *testing.T) { + testcases := []struct { + inType querypb.Type + inVal string + outVal Value + outErr string + }{{ + inType: Null, + inVal: "", + outVal: NULL, + }, { 
+ inType: Int8, + inVal: "1", + outVal: testVal(Int8, "1"), + }, { + inType: Int16, + inVal: "1", + outVal: testVal(Int16, "1"), + }, { + inType: Int24, + inVal: "1", + outVal: testVal(Int24, "1"), + }, { + inType: Int32, + inVal: "1", + outVal: testVal(Int32, "1"), + }, { + inType: Int64, + inVal: "1", + outVal: testVal(Int64, "1"), + }, { + inType: Uint8, + inVal: "1", + outVal: testVal(Uint8, "1"), + }, { + inType: Uint16, + inVal: "1", + outVal: testVal(Uint16, "1"), + }, { + inType: Uint24, + inVal: "1", + outVal: testVal(Uint24, "1"), + }, { + inType: Uint32, + inVal: "1", + outVal: testVal(Uint32, "1"), + }, { + inType: Uint64, + inVal: "1", + outVal: testVal(Uint64, "1"), + }, { + inType: Float32, + inVal: "1.00", + outVal: testVal(Float32, "1.00"), + }, { + inType: Float64, + inVal: "1.00", + outVal: testVal(Float64, "1.00"), + }, { + inType: Decimal, + inVal: "1.00", + outVal: testVal(Decimal, "1.00"), + }, { + inType: Timestamp, + inVal: "2012-02-24 23:19:43", + outVal: testVal(Timestamp, "2012-02-24 23:19:43"), + }, { + inType: Date, + inVal: "2012-02-24", + outVal: testVal(Date, "2012-02-24"), + }, { + inType: Time, + inVal: "23:19:43", + outVal: testVal(Time, "23:19:43"), + }, { + inType: Datetime, + inVal: "2012-02-24 23:19:43", + outVal: testVal(Datetime, "2012-02-24 23:19:43"), + }, { + inType: Year, + inVal: "1", + outVal: testVal(Year, "1"), + }, { + inType: Text, + inVal: "a", + outVal: testVal(Text, "a"), + }, { + inType: Blob, + inVal: "a", + outVal: testVal(Blob, "a"), + }, { + inType: VarChar, + inVal: "a", + outVal: testVal(VarChar, "a"), + }, { + inType: Binary, + inVal: "a", + outVal: testVal(Binary, "a"), + }, { + inType: Char, + inVal: "a", + outVal: testVal(Char, "a"), + }, { + inType: Bit, + inVal: "1", + outVal: testVal(Bit, "1"), + }, { + inType: Enum, + inVal: "a", + outVal: testVal(Enum, "a"), + }, { + inType: Set, + inVal: "a", + outVal: testVal(Set, "a"), + }, { + inType: VarBinary, + inVal: "a", + outVal: testVal(VarBinary, 
"a"), + }, { + inType: Int64, + inVal: InvalidNeg, + outErr: "out of range", + }, { + inType: Int64, + inVal: InvalidPos, + outErr: "out of range", + }, { + inType: Uint64, + inVal: "-1", + outErr: "invalid syntax", + }, { + inType: Uint64, + inVal: InvalidPos, + outErr: "out of range", + }, { + inType: Float64, + inVal: "a", + outErr: "invalid syntax", + }, { + inType: Tuple, + inVal: "a", + outErr: "not allowed", + }} + for _, tcase := range testcases { + v, err := ValueFromBytes(tcase.inType, []byte(tcase.inVal)) + if tcase.outErr != "" { + if err == nil || !strings.Contains(err.Error(), tcase.outErr) { + t.Errorf("ValueFromBytes(%v, %v) error: %v, must contain %v", tcase.inType, tcase.inVal, err, tcase.outErr) + } + continue + } + if err != nil { + t.Errorf("ValueFromBytes(%v, %v) error: %v", tcase.inType, tcase.inVal, err) + continue + } + if !reflect.DeepEqual(v, tcase.outVal) { + t.Errorf("ValueFromBytes(%v, %v) = %v, want %v", tcase.inType, tcase.inVal, makePretty(v), makePretty(tcase.outVal)) + } + } +} + +func TestBuildIntegral(t *testing.T) { + testcases := []struct { + in string + outVal Value + outErr string + }{{ + in: MinNeg, + outVal: testVal(Int64, MinNeg), + }, { + in: "1", + outVal: testVal(Int64, "1"), + }, { + in: MinPos, + outVal: testVal(Uint64, MinPos), + }, { + in: InvalidPos, + outErr: "out of range", + }} + for _, tcase := range testcases { + v, err := BuildIntegral(tcase.in) + if tcase.outErr != "" { + if err == nil || !strings.Contains(err.Error(), tcase.outErr) { + t.Errorf("BuildIntegral(%v) error: %v, must contain %v", tcase.in, err, tcase.outErr) + } + continue + } + if err != nil { + t.Errorf("BuildIntegral(%v) error: %v", tcase.in, err) + continue + } + if !reflect.DeepEqual(v, tcase.outVal) { + t.Errorf("BuildIntegral(%v) = %v, want %v", tcase.in, makePretty(v), makePretty(tcase.outVal)) + } + } +} + +func TestAccessors(t *testing.T) { + v := testVal(Int64, "1") + if v.Type() != Int64 { + t.Errorf("v.Type=%v, want Int64", 
v.Type()) + } + if !bytes.Equal(v.Raw(), []byte("1")) { + t.Errorf("v.Raw=%s, want 1", v.Raw()) + } + if v.Len() != 1 { + t.Errorf("v.Len=%d, want 1", v.Len()) + } + if v.String() != "1" { + t.Errorf("v.String=%s, want 1", v.String()) + } + if v.IsNull() { + t.Error("v.IsNull: true, want false") + } + if !v.IsIntegral() { + t.Error("v.IsIntegral: false, want true") + } + if !v.IsSigned() { + t.Error("v.IsSigned: false, want true") + } + if v.IsUnsigned() { + t.Error("v.IsUnsigned: true, want false") + } + if v.IsFloat() { + t.Error("v.IsFloat: true, want false") + } + if v.IsQuoted() { + t.Error("v.IsQuoted: true, want false") + } + if v.IsText() { + t.Error("v.IsText: true, want false") + } + if v.IsBinary() { + t.Error("v.IsBinary: true, want false") + } +} + +func TestToNative(t *testing.T) { + testcases := []struct { + in Value + out interface{} + }{{ + in: NULL, + out: nil, + }, { + in: testVal(Int8, "1"), + out: int64(1), + }, { + in: testVal(Int16, "1"), + out: int64(1), + }, { + in: testVal(Int24, "1"), + out: int64(1), + }, { + in: testVal(Int32, "1"), + out: int64(1), + }, { + in: testVal(Int64, "1"), + out: int64(1), + }, { + in: testVal(Uint8, "1"), + out: uint64(1), + }, { + in: testVal(Uint16, "1"), + out: uint64(1), + }, { + in: testVal(Uint24, "1"), + out: uint64(1), + }, { + in: testVal(Uint32, "1"), + out: uint64(1), + }, { + in: testVal(Uint64, "1"), + out: uint64(1), + }, { + in: testVal(Float32, "1"), + out: float64(1), + }, { + in: testVal(Float64, "1"), + out: float64(1), + }, { + in: testVal(Timestamp, "2012-02-24 23:19:43"), + out: []byte("2012-02-24 23:19:43"), + }, { + in: testVal(Date, "2012-02-24"), + out: []byte("2012-02-24"), + }, { + in: testVal(Time, "23:19:43"), + out: []byte("23:19:43"), + }, { + in: testVal(Datetime, "2012-02-24 23:19:43"), + out: []byte("2012-02-24 23:19:43"), + }, { + in: testVal(Year, "1"), + out: uint64(1), + }, { + in: testVal(Decimal, "1"), + out: []byte("1"), + }, { + in: testVal(Text, "a"), + out: 
[]byte("a"), + }, { + in: testVal(Blob, "a"), + out: []byte("a"), + }, { + in: testVal(VarChar, "a"), + out: []byte("a"), + }, { + in: testVal(VarBinary, "a"), + out: []byte("a"), + }, { + in: testVal(Char, "a"), + out: []byte("a"), + }, { + in: testVal(Binary, "a"), + out: []byte("a"), + }, { + in: testVal(Bit, "1"), + out: []byte("1"), + }, { + in: testVal(Enum, "a"), + out: []byte("a"), + }, { + in: testVal(Set, "a"), + out: []byte("a"), + }} + for _, tcase := range testcases { + v := tcase.in.ToNative() + if !reflect.DeepEqual(v, tcase.out) { + t.Errorf("%v.ToNative = %#v, want %#v", makePretty(tcase.in), v, tcase.out) + } + } +} + +func TestPanics(t *testing.T) { + testcases := []struct { + in Value + out string + }{{ + in: testVal(Int64, InvalidNeg), + out: "out of range", + }, { + in: testVal(Uint64, InvalidPos), + out: "out of range", + }, { + in: testVal(Uint64, "-1"), + out: "invalid syntax", + }, { + in: testVal(Float64, "a"), + out: "invalid syntax", + }, { + in: testVal(Tuple, "a"), + out: "unexpected", + }} + for _, tcase := range testcases { + func() { + defer func() { + x := recover() + if x == nil { + t.Errorf("%v.ToNative did not panic", makePretty(tcase.in)) + } + err, ok := x.(error) + if !ok { + t.Errorf("%v.ToNative did not panic with an error", makePretty(tcase.in)) + } + if !strings.Contains(err.Error(), tcase.out) { + t.Errorf("%v.ToNative error: %v, must contain; %v ", makePretty(tcase.in), err, tcase.out) + } + }() + _ = tcase.in.ToNative() + }() + } + for _, tcase := range testcases { + func() { + defer func() { + x := recover() + if x == nil { + t.Errorf("%v.EncodeSQL did not panic", makePretty(tcase.in)) + } + err, ok := x.(error) + if !ok { + t.Errorf("%v.EncodeSQL did not panic with an error", makePretty(tcase.in)) + } + if !strings.Contains(err.Error(), tcase.out) { + t.Errorf("%v.EncodeSQL error: %v, must contain; %v ", makePretty(tcase.in), err, tcase.out) + } + }() + tcase.in.EncodeSQL(&bytes.Buffer{}) + }() + } + for _, tcase := 
range testcases { + func() { + defer func() { + x := recover() + if x == nil { + t.Errorf("%v.EncodeASCII did not panic", makePretty(tcase.in)) + } + err, ok := x.(error) + if !ok { + t.Errorf("%v.EncodeASCII did not panic with an error", makePretty(tcase.in)) + } + if !strings.Contains(err.Error(), tcase.out) { + t.Errorf("%v.EncodeASCII error: %v, must contain; %v ", makePretty(tcase.in), err, tcase.out) + } + }() + tcase.in.EncodeASCII(&bytes.Buffer{}) + }() + } +} + +func TestParseNumbers(t *testing.T) { + v := testVal(VarChar, "1") + sval, err := v.ParseInt64() + if err != nil { + t.Error(err) + } + if sval != 1 { + t.Errorf("v.ParseInt64 = %d, want 1", sval) + } + uval, err := v.ParseUint64() + if err != nil { + t.Error(err) + } + if uval != 1 { + t.Errorf("v.ParseUint64 = %d, want 1", uval) + } + fval, err := v.ParseFloat64() + if err != nil { + t.Error(err) + } + if fval != 1 { + t.Errorf("v.ParseFloat64 = %f, want 1", fval) + } +} + +func TestEncode(t *testing.T) { + testcases := []struct { + in Value + outSQL string + outASCII string + }{{ + in: NULL, + outSQL: "null", + outASCII: "null", + }, { + in: testVal(Int64, "1"), + outSQL: "1", + outASCII: "1", + }, { + in: testVal(VarChar, "foo"), + outSQL: "'foo'", + outASCII: "'Zm9v'", + }, { + in: testVal(VarChar, "\x00'\"\b\n\r\t\x1A\\"), + outSQL: "'\\0\\'\\\"\\b\\n\\r\\t\\Z\\\\'", + outASCII: "'ACciCAoNCRpc'", + }} + for _, tcase := range testcases { + buf := &bytes.Buffer{} + tcase.in.EncodeSQL(buf) + if tcase.outSQL != buf.String() { + t.Errorf("%v.EncodeSQL = %q, want %q", makePretty(tcase.in), buf.String(), tcase.outSQL) + } + buf = &bytes.Buffer{} + tcase.in.EncodeASCII(buf) + if tcase.outASCII != buf.String() { + t.Errorf("%v.EncodeASCII = %q, want %q", makePretty(tcase.in), buf.String(), tcase.outASCII) + } + } +} + +// TestEncodeMap ensures DontEscape is not escaped +func TestEncodeMap(t *testing.T) { + if SQLEncodeMap[DontEscape] != DontEscape { + t.Errorf("SQLEncodeMap[DontEscape] = %v, want %v", 
SQLEncodeMap[DontEscape], DontEscape) + } + if SQLDecodeMap[DontEscape] != DontEscape { + t.Errorf("SQLDecodeMap[DontEscape] = %v, want %v", SQLEncodeMap[DontEscape], DontEscape) + } +} + +// testVal makes it easy to build a Value for testing. +func testVal(typ querypb.Type, val string) Value { + return Value{typ: typ, val: []byte(val)} +} + +type prettyVal struct { + Type querypb.Type + Value string +} + +// makePretty converts Value to a struct that's readable when printed. +func makePretty(v Value) prettyVal { + return prettyVal{v.typ, string(v.val)} +} diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/explain.go b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/explain.go new file mode 100644 index 00000000..ee9d4bf1 --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/explain.go @@ -0,0 +1,23 @@ +// Copyright 2012, Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sqlparser + +import () + +func (*Explain) iStatement() {} + +// Explain represents a explain statement. +type Explain struct { +} + +// Format formats the node. +func (node *Explain) Format(buf *TrackedBuffer) { + buf.WriteString("explain") +} + +// WalkSubtree walks the nodes of the subtree. +func (node *Explain) WalkSubtree(visit Visit) error { + return nil +} diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/explain_test.go b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/explain_test.go new file mode 100644 index 00000000..35897037 --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/explain_test.go @@ -0,0 +1,51 @@ +/* +Copyright 2017 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package sqlparser

import "strings"
import "testing"

// TestExplain checks that EXPLAIN statements parse and format back
// to the bare "explain" string (the statement body is discarded).
func TestExplain(t *testing.T) {
	validSQL := []struct {
		input  string
		output string
	}{
		{
			input:  "explain select * from 1",
			output: "explain",
		},
	}

	for _, exp := range validSQL {
		sql := strings.TrimSpace(exp.input)
		tree, err := Parse(sql)
		if err != nil {
			t.Errorf("input: %s, err: %v", sql, err)
			continue
		}

		// Walk.
		Walk(func(node SQLNode) (bool, error) {
			return true, nil
		}, tree)

		got := String(tree.(*Explain))
		if exp.output != got {
			t.Errorf("want:\n%s\ngot:\n%s", exp.output, got)
		}
	}
}
diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/kill.go b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/kill.go
new file mode 100644
index 00000000..2cf303c9
--- /dev/null
+++ b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/kill.go
@@ -0,0 +1,40 @@
// Copyright 2012, Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package sqlparser

import (
	"strconv"
)

// NumVal represents numval tuple.
// It holds the raw, unparsed text of a numeric literal.
type NumVal struct {
	raw string
}

// AsUint64 returns uint64 value.
// If the raw text does not parse as a base-10 uint64 (e.g. it
// overflows), the result is capped at 1<<63 - 1 (math.MaxInt64).
func (exp *NumVal) AsUint64() uint64 {
	v, err := strconv.ParseUint(exp.raw, 10, 64)
	if err != nil {
		return 1<<63 - 1
	}
	return v
}

func (*Kill) iStatement() {}

// Kill represents a KILL statement.
type Kill struct {
	QueryID *NumVal
}

// Format formats the node.
func (node *Kill) Format(buf *TrackedBuffer) {
	// The raw literal is printed verbatim, even if it overflows uint64.
	buf.Myprintf("kill %s", node.QueryID.raw)
}

// WalkSubtree walks the nodes of the subtree.
func (node *Kill) WalkSubtree(visit Visit) error {
	return nil
}
diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/kill_test.go b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/kill_test.go
new file mode 100644
index 00000000..8f518b6b
--- /dev/null
+++ b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/kill_test.go
@@ -0,0 +1,62 @@
/*
Copyright 2017 Google Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package sqlparser

import (
	"strings"
	"testing"
)

// TestKill checks KILL parsing and round-trip formatting, including an
// out-of-range query ID, which must still format back verbatim.
func TestKill(t *testing.T) {
	validSQL := []struct {
		input  string
		output string
	}{
		{
			input:  "kill 1",
			output: "kill 1",
		},

		{
			input:  "kill 10000000000000000000000000000000",
			output: "kill 10000000000000000000000000000000",
		},
	}

	for _, exp := range validSQL {
		sql := strings.TrimSpace(exp.input)
		tree, err := Parse(sql)
		if err != nil {
			t.Errorf("input: %s, err: %v", sql, err)
			continue
		}

		// Walk.
		Walk(func(node SQLNode) (bool, error) {
			return true, nil
		}, tree)

		node := tree.(*Kill)
		// Exercise AsUint64 for coverage (overflow path returns the cap).
		node.QueryID.AsUint64()

		// Format.
		got := String(node)
		if exp.output != got {
			t.Errorf("want:\n%s\ngot:\n%s", exp.output, got)
		}
	}
}
diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/normalizer.go b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/normalizer.go
new file mode 100644
index 00000000..7e1ff6d8
--- /dev/null
+++ b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/normalizer.go
@@ -0,0 +1,131 @@
// Copyright 2016, Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package sqlparser

import (
	"fmt"

	querypb "github.com/xelabs/go-mysqlstack/sqlparser/depends/query"
	"github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes"
)

// Normalize changes the statement to use bind values, and
// updates the bind vars to those values. The supplied prefix
// is used to generate the bind var names. The function ensures
// that there are no collisions with existing bind vars.
func Normalize(stmt Statement, bindVars map[string]interface{}, prefix string) {
	reserved := GetBindvars(stmt)
	// vals allows us to reuse bindvars for
	// identical values.
	counter := 1
	vals := make(map[string]string)
	_ = Walk(func(node SQLNode) (kontinue bool, err error) {
		switch node := node.(type) {
		case *SQLVal:
			// Make the bindvar
			bval := sqlToBindvar(node)
			if bval == nil {
				// If unsuccessful continue.
				return true, nil
			}
			// Check if there's a bindvar for that value already.
			var key string
			if bval.Type == sqltypes.VarBinary {
				// Prefixing strings with "'" ensures that a string
				// and number that have the same representation don't
				// collide.
				key = "'" + string(node.Val)
			} else {
				key = string(node.Val)
			}
			bvname, ok := vals[key]
			if !ok {
				// If there's no such bindvar, make a new one.
				bvname, counter = newName(prefix, counter, reserved)
				vals[key] = bvname
				bindVars[bvname] = bval
			}
			// Modify the AST node to a bindvar.
			node.Type = ValArg
			node.Val = append([]byte(":"), bvname...)
		case *ComparisonExpr:
			switch node.Operator {
			case InStr, NotInStr:
			default:
				// Only IN / NOT IN comparisons get list bindvars.
				return true, nil
			}
			// It's either IN or NOT IN.
			tupleVals, ok := node.Right.(ValTuple)
			if !ok {
				return true, nil
			}
			// The RHS is a tuple of values.
			// Make a list bindvar.
			bvals := &querypb.BindVariable{
				Type: sqltypes.Tuple,
			}
			for _, val := range tupleVals {
				bval := sqlToBindvar(val)
				if bval == nil {
					// Any non-literal element aborts the whole tuple.
					return true, nil
				}
				bvals.Values = append(bvals.Values, &querypb.Value{
					Type:  bval.Type,
					Value: bval.Value,
				})
			}
			var bvname string
			bvname, counter = newName(prefix, counter, reserved)
			bindVars[bvname] = bvals
			// Modify RHS to be a list bindvar.
			node.Right = ListArg(append([]byte("::"), bvname...))
		}
		return true, nil
	}, stmt)
}

// sqlToBindvar converts a literal SQLVal node to a bind variable.
// Non-literal nodes (and non-SQLVal nodes) yield nil.
func sqlToBindvar(node SQLNode) *querypb.BindVariable {
	if node, ok := node.(*SQLVal); ok {
		switch node.Type {
		case StrVal:
			return &querypb.BindVariable{Type: sqltypes.VarBinary, Value: node.Val}
		case IntVal:
			return &querypb.BindVariable{Type: sqltypes.Int64, Value: node.Val}
		case FloatVal:
			return &querypb.BindVariable{Type: sqltypes.Float64, Value: node.Val}
		}
	}
	return nil
}

// newName returns the first unreserved name of the form prefix<N>
// with N >= counter, reserving it, plus the counter to resume from.
func newName(prefix string, counter int, reserved map[string]struct{}) (string, int) {
	for {
		newName := fmt.Sprintf("%s%d", prefix, counter)
		if _, ok := reserved[newName]; !ok {
			reserved[newName] = struct{}{}
			return newName, counter + 1
		}
		counter++
	}
}

// GetBindvars returns a map of the bind vars referenced in the statement.
// TODO(sougou); This function gets called again from vtgate/planbuilder.
// Ideally, this should be done only once.
+func GetBindvars(stmt Statement) map[string]struct{} { + bindvars := make(map[string]struct{}) + _ = Walk(func(node SQLNode) (kontinue bool, err error) { + switch node := node.(type) { + case *SQLVal: + if node.Type == ValArg { + bindvars[string(node.Val[1:])] = struct{}{} + } + case ListArg: + bindvars[string(node[2:])] = struct{}{} + } + return true, nil + }, stmt) + return bindvars +} diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/normalizer_test.go b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/normalizer_test.go new file mode 100644 index 00000000..d6ce075b --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/normalizer_test.go @@ -0,0 +1,186 @@ +// Copyright 2016, Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sqlparser + +import ( + "reflect" + "testing" + + querypb "github.com/xelabs/go-mysqlstack/sqlparser/depends/query" + "github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes" +) + +func TestNormalize(t *testing.T) { + prefix := "bv" + testcases := []struct { + in string + outstmt string + outbv map[string]interface{} + }{{ + // str val + in: "select * from t where v1 = 'aa'", + outstmt: "select * from t where v1 = :bv1", + outbv: map[string]interface{}{ + "bv1": &querypb.BindVariable{ + Type: sqltypes.VarBinary, + Value: []byte("aa"), + }, + }, + }, { + // int val + in: "select * from t where v1 = 1", + outstmt: "select * from t where v1 = :bv1", + outbv: map[string]interface{}{ + "bv1": &querypb.BindVariable{ + Type: sqltypes.Int64, + Value: []byte("1"), + }, + }, + }, { + // float val + in: "select * from t where v1 = 1.2", + outstmt: "select * from t where v1 = :bv1", + outbv: map[string]interface{}{ + "bv1": &querypb.BindVariable{ + Type: sqltypes.Float64, + Value: []byte("1.2"), + }, + }, + }, { + // multiple vals + in: "select * from t where v1 = 1.2 and v2 = 2", + outstmt: "select * from t 
where v1 = :bv1 and v2 = :bv2", + outbv: map[string]interface{}{ + "bv1": &querypb.BindVariable{ + Type: sqltypes.Float64, + Value: []byte("1.2"), + }, + "bv2": &querypb.BindVariable{ + Type: sqltypes.Int64, + Value: []byte("2"), + }, + }, + }, { + // bv collision + in: "select * from t where v1 = :bv1 and v2 = 1", + outstmt: "select * from t where v1 = :bv1 and v2 = :bv2", + outbv: map[string]interface{}{ + "bv2": &querypb.BindVariable{ + Type: sqltypes.Int64, + Value: []byte("1"), + }, + }, + }, { + // val reuse + in: "select * from t where v1 = 1 and v2 = 1", + outstmt: "select * from t where v1 = :bv1 and v2 = :bv1", + outbv: map[string]interface{}{ + "bv1": &querypb.BindVariable{ + Type: sqltypes.Int64, + Value: []byte("1"), + }, + }, + }, { + // ints and strings are different + in: "select * from t where v1 = 1 and v2 = '1'", + outstmt: "select * from t where v1 = :bv1 and v2 = :bv2", + outbv: map[string]interface{}{ + "bv1": &querypb.BindVariable{ + Type: sqltypes.Int64, + Value: []byte("1"), + }, + "bv2": &querypb.BindVariable{ + Type: sqltypes.VarBinary, + Value: []byte("1"), + }, + }, + }, { + // comparison with no vals + in: "select * from t where v1 = v2", + outstmt: "select * from t where v1 = v2", + outbv: map[string]interface{}{}, + }, { + // IN clause with existing bv + in: "select * from t where v1 in ::list", + outstmt: "select * from t where v1 in ::list", + outbv: map[string]interface{}{}, + }, { + // IN clause with non-val values + in: "select * from t where v1 in (1, a)", + outstmt: "select * from t where v1 in (:bv1, a)", + outbv: map[string]interface{}{ + "bv1": &querypb.BindVariable{ + Type: sqltypes.Int64, + Value: []byte("1"), + }, + }, + }, { + // IN clause with vals + in: "select * from t where v1 in (1, '2')", + outstmt: "select * from t where v1 in ::bv1", + outbv: map[string]interface{}{ + "bv1": &querypb.BindVariable{ + Type: sqltypes.Tuple, + Values: []*querypb.Value{{ + Type: sqltypes.Int64, + Value: []byte("1"), + }, { + Type: 
sqltypes.VarBinary, + Value: []byte("2"), + }}, + }, + }, + }, { + // NOT IN clause + in: "select * from t where v1 not in (1, '2')", + outstmt: "select * from t where v1 not in ::bv1", + outbv: map[string]interface{}{ + "bv1": &querypb.BindVariable{ + Type: sqltypes.Tuple, + Values: []*querypb.Value{{ + Type: sqltypes.Int64, + Value: []byte("1"), + }, { + Type: sqltypes.VarBinary, + Value: []byte("2"), + }}, + }, + }, + }} + for _, tc := range testcases { + stmt, err := Parse(tc.in) + if err != nil { + t.Error(err) + continue + } + bv := make(map[string]interface{}) + Normalize(stmt, bv, prefix) + outstmt := String(stmt) + if outstmt != tc.outstmt { + t.Errorf("Query:\n%s:\n%s, want\n%s", tc.in, outstmt, tc.outstmt) + } + if !reflect.DeepEqual(tc.outbv, bv) { + t.Errorf("Query:\n%s:\n%v, want\n%v", tc.in, bv, tc.outbv) + } + } +} + +func TestGetBindVars(t *testing.T) { + stmt, err := Parse("select * from t where :v1 = :v2 and :v2 = :v3 and :v4 in ::v5") + if err != nil { + t.Fatal(err) + } + got := GetBindvars(stmt) + want := map[string]struct{}{ + "v1": {}, + "v2": {}, + "v3": {}, + "v4": {}, + "v5": {}, + } + if !reflect.DeepEqual(got, want) { + t.Errorf("GetBindVars: %v, want %v", got, want) + } +} diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/parse_test.go b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/parse_test.go new file mode 100644 index 00000000..c961040e --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/parse_test.go @@ -0,0 +1,1314 @@ +/* +Copyright 2017 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqlparser + +import "strings" +import "testing" + +func TestValid(t *testing.T) { + validSQL := []struct { + input string + output string + }{{ + input: "select 1", + output: "select 1 from dual", + }, { + input: "select 1 from t", + }, { + input: "select .1 from t", + }, { + input: "select 1.2e1 from t", + }, { + input: "select 1.2e+1 from t", + }, { + input: "select 1.2e-1 from t", + }, { + input: "select 08.3 from t", + }, { + input: "select -1 from t where b = -2", + }, { + input: "select - -1 from t", + output: "select 1 from t", + }, { + input: "select 1 from t // aa", + output: "select 1 from t", + }, { + input: "select 1 from t -- aa", + output: "select 1 from t", + }, { + input: "select 1 from t # aa", + output: "select 1 from t", + }, { + input: "select 1 --aa\nfrom t", + output: "select 1 from t", + }, { + input: "select 1 #aa\nfrom t", + output: "select 1 from t", + }, { + input: "select /* simplest */ 1 from t", + }, { + input: "select /* double star **/ 1 from t", + }, { + input: "select /* double */ /* comment */ 1 from t", + }, { + input: "select /* back-quote keyword */ `By` from t", + }, { + input: "select /* back-quote num */ `2a` from t", + }, { + input: "select /* back-quote . 
*/ `a.b` from t", + }, { + input: "select /* back-quote back-quote */ `a``b` from t", + }, { + input: "select /* back-quote unnecessary */ 1 from `t`", + output: "select /* back-quote unnecessary */ 1 from t", + }, { + input: "select /* back-quote idnum */ 1 from `a1`", + output: "select /* back-quote idnum */ 1 from a1", + }, { + input: "select /* @ */ @@a from b", + }, { + input: "select /* \\0 */ '\\0' from a", + }, { + input: "select 1 /* drop this comment */ from t", + output: "select 1 from t", + }, { + input: "select /* union */ 1 from t union select 1 from t", + }, { + input: "select /* double union */ 1 from t union select 1 from t union select 1 from t", + }, { + input: "select /* union all */ 1 from t union all select 1 from t", + }, { + input: "select /* union distinct */ 1 from t union distinct select 1 from t", + }, { + input: "(select /* union parenthesized select */ 1 from t order by a) union select 1 from t", + output: "(select /* union parenthesized select */ 1 from t order by a asc) union select 1 from t", + }, { + input: "select /* union parenthesized select 2 */ 1 from t union (select 1 from t)", + }, { + input: "select /* union order by */ 1 from t union select 1 from t order by a", + output: "select /* union order by */ 1 from t union select 1 from t order by a asc", + }, { + input: "select /* union order by limit lock */ 1 from t union select 1 from t order by a limit 1 for update", + output: "select /* union order by limit lock */ 1 from t union select 1 from t order by a asc limit 1 for update", + }, { + input: "select /* union with limit on lhs */ 1 from t limit 1 union select 1 from t", + }, { + input: "(select id, a from t order by id limit 1) union (select id, b as a from s order by id limit 1) order by a limit 1", + output: "(select id, a from t order by id asc limit 1) union (select id, b as a from s order by id asc limit 1) order by a asc limit 1", + }, { + input: "select a from (select 1 as a from tbl1 union select 2 from tbl2) as 
t", + }, { + input: "select * from t1 join (select * from t2 union select * from t3) as t", + }, { + input: "select * from t1 where col in (select 1 from dual union select 2 from dual)", + }, { + input: "select * from t1 where exists (select a from t2 union select b from t3)", + }, { + input: "select /* distinct */ distinct 1 from t", + }, { + input: "select /* straight_join */ straight_join 1 from t", + }, { + input: "select /* for update */ 1 from t for update", + }, { + input: "select /* lock in share mode */ 1 from t lock in share mode", + }, { + input: "select /* select list */ 1, 2 from t", + }, { + input: "select /* * */ * from t", + }, { + input: "select /* a.* */ a.* from t", + }, { + input: "select /* a.b.* */ a.b.* from t", + }, { + input: "select /* column alias */ a b from t", + output: "select /* column alias */ a as b from t", + }, { + input: "select /* column alias with as */ a as b from t", + }, { + input: "select /* keyword column alias */ a as `By` from t", + }, { + input: "select /* column alias as string */ a as \"b\" from t", + output: "select /* column alias as string */ a as b from t", + }, { + input: "select /* column alias as string without as */ a \"b\" from t", + output: "select /* column alias as string without as */ a as b from t", + }, { + input: "select /* a.* */ a.* from t", + }, { + input: "select next value for t", + output: "select next 1 values from t", + }, { + input: "select next value from t", + output: "select next 1 values from t", + }, { + input: "select next 10 values from t", + }, { + input: "select next :a values from t", + }, { + input: "select /* `By`.* */ `By`.* from t", + }, { + input: "select /* select with bool expr */ a = b from t", + }, { + input: "select /* case_when */ case when a = b then c end from t", + }, { + input: "select /* case_when_else */ case when a = b then c else d end from t", + }, { + input: "select /* case_when_when_else */ case when a = b then c when b = d then d else d end from t", + }, { + 
input: "select /* case */ case aa when a = b then c end from t", + }, { + input: "select /* parenthesis */ 1 from (t)", + }, { + input: "select /* parenthesis multi-table */ 1 from (t1, t2)", + }, { + input: "select /* table list */ 1 from t1, t2", + }, { + input: "select /* parenthessis in table list 1 */ 1 from (t1), t2", + }, { + input: "select /* parenthessis in table list 2 */ 1 from t1, (t2)", + }, { + input: "select /* use */ 1 from t1 use index (a) where b = 1", + }, { + input: "select /* keyword index */ 1 from t1 use index (`By`) where b = 1", + }, { + input: "select /* use */ 1 from t1 as t2 use index (a), t3 use index (b) where b = 1", + }, { + input: "select /* force */ 1 from t1 as t2 force index (a), t3 force index (b) where b = 1", + }, { + input: "select /* table alias */ 1 from t t1", + output: "select /* table alias */ 1 from t as t1", + }, { + input: "select /* table alias with as */ 1 from t as t1", + }, { + input: "select /* string table alias */ 1 from t as 't1'", + output: "select /* string table alias */ 1 from t as t1", + }, { + input: "select /* string table alias without as */ 1 from t 't1'", + output: "select /* string table alias without as */ 1 from t as t1", + }, { + input: "select /* keyword table alias */ 1 from t as `By`", + }, { + input: "select /* join */ 1 from t1 join t2", + }, { + input: "select /* join on */ 1 from t1 join t2 on a = b", + }, { + input: "select /* inner join */ 1 from t1 inner join t2", + output: "select /* inner join */ 1 from t1 join t2", + }, { + input: "select /* cross join */ 1 from t1 cross join t2", + output: "select /* cross join */ 1 from t1 join t2", + }, { + input: "select /* straight_join */ 1 from t1 straight_join t2", + }, { + input: "select /* straight_join on */ 1 from t1 straight_join t2 on a = b", + }, { + input: "select /* left join */ 1 from t1 left join t2 on a = b", + }, { + input: "select /* left outer join */ 1 from t1 left outer join t2 on a = b", + output: "select /* left outer join 
*/ 1 from t1 left join t2 on a = b", + }, { + input: "select /* right join */ 1 from t1 right join t2 on a = b", + }, { + input: "select /* right outer join */ 1 from t1 right outer join t2 on a = b", + output: "select /* right outer join */ 1 from t1 right join t2 on a = b", + }, { + input: "select /* natural join */ 1 from t1 natural join t2", + }, { + input: "select /* natural left join */ 1 from t1 natural left join t2", + }, { + input: "select /* natural left outer join */ 1 from t1 natural left join t2", + output: "select /* natural left outer join */ 1 from t1 natural left join t2", + }, { + input: "select /* natural right join */ 1 from t1 natural right join t2", + }, { + input: "select /* natural right outer join */ 1 from t1 natural right join t2", + output: "select /* natural right outer join */ 1 from t1 natural right join t2", + }, { + input: "select /* join on */ 1 from t1 join t2 on a = b", + }, { + input: "select /* s.t */ 1 from s.t", + }, { + input: "select /* keyword schema & table name */ 1 from `By`.`bY`", + }, { + input: "select /* select in from */ 1 from (select 1 from t) as a", + }, { + input: "select /* select in from with no as */ 1 from (select 1 from t) a", + output: "select /* select in from with no as */ 1 from (select 1 from t) as a", + }, { + input: "select /* where */ 1 from t where a = b", + }, { + input: "select /* and */ 1 from t where a = b and a = c", + }, { + input: "select /* && */ 1 from t where a = b && a = c", + output: "select /* && */ 1 from t where a = b and a = c", + }, { + input: "select /* or */ 1 from t where a = b or a = c", + }, { + input: "select /* || */ 1 from t where a = b || a = c", + output: "select /* || */ 1 from t where a = b or a = c", + }, { + input: "select /* not */ 1 from t where not a = b", + }, { + input: "select /* ! 
*/ 1 from t where a = !1", + }, { + input: "select /* bool is */ 1 from t where a = b is null", + }, { + input: "select /* bool is not */ 1 from t where a = b is not false", + }, { + input: "select /* true */ 1 from t where true", + }, { + input: "select /* false */ 1 from t where false", + }, { + input: "select /* false on left */ 1 from t where false = 0", + }, { + input: "select /* exists */ 1 from t where exists (select 1 from t)", + }, { + input: "select /* (boolean) */ 1 from t where not (a = b)", + }, { + input: "select /* in value list */ 1 from t where a in (b, c)", + }, { + input: "select /* in select */ 1 from t where a in (select 1 from t)", + }, { + input: "select /* not in */ 1 from t where a not in (b, c)", + }, { + input: "select /* like */ 1 from t where a like b", + }, { + input: "select /* like escape */ 1 from t where a like b escape '!'", + }, { + input: "select /* not like */ 1 from t where a not like b", + }, { + input: "select /* not like escape */ 1 from t where a not like b escape '$'", + }, { + input: "select /* regexp */ 1 from t where a regexp b", + }, { + input: "select /* not regexp */ 1 from t where a not regexp b", + }, { + input: "select /* rlike */ 1 from t where a rlike b", + output: "select /* rlike */ 1 from t where a regexp b", + }, { + input: "select /* not rlike */ 1 from t where a not rlike b", + output: "select /* not rlike */ 1 from t where a not regexp b", + }, { + input: "select /* between */ 1 from t where a between b and c", + }, { + input: "select /* not between */ 1 from t where a not between b and c", + }, { + input: "select /* is null */ 1 from t where a is null", + }, { + input: "select /* is not null */ 1 from t where a is not null", + }, { + input: "select /* is true */ 1 from t where a is true", + }, { + input: "select /* is not true */ 1 from t where a is not true", + }, { + input: "select /* is false */ 1 from t where a is false", + }, { + input: "select /* is not false */ 1 from t where a is not false", + 
}, { + input: "select /* < */ 1 from t where a < b", + }, { + input: "select /* <= */ 1 from t where a <= b", + }, { + input: "select /* >= */ 1 from t where a >= b", + }, { + input: "select /* > */ 1 from t where a > b", + }, { + input: "select /* != */ 1 from t where a != b", + }, { + input: "select /* <> */ 1 from t where a <> b", + output: "select /* <> */ 1 from t where a != b", + }, { + input: "select /* <=> */ 1 from t where a <=> b", + }, { + input: "select /* != */ 1 from t where a != b", + }, { + input: "select /* single value expre list */ 1 from t where a in (b)", + }, { + input: "select /* select as a value expression */ 1 from t where a = (select a from t)", + }, { + input: "select /* parenthesised value */ 1 from t where a = (b)", + }, { + input: "select /* over-parenthesize */ ((1)) from t where ((a)) in (((1))) and ((a, b)) in ((((1, 1))), ((2, 2)))", + }, { + input: "select /* dot-parenthesize */ (a.b) from t where (b.c) = 2", + }, { + input: "select /* & */ 1 from t where a = b & c", + }, { + input: "select /* & */ 1 from t where a = b & c", + }, { + input: "select /* | */ 1 from t where a = b | c", + }, { + input: "select /* ^ */ 1 from t where a = b ^ c", + }, { + input: "select /* + */ 1 from t where a = b + c", + }, { + input: "select /* - */ 1 from t where a = b - c", + }, { + input: "select /* * */ 1 from t where a = b * c", + }, { + input: "select /* / */ 1 from t where a = b / c", + }, { + input: "select /* % */ 1 from t where a = b % c", + }, { + input: "select /* div */ 1 from t where a = b div c", + }, { + input: "select /* MOD */ 1 from t where a = b MOD c", + output: "select /* MOD */ 1 from t where a = b % c", + }, { + input: "select /* << */ 1 from t where a = b << c", + }, { + input: "select /* >> */ 1 from t where a = b >> c", + }, { + input: "select /* % no space */ 1 from t where a = b%c", + output: "select /* % no space */ 1 from t where a = b % c", + }, { + input: "select /* u+ */ 1 from t where a = +b", + }, { + input: 
"select /* u- */ 1 from t where a = -b", + }, { + input: "select /* u~ */ 1 from t where a = ~b", + }, { + input: "select /* -> */ a.b -> 'ab' from t", + }, { + input: "select /* -> */ a.b ->> 'ab' from t", + }, { + input: "select /* empty function */ 1 from t where a = b()", + }, { + input: "select /* function with 1 param */ 1 from t where a = b(c)", + }, { + input: "select /* function with many params */ 1 from t where a = b(c, d)", + }, { + input: "select /* function with distinct */ count(distinct a) from t", + }, { + input: "select /* if as func */ 1 from t where a = if(b)", + }, { + input: "select /* current_timestamp as func */ current_timestamp() from t", + }, { + input: "select /* mod as func */ a from tab where mod(b, 2) = 0", + }, { + input: "select /* database as func no param */ database() from t", + }, { + input: "select /* database as func 1 param */ database(1) from t", + }, { + input: "select /* a */ a from t", + }, { + input: "select /* a.b */ a.b from t", + }, { + input: "select /* a.b.c */ a.b.c from t", + }, { + input: "select /* keyword a.b */ `By`.`bY` from t", + }, { + input: "select /* string */ 'a' from t", + }, { + input: "select /* double quoted string */ \"a\" from t", + output: "select /* double quoted string */ 'a' from t", + }, { + input: "select /* quote quote in string */ 'a''a' from t", + output: "select /* quote quote in string */ 'a\\'a' from t", + }, { + input: "select /* double quote quote in string */ \"a\"\"a\" from t", + output: "select /* double quote quote in string */ 'a\\\"a' from t", + }, { + input: "select /* quote in double quoted string */ \"a'a\" from t", + output: "select /* quote in double quoted string */ 'a\\'a' from t", + }, { + input: "select /* backslash quote in string */ 'a\\'a' from t", + }, { + input: "select /* literal backslash in string */ 'a\\\\na' from t", + }, { + input: "select /* all escapes */ '\\0\\'\\\"\\b\\n\\r\\t\\Z\\\\' from t", + }, { + input: "select /* non-escape */ '\\x' from t", + 
output: "select /* non-escape */ 'x' from t", + }, { + input: "select /* unescaped backslash */ '\\n' from t", + }, { + input: "select /* value argument */ :a from t", + }, { + input: "select /* value argument with digit */ :a1 from t", + }, { + input: "select /* value argument with dot */ :a.b from t", + }, { + input: "select /* positional argument */ ? from t", + output: "select /* positional argument */ :v1 from t", + }, { + input: "select /* multiple positional arguments */ ?, ? from t", + output: "select /* multiple positional arguments */ :v1, :v2 from t", + }, { + input: "select /* list arg */ * from t where a in ::list", + }, { + input: "select /* list arg not in */ * from t where a not in ::list", + }, { + input: "select /* null */ null from t", + }, { + input: "select /* octal */ 010 from t", + }, { + input: "select /* hex */ x'f0A1' from t", + output: "select /* hex */ X'f0A1' from t", + }, { + input: "select /* hex caps */ X'F0a1' from t", + }, { + input: "select /* 0x */ 0xf0 from t", + }, { + input: "select /* float */ 0.1 from t", + }, { + input: "select /* group by */ 1 from t group by a", + }, { + input: "select /* having */ 1 from t having a = b", + }, { + input: "select /* simple order by */ 1 from t order by a", + output: "select /* simple order by */ 1 from t order by a asc", + }, { + input: "select /* order by asc */ 1 from t order by a asc", + }, { + input: "select /* order by desc */ 1 from t order by a desc", + }, { + input: "select /* order by null */ 1 from t order by null", + }, { + input: "select /* limit a */ 1 from t limit a", + }, { + input: "select /* limit a,b */ 1 from t limit a, b", + }, { + input: "select /* binary unary */ a- -b from t", + output: "select /* binary unary */ a - -b from t", + }, { + input: "select /* - - */ - -b from t", + }, { + input: "select /* binary binary */ binary binary b from t", + }, { + input: "select /* binary ~ */ binary ~b from t", + }, { + input: "select /* ~ binary */ ~ binary b from t", + }, { + 
input: "select /* interval */ adddate('2008-01-02', interval 31 day) from t", + }, { + input: "select /* dual */ 1 from dual", + }, { + input: "select /* Dual */ 1 from Dual", + output: "select /* Dual */ 1 from dual", + }, { + input: "select /* DUAL */ 1 from Dual", + output: "select /* DUAL */ 1 from dual", + }, { + input: "select /* column as bool in where */ a from t where b", + }, { + input: "select /* OR of columns in where */ * from t where a or b", + }, { + input: "select /* OR of mixed columns in where */ * from t where a = 5 or b and c is not null", + }, { + input: "select /* OR in select columns */ (a or b) from t where c = 5", + }, { + input: "select /* bool as select value */ a, true from t", + }, { + input: "select /* bool column in ON clause */ * from t join s on t.id = s.id and s.foo where t.bar", + }, { + input: "select /* bool in order by */ * from t order by a is null or b asc", + }, { + input: "select /* string in case statement */ if(max(case a when 'foo' then 1 else 0 end) = 1, 'foo', 'bar') as foobar from t", + }, { + input: "select /* dual */ 1 from dual", + }, { + input: "select /* dual */ 1 from dual", + }, { + input: "insert /* simple */ into a values (1)", + }, { + input: "insert into a values (rand())", + }, { + input: "insert /* a.b */ into a.b values (1)", + }, { + input: "insert /* multi-value */ into a values (1, 2)", + }, { + input: "insert /* multi-value list */ into a values (1, 2), (3, 4)", + }, { + input: "insert /* no values */ into a values ()", + }, { + input: "insert /* set */ into a set a = 1, b = 2", + output: "insert /* set */ into a(a, b) values (1, 2)", + }, { + input: "insert /* set default */ into a set a = default, b = 2", + output: "insert /* set default */ into a(a, b) values (default, 2)", + }, { + input: "insert /* value expression list */ into a values (a + 1, 2 * 3)", + }, { + input: "insert /* default */ into a values (default, 2 * 3)", + }, { + input: "insert /* column list */ into a(a, b) values (1, 2)", + 
}, { + input: "insert into a(a, b) values (1, ifnull(null, default(b)))", + }, { + input: "insert /* qualified column list */ into a(a, b) values (1, 2)", + }, { + input: "insert /* qualified columns */ into t (t.a, t.b) values (1, 2)", + output: "insert /* qualified columns */ into t(a, b) values (1, 2)", + }, { + input: "insert /* select */ into a select b, c from d", + }, { + input: "insert /* no cols & paren select */ into a(select * from t)", + output: "insert /* no cols & paren select */ into a select * from t", + }, { + input: "insert /* cols & paren select */ into a(a,b,c) (select * from t)", + output: "insert /* cols & paren select */ into a(a, b, c) select * from t", + }, { + input: "insert /* cols & union with paren select */ into a(b, c) (select d, e from f) union (select g from h)", + }, { + input: "insert /* on duplicate */ into a values (1, 2) on duplicate key update b = func(a), c = d", + }, { + input: "insert /* bool in insert value */ into a values (1, true, false)", + }, { + input: "insert /* bool in on duplicate */ into a values (1, 2) on duplicate key update b = false, c = d", + }, { + input: "insert /* bool expression on duplicate */ into a values (1, 2) on duplicate key update b = func(a), c = a > d", + }, { + input: "update /* simple */ a set b = 3", + }, { + input: "update /* a.b */ a.b set b = 3", + }, { + input: "update /* list */ a set b = 3, c = 4", + }, { + input: "update /* expression */ a set b = 3 + 4", + }, { + input: "update /* where */ a set b = 3 where a = b", + }, { + input: "update /* order */ a set b = 3 order by c desc", + }, { + input: "update /* limit */ a set b = 3 limit c", + }, { + input: "update /* bool in update */ a set b = true", + }, { + input: "update /* bool expr in update */ a set b = 5 > 2", + }, { + input: "update /* bool in update where */ a set b = 5 where c", + }, { + input: "update /* table qualifier */ a set a.b = 3", + }, { + input: "update /* table qualifier */ a set t.a.b = 3", + }, { + input: "delete 
/* simple */ from a", + }, { + input: "delete /* a.b */ from a.b", + }, { + input: "delete /* where */ from a where a = b", + }, { + input: "delete /* order */ from a order by b desc", + }, { + input: "delete /* limit */ from a limit b", + }, { + input: "alter table a alter foo", + output: "alter table a", + }, { + input: "alter table a change foo", + output: "alter table a", + }, { + input: "alter table a rename index foo to bar", + output: "alter table a", + }, { + input: "alter table a rename key foo to bar", + output: "alter table a", + }, { + input: "alter table e auto_increment = 20", + output: "alter table e", + }, { + input: "alter table e character set = 'ascii'", + output: "alter table e", + }, { + input: "alter table e default character set = 'ascii'", + output: "alter table e", + }, { + input: "alter table e comment = 'hello'", + output: "alter table e", + }, { + input: "create table a (\n\t`a` int\n)", + }, { + input: "create table `by` (\n\t`by` char\n)", + }, { + input: "create table if not exists a (\n\t`a` int\n)", + output: "create table if not exists a (\n\t`a` int\n)", + }, { + input: "create index a on b", + output: "create index a on b", + }, { + input: "drop table a", + output: "drop table a", + }, { + input: "drop table if exists a", + output: "drop table if exists a", + }, { + input: "drop index b on a", + output: "drop index b on a", + }, { + input: "analyze table a", + output: "alter table a", + }, { + input: "show databases", + output: "show databases", + }, { + input: "show tables", + output: "show tables", + }, { + input: "show foobar", + output: "show unsupported", + }, { + input: "use db", + output: "use db", + }, { + input: "use duplicate", + output: "use `duplicate`", + }, { + input: "use `ks:-80@master`", + output: "use `ks:-80@master`", + }, { + input: "describe foobar", + output: "otherread", + }, { + input: "desc foobar", + output: "otherread", + }, { + input: "truncate table foo", + output: "truncate table foo", + }, { + 
input: "repair foo", + output: "otheradmin", + }, { + input: "optimize foo", + output: "otheradmin", + }, { + input: "select /* EQ true */ 1 from t where a = true", + }, { + input: "select /* EQ false */ 1 from t where a = false", + }, { + input: "select /* NE true */ 1 from t where a != true", + }, { + input: "select /* NE false */ 1 from t where a != false", + }, { + input: "select /* LT true */ 1 from t where a < true", + }, { + input: "select /* LT false */ 1 from t where a < false", + }, { + input: "select /* GT true */ 1 from t where a > true", + }, { + input: "select /* GT false */ 1 from t where a > false", + }, { + input: "select /* LE true */ 1 from t where a <= true", + }, { + input: "select /* LE false */ 1 from t where a <= false", + }, { + input: "select /* GE true */ 1 from t where a >= true", + }, { + input: "select /* GE false */ 1 from t where a >= false", + }, { + input: "select * from t order by a collate utf8_general_ci", + output: "select * from t order by a collate utf8_general_ci asc", + }, { + input: "select k collate latin1_german2_ci as k1 from t1 order by k1 asc", + }, { + input: "select * from t group by a collate utf8_general_ci", + }, { + input: "select MAX(k collate latin1_german2_ci) from t1", + }, { + input: "select distinct k collate latin1_german2_ci from t1", + }, { + input: "select * from t1 where 'Müller' collate latin1_german2_ci = k", + }, { + input: "select * from t1 where k like 'Müller' collate latin1_german2_ci", + }, { + input: "select k from t1 group by k having k = 'Müller' collate latin1_german2_ci", + }, { + input: "select k from t1 join t2 order by a collate latin1_german2_ci asc, b collate latin1_german2_ci asc", + }, { + input: "select k collate 'latin1_german2_ci' as k1 from t1 order by k1 asc", + output: "select k collate latin1_german2_ci as k1 from t1 order by k1 asc", + }, { + input: "select /* drop trailing semicolon */ 1 from dual;", + output: "select /* drop trailing semicolon */ 1 from dual", + }, { + 
input: "select /* cache directive */ sql_no_cache 'foo' from t", + }, { + input: "select binary 'a' = 'A' from t", + }, { + input: "select match(a) against ('foo') from t", + }, { + input: "select match(a1, a2) against ('foo' in natural language mode with query expansion) from t", + }, { + input: "select title from video as v where match(v.title, v.tag) against ('DEMO' in boolean mode)", + }, { + input: "select name, group_concat(score) from t group by name", + }, { + input: "select name, group_concat(distinct id, score order by id desc separator ':') from t group by name", + }} + + for _, tcase := range validSQL { + if tcase.output == "" { + tcase.output = tcase.input + } + tree, err := Parse(tcase.input) + if err != nil { + t.Errorf("input: %s, err: %v", tcase.input, err) + continue + } + out := String(tree) + if out != tcase.output { + t.Errorf("out: %s, want %s", out, tcase.output) + } + // This test just exercises the tree walking functionality. + // There's no way automated way to verify that a node calls + // all its children. But we can examine code coverage and + // ensure that all WalkSubtree functions were called. 
+ Walk(func(node SQLNode) (bool, error) { + return true, nil + }, tree) + } +} + +func TestCaseSensitivity(t *testing.T) { + validSQL := []struct { + input string + output string + }{{ + input: "create table A (\n\t`B` int\n)", + }, { + input: "create index b on A", + output: "create index b on A", + }, { + input: "alter table A foo", + output: "alter table A", + }, { + input: "alter table A convert", + output: "alter table A", + }, { + input: "drop table B", + output: "drop table B", + }, { + input: "drop table if exists B", + output: "drop table if exists B", + }, { + input: "drop index b on A", + output: "drop index b on A", + }, { + input: "select a from B", + }, { + input: "select A as B from C", + }, { + input: "select B.* from c", + }, { + input: "select B.A from c", + }, { + input: "select * from B as C", + }, { + input: "select * from A.B", + }, { + input: "update A set b = 1", + }, { + input: "update A.B set b = 1", + }, { + input: "select A() from b", + }, { + input: "select A(B, C) from b", + }, { + input: "select A(distinct B, C) from b", + }, { + // IF is an exception. It's always lower-cased. 
+ input: "select IF(B, C) from b", + output: "select if(B, C) from b", + }, { + input: "select * from b use index (A)", + }, { + input: "insert into A(A, B) values (1, 2)", + }, { + input: "CREATE TABLE A (\n\t`A` int\n)", + output: "create table A (\n\t`A` int\n)", + }, { + input: "select /* lock in SHARE MODE */ 1 from t lock in SHARE MODE", + output: "select /* lock in SHARE MODE */ 1 from t lock in share mode", + }, { + input: "select next VALUE from t", + output: "select next 1 values from t", + }, { + input: "select /* use */ 1 from t1 use index (A) where b = 1", + }} + for _, tcase := range validSQL { + if tcase.output == "" { + tcase.output = tcase.input + } + tree, err := Parse(tcase.input) + if err != nil { + t.Errorf("input: %s, err: %v", tcase.input, err) + continue + } + out := String(tree) + if out != tcase.output { + t.Errorf("out: %s, want %s", out, tcase.output) + } + } +} + +func TestKeywords(t *testing.T) { + validSQL := []struct { + input string + output string + }{{ + input: "select current_timestamp", + output: "select current_timestamp() from dual", + }, { + input: "update t set a = current_timestamp()", + }, { + input: "select a, current_date from t", + output: "select a, current_date() from t", + }, { + input: "insert into t(a, b) values (current_date, current_date())", + output: "insert into t(a, b) values (current_date(), current_date())", + }, { + input: "select * from t where a > utc_timestmp()", + }, { + input: "update t set b = utc_timestamp + 5", + output: "update t set b = utc_timestamp() + 5", + }, { + input: "select utc_time, utc_date", + output: "select utc_time(), utc_date() from dual", + }, { + input: "select 1 from dual where localtime > utc_time", + output: "select 1 from dual where localtime() > utc_time()", + }, { + input: "update t set a = localtimestamp(), b = utc_timestamp", + output: "update t set a = localtimestamp(), b = utc_timestamp()", + }, { + input: "insert into t(a) values (unix_timestamp)", + }, { + input: 
"select replace(a, 'foo', 'bar') from t", + }, { + input: "update t set a = replace('1234', '2', '1')", + }, { + input: "insert into t(a, b) values ('foo', 'bar') on duplicate key update a = replace(hex('foo'), 'f', 'b')", + }, { + input: "update t set a = left('1234', 3)", + }, { + input: "select left(a, 5) from t", + }, { + input: "update t set d = adddate(date('2003-12-31 01:02:03'), interval 5 days)", + }, { + input: "insert into t(a, b) values (left('foo', 1), 'b')", + }, { + input: "insert /* qualified function */ into t(a, b) values (test.PI(), 'b')", + }, { + input: "select /* keyword in qualified id */ * from t join z on t.key = z.key", + output: "select /* keyword in qualified id */ * from t join z on t.`key` = z.`key`", + }, { + input: "select /* non-reserved keywords as unqualified cols */ date, view, offset from t", + output: "select /* non-reserved keywords as unqualified cols */ `date`, `view`, `offset` from t", + }, { + input: "select /* share and mode as cols */ share, mode from t where share = 'foo'", + output: "select /* share and mode as cols */ `share`, `mode` from t where `share` = 'foo'", + }, { + input: "select /* unused keywords as cols */ write, varying from t where trailing = 'foo'", + output: "select /* unused keywords as cols */ `write`, `varying` from t where `trailing` = 'foo'", + }} + + for _, tcase := range validSQL { + if tcase.output == "" { + tcase.output = tcase.input + } + tree, err := Parse(tcase.input) + if err != nil { + t.Errorf("input: %s, err: %v", tcase.input, err) + continue + } + out := String(tree) + if out != tcase.output { + t.Errorf("out: %s, want %s", out, tcase.output) + } + } +} + +func TestConvert(t *testing.T) { + validSQL := []struct { + input string + output string + }{{ + input: "select cast('abc' as date) from t", + output: "select convert('abc', date) from t", + }, { + input: "select convert('abc', binary(4)) from t", + }, { + input: "select convert('abc', binary) from t", + }, { + input: "select 
convert('abc', char character set binary) from t", + }, { + input: "select convert('abc', char(4) ascii) from t", + }, { + input: "select convert('abc', char unicode) from t", + }, { + input: "select convert('abc', char(4)) from t", + }, { + input: "select convert('abc', char) from t", + }, { + input: "select convert('abc', nchar(4)) from t", + }, { + input: "select convert('abc', nchar) from t", + }, { + input: "select convert('abc', signed) from t", + }, { + input: "select convert('abc', signed integer) from t", + output: "select convert('abc', signed) from t", + }, { + input: "select convert('abc', unsigned) from t", + }, { + input: "select convert('abc', unsigned integer) from t", + output: "select convert('abc', unsigned) from t", + }, { + input: "select convert('abc', decimal(3, 4)) from t", + }, { + input: "select convert('abc', decimal(4)) from t", + }, { + input: "select convert('abc', decimal) from t", + }, { + input: "select convert('abc', date) from t", + }, { + input: "select convert('abc', time(4)) from t", + }, { + input: "select convert('abc', time) from t", + }, { + input: "select convert('abc', datetime(9)) from t", + }, { + input: "select convert('abc', datetime) from t", + }, { + input: "select convert('abc', json) from t", + }, { + input: "select convert('abc' using ascii) from t", + }} + + for _, tcase := range validSQL { + if tcase.output == "" { + tcase.output = tcase.input + } + tree, err := Parse(tcase.input) + if err != nil { + t.Errorf("input: %s, err: %v", tcase.input, err) + continue + } + + // Walk. 
+ Walk(func(node SQLNode) (bool, error) { + return true, nil + }, tree) + + out := String(tree) + if out != tcase.output { + t.Errorf("out: %s, want %s", out, tcase.output) + } + } + + invalidSQL := []struct { + input string + output string + }{{ + input: "select convert('abc' as date) from t", + output: "syntax error at position 24 near 'as'", + }, { + input: "select convert from t", + output: "syntax error at position 20 near 'from'", + }, { + input: "select cast('foo', decimal) from t", + output: "syntax error at position 19", + }, { + input: "select convert('abc', datetime(4+9)) from t", + output: "syntax error at position 34", + }, { + input: "select convert('abc', decimal(4+9)) from t", + output: "syntax error at position 33", + }} + + for _, tcase := range invalidSQL { + _, err := Parse(tcase.input) + if err == nil || err.Error() != tcase.output { + t.Errorf("%s: %v, want %s", tcase.input, err, tcase.output) + } + } +} + +func TestCreateTable(t *testing.T) { + validSQL := []string{ + "create table t (\n" + + " `id` int primary key,\n" + + " `name` varchar(10)\n" + + ")", + + // test all the data types and options + "create table t (\n" + + " `col_bit` bit,\n" + + " `col_tinyint` tinyint auto_increment,\n" + + " `col_tinyint3` tinyint(3) unsigned,\n" + + " `col_smallint` smallint,\n" + + " `col_smallint4` smallint(4) zerofill,\n" + + " `col_mediumint` mediumint,\n" + + " `col_mediumint5` mediumint(5) unsigned not null,\n" + + " `col_int` int,\n" + + " `col_int10` int(10) not null,\n" + + " `col_integer` integer comment 'this is an integer',\n" + + " `col_bigint` bigint,\n" + + " `col_bigint10` bigint(10) zerofill not null default 10,\n" + + " `col_real` real,\n" + + " `col_real2` real(1,2) not null default 1.23,\n" + + " `col_double` double,\n" + + " `col_double2` double(3,4) not null default 1.23,\n" + + " `col_float` float,\n" + + " `col_float2` float(3,4) not null default 1.23,\n" + + " `col_decimal` decimal,\n" + + " `col_decimal2` decimal(2),\n" + + " 
`col_decimal3` decimal(2,3),\n" + + " `col_numeric` numeric,\n" + + " `col_numeric2` numeric(2),\n" + + " `col_numeric3` numeric(2,3),\n" + + " `col_date` date,\n" + + " `col_time` time,\n" + + " `col_timestamp` timestamp,\n" + + " `col_datetime` datetime,\n" + + " `col_year` year,\n" + + " `col_char` char,\n" + + " `col_char2` char(2),\n" + + " `col_char3` char(3) character set ascii,\n" + + " `col_char4` char(4) character set ascii collate ascii_bin,\n" + + " `col_varchar` varchar,\n" + + " `col_varchar2` varchar(2),\n" + + " `col_varchar3` varchar(3) character set ascii,\n" + + " `col_varchar4` varchar(4) character set ascii collate ascii_bin,\n" + + " `col_binary` binary,\n" + + " `col_varbinary` varbinary(10),\n" + + " `col_tinyblob` tinyblob,\n" + + " `col_blob` blob,\n" + + " `col_mediumblob` mediumblob,\n" + + " `col_longblob` longblob,\n" + + " `col_tinytext` tinytext,\n" + + " `col_text` text,\n" + + " `col_mediumtext` mediumtext,\n" + + " `col_longtext` longtext,\n" + + " `col_text` text character set ascii collate ascii_bin,\n" + + " `col_json` json,\n" + + " `col_enum` enum('a', 'b', 'c', 'd')\n" + + ")", + + // test defaults + "create table t (\n" + + " `i1` int default 1,\n" + + " `i2` int default null,\n" + + " `f1` float default 1.23,\n" + + " `s1` varchar default 'c',\n" + + " `s2` varchar default 'this is a string',\n" + + " `s3` varchar default null\n" + + ")", + + // test key field options + "create table t (\n" + + " `id` int auto_increment primary key,\n" + + " `username` varchar unique key,\n" + + " `email` varchar unique,\n" + + " `full_name` varchar key\n" + + ")", + + // test defining indexes separately + "create table t (\n" + + " `id` int auto_increment,\n" + + " `username` varchar,\n" + + " `email` varchar,\n" + + " `full_name` varchar,\n" + + " `status` varchar,\n" + + " primary key (`id`),\n" + + " unique key `by_username` (`username`),\n" + + " unique `by_username2` (`username`),\n" + + " unique index `by_username3` (`username`),\n" 
+ + " index `by_status` (`status`),\n" + + " key `by_full_name` (`full_name`)\n" + + ")", + + // multi-column indexes + "create table t (\n" + + " `id` int auto_increment,\n" + + " `username` varchar,\n" + + " `email` varchar,\n" + + " `full_name` varchar,\n" + + " `a` int,\n" + + " `b` int,\n" + + " `c` int,\n" + + " primary key (`id`, `username`),\n" + + " unique key `by_abc` (`a`, `b`, `c`),\n" + + " key `by_email` (`email`(10), `username`)\n" + + ")", + + // table options + "create table t (\n" + + " `id` int auto_increment\n" + + ") default charset=utf8mb4\n", + } + for _, sql := range validSQL { + sql = strings.TrimSpace(sql) + tree, err := Parse(sql) + if err != nil { + t.Errorf("input: %s, err: %v", sql, err) + continue + } + got := String(tree.(*DDL)) + + if sql != got { + t.Errorf("want:\n%s\ngot:\n%s", sql, got) + } + } +} + +func TestErrors(t *testing.T) { + invalidSQL := []struct { + input string + output string + }{{ + input: "select $ from t", + output: "syntax error at position 9 near '$'", + }, { + input: "select : from t", + output: "syntax error at position 9 near ':'", + }, { + input: "select 0xH from t", + output: "syntax error at position 10 near '0x'", + }, { + input: "select x'78 from t", + output: "syntax error at position 12 near '78'", + }, { + input: "select x'777' from t", + output: "syntax error at position 14 near '777'", + }, { + input: "select 'aa\\", + output: "syntax error at position 12 near 'aa'", + }, { + input: "select 'aa", + output: "syntax error at position 12 near 'aa'", + }, { + input: "select * from t where :1 = 2", + output: "syntax error at position 24 near ':'", + }, { + input: "select * from t where :. = 2", + output: "syntax error at position 24 near ':'", + }, { + input: "select * from t where ::1 = 2", + output: "syntax error at position 25 near '::'", + }, { + input: "select * from t where ::. 
= 2", + output: "syntax error at position 25 near '::'", + }, { + input: "update a set c = values(1)", + output: "syntax error at position 26 near '1'", + }, { + input: "select(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F" + + "(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(" + + "F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F" + + "(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(" + + "F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F" + + "(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(" + + "F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F" + + "(F(F(F(F(F(F(F(F(F(F(F(F(", + output: "max nesting level reached at position 406", + }, { + input: "select(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F" + + "(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(" + + "F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F" + + "(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(" + + "F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F" + + "(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(" + + "F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F" + + "(F(F(F(F(F(F(F(F(F(F(F(", + output: "syntax error at position 405", + }, { + input: "select /* aa", + output: "syntax error at position 13 near '/* aa'", + }, { + // This construct is considered invalid due to a grammar conflict. 
+ input: "insert into a select * from b join c on duplicate key update d=e", + output: "syntax error at position 54 near 'key'", + }, { + input: "select * from a left join b", + output: "syntax error at position 29", + }, { + input: "select * from a natural join b on c = d", + output: "syntax error at position 34 near 'on'", + }, { + input: "select next id from a", + output: "expecting value after next at position 15 near 'id'", + }, { + input: "select next 1+1 values from a", + output: "syntax error at position 15", + }, { + input: "insert into a values (select * from b)", + output: "syntax error at position 29 near 'select'", + }, { + input: "select database", + output: "syntax error at position 17", + }, { + input: "select mod from t", + output: "syntax error at position 16 near 'from'", + }, { + input: "select 1 from t where div 5", + output: "syntax error at position 26 near 'div'", + }, { + input: "select 1 from t where binary", + output: "syntax error at position 30", + }, { + input: "select match(a1, a2) against ('foo' in boolean mode with query expansion) from t", + output: "syntax error at position 57 near 'with'", + }, { + input: "select /* reserved keyword as unqualified column */ * from t where key = 'test'", + output: "syntax error at position 71 near 'key'", + }, { + input: "select /* vitess-reserved keyword as unqualified column */ * from t where escape = 'test'", + output: "syntax error at position 81 near 'escape'", + }, { + input: "(select /* parenthesized select */ * from t)", + output: "syntax error at position 46", + }, { + input: "select * from t where id = ((select a from t1 union select b from t2) order by a limit 1)", + output: "syntax error at position 76 near 'order'", + }} + for _, tcase := range invalidSQL { + if tcase.output == "" { + tcase.output = tcase.input + } + _, err := Parse(tcase.input) + if err == nil || err.Error() != tcase.output { + t.Errorf("%s: %v, want %s", tcase.input, err, tcase.output) + } + } +} + +// Benchmark run 
on 6/23/17, prior to improvements: +// BenchmarkParse1-4 100000 16334 ns/op +// BenchmarkParse2-4 30000 44121 ns/op + +func BenchmarkParse1(b *testing.B) { + sql := "select 'abcd', 20, 30.0, eid from a where 1=eid and name='3'" + for i := 0; i < b.N; i++ { + ast, err := Parse(sql) + if err != nil { + b.Fatal(err) + } + _ = String(ast) + } +} + +func BenchmarkParse2(b *testing.B) { + sql := "select aaaa, bbb, ccc, ddd, eeee, ffff, gggg, hhhh, iiii from tttt, ttt1, ttt3 where aaaa = bbbb and bbbb = cccc and dddd+1 = eeee group by fff, gggg having hhhh = iiii and iiii = jjjj order by kkkk, llll limit 3, 4" + for i := 0; i < b.N; i++ { + ast, err := Parse(sql) + if err != nil { + b.Fatal(err) + } + _ = String(ast) + } +} diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/precedence_test.go b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/precedence_test.go new file mode 100644 index 00000000..f6a1c93f --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/precedence_test.go @@ -0,0 +1,112 @@ +/* +Copyright 2017 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package sqlparser + +import ( + "fmt" + "testing" +) + +func readable(node Expr) string { + switch node := node.(type) { + case *OrExpr: + return fmt.Sprintf("(%s or %s)", readable(node.Left), readable(node.Right)) + case *AndExpr: + return fmt.Sprintf("(%s and %s)", readable(node.Left), readable(node.Right)) + case *BinaryExpr: + return fmt.Sprintf("(%s %s %s)", readable(node.Left), node.Operator, readable(node.Right)) + case *IsExpr: + return fmt.Sprintf("(%s %s)", readable(node.Expr), node.Operator) + default: + return String(node) + } +} + +func TestAndOrPrecedence(t *testing.T) { + validSQL := []struct { + input string + output string + }{{ + input: "select * from a where a=b and c=d or e=f", + output: "((a = b and c = d) or e = f)", + }, { + input: "select * from a where a=b or c=d and e=f", + output: "(a = b or (c = d and e = f))", + }} + for _, tcase := range validSQL { + tree, err := Parse(tcase.input) + if err != nil { + t.Error(err) + continue + } + expr := readable(tree.(*Select).Where.Expr) + if expr != tcase.output { + t.Errorf("Parse: \n%s, want: \n%s", expr, tcase.output) + } + } +} + +func TestPlusStarPrecedence(t *testing.T) { + validSQL := []struct { + input string + output string + }{{ + input: "select 1+2*3 from a", + output: "(1 + (2 * 3))", + }, { + input: "select 1*2+3 from a", + output: "((1 * 2) + 3)", + }} + for _, tcase := range validSQL { + tree, err := Parse(tcase.input) + if err != nil { + t.Error(err) + continue + } + expr := readable(tree.(*Select).SelectExprs[0].(*AliasedExpr).Expr) + if expr != tcase.output { + t.Errorf("Parse: \n%s, want: \n%s", expr, tcase.output) + } + } +} + +func TestIsPrecedence(t *testing.T) { + validSQL := []struct { + input string + output string + }{{ + input: "select * from a where a+b is true", + output: "((a + b) is true)", + }, { + input: "select * from a where a=1 and b=2 is true", + output: "(a = 1 and (b = 2 is true))", + }, { + input: "select * from a where (a=1 and b=2) is true", + 
output: "((a = 1 and b = 2) is true)", + }} + for _, tcase := range validSQL { + tree, err := Parse(tcase.input) + if err != nil { + t.Error(err) + continue + } + expr := readable(tree.(*Select).Where.Expr) + if expr != tcase.output { + t.Errorf("Parse: \n%s, want: \n%s", expr, tcase.output) + } + } +} diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/select_test.go b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/select_test.go new file mode 100644 index 00000000..f9614098 --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/select_test.go @@ -0,0 +1,53 @@ +/* +Copyright 2017 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package sqlparser + +import "strings" +import "testing" + +func TestSelect1(t *testing.T) { + validSQL := []struct { + input string + output string + }{ + { + input: "select * from xx", + output: "select * from xx", + }, + { + input: "select /*backup*/ * from xx", + output: "select /*backup*/ * from xx", + }, + { + input: "select /*backup*/ * from xx where id=1", + output: "select /*backup*/ * from xx where id = 1", + }, + } + + for _, sel := range validSQL { + sql := strings.TrimSpace(sel.input) + tree, err := Parse(sql) + if err != nil { + t.Errorf("input: %s, err: %v", sql, err) + continue + } + got := String(tree.(*Select)) + if sel.output != got { + t.Errorf("want:\n%s\ngot:\n%s", sel.output, got) + } + } +} diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/set_test.go b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/set_test.go new file mode 100644 index 00000000..8fafbac5 --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/set_test.go @@ -0,0 +1,60 @@ +/* +Copyright 2017 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package sqlparser + +import "strings" +import "testing" + +func TestSet(t *testing.T) { + validSQL := []struct { + input string + output string + }{ + { + input: "SET autocommit=0", + output: "set", + }, + + { + input: "SET SESSION wait_timeout = 2147483", + output: "set", + }, + { + input: "SET NAMES utf8", + output: "set", + }, + } + + for _, exp := range validSQL { + sql := strings.TrimSpace(exp.input) + tree, err := Parse(sql) + if err != nil { + t.Errorf("input: %s, err: %v", sql, err) + continue + } + + // Walk. + Walk(func(node SQLNode) (bool, error) { + return true, nil + }, tree) + + got := String(tree.(*Set)) + if exp.output != got { + t.Errorf("want:\n%s\ngot:\n%s", exp.output, got) + } + } +} diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/show_test.go b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/show_test.go new file mode 100644 index 00000000..467df1f4 --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/show_test.go @@ -0,0 +1,127 @@ +/* +Copyright 2017 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package sqlparser + +import "strings" +import "testing" + +func TestShow1(t *testing.T) { + validSQL := []struct { + input string + output string + }{ + { + input: "show create table t1", + output: "show create table t1", + }, + + { + input: "show tables", + output: "show tables", + }, + + { + input: "show tables from t1", + output: "show tables", + }, + + { + input: "show databases", + output: "show databases", + }, + + { + input: "show create database sbtest", + output: "show create database sbtest", + }, + + { + input: "show engines", + output: "show engines", + }, + + { + input: "show status", + output: "show status", + }, + + { + input: "show versions", + output: "show versions", + }, + + { + input: "show processlist", + output: "show processlist", + }, + + { + input: "show queryz", + output: "show queryz", + }, + + { + input: "show txnz", + output: "show txnz", + }, + + { + input: "show warnings", + output: "show warnings", + }, + + { + input: "show variables", + output: "show variables", + }, + { + input: "show binlog events", + output: "show binlog events", + }, + { + input: "show binlog events limit 10", + output: "show binlog events limit 10", + }, + { + input: "show binlog events from gtid '20171225083823'", + output: "show binlog events from gtid '20171225083823'", + }, + { + input: "show binlog events from gtid '20171225083823' limit 1", + output: "show binlog events from gtid '20171225083823' limit 1", + }, + } + + for _, show := range validSQL { + sql := strings.TrimSpace(show.input) + tree, err := Parse(sql) + if err != nil { + t.Errorf("input: %s, err: %v", sql, err) + continue + } + + // Walk. 
+ Walk(func(node SQLNode) (bool, error) { + return true, nil + }, tree) + + got := String(tree.(*Show)) + if show.output != got { + t.Errorf("want:\n%s\ngot:\n%s", show.output, got) + } + } +} diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/sql.go b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/sql.go new file mode 100644 index 00000000..4ee2f4d3 --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/sql.go @@ -0,0 +1,4632 @@ +//line sql.y:18 +package sqlparser + +import __yyfmt__ "fmt" + +//line sql.y:18 +func setParseTree(yylex interface{}, stmt Statement) { + yylex.(*Tokenizer).ParseTree = stmt +} + +func setAllowComments(yylex interface{}, allow bool) { + yylex.(*Tokenizer).AllowComments = allow +} + +func setDDL(yylex interface{}, ddl *DDL) { + yylex.(*Tokenizer).partialDDL = ddl +} + +func incNesting(yylex interface{}) bool { + yylex.(*Tokenizer).nesting++ + if yylex.(*Tokenizer).nesting == 200 { + return true + } + return false +} + +func decNesting(yylex interface{}) { + yylex.(*Tokenizer).nesting-- +} + +func forceEOF(yylex interface{}) { + yylex.(*Tokenizer).ForceEOF = true +} + +//line sql.y:50 +type yySymType struct { + yys int + empty struct{} + statement Statement + selStmt SelectStatement + ddl *DDL + ins *Insert + byt byte + bytes []byte + bytes2 [][]byte + str string + strs []string + selectExprs SelectExprs + selectExpr SelectExpr + columns Columns + colName *ColName + tableExprs TableExprs + tableExpr TableExpr + tableName TableName + indexHints *IndexHints + expr Expr + exprs Exprs + boolVal BoolVal + colTuple ColTuple + values Values + valTuple ValTuple + subquery *Subquery + whens []*When + when *When + orderBy OrderBy + order *Order + limit *Limit + updateExprs UpdateExprs + updateExpr *UpdateExpr + colIdent ColIdent + colIdents []ColIdent + tableIdent TableIdent + convertType *ConvertType + aliasedTableName *AliasedTableExpr + TableSpec *TableSpec + TableOptions TableOptions + columnType ColumnType 
+ colKeyOpt ColumnKeyOption + optVal *SQLVal + LengthScaleOption LengthScaleOption + columnDefinition *ColumnDefinition + indexDefinition *IndexDefinition + indexInfo *IndexInfo + indexColumn *IndexColumn + indexColumns []*IndexColumn +} + +const LEX_ERROR = 57346 +const UNION = 57347 +const SELECT = 57348 +const INSERT = 57349 +const UPDATE = 57350 +const DELETE = 57351 +const FROM = 57352 +const WHERE = 57353 +const GROUP = 57354 +const HAVING = 57355 +const ORDER = 57356 +const BY = 57357 +const LIMIT = 57358 +const OFFSET = 57359 +const FOR = 57360 +const ALL = 57361 +const DISTINCT = 57362 +const AS = 57363 +const EXISTS = 57364 +const ASC = 57365 +const DESC = 57366 +const INTO = 57367 +const DUPLICATE = 57368 +const KEY = 57369 +const DEFAULT = 57370 +const SET = 57371 +const LOCK = 57372 +const VALUES = 57373 +const LAST_INSERT_ID = 57374 +const NEXT = 57375 +const VALUE = 57376 +const SHARE = 57377 +const MODE = 57378 +const SQL_NO_CACHE = 57379 +const SQL_CACHE = 57380 +const JOIN = 57381 +const STRAIGHT_JOIN = 57382 +const LEFT = 57383 +const RIGHT = 57384 +const INNER = 57385 +const OUTER = 57386 +const CROSS = 57387 +const NATURAL = 57388 +const USE = 57389 +const FORCE = 57390 +const ON = 57391 +const ID = 57392 +const HEX = 57393 +const STRING = 57394 +const INTEGRAL = 57395 +const FLOAT = 57396 +const HEXNUM = 57397 +const VALUE_ARG = 57398 +const LIST_ARG = 57399 +const COMMENT = 57400 +const COMMENT_KEYWORD = 57401 +const NULL = 57402 +const TRUE = 57403 +const FALSE = 57404 +const OR = 57405 +const AND = 57406 +const NOT = 57407 +const BETWEEN = 57408 +const CASE = 57409 +const WHEN = 57410 +const THEN = 57411 +const ELSE = 57412 +const END = 57413 +const LE = 57414 +const GE = 57415 +const NE = 57416 +const NULL_SAFE_EQUAL = 57417 +const IS = 57418 +const LIKE = 57419 +const REGEXP = 57420 +const IN = 57421 +const SHIFT_LEFT = 57422 +const SHIFT_RIGHT = 57423 +const DIV = 57424 +const MOD = 57425 +const UNARY = 57426 +const COLLATE = 57427 
+const BINARY = 57428 +const INTERVAL = 57429 +const JSON_EXTRACT_OP = 57430 +const JSON_UNQUOTE_EXTRACT_OP = 57431 +const CREATE = 57432 +const ALTER = 57433 +const DROP = 57434 +const RENAME = 57435 +const ANALYZE = 57436 +const ADD = 57437 +const MODIFY = 57438 +const TABLE = 57439 +const INDEX = 57440 +const VIEW = 57441 +const TO = 57442 +const IGNORE = 57443 +const IF = 57444 +const UNIQUE = 57445 +const USING = 57446 +const PRIMARY = 57447 +const COLUMN = 57448 +const SHOW = 57449 +const DESCRIBE = 57450 +const EXPLAIN = 57451 +const DATE = 57452 +const ESCAPE = 57453 +const REPAIR = 57454 +const OPTIMIZE = 57455 +const TRUNCATE = 57456 +const BIT = 57457 +const TINYINT = 57458 +const SMALLINT = 57459 +const MEDIUMINT = 57460 +const INT = 57461 +const INTEGER = 57462 +const BIGINT = 57463 +const INTNUM = 57464 +const REAL = 57465 +const DOUBLE = 57466 +const FLOAT_TYPE = 57467 +const DECIMAL = 57468 +const NUMERIC = 57469 +const TIME = 57470 +const TIMESTAMP = 57471 +const DATETIME = 57472 +const YEAR = 57473 +const CHAR = 57474 +const VARCHAR = 57475 +const BOOL = 57476 +const CHARACTER = 57477 +const VARBINARY = 57478 +const NCHAR = 57479 +const CHARSET = 57480 +const TEXT = 57481 +const TINYTEXT = 57482 +const MEDIUMTEXT = 57483 +const LONGTEXT = 57484 +const BLOB = 57485 +const TINYBLOB = 57486 +const MEDIUMBLOB = 57487 +const LONGBLOB = 57488 +const JSON = 57489 +const ENUM = 57490 +const NULLX = 57491 +const AUTO_INCREMENT = 57492 +const APPROXNUM = 57493 +const SIGNED = 57494 +const UNSIGNED = 57495 +const ZEROFILL = 57496 +const DATABASES = 57497 +const TABLES = 57498 +const VITESS_KEYSPACES = 57499 +const VITESS_SHARDS = 57500 +const VSCHEMA_TABLES = 57501 +const WARNINGS = 57502 +const VARIABLES = 57503 +const EVENTS = 57504 +const BINLOG = 57505 +const GTID = 57506 +const CURRENT_TIMESTAMP = 57507 +const DATABASE = 57508 +const CURRENT_DATE = 57509 +const CURRENT_TIME = 57510 +const LOCALTIME = 57511 +const LOCALTIMESTAMP = 57512 +const UTC_DATE = 
57513 +const UTC_TIME = 57514 +const UTC_TIMESTAMP = 57515 +const REPLACE = 57516 +const CONVERT = 57517 +const CAST = 57518 +const GROUP_CONCAT = 57519 +const SEPARATOR = 57520 +const MATCH = 57521 +const AGAINST = 57522 +const BOOLEAN = 57523 +const LANGUAGE = 57524 +const WITH = 57525 +const QUERY = 57526 +const EXPANSION = 57527 +const UNUSED = 57528 +const PARTITION = 57529 +const PARTITIONS = 57530 +const HASH = 57531 +const XA = 57532 +const ENGINES = 57533 +const STATUS = 57534 +const VERSIONS = 57535 +const PROCESSLIST = 57536 +const QUERYZ = 57537 +const TXNZ = 57538 +const KILL = 57539 +const START = 57540 +const TRANSACTION = 57541 +const COMMIT = 57542 +const SESSION = 57543 +const ENGINE = 57544 + +var yyToknames = [...]string{ + "$end", + "error", + "$unk", + "LEX_ERROR", + "UNION", + "SELECT", + "INSERT", + "UPDATE", + "DELETE", + "FROM", + "WHERE", + "GROUP", + "HAVING", + "ORDER", + "BY", + "LIMIT", + "OFFSET", + "FOR", + "ALL", + "DISTINCT", + "AS", + "EXISTS", + "ASC", + "DESC", + "INTO", + "DUPLICATE", + "KEY", + "DEFAULT", + "SET", + "LOCK", + "VALUES", + "LAST_INSERT_ID", + "NEXT", + "VALUE", + "SHARE", + "MODE", + "SQL_NO_CACHE", + "SQL_CACHE", + "JOIN", + "STRAIGHT_JOIN", + "LEFT", + "RIGHT", + "INNER", + "OUTER", + "CROSS", + "NATURAL", + "USE", + "FORCE", + "ON", + "'('", + "','", + "')'", + "ID", + "HEX", + "STRING", + "INTEGRAL", + "FLOAT", + "HEXNUM", + "VALUE_ARG", + "LIST_ARG", + "COMMENT", + "COMMENT_KEYWORD", + "NULL", + "TRUE", + "FALSE", + "OR", + "AND", + "NOT", + "'!'", + "BETWEEN", + "CASE", + "WHEN", + "THEN", + "ELSE", + "END", + "'='", + "'<'", + "'>'", + "LE", + "GE", + "NE", + "NULL_SAFE_EQUAL", + "IS", + "LIKE", + "REGEXP", + "IN", + "'|'", + "'&'", + "SHIFT_LEFT", + "SHIFT_RIGHT", + "'+'", + "'-'", + "'*'", + "'/'", + "DIV", + "'%'", + "MOD", + "'^'", + "'~'", + "UNARY", + "COLLATE", + "BINARY", + "INTERVAL", + "'.'", + "JSON_EXTRACT_OP", + "JSON_UNQUOTE_EXTRACT_OP", + "CREATE", + "ALTER", + "DROP", + "RENAME", + 
"ANALYZE", + "ADD", + "MODIFY", + "TABLE", + "INDEX", + "VIEW", + "TO", + "IGNORE", + "IF", + "UNIQUE", + "USING", + "PRIMARY", + "COLUMN", + "SHOW", + "DESCRIBE", + "EXPLAIN", + "DATE", + "ESCAPE", + "REPAIR", + "OPTIMIZE", + "TRUNCATE", + "BIT", + "TINYINT", + "SMALLINT", + "MEDIUMINT", + "INT", + "INTEGER", + "BIGINT", + "INTNUM", + "REAL", + "DOUBLE", + "FLOAT_TYPE", + "DECIMAL", + "NUMERIC", + "TIME", + "TIMESTAMP", + "DATETIME", + "YEAR", + "CHAR", + "VARCHAR", + "BOOL", + "CHARACTER", + "VARBINARY", + "NCHAR", + "CHARSET", + "TEXT", + "TINYTEXT", + "MEDIUMTEXT", + "LONGTEXT", + "BLOB", + "TINYBLOB", + "MEDIUMBLOB", + "LONGBLOB", + "JSON", + "ENUM", + "NULLX", + "AUTO_INCREMENT", + "APPROXNUM", + "SIGNED", + "UNSIGNED", + "ZEROFILL", + "DATABASES", + "TABLES", + "VITESS_KEYSPACES", + "VITESS_SHARDS", + "VSCHEMA_TABLES", + "WARNINGS", + "VARIABLES", + "EVENTS", + "BINLOG", + "GTID", + "CURRENT_TIMESTAMP", + "DATABASE", + "CURRENT_DATE", + "CURRENT_TIME", + "LOCALTIME", + "LOCALTIMESTAMP", + "UTC_DATE", + "UTC_TIME", + "UTC_TIMESTAMP", + "REPLACE", + "CONVERT", + "CAST", + "GROUP_CONCAT", + "SEPARATOR", + "MATCH", + "AGAINST", + "BOOLEAN", + "LANGUAGE", + "WITH", + "QUERY", + "EXPANSION", + "UNUSED", + "PARTITION", + "PARTITIONS", + "HASH", + "XA", + "ENGINES", + "STATUS", + "VERSIONS", + "PROCESSLIST", + "QUERYZ", + "TXNZ", + "KILL", + "START", + "TRANSACTION", + "COMMIT", + "SESSION", + "ENGINE", + "';'", +} +var yyStatenames = [...]string{} + +const yyEofCode = 1 +const yyErrCode = 2 +const yyInitialStackSize = 16 + +//line yacctab:1 +var yyExca = [...]int{ + -1, 1, + 1, -1, + -2, 0, + -1, 3, + 5, 25, + -2, 4, + -1, 343, + 104, 446, + -2, 442, + -1, 344, + 104, 447, + -2, 443, + -1, 507, + 5, 25, + -2, 399, + -1, 635, + 104, 449, + -2, 445, + -1, 748, + 5, 26, + -2, 278, + -1, 772, + 5, 26, + -2, 400, + -1, 855, + 5, 25, + -2, 402, + -1, 957, + 5, 26, + -2, 403, +} + +const yyPrivate = 57344 + +const yyLast = 5846 + +var yyAct = [...]int{ + + 344, 630, 985, 
464, 846, 910, 297, 510, 321, 666, + 787, 558, 632, 845, 896, 243, 706, 667, 299, 825, + 520, 907, 740, 628, 545, 618, 732, 511, 60, 50, + 262, 663, 647, 531, 80, 232, 696, 633, 286, 346, + 595, 352, 295, 554, 252, 709, 284, 625, 49, 525, + 322, 44, 54, 236, 522, 260, 997, 984, 232, 319, + 996, 976, 994, 983, 975, 838, 890, 277, 59, 463, + 3, 232, 232, 67, 68, 583, 56, 57, 58, 79, + 281, 962, 430, 429, 439, 440, 432, 433, 434, 435, + 436, 437, 438, 431, 692, 44, 441, 538, 791, 22, + 45, 24, 25, 248, 233, 234, 64, 63, 237, 238, + 239, 240, 241, 681, 242, 476, 861, 40, 810, 546, + 885, 930, 26, 883, 826, 721, 720, 719, 952, 954, + 270, 265, 66, 575, 718, 972, 278, 533, 751, 971, + 34, 275, 69, 47, 279, 280, 714, 574, 533, 828, + 970, 268, 716, 418, 417, 868, 71, 70, 453, 454, + 917, 875, 775, 686, 746, 830, 539, 834, 744, 829, + 419, 827, 577, 676, 263, 62, 832, 462, 359, 697, + 431, 573, 602, 441, 800, 441, 831, 851, 416, 753, + 417, 833, 835, 418, 417, 963, 600, 601, 599, 953, + 28, 29, 30, 419, 32, 919, 419, 840, 752, 784, + 419, 717, 491, 492, 682, 675, 974, 33, 41, 36, + 546, 648, 42, 43, 31, 532, 269, 570, 568, 564, + 402, 567, 569, 801, 418, 417, 532, 418, 417, 690, + 715, 530, 713, 529, 842, 257, 289, 347, 869, 264, + 867, 419, 65, 922, 419, 418, 417, 232, 273, 274, + 232, 535, 648, 354, 758, 232, 536, 421, 232, 232, + 232, 572, 419, 232, 872, 871, 348, 232, 232, 232, + 588, 590, 591, 862, 46, 589, 571, 434, 435, 436, + 437, 438, 431, 422, 705, 441, 272, 704, 693, 44, + 35, 420, 350, 235, 282, 283, 960, 37, 38, 256, + 39, 566, 933, 451, 267, 47, 418, 417, 349, 725, + 726, 727, 576, 870, 465, 598, 627, 792, 793, 794, + 619, 474, 620, 419, 723, 795, 703, 959, 565, 991, + 285, 894, 285, 285, 450, 452, 311, 310, 312, 313, + 314, 315, 501, 927, 263, 316, 812, 232, 864, 863, + 232, 738, 285, 926, 493, 512, 809, 799, 320, 789, + 461, 806, 805, 466, 467, 468, 469, 470, 471, 472, + 785, 475, 477, 477, 477, 477, 477, 477, 477, 477, + 485, 486, 487, 488, 526, 495, 494, 
515, 803, 802, + 547, 548, 549, 230, 232, 517, 508, 781, 687, 232, + 621, 497, 560, 774, 285, 637, 285, 20, 513, 585, + 586, 271, 592, 593, 266, 507, 258, 363, 362, 582, + 925, 796, 51, 596, 22, 664, 767, 674, 637, 258, + 258, 556, 557, 597, 357, 674, 770, 360, 478, 479, + 480, 481, 482, 483, 484, 404, 405, 406, 894, 854, + 22, 501, 521, 804, 410, 411, 412, 738, 465, 738, + 247, 642, 643, 489, 249, 22, 501, 738, 47, 579, + 580, 581, 432, 433, 434, 435, 436, 437, 438, 431, + 47, 347, 441, 540, 559, 61, 622, 623, 505, 683, + 506, 966, 674, 44, 47, 555, 550, 664, 635, 501, + 636, 638, 512, 665, 645, 668, 408, 466, 47, 47, + 624, 403, 634, 678, 650, 945, 989, 503, 943, 655, + 946, 673, 656, 944, 652, 649, 639, 541, 542, 543, + 544, 969, 968, 947, 509, 902, 903, 942, 982, 941, + 677, 724, 551, 552, 553, 253, 254, 669, 584, 44, + 661, 353, 660, 501, 685, 513, 694, 695, 672, 287, + 768, 873, 698, 679, 351, 708, 358, 783, 670, 561, + 689, 288, 924, 923, 699, 700, 701, 852, 684, 407, + 906, 562, 250, 251, 353, 244, 578, 429, 439, 440, + 432, 433, 434, 435, 436, 437, 438, 431, 596, 659, + 441, 936, 361, 707, 935, 245, 51, 658, 597, 893, + 521, 414, 634, 276, 55, 258, 259, 914, 258, 415, + 53, 501, 710, 401, 48, 1, 258, 258, 258, 747, + 728, 409, 786, 528, 523, 258, 258, 258, 261, 527, + 759, 722, 702, 898, 901, 902, 903, 899, 232, 900, + 904, 866, 790, 967, 534, 691, 537, 680, 735, 524, + 782, 465, 736, 921, 688, 366, 367, 778, 365, 757, + 369, 512, 368, 745, 501, 364, 748, 749, 750, 779, + 742, 754, 72, 780, 905, 909, 760, 769, 761, 762, + 763, 764, 777, 776, 898, 901, 902, 903, 899, 739, + 900, 904, 232, 712, 292, 711, 771, 772, 773, 563, + 449, 657, 635, 671, 490, 258, 345, 514, 516, 934, + 892, 797, 798, 756, 513, 473, 634, 646, 298, 587, + 309, 306, 501, 788, 308, 307, 496, 501, 811, 504, + 423, 296, 813, 290, 951, 848, 814, 355, 897, 824, + 841, 895, 819, 847, 820, 766, 413, 889, 232, 961, + 502, 23, 258, 837, 52, 501, 501, 258, 836, 668, + 255, 808, 822, 19, 818, 839, 
849, 843, 853, 844, + 635, 14, 859, 823, 640, 641, 13, 12, 644, 27, + 10, 742, 9, 8, 634, 7, 634, 6, 5, 4, + 246, 21, 651, 2, 653, 654, 18, 17, 850, 16, + 15, 669, 11, 0, 856, 0, 0, 0, 860, 662, + 631, 516, 0, 0, 857, 858, 631, 631, 0, 0, + 631, 0, 881, 855, 891, 765, 0, 0, 0, 0, + 0, 232, 232, 0, 631, 631, 631, 631, 0, 0, + 0, 501, 0, 668, 915, 501, 0, 0, 0, 849, + 876, 631, 877, 708, 514, 920, 501, 918, 0, 0, + 0, 0, 0, 886, 887, 0, 0, 0, 824, 888, + 0, 0, 0, 0, 0, 232, 232, 232, 232, 807, + 938, 908, 940, 0, 929, 669, 232, 44, 937, 232, + 939, 707, 232, 849, 849, 849, 849, 955, 501, 948, + 634, 512, 956, 0, 788, 0, 916, 849, 0, 0, + 0, 0, 0, 0, 0, 634, 932, 964, 465, 0, + 965, 0, 0, 0, 0, 850, 850, 850, 850, 0, + 0, 0, 0, 737, 950, 865, 0, 0, 0, 908, + 0, 0, 0, 957, 0, 0, 0, 0, 977, 978, + 0, 0, 755, 0, 513, 0, 0, 958, 639, 0, + 501, 501, 501, 987, 988, 0, 0, 0, 0, 0, + 0, 0, 501, 878, 879, 631, 880, 0, 0, 882, + 0, 884, 0, 0, 0, 0, 0, 0, 973, 0, + 0, 0, 0, 0, 631, 455, 456, 457, 458, 459, + 460, 979, 980, 981, 0, 0, 258, 0, 0, 0, + 0, 0, 990, 0, 992, 993, 0, 0, 0, 986, + 986, 986, 0, 514, 0, 516, 0, 0, 0, 0, + 0, 995, 0, 0, 0, 0, 0, 0, 0, 203, + 0, 0, 0, 741, 0, 0, 0, 0, 185, 0, + 0, 0, 0, 0, 194, 0, 0, 209, 200, 0, + 258, 0, 430, 429, 439, 440, 432, 433, 434, 435, + 436, 437, 438, 431, 0, 500, 441, 743, 0, 0, + 0, 0, 0, 0, 180, 0, 0, 631, 418, 417, + 0, 0, 0, 516, 631, 0, 0, 0, 0, 0, + 0, 0, 0, 733, 0, 419, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 258, 0, 594, 0, + 0, 603, 604, 605, 606, 607, 608, 609, 610, 611, + 612, 613, 614, 615, 616, 617, 0, 0, 223, 0, + 0, 0, 0, 0, 205, 0, 0, 0, 0, 181, + 0, 208, 204, 218, 176, 216, 211, 198, 190, 191, + 175, 0, 207, 184, 189, 183, 202, 213, 214, 182, + 228, 179, 222, 178, 0, 221, 201, 0, 212, 217, + 199, 196, 177, 215, 197, 195, 192, 186, 0, 0, + 0, 210, 219, 229, 0, 0, 224, 225, 226, 258, + 912, 0, 0, 0, 0, 0, 815, 0, 439, 440, + 432, 433, 434, 435, 436, 437, 438, 431, 0, 174, + 441, 193, 227, 206, 188, 220, 430, 429, 439, 440, 
+ 432, 433, 434, 435, 436, 437, 438, 431, 0, 0, + 441, 187, 0, 258, 258, 258, 258, 0, 0, 0, + 0, 0, 0, 0, 949, 0, 425, 258, 428, 0, + 912, 0, 0, 514, 442, 443, 444, 445, 446, 447, + 448, 0, 426, 427, 424, 430, 429, 439, 440, 432, + 433, 434, 435, 436, 437, 438, 431, 0, 0, 441, + 0, 0, 0, 0, 0, 729, 730, 731, 430, 429, + 439, 440, 432, 433, 434, 435, 436, 437, 438, 431, + 0, 0, 441, 0, 0, 0, 0, 0, 0, 0, + 0, 162, 151, 123, 164, 100, 115, 173, 116, 117, + 143, 87, 131, 203, 113, 0, 103, 82, 110, 83, + 101, 125, 185, 128, 99, 153, 134, 170, 194, 138, + 0, 209, 200, 0, 0, 127, 156, 129, 150, 122, + 144, 93, 137, 165, 114, 141, 0, 0, 0, 78, + 0, 0, 0, 0, 0, 0, 0, 0, 180, 140, + 160, 112, 142, 81, 139, 0, 85, 88, 172, 158, + 106, 107, 0, 0, 0, 0, 0, 0, 0, 126, + 130, 147, 120, 0, 0, 0, 0, 0, 0, 0, + 0, 104, 0, 136, 0, 0, 0, 91, 86, 124, + 0, 0, 0, 74, 0, 105, 148, 0, 816, 817, + 157, 121, 223, 159, 119, 118, 163, 166, 205, 0, + 154, 102, 111, 181, 109, 208, 204, 218, 176, 216, + 211, 198, 190, 191, 175, 0, 207, 184, 189, 183, + 202, 213, 214, 182, 228, 179, 222, 178, 89, 221, + 201, 90, 212, 217, 199, 196, 177, 215, 197, 195, + 192, 186, 0, 84, 0, 210, 219, 229, 98, 73, + 224, 225, 226, 75, 76, 0, 77, 0, 96, 97, + 94, 95, 132, 133, 167, 168, 169, 149, 92, 0, + 874, 152, 135, 174, 0, 193, 227, 206, 188, 220, + 0, 0, 0, 0, 108, 155, 171, 146, 145, 161, + 0, 0, 0, 0, 0, 187, 162, 151, 123, 164, + 100, 115, 173, 116, 117, 143, 87, 131, 203, 113, + 0, 103, 82, 110, 83, 101, 125, 185, 128, 99, + 153, 134, 170, 194, 138, 0, 209, 200, 0, 0, + 127, 156, 129, 150, 122, 144, 93, 137, 165, 114, + 141, 0, 0, 931, 500, 0, 0, 0, 0, 0, + 0, 0, 0, 180, 140, 160, 112, 142, 81, 139, + 0, 85, 88, 172, 158, 106, 107, 0, 0, 0, + 0, 0, 0, 0, 126, 130, 147, 120, 0, 0, + 0, 0, 0, 0, 928, 0, 104, 0, 136, 0, + 0, 0, 91, 86, 124, 0, 0, 0, 518, 0, + 105, 148, 0, 0, 0, 157, 121, 223, 159, 119, + 118, 163, 166, 205, 0, 154, 102, 111, 181, 109, + 208, 204, 218, 176, 216, 211, 198, 190, 191, 175, + 0, 
207, 184, 189, 183, 202, 213, 214, 182, 228, + 179, 222, 178, 89, 221, 201, 90, 212, 217, 199, + 196, 177, 215, 197, 195, 192, 186, 0, 84, 0, + 210, 219, 229, 98, 519, 224, 225, 226, 0, 0, + 0, 0, 0, 96, 97, 94, 95, 132, 133, 167, + 168, 169, 149, 92, 0, 0, 152, 135, 174, 0, + 193, 227, 206, 188, 220, 0, 0, 0, 0, 108, + 155, 171, 146, 145, 161, 0, 0, 0, 0, 0, + 187, 162, 151, 123, 164, 100, 115, 173, 116, 117, + 143, 87, 131, 203, 113, 0, 103, 82, 110, 83, + 101, 125, 185, 128, 99, 153, 134, 170, 194, 138, + 0, 209, 200, 0, 0, 127, 156, 129, 150, 122, + 144, 93, 137, 165, 114, 141, 47, 0, 0, 500, + 0, 0, 0, 0, 0, 0, 0, 0, 180, 140, + 160, 112, 142, 81, 139, 0, 85, 88, 172, 158, + 106, 107, 0, 0, 0, 0, 0, 0, 0, 126, + 130, 147, 120, 0, 0, 0, 0, 0, 0, 0, + 0, 104, 0, 136, 0, 0, 0, 91, 86, 124, + 0, 0, 0, 518, 0, 105, 148, 0, 0, 0, + 157, 121, 223, 159, 119, 118, 163, 166, 205, 0, + 154, 102, 111, 181, 109, 208, 204, 218, 176, 216, + 211, 198, 190, 191, 175, 0, 207, 184, 189, 183, + 202, 213, 214, 182, 228, 179, 222, 178, 89, 221, + 201, 90, 212, 217, 199, 196, 177, 215, 197, 195, + 192, 186, 0, 84, 0, 210, 219, 229, 98, 519, + 224, 225, 226, 0, 0, 0, 0, 0, 96, 97, + 94, 95, 132, 133, 167, 168, 169, 149, 92, 0, + 0, 152, 135, 174, 0, 193, 227, 206, 188, 220, + 0, 0, 0, 0, 108, 155, 171, 146, 145, 161, + 0, 0, 0, 0, 0, 187, 162, 151, 123, 164, + 100, 115, 173, 116, 117, 143, 87, 131, 203, 113, + 0, 103, 82, 110, 83, 101, 125, 185, 128, 99, + 153, 134, 170, 194, 138, 0, 209, 200, 0, 0, + 127, 156, 129, 150, 122, 144, 93, 137, 165, 114, + 141, 0, 0, 0, 343, 0, 0, 0, 0, 0, + 0, 0, 0, 180, 140, 160, 112, 142, 81, 139, + 0, 85, 88, 172, 158, 106, 107, 0, 0, 0, + 0, 0, 0, 0, 126, 130, 147, 120, 0, 0, + 0, 0, 0, 0, 821, 0, 104, 0, 136, 0, + 0, 0, 91, 86, 124, 0, 0, 0, 518, 0, + 105, 148, 0, 0, 0, 157, 121, 223, 159, 119, + 118, 163, 166, 205, 0, 154, 102, 111, 181, 109, + 208, 204, 218, 176, 216, 211, 198, 190, 191, 175, + 0, 207, 184, 189, 183, 202, 213, 214, 182, 228, + 
179, 222, 178, 89, 221, 201, 90, 212, 217, 199, + 196, 177, 215, 197, 195, 192, 186, 0, 84, 0, + 210, 219, 229, 98, 519, 224, 225, 226, 0, 0, + 0, 0, 0, 96, 97, 94, 95, 132, 133, 167, + 168, 169, 149, 92, 0, 0, 152, 135, 174, 0, + 193, 227, 206, 188, 220, 0, 0, 0, 0, 108, + 155, 171, 146, 145, 161, 0, 0, 0, 0, 0, + 187, 162, 151, 123, 164, 100, 115, 173, 116, 117, + 143, 87, 131, 203, 113, 0, 103, 82, 110, 83, + 101, 125, 185, 128, 99, 153, 134, 170, 194, 138, + 0, 209, 200, 0, 0, 127, 156, 129, 150, 122, + 144, 93, 137, 165, 114, 141, 0, 0, 0, 500, + 0, 0, 0, 0, 0, 0, 0, 0, 180, 140, + 160, 112, 142, 81, 139, 0, 85, 88, 172, 158, + 106, 107, 0, 0, 0, 0, 0, 0, 0, 126, + 130, 147, 120, 0, 0, 0, 0, 0, 0, 0, + 0, 104, 0, 136, 0, 0, 0, 91, 86, 124, + 0, 0, 0, 518, 0, 105, 148, 0, 0, 0, + 157, 121, 223, 159, 119, 118, 163, 166, 205, 0, + 154, 102, 111, 181, 109, 208, 204, 218, 176, 216, + 211, 198, 190, 191, 175, 0, 207, 184, 189, 183, + 202, 213, 214, 182, 228, 179, 222, 178, 89, 221, + 201, 90, 212, 217, 199, 196, 177, 215, 197, 195, + 192, 186, 0, 84, 0, 210, 219, 229, 98, 519, + 224, 225, 226, 0, 0, 0, 0, 0, 96, 97, + 94, 95, 132, 133, 167, 168, 169, 149, 92, 0, + 0, 152, 135, 174, 0, 193, 227, 206, 188, 220, + 0, 0, 0, 0, 108, 155, 171, 146, 145, 161, + 0, 0, 0, 0, 0, 187, 162, 151, 123, 164, + 100, 115, 173, 116, 117, 143, 87, 131, 203, 113, + 0, 103, 82, 110, 83, 101, 125, 185, 128, 99, + 153, 134, 170, 194, 138, 0, 209, 200, 0, 0, + 127, 156, 129, 150, 122, 144, 93, 137, 165, 114, + 141, 0, 0, 0, 343, 0, 0, 0, 0, 0, + 0, 0, 0, 180, 140, 160, 112, 142, 81, 139, + 0, 85, 88, 172, 158, 106, 107, 0, 0, 0, + 0, 0, 0, 0, 126, 130, 147, 120, 0, 0, + 0, 0, 0, 0, 0, 0, 104, 0, 136, 0, + 0, 0, 91, 86, 124, 0, 0, 0, 518, 0, + 105, 148, 0, 0, 0, 157, 121, 223, 159, 119, + 118, 163, 166, 205, 0, 154, 102, 111, 181, 109, + 208, 204, 218, 176, 216, 211, 198, 190, 191, 175, + 0, 207, 184, 189, 183, 202, 213, 214, 182, 228, + 179, 222, 178, 89, 221, 201, 90, 212, 217, 199, + 
196, 177, 215, 197, 195, 192, 186, 0, 84, 0, + 210, 219, 229, 98, 519, 224, 225, 226, 0, 0, + 0, 0, 0, 96, 97, 94, 95, 132, 133, 167, + 168, 169, 149, 92, 0, 0, 152, 135, 174, 0, + 193, 227, 206, 188, 220, 0, 0, 0, 0, 108, + 155, 171, 146, 145, 161, 0, 0, 0, 0, 0, + 187, 162, 151, 123, 164, 100, 115, 173, 116, 117, + 143, 87, 131, 203, 113, 0, 103, 82, 110, 83, + 101, 125, 185, 128, 99, 153, 134, 170, 194, 138, + 0, 209, 200, 0, 0, 127, 156, 129, 150, 122, + 144, 93, 137, 165, 114, 141, 0, 0, 0, 231, + 0, 0, 0, 0, 0, 0, 0, 0, 180, 140, + 160, 112, 142, 81, 139, 0, 85, 88, 172, 158, + 106, 107, 0, 0, 0, 0, 0, 0, 0, 126, + 130, 147, 120, 0, 0, 0, 0, 0, 0, 0, + 0, 104, 0, 136, 0, 0, 0, 91, 86, 124, + 0, 0, 0, 518, 0, 105, 148, 0, 0, 0, + 157, 121, 223, 159, 119, 118, 163, 166, 205, 0, + 154, 102, 111, 181, 109, 208, 204, 218, 176, 216, + 211, 198, 190, 191, 175, 0, 207, 184, 189, 183, + 202, 213, 214, 182, 228, 179, 222, 178, 89, 221, + 201, 90, 212, 217, 199, 196, 177, 215, 197, 195, + 192, 186, 0, 84, 0, 210, 219, 229, 98, 519, + 224, 225, 226, 0, 0, 0, 0, 0, 96, 97, + 94, 95, 132, 133, 167, 168, 169, 149, 92, 0, + 0, 152, 135, 174, 0, 193, 227, 206, 188, 220, + 0, 0, 0, 0, 108, 155, 171, 146, 145, 161, + 0, 203, 0, 0, 626, 187, 294, 0, 0, 0, + 185, 0, 293, 0, 0, 330, 194, 0, 0, 209, + 200, 0, 0, 0, 0, 323, 324, 0, 0, 0, + 0, 0, 0, 0, 47, 0, 734, 343, 311, 310, + 312, 313, 314, 315, 0, 0, 180, 316, 317, 318, + 0, 0, 291, 304, 0, 329, 430, 429, 439, 440, + 432, 433, 434, 435, 436, 437, 438, 431, 0, 0, + 441, 0, 0, 0, 0, 301, 302, 629, 0, 0, + 0, 341, 0, 303, 0, 0, 300, 305, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 223, 0, 0, 339, 0, 0, 205, 0, 0, 0, + 0, 181, 0, 208, 204, 218, 176, 216, 211, 198, + 190, 191, 175, 0, 207, 184, 189, 183, 202, 213, + 214, 182, 228, 179, 222, 178, 0, 221, 201, 0, + 212, 217, 199, 196, 177, 215, 197, 195, 192, 186, + 0, 0, 0, 210, 219, 229, 0, 0, 224, 225, + 226, 0, 0, 0, 0, 0, 331, 340, 337, 338, + 335, 336, 334, 333, 332, 342, 325, 326, 
328, 0, + 327, 174, 0, 193, 227, 206, 188, 220, 0, 0, + 203, 0, 0, 0, 0, 294, 0, 0, 0, 185, + 0, 293, 0, 187, 330, 194, 0, 0, 209, 200, + 0, 0, 0, 0, 323, 324, 0, 0, 0, 0, + 0, 0, 0, 47, 0, 0, 343, 311, 310, 312, + 313, 314, 315, 0, 0, 180, 316, 317, 318, 0, + 0, 291, 304, 0, 329, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 301, 302, 629, 0, 0, 0, + 341, 0, 303, 0, 0, 300, 305, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 223, + 0, 0, 339, 0, 0, 205, 0, 0, 0, 0, + 181, 0, 208, 204, 218, 176, 216, 211, 198, 190, + 191, 175, 0, 207, 184, 189, 183, 202, 213, 214, + 182, 228, 179, 222, 178, 0, 221, 201, 0, 212, + 217, 199, 196, 177, 215, 197, 195, 192, 186, 0, + 0, 0, 210, 219, 229, 0, 0, 224, 225, 226, + 0, 0, 0, 0, 0, 331, 340, 337, 338, 335, + 336, 334, 333, 332, 342, 325, 326, 328, 0, 327, + 174, 0, 193, 227, 206, 188, 220, 0, 0, 203, + 0, 0, 0, 0, 294, 0, 0, 0, 185, 0, + 293, 0, 187, 330, 194, 0, 0, 209, 200, 0, + 0, 0, 0, 323, 324, 0, 0, 0, 0, 0, + 0, 0, 47, 0, 285, 343, 311, 310, 312, 313, + 314, 315, 0, 0, 180, 316, 317, 318, 0, 0, + 291, 304, 0, 329, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 301, 302, 0, 0, 0, 0, 341, + 0, 303, 0, 0, 300, 305, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 223, 0, + 0, 339, 0, 0, 205, 0, 0, 0, 0, 181, + 0, 208, 204, 218, 176, 216, 211, 198, 190, 191, + 175, 0, 207, 184, 189, 183, 202, 213, 214, 182, + 228, 179, 222, 178, 0, 221, 201, 0, 212, 217, + 199, 196, 177, 215, 197, 195, 192, 186, 0, 0, + 0, 210, 219, 229, 0, 0, 224, 225, 226, 0, + 0, 0, 0, 0, 331, 340, 337, 338, 335, 336, + 334, 333, 332, 342, 325, 326, 328, 22, 327, 174, + 0, 193, 227, 206, 188, 220, 0, 0, 203, 0, + 0, 0, 0, 294, 0, 0, 0, 185, 0, 293, + 0, 187, 330, 194, 0, 0, 209, 200, 0, 0, + 0, 0, 323, 324, 0, 0, 0, 0, 0, 0, + 0, 47, 0, 0, 343, 311, 310, 312, 313, 314, + 315, 0, 0, 180, 316, 317, 318, 0, 0, 291, + 304, 0, 329, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 301, 302, 0, 0, 0, 0, 341, 0, + 303, 0, 0, 300, 305, 0, 0, 
0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 223, 0, 0, + 339, 0, 0, 205, 0, 0, 0, 0, 181, 0, + 208, 204, 218, 176, 216, 211, 198, 190, 191, 175, + 0, 207, 184, 189, 183, 202, 213, 214, 182, 228, + 179, 222, 178, 0, 221, 201, 0, 212, 217, 199, + 196, 177, 215, 197, 195, 192, 186, 0, 0, 0, + 210, 219, 229, 0, 0, 224, 225, 226, 0, 0, + 0, 0, 0, 331, 340, 337, 338, 335, 336, 334, + 333, 332, 342, 325, 326, 328, 0, 327, 174, 0, + 193, 227, 206, 188, 220, 0, 0, 203, 0, 0, + 0, 0, 294, 0, 0, 0, 185, 0, 293, 0, + 187, 330, 194, 0, 0, 209, 200, 0, 0, 0, + 0, 323, 324, 0, 0, 0, 0, 0, 0, 0, + 47, 0, 0, 343, 311, 310, 312, 313, 314, 315, + 0, 0, 180, 316, 317, 318, 0, 0, 291, 304, + 0, 329, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 301, 302, 0, 0, 0, 0, 341, 0, 303, + 0, 0, 300, 305, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 223, 0, 0, 339, + 0, 0, 205, 0, 0, 0, 0, 181, 0, 208, + 204, 218, 176, 216, 211, 198, 190, 191, 175, 0, + 207, 184, 189, 183, 202, 213, 214, 182, 228, 179, + 222, 178, 0, 221, 201, 0, 212, 217, 199, 196, + 177, 215, 197, 195, 192, 186, 0, 0, 0, 210, + 219, 229, 0, 0, 224, 225, 226, 0, 0, 0, + 0, 0, 331, 340, 337, 338, 335, 336, 334, 333, + 332, 342, 325, 326, 328, 0, 327, 174, 203, 193, + 227, 206, 188, 220, 0, 0, 0, 185, 0, 0, + 0, 0, 330, 194, 0, 0, 209, 200, 0, 187, + 0, 0, 323, 324, 0, 0, 0, 0, 0, 0, + 0, 47, 0, 0, 343, 311, 310, 312, 313, 314, + 315, 0, 0, 180, 316, 317, 318, 0, 0, 0, + 304, 0, 329, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 301, 302, 0, 0, 0, 0, 341, 0, + 303, 0, 0, 300, 305, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 223, 0, 0, + 339, 0, 0, 205, 0, 0, 0, 0, 181, 0, + 208, 204, 218, 176, 216, 211, 198, 190, 191, 175, + 0, 207, 184, 189, 183, 202, 213, 214, 182, 228, + 179, 222, 178, 0, 221, 201, 0, 212, 217, 199, + 196, 177, 215, 197, 195, 192, 186, 0, 0, 0, + 210, 219, 229, 0, 0, 224, 225, 226, 0, 0, + 0, 0, 0, 331, 340, 337, 338, 335, 336, 334, + 333, 332, 342, 325, 326, 328, 203, 327, 174, 0, + 193, 227, 206, 188, 
220, 185, 0, 0, 0, 0, + 0, 194, 0, 0, 209, 200, 0, 0, 0, 0, + 187, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 500, 0, 0, 0, 0, 0, 0, 0, + 0, 180, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 430, 429, 439, 440, + 432, 433, 434, 435, 436, 437, 438, 431, 0, 0, + 441, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 223, 0, 0, 0, 0, + 0, 205, 0, 0, 0, 0, 181, 0, 208, 204, + 218, 176, 216, 211, 198, 190, 191, 175, 0, 207, + 184, 189, 183, 202, 213, 214, 182, 228, 179, 222, + 178, 0, 221, 201, 0, 212, 217, 199, 196, 177, + 215, 197, 195, 192, 186, 22, 0, 0, 210, 219, + 229, 0, 0, 224, 225, 226, 203, 0, 0, 0, + 0, 0, 0, 0, 0, 185, 0, 0, 0, 0, + 0, 194, 372, 0, 209, 200, 174, 0, 193, 227, + 206, 188, 220, 0, 0, 0, 0, 0, 0, 47, + 0, 0, 231, 0, 0, 0, 0, 384, 187, 0, + 0, 180, 389, 390, 391, 392, 393, 394, 395, 0, + 396, 397, 398, 399, 400, 385, 386, 387, 388, 370, + 371, 0, 0, 373, 0, 0, 374, 375, 376, 377, + 378, 379, 380, 381, 382, 383, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 223, 0, 0, 0, 0, + 0, 205, 0, 0, 0, 0, 181, 0, 208, 204, + 218, 176, 216, 211, 198, 190, 191, 175, 0, 207, + 184, 189, 183, 202, 213, 214, 182, 228, 179, 222, + 178, 0, 221, 201, 0, 212, 217, 199, 196, 177, + 215, 197, 195, 192, 186, 0, 0, 203, 210, 219, + 229, 911, 0, 224, 225, 226, 185, 0, 0, 0, + 0, 0, 194, 0, 0, 209, 200, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 174, 0, 193, 227, + 206, 188, 220, 231, 0, 913, 0, 0, 0, 0, + 0, 0, 180, 0, 0, 0, 0, 0, 187, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 223, 0, 0, 0, + 0, 0, 205, 0, 0, 0, 0, 181, 0, 208, + 204, 218, 176, 216, 211, 198, 190, 191, 175, 0, + 207, 184, 189, 183, 202, 213, 214, 182, 228, 179, + 222, 178, 0, 221, 201, 0, 212, 217, 199, 196, + 177, 215, 197, 195, 192, 186, 22, 0, 0, 210, + 219, 229, 0, 0, 224, 225, 226, 203, 0, 0, + 0, 0, 0, 0, 0, 0, 185, 0, 0, 0, + 0, 0, 194, 0, 0, 209, 200, 174, 0, 193, + 227, 
206, 188, 220, 0, 0, 0, 0, 0, 0, + 47, 0, 0, 500, 0, 0, 0, 0, 0, 187, + 0, 0, 180, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 223, 0, 0, 0, + 0, 0, 205, 0, 0, 0, 0, 181, 0, 208, + 204, 218, 176, 216, 211, 198, 190, 191, 175, 0, + 207, 184, 189, 183, 202, 213, 214, 182, 228, 179, + 222, 178, 0, 221, 201, 0, 212, 217, 199, 196, + 177, 215, 197, 195, 192, 186, 0, 0, 203, 210, + 219, 229, 0, 0, 224, 225, 226, 185, 0, 0, + 0, 0, 0, 194, 0, 0, 209, 200, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 174, 0, 193, + 227, 206, 188, 220, 500, 0, 0, 498, 0, 0, + 499, 0, 0, 180, 0, 0, 0, 0, 0, 187, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 223, 0, 0, + 0, 0, 0, 205, 0, 0, 0, 0, 181, 0, + 208, 204, 218, 176, 216, 211, 198, 190, 191, 175, + 0, 207, 184, 189, 183, 202, 213, 214, 182, 228, + 179, 222, 178, 0, 221, 201, 0, 212, 217, 199, + 196, 177, 215, 197, 195, 192, 186, 0, 0, 203, + 210, 219, 229, 0, 0, 224, 225, 226, 185, 0, + 0, 0, 0, 0, 194, 0, 0, 209, 200, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 174, 0, + 193, 227, 206, 188, 220, 231, 0, 913, 0, 0, + 0, 0, 0, 0, 180, 0, 0, 0, 0, 0, + 187, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 223, 0, + 0, 0, 0, 0, 205, 0, 0, 0, 0, 181, + 0, 208, 204, 218, 176, 216, 211, 198, 190, 191, + 175, 0, 207, 184, 189, 183, 202, 213, 214, 182, + 228, 179, 222, 178, 0, 221, 201, 0, 212, 217, + 199, 196, 177, 215, 197, 195, 192, 186, 0, 0, + 0, 210, 219, 229, 203, 0, 224, 225, 226, 0, + 0, 0, 0, 185, 0, 0, 0, 0, 0, 194, + 0, 0, 209, 200, 0, 0, 0, 0, 0, 174, + 0, 193, 227, 206, 188, 220, 0, 47, 0, 0, + 231, 0, 0, 0, 0, 0, 0, 0, 0, 180, + 0, 187, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 223, 0, 0, 0, 0, 0, 205, + 0, 0, 0, 0, 181, 0, 208, 204, 218, 176, + 216, 211, 198, 190, 191, 175, 0, 207, 184, 189, + 183, 202, 213, 214, 182, 228, 179, 222, 178, 0, + 221, 201, 0, 212, 217, 199, 196, 177, 215, 197, + 195, 192, 186, 0, 0, 203, 210, 219, 229, 0, + 0, 224, 225, 226, 185, 0, 0, 0, 0, 0, + 194, 0, 0, 209, 200, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 174, 0, 193, 227, 206, 188, + 220, 500, 0, 743, 0, 0, 0, 0, 0, 0, + 180, 0, 0, 0, 0, 0, 187, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 223, 0, 0, 0, 0, 0, + 205, 0, 0, 0, 0, 181, 0, 208, 204, 218, + 176, 216, 211, 198, 190, 191, 175, 0, 207, 184, + 189, 183, 202, 213, 214, 182, 228, 179, 222, 178, + 0, 221, 201, 0, 212, 217, 199, 196, 177, 215, + 197, 195, 192, 186, 0, 0, 0, 210, 219, 229, + 203, 0, 224, 225, 226, 0, 0, 0, 356, 185, + 0, 0, 0, 0, 0, 194, 0, 0, 209, 200, + 0, 0, 0, 0, 0, 174, 0, 193, 227, 206, + 188, 220, 0, 0, 0, 0, 231, 0, 0, 0, + 0, 0, 0, 0, 0, 180, 0, 187, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 223, + 0, 0, 0, 0, 0, 205, 0, 0, 0, 0, + 181, 0, 208, 204, 218, 176, 216, 211, 198, 190, + 191, 175, 0, 207, 184, 189, 183, 202, 213, 214, + 182, 228, 179, 222, 178, 0, 221, 201, 0, 212, + 217, 199, 196, 177, 215, 197, 195, 192, 186, 0, + 0, 203, 210, 219, 229, 0, 0, 224, 225, 226, + 185, 0, 0, 0, 0, 0, 194, 0, 0, 209, + 200, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 174, 0, 193, 227, 206, 188, 220, 500, 0, 0, + 0, 0, 0, 0, 0, 0, 180, 0, 0, 0, + 0, 0, 187, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 223, 0, 0, 0, 0, 0, 205, 0, 0, 0, + 0, 181, 0, 208, 204, 218, 176, 216, 211, 198, + 190, 191, 175, 0, 207, 184, 189, 183, 202, 
213, + 214, 182, 228, 179, 222, 178, 0, 221, 201, 0, + 212, 217, 199, 196, 177, 215, 197, 195, 192, 186, + 0, 0, 203, 210, 219, 229, 0, 0, 224, 225, + 226, 185, 0, 0, 0, 0, 0, 194, 0, 0, + 209, 200, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 174, 0, 193, 227, 206, 188, 220, 343, 0, + 0, 0, 0, 0, 0, 0, 0, 180, 0, 0, + 0, 0, 0, 187, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 223, 0, 0, 0, 0, 0, 205, 0, 0, + 0, 0, 181, 0, 208, 204, 218, 176, 216, 211, + 198, 190, 191, 175, 0, 207, 184, 189, 183, 202, + 213, 214, 182, 228, 179, 222, 178, 0, 221, 201, + 0, 212, 217, 199, 196, 177, 215, 197, 195, 192, + 186, 0, 0, 203, 210, 219, 229, 0, 0, 224, + 225, 226, 185, 0, 0, 0, 0, 0, 194, 0, + 0, 209, 200, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 174, 0, 193, 227, 206, 188, 220, 231, + 0, 0, 0, 0, 0, 0, 0, 0, 180, 0, + 0, 0, 0, 0, 187, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 223, 0, 0, 0, 0, 0, 205, 0, + 0, 0, 0, 181, 0, 208, 204, 218, 176, 216, + 211, 198, 190, 191, 175, 0, 207, 184, 189, 183, + 202, 213, 214, 182, 228, 179, 222, 178, 0, 221, + 201, 0, 212, 217, 199, 196, 177, 215, 197, 195, + 192, 186, 0, 0, 0, 210, 219, 229, 0, 0, + 224, 225, 226, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 174, 0, 193, 227, 206, 188, 220, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 187, +} +var yyPact = [...]int{ + + 93, -1000, -172, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + 602, 625, -1000, -1000, -1000, -1000, -1000, 445, -8, 14, + -41, 43, 42, 1336, 5626, -1000, -1000, 247, -163, -1000, + -1000, -1000, -1000, -1000, 454, -1000, -1000, -1000, -1000, -1000, + 579, 600, 468, 573, 518, -1000, 14, 5626, 616, -1000, + -149, 301, 12, 371, 12, 37, -1000, 11, 368, 11, + 5626, 5626, -1000, 613, -47, 
-1000, -1000, -99, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, 291, 551, 3580, 3580, 602, -1000, 454, -1000, + -1000, -1000, 541, -1000, -1000, 202, 5173, 547, 74, 5626, + 597, 376, -1000, 4030, 5626, 162, 472, 5626, 5626, 5626, + 567, 467, 5626, -1000, -1000, -1000, 5626, 5626, 5626, -1000, + -1000, 611, -1000, -1000, -1000, -1000, -1000, 621, 102, 250, + -1000, 3580, 1208, 440, 440, -1000, -1000, 53, -1000, -1000, + 3761, 3761, 3761, 3761, 3761, 3761, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 440, + 73, -1000, 3391, 440, 440, 440, 440, 440, 440, 3580, + 440, 440, 440, 440, 440, 440, 440, 440, 440, 440, + 440, 440, 440, -1000, -1000, 422, -1000, 189, 579, 291, + 518, 4561, 487, -1000, -1000, 469, 5626, -1000, 5475, 2626, + 609, -152, -170, 121, 198, -73, -1000, -1000, 443, -1000, + 443, 443, 443, 
443, -33, -33, -33, -33, -1000, -1000, + -1000, -1000, -1000, 456, -1000, 443, 443, 443, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, 455, 455, 455, 444, + 444, -1000, 557, 5626, -1000, 119, -1000, -1000, 5626, -1000, + -1000, -1000, -1000, 579, -106, -1000, 523, 3580, 3580, 217, + 3580, 3580, 120, 3761, 265, 112, 3761, 3761, 3761, 3761, + 3761, 3761, 3761, 3761, 3761, 3761, 3761, 3761, 3761, 3761, + 3761, 277, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + 357, -1000, 454, 292, 292, 84, 84, 84, 84, 84, + 3939, 2824, 2411, 291, 364, 127, 3391, 3013, 3013, 3580, + 3580, 3013, 574, 149, 127, 5324, -1000, 291, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, 3013, 3013, 3013, 3013, 3580, + -1000, -1000, -1000, 551, -1000, 574, 599, -1000, 531, 529, + -1000, -1000, 3013, -1000, 458, 5475, 440, -1000, 4410, -1000, + 451, -1000, 139, -1000, 69, -1000, -1000, -1000, -1000, -1000, + 602, 3580, 440, -1000, -54, 138, -1000, -1000, 449, 561, + 110, 355, -1000, -1000, 552, -1000, 176, -77, -1000, -1000, + 242, -33, -33, -1000, -1000, 78, 543, 78, 78, 78, + 281, -1000, -1000, -1000, -1000, 241, -1000, -1000, -1000, 238, + -1000, -1000, 1766, -1000, 125, 135, 17, 4, 3, 2, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, 279, 515, 120, 123, -1000, -1000, 256, + -1000, -1000, 127, 127, 1231, -1000, -1000, -1000, -1000, 265, + 3761, 3761, 3761, 995, 1231, 2809, 1139, 509, 84, 194, + 194, 82, 82, 82, 82, 82, 391, 391, -1000, -1000, + -1000, 291, -1000, -1000, -1000, 291, 3013, 416, -1000, -1000, + 1042, 64, 440, 60, -1000, -1000, -1000, 3580, -1000, 291, + 310, 310, 87, 168, 310, 3013, 190, -1000, 3580, 291, + -1000, 310, 291, 310, 310, -1000, -1000, 5626, -1000, -1000, + -1000, -1000, 426, -1000, 544, 386, 395, -1000, -1000, 3202, + 291, 362, 58, 602, 5475, 3580, 2411, 579, 127, 354, + 549, 133, 327, 5324, -1000, 316, -1000, -1000, -69, 272, + -1000, -1000, -1000, 379, 78, 78, -1000, 314, 131, -1000, + -1000, 
-1000, 347, -1000, 412, 320, -1000, -1000, -1000, -1000, + -1000, 5626, -1000, -1000, -1000, -1000, -1000, 313, -34, 445, + 303, 301, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 995, + 1231, 1159, -1000, 3761, 3761, -1000, -1000, 310, 3013, -1000, + -1000, 5018, -1000, -1000, 1981, 3013, 2196, 127, -1000, -1000, + -1000, 22, 277, 22, -132, 418, 132, -1000, 3580, 171, + -1000, -1000, -1000, -1000, -1000, -1000, 609, 4867, 560, -1000, + 440, -1000, -1000, 428, 5324, 5324, 579, -1000, 127, -1000, + -1000, 291, -1000, -39, 227, -1000, 307, -1000, 443, -1000, + 128, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, 268, 219, -1000, 218, -1000, -1000, -1000, + 542, -1000, -1000, -1000, -1000, 3761, 1231, 1231, -1000, -1000, + -1000, -1000, 57, 291, -1000, 291, 443, 443, -1000, 443, + 444, -1000, 443, -14, 443, -17, 291, 291, 440, -129, + -1000, 127, 3580, 607, 407, 665, -1000, -1000, -1000, 569, + 4099, 4250, 619, -1000, 440, -1000, 454, 56, -1000, -1000, + 1766, 129, -1000, -1000, 5324, -1000, 191, 556, -1000, 555, + -1000, 378, 311, 300, 1231, 1551, -1000, -1000, -1000, 68, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 3761, 291, + 257, 127, 601, 596, 4867, 4867, 4867, 4867, -1000, 510, + 508, -1000, 489, 486, 504, 5626, -1000, 290, 4099, 81, + -1000, 4712, -1000, -1000, 5475, 395, 291, 5324, -1000, 284, + -1000, -1000, 251, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -5, -1000, -1000, -1000, 3580, 3580, 665, 452, 614, + -1000, -1000, -1000, -1000, 503, -1000, 502, -1000, -1000, -1000, + -1000, -1000, 35, 24, 20, -1000, 394, -1000, -1000, -1000, + -1000, 291, 18, -140, 127, 387, 3580, 3580, -1000, -1000, + 440, 440, 440, -1000, 512, -136, -145, 127, 127, 5324, + 5324, 5324, -1000, 490, -1000, 288, -1000, 288, 288, -138, + -1000, 5324, -1000, -1000, -141, -1000, -146, -1000, +} +var yyPgo = [...]int{ + + 0, 822, 820, 819, 817, 816, 813, 69, 417, 811, + 810, 809, 808, 807, 805, 803, 802, 800, 799, 797, + 
796, 791, 783, 52, 780, 774, 771, 41, 770, 44, + 769, 767, 766, 26, 326, 47, 23, 1, 765, 21, + 13, 4, 763, 761, 14, 758, 187, 757, 755, 754, + 2, 20, 753, 751, 750, 749, 42, 714, 746, 745, + 744, 741, 740, 739, 40, 3, 9, 8, 17, 738, + 18, 6, 737, 32, 735, 733, 730, 729, 29, 726, + 39, 724, 15, 38, 723, 31, 7, 27, 721, 252, + 720, 226, 249, 719, 715, 713, 45, 0, 59, 12, + 22, 709, 368, 37, 5, 695, 694, 68, 16, 25, + 19, 692, 685, 682, 680, 678, 676, 675, 166, 674, + 673, 24, 36, 670, 669, 667, 666, 665, 43, 11, + 664, 662, 661, 652, 30, 649, 33, 28, 648, 644, + 643, 10, 642, 635, 634, 50, 46, 624, 115, +} +var yyR1 = [...]int{ + + 0, 143, 144, 144, 6, 6, 6, 6, 6, 6, + 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, + 6, 7, 7, 7, 8, 9, 9, 10, 10, 11, + 11, 26, 26, 12, 13, 14, 15, 15, 15, 15, + 18, 137, 139, 124, 124, 123, 123, 125, 125, 138, + 138, 138, 134, 112, 112, 112, 115, 115, 113, 113, + 113, 113, 113, 113, 113, 114, 114, 114, 114, 114, + 116, 116, 116, 116, 116, 117, 117, 117, 117, 117, + 117, 117, 117, 117, 117, 117, 117, 117, 117, 133, + 133, 118, 118, 128, 128, 129, 129, 129, 126, 126, + 127, 127, 130, 130, 130, 119, 119, 119, 119, 119, + 131, 131, 121, 121, 121, 122, 122, 132, 132, 132, + 132, 132, 120, 120, 135, 140, 140, 140, 140, 136, + 136, 142, 142, 141, 16, 16, 16, 16, 16, 16, + 16, 16, 17, 17, 17, 1, 19, 2, 3, 4, + 5, 5, 111, 111, 111, 20, 20, 20, 20, 20, + 20, 20, 32, 32, 21, 22, 22, 22, 22, 147, + 23, 24, 24, 25, 25, 25, 29, 29, 29, 27, + 27, 28, 28, 35, 35, 34, 34, 36, 36, 36, + 36, 101, 101, 101, 100, 100, 38, 38, 39, 39, + 40, 40, 41, 41, 41, 48, 42, 42, 42, 42, + 106, 106, 105, 105, 105, 104, 104, 43, 43, 43, + 43, 44, 44, 44, 44, 45, 45, 47, 47, 46, + 46, 49, 49, 49, 49, 50, 50, 51, 51, 37, + 37, 37, 37, 37, 37, 37, 90, 90, 53, 53, + 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, + 63, 63, 63, 63, 63, 63, 54, 54, 54, 54, + 54, 54, 54, 33, 33, 64, 64, 64, 70, 65, + 65, 57, 57, 57, 57, 57, 57, 57, 57, 57, + 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, + 57, 57, 57, 57, 57, 
57, 57, 57, 57, 57, + 57, 61, 61, 61, 59, 59, 59, 59, 59, 59, + 59, 59, 59, 60, 60, 60, 60, 60, 60, 60, + 60, 148, 148, 62, 62, 62, 62, 30, 30, 30, + 30, 30, 109, 109, 110, 110, 110, 110, 110, 110, + 110, 110, 110, 110, 110, 110, 110, 74, 74, 31, + 31, 72, 72, 73, 75, 75, 71, 71, 71, 56, + 56, 56, 56, 56, 56, 56, 58, 58, 58, 76, + 76, 77, 77, 78, 78, 79, 79, 80, 81, 81, + 81, 82, 82, 82, 82, 83, 83, 83, 55, 55, + 55, 55, 55, 55, 84, 84, 84, 84, 85, 85, + 66, 66, 68, 68, 67, 69, 86, 86, 87, 88, + 88, 91, 91, 92, 92, 89, 89, 93, 93, 93, + 93, 93, 93, 93, 93, 93, 93, 94, 94, 94, + 95, 95, 98, 98, 99, 99, 102, 102, 103, 103, + 96, 96, 96, 96, 96, 96, 96, 96, 96, 96, + 96, 96, 96, 96, 96, 96, 96, 96, 96, 96, + 96, 96, 96, 96, 96, 96, 96, 96, 96, 96, + 96, 96, 96, 96, 96, 96, 96, 96, 96, 96, + 96, 96, 96, 96, 96, 96, 96, 96, 96, 96, + 96, 96, 96, 96, 96, 96, 96, 96, 96, 96, + 96, 96, 96, 96, 96, 96, 96, 96, 96, 96, + 96, 96, 96, 96, 96, 96, 96, 96, 96, 96, + 96, 96, 96, 96, 96, 96, 96, 96, 96, 96, + 96, 96, 96, 96, 96, 97, 97, 97, 97, 97, + 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, + 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, + 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, + 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, + 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, + 97, 145, 146, 107, 108, 108, 108, +} +var yyR2 = [...]int{ + + 0, 2, 0, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 4, 6, 7, 10, 1, 3, 1, 3, 6, + 7, 1, 1, 8, 7, 2, 2, 9, 4, 6, + 4, 4, 3, 0, 3, 0, 4, 0, 3, 1, + 3, 3, 7, 3, 1, 1, 2, 1, 1, 1, + 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, + 1, 2, 2, 2, 1, 4, 4, 2, 2, 3, + 3, 3, 3, 1, 1, 1, 1, 1, 4, 1, + 3, 0, 3, 0, 5, 0, 3, 5, 0, 1, + 0, 1, 0, 1, 2, 0, 2, 2, 2, 2, + 0, 1, 0, 3, 3, 0, 2, 0, 2, 1, + 2, 1, 0, 2, 4, 2, 3, 2, 2, 1, + 1, 1, 3, 2, 6, 7, 7, 7, 9, 7, + 7, 7, 4, 5, 4, 3, 3, 2, 2, 3, + 3, 2, 1, 1, 1, 3, 5, 5, 5, 3, + 3, 6, 0, 3, 2, 2, 2, 2, 2, 0, + 2, 0, 2, 1, 2, 2, 0, 1, 1, 0, + 1, 0, 1, 0, 1, 1, 3, 1, 2, 3, + 5, 0, 1, 2, 1, 1, 0, 2, 1, 3, + 1, 1, 1, 3, 3, 3, 3, 5, 5, 3, + 0, 
1, 0, 1, 2, 1, 1, 1, 2, 2, + 1, 2, 3, 2, 3, 2, 2, 2, 1, 1, + 3, 0, 5, 5, 5, 1, 3, 0, 2, 1, + 3, 3, 2, 3, 1, 2, 0, 3, 1, 1, + 3, 3, 4, 4, 5, 3, 4, 5, 6, 2, + 1, 2, 1, 2, 1, 2, 1, 1, 1, 1, + 1, 1, 1, 0, 2, 1, 1, 1, 3, 1, + 3, 1, 1, 1, 1, 1, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 2, 2, 2, 2, 2, 3, 1, 1, 1, + 1, 4, 5, 6, 4, 4, 6, 6, 6, 9, + 7, 5, 4, 2, 2, 2, 2, 2, 2, 2, + 2, 0, 2, 4, 4, 4, 4, 0, 3, 4, + 7, 3, 1, 1, 2, 3, 3, 1, 2, 2, + 1, 2, 1, 2, 2, 1, 2, 0, 1, 0, + 2, 1, 2, 4, 0, 2, 1, 3, 5, 1, + 1, 1, 1, 1, 1, 1, 1, 2, 2, 0, + 3, 0, 2, 0, 3, 1, 3, 2, 0, 1, + 1, 0, 2, 4, 4, 0, 2, 4, 2, 1, + 3, 5, 4, 6, 1, 3, 3, 5, 0, 5, + 1, 3, 1, 2, 3, 1, 1, 3, 3, 1, + 1, 0, 2, 0, 3, 0, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 0, 0, 1, 1, +} +var yyChk = [...]int{ + + -1000, -143, -6, -7, -11, -12, -13, -14, -15, -16, + -17, -1, -19, -20, -21, -2, -3, -4, -5, -22, + -8, -9, 6, -26, 8, 9, 29, -18, 107, 108, + 109, 131, 111, 124, 47, 207, 126, 214, 215, 217, + 24, 125, 129, 130, -145, 7, 191, 50, -144, 220, + -78, 14, -25, 5, -23, -147, -23, -23, -23, -107, + -137, 50, 183, 115, 114, -89, 118, 114, 115, 183, + 114, 114, -111, 173, 107, 177, 178, 180, 53, -96, + -97, 67, 21, 23, 167, 70, 102, 15, 71, 152, + 155, 101, 192, 45, 184, 185, 182, 183, 172, 28, + 9, 24, 125, 20, 95, 109, 74, 75, 208, 128, + 22, 126, 65, 18, 48, 10, 12, 13, 119, 118, + 86, 115, 43, 7, 103, 25, 83, 39, 27, 41, + 84, 16, 186, 187, 30, 196, 97, 46, 33, 68, + 63, 49, 66, 14, 44, 212, 211, 
85, 110, 191, + 42, 6, 195, 29, 124, 209, 40, 114, 73, 117, + 64, 213, 5, 120, 8, 47, 121, 188, 189, 190, + 31, 210, 72, 11, 197, 138, 132, 160, 151, 149, + 62, 127, 147, 143, 141, 26, 165, 219, 202, 142, + 136, 137, 164, 199, 32, 163, 159, 162, 135, 158, + 36, 154, 144, 17, 130, 122, 201, 140, 129, 35, + 169, 134, 156, 145, 146, 161, 133, 157, 131, 170, + 203, 153, 150, 116, 174, 175, 176, 200, 148, 171, + -102, 53, -97, -107, -107, 56, 216, -107, -107, -107, + -107, -107, -7, -82, 16, 15, -10, -8, -145, 6, + 19, 20, -29, 37, 38, -24, -89, -46, -102, 10, + 204, -138, -134, 53, -92, 119, 53, -92, 114, -91, + 119, 53, -91, -46, -46, -107, 10, 114, 183, -107, + -107, 179, -107, -107, -146, 52, -83, 18, 30, -37, + -52, 68, -57, 28, 22, -56, -53, -71, -69, -70, + 102, 91, 92, 99, 69, 103, -61, -59, -60, -62, + 55, 54, 56, 57, 58, 59, 63, 64, 65, -98, + -102, -67, -145, 41, 42, 192, 193, 196, 194, 71, + 31, 182, 190, 189, 188, 186, 187, 184, 185, 119, + 183, 97, 191, 53, -97, -79, -80, -37, -78, -7, + -23, 33, -27, 20, 61, -47, 25, -46, 29, 104, + -46, 15, 52, 51, -112, -115, -117, -116, -113, -114, + 149, 150, 102, 153, 156, 157, 158, 159, 160, 161, + 162, 163, 164, 165, 127, 145, 146, 147, 148, 132, + 133, 134, 135, 136, 137, 138, 140, 141, 142, 143, + 144, -102, 68, 49, -46, -46, -46, 22, 49, -102, + -46, -46, -46, -32, 10, 8, 86, 67, 66, 83, + 51, 17, -37, -54, 86, 68, 84, 85, 70, 88, + 87, 98, 91, 92, 93, 94, 95, 96, 97, 89, + 90, 101, 76, 77, 78, 79, 80, 81, 82, -90, + -145, -70, -145, 105, 106, -57, -57, -57, -57, -57, + -57, -145, 104, -7, -65, -37, -145, -145, -145, -145, + -145, -145, -145, -74, -37, -145, -148, -145, -148, -148, + -148, -148, -148, -148, -148, -145, -145, -145, -145, 51, + -81, 23, 24, -82, -146, -29, -58, -98, 56, 59, + 53, -97, -28, 40, -55, 29, 31, -7, -145, -46, + -86, -87, -71, -98, -102, -103, -102, -96, 107, 173, + -51, 11, 206, -139, -124, 219, -134, -135, -140, 122, + 120, -136, 115, 27, -130, 63, 68, -126, 170, -118, + 50, -118, 
-118, -118, -118, -121, 152, -121, -121, -121, + 50, -118, -118, -118, -128, 50, -128, -128, -129, 50, + -129, 22, -46, -93, 110, 219, 192, 112, 109, 113, + 108, 167, 152, 62, 28, 14, 203, 53, -46, -107, + -107, -107, -82, 181, 35, -37, -37, -63, 63, 68, + 64, 65, -37, -37, -57, -64, -67, -70, 60, 86, + 84, 85, 70, -57, -57, -57, -57, -57, -57, -57, + -57, -57, -57, -57, -57, -57, -57, -57, -109, 53, + 55, 53, -56, -56, -98, -35, 20, -34, -36, 93, + -37, -102, -99, -103, -98, -96, -146, 51, -146, -7, + -34, -34, -37, -37, -34, -27, -72, -73, 72, -98, + -146, -34, -35, -34, -34, -80, -83, -88, 18, 10, + 31, 31, -34, -85, 49, -86, -66, -68, -67, -145, + -7, -84, -98, -51, 51, 76, 104, -78, -37, -145, + -125, 167, 76, 50, 27, -136, 53, 53, -119, 28, + 63, -127, 171, 56, -121, -121, -122, 101, 29, -122, + -122, -122, -133, 55, 56, 56, -108, -145, -99, -96, + -107, -94, -95, 117, 21, 115, 27, 76, 117, 123, + 123, 123, -107, 55, 36, 63, 64, 65, -64, -57, + -57, -57, -33, 128, 67, -146, -146, -34, 51, -101, + -100, 21, -98, 55, 104, -145, 104, -37, -146, -146, + -146, 51, 121, 21, -146, -34, -75, -73, 74, -37, + -146, -146, -146, -146, -146, -46, -38, 10, 26, -85, + 51, -146, -146, -146, 51, 104, -78, -87, -37, -99, + -82, 53, -123, 28, 76, 53, -142, -141, -98, 53, + -131, 167, 55, 56, 57, 63, 52, -122, -122, 53, + 53, 102, 52, 51, 51, 52, 51, -46, -107, 53, + 152, -137, 53, -134, -33, 67, -57, -57, -146, -36, + -100, 93, -103, -35, -99, -110, 102, 149, 127, 147, + 143, 164, 154, 169, 145, 170, -109, -110, 197, -78, + 75, -37, 73, -51, -39, -40, -41, -42, -48, -70, + -145, -46, 27, -68, 31, -7, -145, -98, -98, -82, + -146, 155, 56, 52, 51, -118, -132, 122, 27, 120, + 55, 56, 56, 29, -57, 104, -146, -146, -118, -118, + -118, -129, -118, 137, -118, 137, -146, -146, -145, -31, + 195, -37, -76, 12, 51, -43, -44, -45, 39, 43, + 45, 40, 41, 42, 46, -106, 21, -39, -145, -105, + -104, 21, -102, 55, 8, -66, -7, 104, -108, 76, + -141, -120, 62, 27, 27, 52, 52, 53, 93, -121, + 53, 
-57, -146, 55, -77, 13, 15, -40, -41, -40, + -41, 39, 39, 39, 44, 39, 44, 39, -44, -102, + -146, -49, 47, 118, 48, -104, -86, -146, -98, 53, + 55, -30, 86, 200, -37, -65, 49, 49, 39, 39, + 115, 115, 115, -146, 198, 46, 201, -37, -37, -145, + -145, -145, 36, 199, 202, -50, -98, -50, -50, 36, + -146, 51, -146, -146, 200, -98, 201, 202, +} +var yyDef = [...]int{ + + 0, -2, 2, -2, 5, 6, 7, 8, 9, 10, + 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + 383, 0, 169, 169, 169, 169, 603, 0, 0, 425, + 0, 0, 0, 0, 0, 603, 603, 0, 0, 603, + 603, 603, 603, 603, 0, 31, 32, 601, 1, 3, + 391, 0, 0, 173, 176, 171, 425, 0, 0, 35, + 36, 0, 423, 0, 423, 0, 426, 421, 0, 421, + 0, 0, 603, 528, 462, 603, 603, 0, 152, 153, + 154, 450, 451, 452, 453, 454, 455, 456, 457, 458, + 459, 460, 461, 463, 464, 465, 466, 467, 468, 469, + 470, 471, 472, 473, 474, 475, 476, 477, 478, 479, + 480, 481, 482, 483, 484, 485, 486, 487, 488, 489, + 490, 491, 492, 493, 494, 495, 496, 497, 498, 499, + 500, 501, 502, 503, 504, 505, 506, 507, 508, 509, + 510, 511, 512, 513, 514, 515, 516, 517, 518, 519, + 520, 521, 522, 523, 524, 525, 526, 527, 529, 530, + 531, 532, 533, 534, 535, 536, 537, 538, 539, 540, + 541, 542, 543, 544, 545, 546, 547, 548, 549, 550, + 551, 552, 553, 554, 555, 556, 557, 558, 559, 560, + 561, 562, 563, 564, 565, 566, 567, 568, 569, 570, + 571, 572, 573, 574, 575, 576, 577, 578, 579, 580, + 581, 582, 583, 584, 585, 586, 587, 588, 589, 590, + 591, 592, 593, 594, 595, 596, 597, 598, 599, 600, + 164, 446, 447, 147, 148, 603, 603, 151, 165, 166, + 167, 168, 25, 395, 0, 0, 383, 27, 0, 169, + 174, 175, 179, 177, 178, 170, 0, 0, 229, 0, + 0, 0, 49, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 145, 146, 155, 0, 0, 0, 159, + 160, 162, 149, 150, 26, 602, 21, 0, 0, 392, + 239, 0, 244, 246, 0, 281, 282, 283, 284, 285, + 0, 0, 0, 0, 0, 0, 307, 308, 309, 310, + 369, 370, 371, 372, 373, 374, 375, 248, 249, 366, + 0, 415, 0, 0, 0, 0, 0, 0, 0, 357, + 0, 331, 331, 331, 331, 331, 331, 331, 331, 0, + 0, 0, 0, -2, -2, 384, 385, 388, 
391, 25, + 176, 0, 181, 180, 172, 0, 0, 228, 0, 0, + 237, 0, 43, 0, 102, 98, 54, 55, 91, 57, + 91, 91, 91, 91, 112, 112, 112, 112, 83, 84, + 85, 86, 87, 0, 70, 91, 91, 91, 74, 58, + 59, 60, 61, 62, 63, 64, 93, 93, 93, 95, + 95, 38, 0, 0, 40, 0, 142, 422, 0, 144, + 603, 603, 603, 391, 0, 396, 0, 0, 0, 0, + 0, 0, 242, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 266, 267, 268, 269, 270, 271, 272, 245, + 0, 259, 0, 0, 0, 301, 302, 303, 304, 305, + 0, 183, 0, 25, 0, 279, 0, 0, 0, 0, + 0, 0, 179, 0, 358, 0, 323, 0, 324, 325, + 326, 327, 328, 329, 330, 0, 183, 0, 0, 0, + 387, 389, 390, 395, 28, 179, 0, 376, 0, 0, + 442, 443, 0, 182, 408, 0, 0, -2, 0, 227, + 237, 416, 0, 366, 0, 230, 448, 449, 462, 528, + 383, 0, 0, 41, 47, 0, 50, 51, 0, 0, + 0, 0, 129, 130, 105, 103, 0, 100, 99, 56, + 0, 112, 112, 77, 78, 115, 0, 115, 115, 115, + 0, 71, 72, 73, 65, 0, 66, 67, 68, 0, + 69, 424, 604, 603, 437, 0, 434, 0, 432, 0, + 427, 428, 429, 430, 431, 433, 435, 436, 143, 156, + 157, 158, 603, 0, 0, 240, 241, 243, 260, 0, + 262, 264, 393, 394, 250, 251, 275, 276, 277, 0, + 0, 0, 0, 273, 255, 0, 286, 287, 288, 289, + 290, 291, 292, 293, 294, 295, 296, 297, 300, 342, + 343, 0, 298, 299, 306, 0, 0, 184, 185, 187, + 191, 0, 367, 0, 444, -2, 278, 0, 414, 25, + 0, 0, 0, 0, 0, 0, 364, 361, 0, 0, + 332, 0, 0, 0, 0, 386, 22, 0, 419, 420, + 377, 378, 196, 29, 0, 408, 398, 410, 412, 0, + 25, 0, 404, 383, 0, 0, 0, 391, 238, 0, + 45, 0, 0, 0, 125, 0, 127, 128, 110, 0, + 104, 53, 101, 0, 115, 115, 79, 0, 0, 80, + 81, 82, 0, 89, 0, 0, 39, 605, 606, 445, + 134, 0, 603, 438, 439, 440, 441, 0, 0, 0, + 0, 0, 161, 163, 397, 261, 263, 265, 252, 273, + 256, 0, 253, 0, 0, 247, 311, 0, 0, 188, + 192, 0, 194, 195, 0, 183, 0, 280, -2, 314, + 315, 0, 0, 0, 0, 383, 0, 362, 0, 0, + 322, 333, 334, 335, 336, 23, 237, 0, 0, 30, + 0, 413, -2, 0, 0, 0, 391, 417, 418, 367, + 34, 0, 42, 0, 0, 44, 0, 131, 91, 126, + 117, 111, 106, 107, 108, 109, 92, 75, 76, 116, + 113, 114, 88, 0, 0, 96, 0, 135, 136, 137, + 
0, 139, 140, 141, 254, 0, 274, 257, 312, 186, + 193, 189, 0, 0, 368, 0, 91, 91, 347, 91, + 95, 350, 91, 352, 91, 355, 0, 0, 0, 359, + 321, 365, 0, 379, 197, 198, 200, 201, 202, 210, + 0, 212, 0, 411, 0, -2, 0, 406, 405, 33, + 604, 0, 48, 124, 0, 133, 122, 0, 119, 121, + 90, 0, 0, 0, 258, 0, 313, 316, 344, 112, + 348, 349, 351, 353, 354, 356, 318, 317, 0, 0, + 0, 363, 381, 0, 0, 0, 0, 0, 217, 0, + 0, 220, 0, 0, 0, 0, 211, 0, 0, 231, + 213, 0, 215, 216, 0, 401, 25, 0, 37, 0, + 132, 52, 0, 118, 120, 94, 97, 138, 190, 345, + 346, 337, 320, 360, 24, 0, 0, 199, 206, 0, + 209, 218, 219, 221, 0, 223, 0, 225, 226, 203, + 204, 205, 0, 0, 0, 214, 409, -2, 407, 46, + 123, 0, 0, 0, 382, 380, 0, 0, 222, 224, + 0, 0, 0, 319, 0, 0, 0, 207, 208, 0, + 0, 0, 338, 0, 341, 0, 235, 0, 0, 339, + 232, 0, 233, 234, 0, 236, 0, 340, +} +var yyTok1 = [...]int{ + + 1, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 69, 3, 3, 3, 96, 88, 3, + 50, 52, 93, 91, 51, 92, 104, 94, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 220, + 77, 76, 78, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 98, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 87, 3, 99, +} +var yyTok2 = [...]int{ + + 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, + 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, + 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, + 42, 43, 44, 45, 46, 47, 48, 49, 53, 54, + 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, + 65, 66, 67, 68, 70, 71, 72, 73, 74, 75, + 79, 80, 81, 82, 83, 84, 85, 86, 89, 90, + 95, 97, 100, 101, 102, 103, 105, 106, 107, 108, + 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, + 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, + 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, + 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, + 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, + 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, + 169, 
170, 171, 172, 173, 174, 175, 176, 177, 178, + 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, + 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, + 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, + 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, + 219, +} +var yyTok3 = [...]int{ + 0, +} + +var yyErrorMessages = [...]struct { + state int + token int + msg string +}{} + +//line yaccpar:1 + +/* parser for yacc output */ + +var ( + yyDebug = 0 + yyErrorVerbose = false +) + +type yyLexer interface { + Lex(lval *yySymType) int + Error(s string) +} + +type yyParser interface { + Parse(yyLexer) int + Lookahead() int +} + +type yyParserImpl struct { + lval yySymType + stack [yyInitialStackSize]yySymType + char int +} + +func (p *yyParserImpl) Lookahead() int { + return p.char +} + +func yyNewParser() yyParser { + return &yyParserImpl{} +} + +const yyFlag = -1000 + +func yyTokname(c int) string { + if c >= 1 && c-1 < len(yyToknames) { + if yyToknames[c-1] != "" { + return yyToknames[c-1] + } + } + return __yyfmt__.Sprintf("tok-%v", c) +} + +func yyStatname(s int) string { + if s >= 0 && s < len(yyStatenames) { + if yyStatenames[s] != "" { + return yyStatenames[s] + } + } + return __yyfmt__.Sprintf("state-%v", s) +} + +func yyErrorMessage(state, lookAhead int) string { + const TOKSTART = 4 + + if !yyErrorVerbose { + return "syntax error" + } + + for _, e := range yyErrorMessages { + if e.state == state && e.token == lookAhead { + return "syntax error: " + e.msg + } + } + + res := "syntax error: unexpected " + yyTokname(lookAhead) + + // To match Bison, suggest at most four expected tokens. + expected := make([]int, 0, 4) + + // Look for shiftable tokens. 
+ base := yyPact[state] + for tok := TOKSTART; tok-1 < len(yyToknames); tok++ { + if n := base + tok; n >= 0 && n < yyLast && yyChk[yyAct[n]] == tok { + if len(expected) == cap(expected) { + return res + } + expected = append(expected, tok) + } + } + + if yyDef[state] == -2 { + i := 0 + for yyExca[i] != -1 || yyExca[i+1] != state { + i += 2 + } + + // Look for tokens that we accept or reduce. + for i += 2; yyExca[i] >= 0; i += 2 { + tok := yyExca[i] + if tok < TOKSTART || yyExca[i+1] == 0 { + continue + } + if len(expected) == cap(expected) { + return res + } + expected = append(expected, tok) + } + + // If the default action is to accept or reduce, give up. + if yyExca[i+1] != 0 { + return res + } + } + + for i, tok := range expected { + if i == 0 { + res += ", expecting " + } else { + res += " or " + } + res += yyTokname(tok) + } + return res +} + +func yylex1(lex yyLexer, lval *yySymType) (char, token int) { + token = 0 + char = lex.Lex(lval) + if char <= 0 { + token = yyTok1[0] + goto out + } + if char < len(yyTok1) { + token = yyTok1[char] + goto out + } + if char >= yyPrivate { + if char < yyPrivate+len(yyTok2) { + token = yyTok2[char-yyPrivate] + goto out + } + } + for i := 0; i < len(yyTok3); i += 2 { + token = yyTok3[i+0] + if token == char { + token = yyTok3[i+1] + goto out + } + } + +out: + if token == 0 { + token = yyTok2[1] /* unknown char */ + } + if yyDebug >= 3 { + __yyfmt__.Printf("lex %s(%d)\n", yyTokname(token), uint(char)) + } + return char, token +} + +func yyParse(yylex yyLexer) int { + return yyNewParser().Parse(yylex) +} + +func (yyrcvr *yyParserImpl) Parse(yylex yyLexer) int { + var yyn int + var yyVAL yySymType + var yyDollar []yySymType + _ = yyDollar // silence set and not used + yyS := yyrcvr.stack[:] + + Nerrs := 0 /* number of errors */ + Errflag := 0 /* error recovery flag */ + yystate := 0 + yyrcvr.char = -1 + yytoken := -1 // yyrcvr.char translated into internal numbering + defer func() { + // Make sure we report no lookahead when 
not parsing. + yystate = -1 + yyrcvr.char = -1 + yytoken = -1 + }() + yyp := -1 + goto yystack + +ret0: + return 0 + +ret1: + return 1 + +yystack: + /* put a state and value onto the stack */ + if yyDebug >= 4 { + __yyfmt__.Printf("char %v in %v\n", yyTokname(yytoken), yyStatname(yystate)) + } + + yyp++ + if yyp >= len(yyS) { + nyys := make([]yySymType, len(yyS)*2) + copy(nyys, yyS) + yyS = nyys + } + yyS[yyp] = yyVAL + yyS[yyp].yys = yystate + +yynewstate: + yyn = yyPact[yystate] + if yyn <= yyFlag { + goto yydefault /* simple state */ + } + if yyrcvr.char < 0 { + yyrcvr.char, yytoken = yylex1(yylex, &yyrcvr.lval) + } + yyn += yytoken + if yyn < 0 || yyn >= yyLast { + goto yydefault + } + yyn = yyAct[yyn] + if yyChk[yyn] == yytoken { /* valid shift */ + yyrcvr.char = -1 + yytoken = -1 + yyVAL = yyrcvr.lval + yystate = yyn + if Errflag > 0 { + Errflag-- + } + goto yystack + } + +yydefault: + /* default state action */ + yyn = yyDef[yystate] + if yyn == -2 { + if yyrcvr.char < 0 { + yyrcvr.char, yytoken = yylex1(yylex, &yyrcvr.lval) + } + + /* look through exception table */ + xi := 0 + for { + if yyExca[xi+0] == -1 && yyExca[xi+1] == yystate { + break + } + xi += 2 + } + for xi += 2; ; xi += 2 { + yyn = yyExca[xi+0] + if yyn < 0 || yyn == yytoken { + break + } + } + yyn = yyExca[xi+1] + if yyn < 0 { + goto ret0 + } + } + if yyn == 0 { + /* error ... attempt to resume parsing */ + switch Errflag { + case 0: /* brand new error */ + yylex.Error(yyErrorMessage(yystate, yytoken)) + Nerrs++ + if yyDebug >= 1 { + __yyfmt__.Printf("%s", yyStatname(yystate)) + __yyfmt__.Printf(" saw %s\n", yyTokname(yytoken)) + } + fallthrough + + case 1, 2: /* incompletely recovered error ... 
try again */ + Errflag = 3 + + /* find a state where "error" is a legal shift action */ + for yyp >= 0 { + yyn = yyPact[yyS[yyp].yys] + yyErrCode + if yyn >= 0 && yyn < yyLast { + yystate = yyAct[yyn] /* simulate a shift of "error" */ + if yyChk[yystate] == yyErrCode { + goto yystack + } + } + + /* the current p has no shift on "error", pop stack */ + if yyDebug >= 2 { + __yyfmt__.Printf("error recovery pops state %d\n", yyS[yyp].yys) + } + yyp-- + } + /* there is no state on the stack with an error shift ... abort */ + goto ret1 + + case 3: /* no shift yet; clobber input char */ + if yyDebug >= 2 { + __yyfmt__.Printf("error recovery discards %s\n", yyTokname(yytoken)) + } + if yytoken == yyEofCode { + goto ret1 + } + yyrcvr.char = -1 + yytoken = -1 + goto yynewstate /* try again in the same state */ + } + } + + /* reduction by production yyn */ + if yyDebug >= 2 { + __yyfmt__.Printf("reduce %v in:\n\t%v\n", yyn, yyStatname(yystate)) + } + + yynt := yyn + yypt := yyp + _ = yypt // guard against "declared and not used" + + yyp -= yyR2[yyn] + // yyp is now the index of $0. Perform the default action. Iff the + // reduced production is ε, $1 is possibly out of range. 
+ if yyp+1 >= len(yyS) { + nyys := make([]yySymType, len(yyS)*2) + copy(nyys, yyS) + yyS = nyys + } + yyVAL = yyS[yyp+1] + + /* consult goto table to find next state */ + yyn = yyR1[yyn] + yyg := yyPgo[yyn] + yyj := yyg + yyS[yyp].yys + 1 + + if yyj >= yyLast { + yystate = yyAct[yyg] + } else { + yystate = yyAct[yyj] + if yyChk[yystate] != -yyn { + yystate = yyAct[yyg] + } + } + // dummy call; replaced with literal code + switch yynt { + + case 1: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:263 + { + setParseTree(yylex, yyDollar[1].statement) + } + case 2: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:268 + { + } + case 3: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:269 + { + } + case 4: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:273 + { + yyVAL.statement = yyDollar[1].selStmt + } + case 21: + yyDollar = yyS[yypt-4 : yypt+1] + //line sql.y:295 + { + sel := yyDollar[1].selStmt.(*Select) + sel.OrderBy = yyDollar[2].orderBy + sel.Limit = yyDollar[3].limit + sel.Lock = yyDollar[4].str + yyVAL.selStmt = sel + } + case 22: + yyDollar = yyS[yypt-6 : yypt+1] + //line sql.y:303 + { + yyVAL.selStmt = &Union{Type: yyDollar[2].str, Left: yyDollar[1].selStmt, Right: yyDollar[3].selStmt, OrderBy: yyDollar[4].orderBy, Limit: yyDollar[5].limit, Lock: yyDollar[6].str} + } + case 23: + yyDollar = yyS[yypt-7 : yypt+1] + //line sql.y:307 + { + yyVAL.selStmt = &Select{Comments: Comments(yyDollar[2].bytes2), Cache: yyDollar[3].str, SelectExprs: SelectExprs{Nextval{Expr: yyDollar[5].expr}}, From: TableExprs{&AliasedTableExpr{Expr: yyDollar[7].tableName}}} + } + case 24: + yyDollar = yyS[yypt-10 : yypt+1] + //line sql.y:314 + { + yyVAL.selStmt = &Select{Comments: Comments(yyDollar[2].bytes2), Cache: yyDollar[3].str, Distinct: yyDollar[4].str, Hints: yyDollar[5].str, SelectExprs: yyDollar[6].selectExprs, From: yyDollar[7].tableExprs, Where: NewWhere(WhereStr, yyDollar[8].expr), GroupBy: GroupBy(yyDollar[9].exprs), Having: NewWhere(HavingStr, yyDollar[10].expr)} + } + 
case 25: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:320 + { + yyVAL.selStmt = yyDollar[1].selStmt + } + case 26: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:324 + { + yyVAL.selStmt = &ParenSelect{Select: yyDollar[2].selStmt} + } + case 27: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:330 + { + yyVAL.selStmt = yyDollar[1].selStmt + } + case 28: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:334 + { + yyVAL.selStmt = &ParenSelect{Select: yyDollar[2].selStmt} + } + case 29: + yyDollar = yyS[yypt-6 : yypt+1] + //line sql.y:341 + { + // insert_data returns a *Insert pre-filled with Columns & Values + ins := yyDollar[5].ins + ins.Action = yyDollar[1].str + ins.Comments = yyDollar[2].bytes2 + ins.Ignore = yyDollar[3].str + ins.Table = yyDollar[4].tableName + ins.OnDup = OnDup(yyDollar[6].updateExprs) + yyVAL.statement = ins + } + case 30: + yyDollar = yyS[yypt-7 : yypt+1] + //line sql.y:352 + { + cols := make(Columns, 0, len(yyDollar[6].updateExprs)) + vals := make(ValTuple, 0, len(yyDollar[7].updateExprs)) + for _, updateList := range yyDollar[6].updateExprs { + cols = append(cols, updateList.Name.Name) + vals = append(vals, updateList.Expr) + } + yyVAL.statement = &Insert{Action: yyDollar[1].str, Comments: Comments(yyDollar[2].bytes2), Ignore: yyDollar[3].str, Table: yyDollar[4].tableName, Columns: cols, Rows: Values{vals}, OnDup: OnDup(yyDollar[7].updateExprs)} + } + case 31: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:364 + { + yyVAL.str = InsertStr + } + case 32: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:368 + { + yyVAL.str = ReplaceStr + } + case 33: + yyDollar = yyS[yypt-8 : yypt+1] + //line sql.y:374 + { + yyVAL.statement = &Update{Comments: Comments(yyDollar[2].bytes2), Table: yyDollar[3].tableName, Exprs: yyDollar[5].updateExprs, Where: NewWhere(WhereStr, yyDollar[6].expr), OrderBy: yyDollar[7].orderBy, Limit: yyDollar[8].limit} + } + case 34: + yyDollar = yyS[yypt-7 : yypt+1] + //line sql.y:380 + { + yyVAL.statement = 
&Delete{Comments: Comments(yyDollar[2].bytes2), Table: yyDollar[4].tableName, Where: NewWhere(WhereStr, yyDollar[5].expr), OrderBy: yyDollar[6].orderBy, Limit: yyDollar[7].limit} + } + case 35: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:386 + { + yyVAL.statement = &Set{} + } + case 36: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:392 + { + yyDollar[1].ddl.Action = CreateTableStr + yyDollar[1].ddl.TableSpec = yyDollar[2].TableSpec + yyVAL.statement = yyDollar[1].ddl + } + case 37: + yyDollar = yyS[yypt-9 : yypt+1] + //line sql.y:398 + { + yyDollar[1].ddl.Action = CreateTableStr + yyDollar[1].ddl.TableSpec = yyDollar[2].TableSpec + yyDollar[1].ddl.PartitionName = string(yyDollar[7].bytes) + yyVAL.statement = yyDollar[1].ddl + } + case 38: + yyDollar = yyS[yypt-4 : yypt+1] + //line sql.y:405 + { + var ifnotexists bool + if yyDollar[3].byt != 0 { + ifnotexists = true + } + yyVAL.statement = &DDL{Action: CreateDBStr, IfNotExists: ifnotexists, Database: yyDollar[4].tableIdent} + } + case 39: + yyDollar = yyS[yypt-6 : yypt+1] + //line sql.y:413 + { + // Change this to an alter statement + yyVAL.statement = &DDL{Action: CreateIndexStr, IndexName: string(yyDollar[3].bytes), Table: yyDollar[5].tableName, NewName: yyDollar[5].tableName} + } + case 40: + yyDollar = yyS[yypt-4 : yypt+1] + //line sql.y:420 + { + var ifnotexists bool + if yyDollar[3].byt != 0 { + ifnotexists = true + } + yyVAL.ddl = &DDL{Action: CreateTableStr, IfNotExists: ifnotexists, Table: yyDollar[4].tableName, NewName: yyDollar[4].tableName} + setDDL(yylex, yyVAL.ddl) + } + case 41: + yyDollar = yyS[yypt-4 : yypt+1] + //line sql.y:431 + { + yyVAL.TableSpec = yyDollar[2].TableSpec + yyVAL.TableSpec.Options = yyDollar[4].TableOptions + } + case 42: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:438 + { + yyVAL.TableOptions.Engine = yyDollar[1].str + yyVAL.TableOptions.Charset = yyDollar[3].str + } + case 43: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:444 + { + yyVAL.str = "" + } + case 44: + 
yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:448 + { + yyVAL.str = string(yyDollar[3].bytes) + } + case 45: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:453 + { + yyVAL.str = "" + } + case 46: + yyDollar = yyS[yypt-4 : yypt+1] + //line sql.y:457 + { + yyVAL.str = string(yyDollar[4].bytes) + } + case 47: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:462 + { + yyVAL.str = "" + } + case 48: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:466 + { + } + case 49: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:472 + { + yyVAL.TableSpec = &TableSpec{} + yyVAL.TableSpec.AddColumn(yyDollar[1].columnDefinition) + } + case 50: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:477 + { + yyVAL.TableSpec.AddColumn(yyDollar[3].columnDefinition) + } + case 51: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:481 + { + yyVAL.TableSpec.AddIndex(yyDollar[3].indexDefinition) + } + case 52: + yyDollar = yyS[yypt-7 : yypt+1] + //line sql.y:487 + { + yyDollar[2].columnType.NotNull = yyDollar[3].boolVal + yyDollar[2].columnType.Default = yyDollar[4].optVal + yyDollar[2].columnType.Autoincrement = yyDollar[5].boolVal + yyDollar[2].columnType.KeyOpt = yyDollar[6].colKeyOpt + yyDollar[2].columnType.Comment = yyDollar[7].optVal + yyVAL.columnDefinition = &ColumnDefinition{Name: NewColIdent(string(yyDollar[1].bytes)), Type: yyDollar[2].columnType} + } + case 53: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:497 + { + yyVAL.columnType = yyDollar[1].columnType + yyVAL.columnType.Unsigned = yyDollar[2].boolVal + yyVAL.columnType.Zerofill = yyDollar[3].boolVal + } + case 56: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:507 + { + yyVAL.columnType = yyDollar[1].columnType + yyVAL.columnType.Length = yyDollar[2].optVal + } + case 57: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:512 + { + yyVAL.columnType = yyDollar[1].columnType + } + case 58: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:518 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + } + case 59: + 
yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:522 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + } + case 60: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:526 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + } + case 61: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:530 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + } + case 62: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:534 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + } + case 63: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:538 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + } + case 64: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:542 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + } + case 65: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:548 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + yyVAL.columnType.Length = yyDollar[2].LengthScaleOption.Length + yyVAL.columnType.Scale = yyDollar[2].LengthScaleOption.Scale + } + case 66: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:554 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + yyVAL.columnType.Length = yyDollar[2].LengthScaleOption.Length + yyVAL.columnType.Scale = yyDollar[2].LengthScaleOption.Scale + } + case 67: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:560 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + yyVAL.columnType.Length = yyDollar[2].LengthScaleOption.Length + yyVAL.columnType.Scale = yyDollar[2].LengthScaleOption.Scale + } + case 68: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:566 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + yyVAL.columnType.Length = yyDollar[2].LengthScaleOption.Length + yyVAL.columnType.Scale = yyDollar[2].LengthScaleOption.Scale + } + case 69: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:572 + { + yyVAL.columnType = ColumnType{Type: 
string(yyDollar[1].bytes)} + yyVAL.columnType.Length = yyDollar[2].LengthScaleOption.Length + yyVAL.columnType.Scale = yyDollar[2].LengthScaleOption.Scale + } + case 70: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:580 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + } + case 71: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:584 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].optVal} + } + case 72: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:588 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].optVal} + } + case 73: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:592 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].optVal} + } + case 74: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:596 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + } + case 75: + yyDollar = yyS[yypt-4 : yypt+1] + //line sql.y:602 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].optVal, Charset: yyDollar[3].str, Collate: yyDollar[4].str} + } + case 76: + yyDollar = yyS[yypt-4 : yypt+1] + //line sql.y:606 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].optVal, Charset: yyDollar[3].str, Collate: yyDollar[4].str} + } + case 77: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:610 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].optVal} + } + case 78: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:614 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].optVal} + } + case 79: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:618 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Charset: yyDollar[2].str, Collate: yyDollar[3].str} + } + case 80: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:622 + { + yyVAL.columnType = ColumnType{Type: 
string(yyDollar[1].bytes), Charset: yyDollar[2].str, Collate: yyDollar[3].str} + } + case 81: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:626 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Charset: yyDollar[2].str, Collate: yyDollar[3].str} + } + case 82: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:630 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Charset: yyDollar[2].str, Collate: yyDollar[3].str} + } + case 83: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:634 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + } + case 84: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:638 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + } + case 85: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:642 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + } + case 86: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:646 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + } + case 87: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:650 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + } + case 88: + yyDollar = yyS[yypt-4 : yypt+1] + //line sql.y:654 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), EnumValues: yyDollar[3].strs} + } + case 89: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:660 + { + yyVAL.strs = make([]string, 0, 4) + yyVAL.strs = append(yyVAL.strs, "'"+string(yyDollar[1].bytes)+"'") + } + case 90: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:665 + { + yyVAL.strs = append(yyDollar[1].strs, "'"+string(yyDollar[3].bytes)+"'") + } + case 91: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:670 + { + yyVAL.optVal = nil + } + case 92: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:674 + { + yyVAL.optVal = NewIntVal(yyDollar[2].bytes) + } + case 93: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:679 + { + yyVAL.LengthScaleOption = LengthScaleOption{} + } + case 94: + yyDollar = 
yyS[yypt-5 : yypt+1] + //line sql.y:683 + { + yyVAL.LengthScaleOption = LengthScaleOption{ + Length: NewIntVal(yyDollar[2].bytes), + Scale: NewIntVal(yyDollar[4].bytes), + } + } + case 95: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:691 + { + yyVAL.LengthScaleOption = LengthScaleOption{} + } + case 96: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:695 + { + yyVAL.LengthScaleOption = LengthScaleOption{ + Length: NewIntVal(yyDollar[2].bytes), + } + } + case 97: + yyDollar = yyS[yypt-5 : yypt+1] + //line sql.y:701 + { + yyVAL.LengthScaleOption = LengthScaleOption{ + Length: NewIntVal(yyDollar[2].bytes), + Scale: NewIntVal(yyDollar[4].bytes), + } + } + case 98: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:709 + { + yyVAL.boolVal = BoolVal(false) + } + case 99: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:713 + { + yyVAL.boolVal = BoolVal(true) + } + case 100: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:718 + { + yyVAL.boolVal = BoolVal(false) + } + case 101: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:722 + { + yyVAL.boolVal = BoolVal(true) + } + case 102: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:728 + { + yyVAL.boolVal = BoolVal(false) + } + case 103: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:732 + { + yyVAL.boolVal = BoolVal(false) + } + case 104: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:736 + { + yyVAL.boolVal = BoolVal(true) + } + case 105: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:741 + { + yyVAL.optVal = nil + } + case 106: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:745 + { + yyVAL.optVal = NewStrVal(yyDollar[2].bytes) + } + case 107: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:749 + { + yyVAL.optVal = NewIntVal(yyDollar[2].bytes) + } + case 108: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:753 + { + yyVAL.optVal = NewFloatVal(yyDollar[2].bytes) + } + case 109: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:757 + { + yyVAL.optVal = NewValArg(yyDollar[2].bytes) + } + case 110: + yyDollar = 
yyS[yypt-0 : yypt+1] + //line sql.y:762 + { + yyVAL.boolVal = BoolVal(false) + } + case 111: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:766 + { + yyVAL.boolVal = BoolVal(true) + } + case 112: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:771 + { + yyVAL.str = "" + } + case 113: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:775 + { + yyVAL.str = string(yyDollar[3].bytes) + } + case 114: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:779 + { + yyVAL.str = string(yyDollar[3].bytes) + } + case 115: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:784 + { + yyVAL.str = "" + } + case 116: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:788 + { + yyVAL.str = string(yyDollar[2].bytes) + } + case 117: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:793 + { + yyVAL.colKeyOpt = ColKeyNone + } + case 118: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:797 + { + yyVAL.colKeyOpt = ColKeyPrimary + } + case 119: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:801 + { + yyVAL.colKeyOpt = ColKey + } + case 120: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:805 + { + yyVAL.colKeyOpt = ColKeyUniqueKey + } + case 121: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:809 + { + yyVAL.colKeyOpt = ColKeyUnique + } + case 122: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:814 + { + yyVAL.optVal = nil + } + case 123: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:818 + { + yyVAL.optVal = NewStrVal(yyDollar[2].bytes) + } + case 124: + yyDollar = yyS[yypt-4 : yypt+1] + //line sql.y:824 + { + yyVAL.indexDefinition = &IndexDefinition{Info: yyDollar[1].indexInfo, Columns: yyDollar[3].indexColumns} + } + case 125: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:830 + { + yyVAL.indexInfo = &IndexInfo{Type: string(yyDollar[1].bytes) + " " + string(yyDollar[2].bytes), Name: NewColIdent("PRIMARY"), Primary: true, Unique: true} + } + case 126: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:834 + { + yyVAL.indexInfo = &IndexInfo{Type: string(yyDollar[1].bytes) + " " + 
string(yyDollar[2].str), Name: NewColIdent(string(yyDollar[3].bytes)), Unique: true} + } + case 127: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:838 + { + yyVAL.indexInfo = &IndexInfo{Type: string(yyDollar[1].bytes), Name: NewColIdent(string(yyDollar[2].bytes)), Unique: true} + } + case 128: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:842 + { + yyVAL.indexInfo = &IndexInfo{Type: string(yyDollar[1].str), Name: NewColIdent(string(yyDollar[2].bytes)), Unique: false} + } + case 129: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:848 + { + yyVAL.str = string(yyDollar[1].bytes) + } + case 130: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:852 + { + yyVAL.str = string(yyDollar[1].bytes) + } + case 131: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:858 + { + yyVAL.indexColumns = []*IndexColumn{yyDollar[1].indexColumn} + } + case 132: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:862 + { + yyVAL.indexColumns = append(yyVAL.indexColumns, yyDollar[3].indexColumn) + } + case 133: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:868 + { + yyVAL.indexColumn = &IndexColumn{Column: yyDollar[1].colIdent, Length: yyDollar[2].optVal} + } + case 134: + yyDollar = yyS[yypt-6 : yypt+1] + //line sql.y:874 + { + yyVAL.statement = &DDL{Action: AlterStr, Table: yyDollar[4].tableName, NewName: yyDollar[4].tableName} + } + case 135: + yyDollar = yyS[yypt-7 : yypt+1] + //line sql.y:878 + { + // Change this to a rename statement + yyVAL.statement = &DDL{Action: RenameStr, Table: yyDollar[4].tableName, NewName: yyDollar[7].tableName} + } + case 136: + yyDollar = yyS[yypt-7 : yypt+1] + //line sql.y:883 + { + // Rename an index can just be an alter + yyVAL.statement = &DDL{Action: AlterStr, Table: yyDollar[4].tableName, NewName: yyDollar[4].tableName} + } + case 137: + yyDollar = yyS[yypt-7 : yypt+1] + //line sql.y:888 + { + yyVAL.statement = &DDL{Action: AlterEngineStr, Table: yyDollar[4].tableName, NewName: yyDollar[4].tableName, Engine: string(yyDollar[7].bytes)} + } + case 
138: + yyDollar = yyS[yypt-9 : yypt+1] + //line sql.y:892 + { + yyVAL.statement = &DDL{Action: AlterCharsetStr, Table: yyDollar[4].tableName, NewName: yyDollar[4].tableName, Charset: string(yyDollar[9].bytes)} + } + case 139: + yyDollar = yyS[yypt-7 : yypt+1] + //line sql.y:896 + { + yyVAL.statement = &DDL{Action: AlterAddColumnStr, Table: yyDollar[4].tableName, NewName: yyDollar[4].tableName, TableSpec: yyDollar[7].TableSpec} + } + case 140: + yyDollar = yyS[yypt-7 : yypt+1] + //line sql.y:900 + { + yyVAL.statement = &DDL{Action: AlterDropColumnStr, Table: yyDollar[4].tableName, NewName: yyDollar[4].tableName, DropColumnName: string(yyDollar[7].bytes)} + } + case 141: + yyDollar = yyS[yypt-7 : yypt+1] + //line sql.y:904 + { + yyVAL.statement = &DDL{Action: AlterModifyColumnStr, Table: yyDollar[4].tableName, NewName: yyDollar[4].tableName, ModifyColumnDef: yyDollar[7].columnDefinition} + } + case 142: + yyDollar = yyS[yypt-4 : yypt+1] + //line sql.y:911 + { + var exists bool + if yyDollar[3].byt != 0 { + exists = true + } + yyVAL.statement = &DDL{Action: DropTableStr, Table: yyDollar[4].tableName, IfExists: exists} + } + case 143: + yyDollar = yyS[yypt-5 : yypt+1] + //line sql.y:919 + { + // Change this to an alter statement + yyVAL.statement = &DDL{Action: DropIndexStr, IndexName: string(yyDollar[3].bytes), Table: yyDollar[5].tableName, NewName: yyDollar[5].tableName} + } + case 144: + yyDollar = yyS[yypt-4 : yypt+1] + //line sql.y:924 + { + var exists bool + if yyDollar[3].byt != 0 { + exists = true + } + yyVAL.statement = &DDL{Action: DropDBStr, Database: yyDollar[4].tableIdent, IfExists: exists} + } + case 145: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:934 + { + yyVAL.statement = &DDL{Action: TruncateTableStr, Table: yyDollar[3].tableName, NewName: yyDollar[3].tableName} + } + case 146: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:940 + { + yyVAL.statement = &DDL{Action: AlterStr, Table: yyDollar[3].tableName, NewName: yyDollar[3].tableName} + } + 
case 147: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:946 + { + yyVAL.statement = &Xa{} + } + case 148: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:952 + { + yyVAL.statement = &Explain{} + } + case 149: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:958 + { + yyVAL.statement = &Kill{QueryID: &NumVal{raw: string(yyDollar[2].bytes)}} + } + case 150: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:964 + { + yyVAL.statement = &Transaction{Action: StartTxnStr} + } + case 151: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:968 + { + yyVAL.statement = &Transaction{Action: CommitTxnStr} + } + case 152: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:974 + { + yyVAL.str = ShowUnsupportedStr + } + case 153: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:978 + { + switch v := string(yyDollar[1].bytes); v { + case ShowDatabasesStr, ShowTablesStr, ShowEnginesStr, ShowVersionsStr, ShowProcesslistStr, ShowQueryzStr, ShowTxnzStr, ShowStatusStr: + yyVAL.str = v + default: + yyVAL.str = ShowUnsupportedStr + } + } + case 154: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:987 + { + yyVAL.str = ShowUnsupportedStr + } + case 155: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:993 + { + yyVAL.statement = &Show{Type: yyDollar[2].str} + } + case 156: + yyDollar = yyS[yypt-5 : yypt+1] + //line sql.y:997 + { + yyVAL.statement = &Show{Type: ShowTablesStr, Database: yyDollar[4].tableName} + } + case 157: + yyDollar = yyS[yypt-5 : yypt+1] + //line sql.y:1001 + { + yyVAL.statement = &Show{Type: ShowCreateTableStr, Table: yyDollar[4].tableName} + } + case 158: + yyDollar = yyS[yypt-5 : yypt+1] + //line sql.y:1005 + { + yyVAL.statement = &Show{Type: ShowCreateDatabaseStr, Database: yyDollar[4].tableName} + } + case 159: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:1009 + { + yyVAL.statement = &Show{Type: ShowWarningsStr} + } + case 160: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:1013 + { + yyVAL.statement = &Show{Type: ShowVariablesStr} + } + case 161: + yyDollar = 
yyS[yypt-6 : yypt+1] + //line sql.y:1017 + { + yyVAL.statement = &Show{Type: ShowBinlogEventsStr, From: yyDollar[4].str, Limit: yyDollar[5].limit} + } + case 162: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:1022 + { + yyVAL.str = "" + } + case 163: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:1026 + { + yyVAL.str = string(yyDollar[3].bytes) + } + case 164: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1032 + { + yyVAL.statement = &Use{DBName: yyDollar[2].tableIdent} + } + case 165: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1038 + { + yyVAL.statement = &OtherRead{} + } + case 166: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1042 + { + yyVAL.statement = &OtherRead{} + } + case 167: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1046 + { + yyVAL.statement = &OtherAdmin{} + } + case 168: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1050 + { + yyVAL.statement = &OtherAdmin{} + } + case 169: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:1055 + { + setAllowComments(yylex, true) + } + case 170: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1059 + { + yyVAL.bytes2 = yyDollar[2].bytes2 + setAllowComments(yylex, false) + } + case 171: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:1065 + { + yyVAL.bytes2 = nil + } + case 172: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1069 + { + yyVAL.bytes2 = append(yyDollar[1].bytes2, yyDollar[2].bytes) + } + case 173: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1075 + { + yyVAL.str = UnionStr + } + case 174: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1079 + { + yyVAL.str = UnionAllStr + } + case 175: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1083 + { + yyVAL.str = UnionDistinctStr + } + case 176: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:1088 + { + yyVAL.str = "" + } + case 177: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1092 + { + yyVAL.str = SQLNoCacheStr + } + case 178: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1096 + { + yyVAL.str = SQLCacheStr + } + case 
179: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:1101 + { + yyVAL.str = "" + } + case 180: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1105 + { + yyVAL.str = DistinctStr + } + case 181: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:1110 + { + yyVAL.str = "" + } + case 182: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1114 + { + yyVAL.str = StraightJoinHint + } + case 183: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:1119 + { + yyVAL.selectExprs = nil + } + case 184: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1123 + { + yyVAL.selectExprs = yyDollar[1].selectExprs + } + case 185: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1129 + { + yyVAL.selectExprs = SelectExprs{yyDollar[1].selectExpr} + } + case 186: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:1133 + { + yyVAL.selectExprs = append(yyVAL.selectExprs, yyDollar[3].selectExpr) + } + case 187: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1139 + { + yyVAL.selectExpr = &StarExpr{} + } + case 188: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1143 + { + yyVAL.selectExpr = &AliasedExpr{Expr: yyDollar[1].expr, As: yyDollar[2].colIdent} + } + case 189: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:1147 + { + yyVAL.selectExpr = &StarExpr{TableName: TableName{Name: yyDollar[1].tableIdent}} + } + case 190: + yyDollar = yyS[yypt-5 : yypt+1] + //line sql.y:1151 + { + yyVAL.selectExpr = &StarExpr{TableName: TableName{Qualifier: yyDollar[1].tableIdent, Name: yyDollar[3].tableIdent}} + } + case 191: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:1156 + { + yyVAL.colIdent = ColIdent{} + } + case 192: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1160 + { + yyVAL.colIdent = yyDollar[1].colIdent + } + case 193: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1164 + { + yyVAL.colIdent = yyDollar[2].colIdent + } + case 195: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1171 + { + yyVAL.colIdent = NewColIdent(string(yyDollar[1].bytes)) + } + case 196: + yyDollar = yyS[yypt-0 : 
yypt+1] + //line sql.y:1176 + { + yyVAL.tableExprs = TableExprs{&AliasedTableExpr{Expr: TableName{Name: NewTableIdent("dual")}}} + } + case 197: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1180 + { + yyVAL.tableExprs = yyDollar[2].tableExprs + } + case 198: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1186 + { + yyVAL.tableExprs = TableExprs{yyDollar[1].tableExpr} + } + case 199: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:1190 + { + yyVAL.tableExprs = append(yyVAL.tableExprs, yyDollar[3].tableExpr) + } + case 202: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1200 + { + yyVAL.tableExpr = yyDollar[1].aliasedTableName + } + case 203: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:1204 + { + yyVAL.tableExpr = &AliasedTableExpr{Expr: yyDollar[1].subquery, As: yyDollar[3].tableIdent} + } + case 204: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:1208 + { + yyVAL.tableExpr = &ParenTableExpr{Exprs: yyDollar[2].tableExprs} + } + case 205: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:1214 + { + yyVAL.aliasedTableName = &AliasedTableExpr{Expr: yyDollar[1].tableName, As: yyDollar[2].tableIdent, Hints: yyDollar[3].indexHints} + } + case 206: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:1227 + { + yyVAL.tableExpr = &JoinTableExpr{LeftExpr: yyDollar[1].tableExpr, Join: yyDollar[2].str, RightExpr: yyDollar[3].tableExpr} + } + case 207: + yyDollar = yyS[yypt-5 : yypt+1] + //line sql.y:1231 + { + yyVAL.tableExpr = &JoinTableExpr{LeftExpr: yyDollar[1].tableExpr, Join: yyDollar[2].str, RightExpr: yyDollar[3].tableExpr, On: yyDollar[5].expr} + } + case 208: + yyDollar = yyS[yypt-5 : yypt+1] + //line sql.y:1235 + { + yyVAL.tableExpr = &JoinTableExpr{LeftExpr: yyDollar[1].tableExpr, Join: yyDollar[2].str, RightExpr: yyDollar[3].tableExpr, On: yyDollar[5].expr} + } + case 209: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:1239 + { + yyVAL.tableExpr = &JoinTableExpr{LeftExpr: yyDollar[1].tableExpr, Join: yyDollar[2].str, RightExpr: yyDollar[3].tableExpr} + 
} + case 210: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:1244 + { + yyVAL.empty = struct{}{} + } + case 211: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1246 + { + yyVAL.empty = struct{}{} + } + case 212: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:1249 + { + yyVAL.tableIdent = NewTableIdent("") + } + case 213: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1253 + { + yyVAL.tableIdent = yyDollar[1].tableIdent + } + case 214: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1257 + { + yyVAL.tableIdent = yyDollar[2].tableIdent + } + case 216: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1264 + { + yyVAL.tableIdent = NewTableIdent(string(yyDollar[1].bytes)) + } + case 217: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1270 + { + yyVAL.str = JoinStr + } + case 218: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1274 + { + yyVAL.str = JoinStr + } + case 219: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1278 + { + yyVAL.str = JoinStr + } + case 220: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1282 + { + yyVAL.str = StraightJoinStr + } + case 221: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1288 + { + yyVAL.str = LeftJoinStr + } + case 222: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:1292 + { + yyVAL.str = LeftJoinStr + } + case 223: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1296 + { + yyVAL.str = RightJoinStr + } + case 224: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:1300 + { + yyVAL.str = RightJoinStr + } + case 225: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1306 + { + yyVAL.str = NaturalJoinStr + } + case 226: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1310 + { + if yyDollar[2].str == LeftJoinStr { + yyVAL.str = NaturalLeftJoinStr + } else { + yyVAL.str = NaturalRightJoinStr + } + } + case 227: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1320 + { + yyVAL.tableName = yyDollar[2].tableName + } + case 228: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1324 + { + yyVAL.tableName = 
yyDollar[1].tableName + } + case 229: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1330 + { + yyVAL.tableName = TableName{Name: yyDollar[1].tableIdent} + } + case 230: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:1334 + { + yyVAL.tableName = TableName{Qualifier: yyDollar[1].tableIdent, Name: yyDollar[3].tableIdent} + } + case 231: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:1339 + { + yyVAL.indexHints = nil + } + case 232: + yyDollar = yyS[yypt-5 : yypt+1] + //line sql.y:1343 + { + yyVAL.indexHints = &IndexHints{Type: UseStr, Indexes: yyDollar[4].colIdents} + } + case 233: + yyDollar = yyS[yypt-5 : yypt+1] + //line sql.y:1347 + { + yyVAL.indexHints = &IndexHints{Type: IgnoreStr, Indexes: yyDollar[4].colIdents} + } + case 234: + yyDollar = yyS[yypt-5 : yypt+1] + //line sql.y:1351 + { + yyVAL.indexHints = &IndexHints{Type: ForceStr, Indexes: yyDollar[4].colIdents} + } + case 235: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1357 + { + yyVAL.colIdents = []ColIdent{yyDollar[1].colIdent} + } + case 236: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:1361 + { + yyVAL.colIdents = append(yyDollar[1].colIdents, yyDollar[3].colIdent) + } + case 237: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:1366 + { + yyVAL.expr = nil + } + case 238: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1370 + { + yyVAL.expr = yyDollar[2].expr + } + case 239: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1376 + { + yyVAL.expr = yyDollar[1].expr + } + case 240: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:1380 + { + yyVAL.expr = &AndExpr{Left: yyDollar[1].expr, Right: yyDollar[3].expr} + } + case 241: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:1384 + { + yyVAL.expr = &OrExpr{Left: yyDollar[1].expr, Right: yyDollar[3].expr} + } + case 242: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1388 + { + yyVAL.expr = &NotExpr{Expr: yyDollar[2].expr} + } + case 243: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:1392 + { + yyVAL.expr = &IsExpr{Operator: 
yyDollar[3].str, Expr: yyDollar[1].expr} + } + case 244: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1396 + { + yyVAL.expr = yyDollar[1].expr + } + case 245: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1400 + { + yyVAL.expr = &Default{ColName: yyDollar[2].str} + } + case 246: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:1406 + { + yyVAL.str = "" + } + case 247: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:1410 + { + yyVAL.str = string(yyDollar[2].bytes) + } + case 248: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1416 + { + yyVAL.boolVal = BoolVal(true) + } + case 249: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1420 + { + yyVAL.boolVal = BoolVal(false) + } + case 250: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:1426 + { + yyVAL.expr = &ComparisonExpr{Left: yyDollar[1].expr, Operator: yyDollar[2].str, Right: yyDollar[3].expr} + } + case 251: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:1430 + { + yyVAL.expr = &ComparisonExpr{Left: yyDollar[1].expr, Operator: InStr, Right: yyDollar[3].colTuple} + } + case 252: + yyDollar = yyS[yypt-4 : yypt+1] + //line sql.y:1434 + { + yyVAL.expr = &ComparisonExpr{Left: yyDollar[1].expr, Operator: NotInStr, Right: yyDollar[4].colTuple} + } + case 253: + yyDollar = yyS[yypt-4 : yypt+1] + //line sql.y:1438 + { + yyVAL.expr = &ComparisonExpr{Left: yyDollar[1].expr, Operator: LikeStr, Right: yyDollar[3].expr, Escape: yyDollar[4].expr} + } + case 254: + yyDollar = yyS[yypt-5 : yypt+1] + //line sql.y:1442 + { + yyVAL.expr = &ComparisonExpr{Left: yyDollar[1].expr, Operator: NotLikeStr, Right: yyDollar[4].expr, Escape: yyDollar[5].expr} + } + case 255: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:1446 + { + yyVAL.expr = &ComparisonExpr{Left: yyDollar[1].expr, Operator: RegexpStr, Right: yyDollar[3].expr} + } + case 256: + yyDollar = yyS[yypt-4 : yypt+1] + //line sql.y:1450 + { + yyVAL.expr = &ComparisonExpr{Left: yyDollar[1].expr, Operator: NotRegexpStr, Right: yyDollar[4].expr} + } + case 257: + 
yyDollar = yyS[yypt-5 : yypt+1] + //line sql.y:1454 + { + yyVAL.expr = &RangeCond{Left: yyDollar[1].expr, Operator: BetweenStr, From: yyDollar[3].expr, To: yyDollar[5].expr} + } + case 258: + yyDollar = yyS[yypt-6 : yypt+1] + //line sql.y:1458 + { + yyVAL.expr = &RangeCond{Left: yyDollar[1].expr, Operator: NotBetweenStr, From: yyDollar[4].expr, To: yyDollar[6].expr} + } + case 259: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1462 + { + yyVAL.expr = &ExistsExpr{Subquery: yyDollar[2].subquery} + } + case 260: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1468 + { + yyVAL.str = IsNullStr + } + case 261: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1472 + { + yyVAL.str = IsNotNullStr + } + case 262: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1476 + { + yyVAL.str = IsTrueStr + } + case 263: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1480 + { + yyVAL.str = IsNotTrueStr + } + case 264: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1484 + { + yyVAL.str = IsFalseStr + } + case 265: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1488 + { + yyVAL.str = IsNotFalseStr + } + case 266: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1494 + { + yyVAL.str = EqualStr + } + case 267: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1498 + { + yyVAL.str = LessThanStr + } + case 268: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1502 + { + yyVAL.str = GreaterThanStr + } + case 269: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1506 + { + yyVAL.str = LessEqualStr + } + case 270: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1510 + { + yyVAL.str = GreaterEqualStr + } + case 271: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1514 + { + yyVAL.str = NotEqualStr + } + case 272: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1518 + { + yyVAL.str = NullSafeEqualStr + } + case 273: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:1523 + { + yyVAL.expr = nil + } + case 274: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1527 + { + yyVAL.expr = 
yyDollar[2].expr + } + case 275: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1533 + { + yyVAL.colTuple = yyDollar[1].valTuple + } + case 276: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1537 + { + yyVAL.colTuple = yyDollar[1].subquery + } + case 277: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1541 + { + yyVAL.colTuple = ListArg(yyDollar[1].bytes) + } + case 278: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:1547 + { + yyVAL.subquery = &Subquery{yyDollar[2].selStmt} + } + case 279: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1553 + { + yyVAL.exprs = Exprs{yyDollar[1].expr} + } + case 280: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:1557 + { + yyVAL.exprs = append(yyDollar[1].exprs, yyDollar[3].expr) + } + case 281: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1563 + { + yyVAL.expr = yyDollar[1].expr + } + case 282: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1567 + { + yyVAL.expr = yyDollar[1].boolVal + } + case 283: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1571 + { + yyVAL.expr = yyDollar[1].colName + } + case 284: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1575 + { + yyVAL.expr = yyDollar[1].expr + } + case 285: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1579 + { + yyVAL.expr = yyDollar[1].subquery + } + case 286: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:1583 + { + yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: BitAndStr, Right: yyDollar[3].expr} + } + case 287: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:1587 + { + yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: BitOrStr, Right: yyDollar[3].expr} + } + case 288: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:1591 + { + yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: BitXorStr, Right: yyDollar[3].expr} + } + case 289: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:1595 + { + yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: PlusStr, Right: yyDollar[3].expr} + } + case 290: + yyDollar = yyS[yypt-3 : 
yypt+1] + //line sql.y:1599 + { + yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: MinusStr, Right: yyDollar[3].expr} + } + case 291: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:1603 + { + yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: MultStr, Right: yyDollar[3].expr} + } + case 292: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:1607 + { + yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: DivStr, Right: yyDollar[3].expr} + } + case 293: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:1611 + { + yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: IntDivStr, Right: yyDollar[3].expr} + } + case 294: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:1615 + { + yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: ModStr, Right: yyDollar[3].expr} + } + case 295: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:1619 + { + yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: ModStr, Right: yyDollar[3].expr} + } + case 296: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:1623 + { + yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: ShiftLeftStr, Right: yyDollar[3].expr} + } + case 297: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:1627 + { + yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: ShiftRightStr, Right: yyDollar[3].expr} + } + case 298: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:1631 + { + yyVAL.expr = &BinaryExpr{Left: yyDollar[1].colName, Operator: JSONExtractOp, Right: yyDollar[3].expr} + } + case 299: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:1635 + { + yyVAL.expr = &BinaryExpr{Left: yyDollar[1].colName, Operator: JSONUnquoteExtractOp, Right: yyDollar[3].expr} + } + case 300: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:1639 + { + yyVAL.expr = &CollateExpr{Expr: yyDollar[1].expr, Charset: yyDollar[3].str} + } + case 301: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1643 + { + yyVAL.expr = &UnaryExpr{Operator: BinaryStr, Expr: yyDollar[2].expr} + } + case 302: + 
yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1647 + { + if num, ok := yyDollar[2].expr.(*SQLVal); ok && num.Type == IntVal { + yyVAL.expr = num + } else { + yyVAL.expr = &UnaryExpr{Operator: UPlusStr, Expr: yyDollar[2].expr} + } + } + case 303: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1655 + { + if num, ok := yyDollar[2].expr.(*SQLVal); ok && num.Type == IntVal { + // Handle double negative + if num.Val[0] == '-' { + num.Val = num.Val[1:] + yyVAL.expr = num + } else { + yyVAL.expr = NewIntVal(append([]byte("-"), num.Val...)) + } + } else { + yyVAL.expr = &UnaryExpr{Operator: UMinusStr, Expr: yyDollar[2].expr} + } + } + case 304: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1669 + { + yyVAL.expr = &UnaryExpr{Operator: TildaStr, Expr: yyDollar[2].expr} + } + case 305: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1673 + { + yyVAL.expr = &UnaryExpr{Operator: BangStr, Expr: yyDollar[2].expr} + } + case 306: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:1677 + { + // This rule prevents the usage of INTERVAL + // as a function. If support is needed for that, + // we'll need to revisit this. The solution + // will be non-trivial because of grammar conflicts. 
+ yyVAL.expr = &IntervalExpr{Expr: yyDollar[2].expr, Unit: yyDollar[3].colIdent} + } + case 311: + yyDollar = yyS[yypt-4 : yypt+1] + //line sql.y:1695 + { + yyVAL.expr = &FuncExpr{Name: yyDollar[1].colIdent, Exprs: yyDollar[3].selectExprs} + } + case 312: + yyDollar = yyS[yypt-5 : yypt+1] + //line sql.y:1699 + { + yyVAL.expr = &FuncExpr{Name: yyDollar[1].colIdent, Distinct: true, Exprs: yyDollar[4].selectExprs} + } + case 313: + yyDollar = yyS[yypt-6 : yypt+1] + //line sql.y:1703 + { + yyVAL.expr = &FuncExpr{Qualifier: yyDollar[1].tableIdent, Name: yyDollar[3].colIdent, Exprs: yyDollar[5].selectExprs} + } + case 314: + yyDollar = yyS[yypt-4 : yypt+1] + //line sql.y:1713 + { + yyVAL.expr = &FuncExpr{Name: NewColIdent("left"), Exprs: yyDollar[3].selectExprs} + } + case 315: + yyDollar = yyS[yypt-4 : yypt+1] + //line sql.y:1717 + { + yyVAL.expr = &FuncExpr{Name: NewColIdent("right"), Exprs: yyDollar[3].selectExprs} + } + case 316: + yyDollar = yyS[yypt-6 : yypt+1] + //line sql.y:1721 + { + yyVAL.expr = &ConvertExpr{Expr: yyDollar[3].expr, Type: yyDollar[5].convertType} + } + case 317: + yyDollar = yyS[yypt-6 : yypt+1] + //line sql.y:1725 + { + yyVAL.expr = &ConvertExpr{Expr: yyDollar[3].expr, Type: yyDollar[5].convertType} + } + case 318: + yyDollar = yyS[yypt-6 : yypt+1] + //line sql.y:1729 + { + yyVAL.expr = &ConvertUsingExpr{Expr: yyDollar[3].expr, Type: yyDollar[5].str} + } + case 319: + yyDollar = yyS[yypt-9 : yypt+1] + //line sql.y:1733 + { + yyVAL.expr = &MatchExpr{Columns: yyDollar[3].selectExprs, Expr: yyDollar[7].expr, Option: yyDollar[8].str} + } + case 320: + yyDollar = yyS[yypt-7 : yypt+1] + //line sql.y:1737 + { + yyVAL.expr = &GroupConcatExpr{Distinct: yyDollar[3].str, Exprs: yyDollar[4].selectExprs, OrderBy: yyDollar[5].orderBy, Separator: yyDollar[6].str} + } + case 321: + yyDollar = yyS[yypt-5 : yypt+1] + //line sql.y:1741 + { + yyVAL.expr = &CaseExpr{Expr: yyDollar[2].expr, Whens: yyDollar[3].whens, Else: yyDollar[4].expr} + } + case 322: + yyDollar 
= yyS[yypt-4 : yypt+1] + //line sql.y:1745 + { + yyVAL.expr = &ValuesFuncExpr{Name: yyDollar[3].colIdent} + } + case 323: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1755 + { + yyVAL.expr = &FuncExpr{Name: NewColIdent("current_timestamp")} + } + case 324: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1759 + { + yyVAL.expr = &FuncExpr{Name: NewColIdent("utc_timestamp")} + } + case 325: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1763 + { + yyVAL.expr = &FuncExpr{Name: NewColIdent("utc_time")} + } + case 326: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1767 + { + yyVAL.expr = &FuncExpr{Name: NewColIdent("utc_date")} + } + case 327: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1772 + { + yyVAL.expr = &FuncExpr{Name: NewColIdent("localtime")} + } + case 328: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1777 + { + yyVAL.expr = &FuncExpr{Name: NewColIdent("localtimestamp")} + } + case 329: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1782 + { + yyVAL.expr = &FuncExpr{Name: NewColIdent("current_date")} + } + case 330: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1787 + { + yyVAL.expr = &FuncExpr{Name: NewColIdent("current_time")} + } + case 333: + yyDollar = yyS[yypt-4 : yypt+1] + //line sql.y:1801 + { + yyVAL.expr = &FuncExpr{Name: NewColIdent("if"), Exprs: yyDollar[3].selectExprs} + } + case 334: + yyDollar = yyS[yypt-4 : yypt+1] + //line sql.y:1805 + { + yyVAL.expr = &FuncExpr{Name: NewColIdent("database"), Exprs: yyDollar[3].selectExprs} + } + case 335: + yyDollar = yyS[yypt-4 : yypt+1] + //line sql.y:1809 + { + yyVAL.expr = &FuncExpr{Name: NewColIdent("mod"), Exprs: yyDollar[3].selectExprs} + } + case 336: + yyDollar = yyS[yypt-4 : yypt+1] + //line sql.y:1813 + { + yyVAL.expr = &FuncExpr{Name: NewColIdent("replace"), Exprs: yyDollar[3].selectExprs} + } + case 337: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:1819 + { + yyVAL.str = "" + } + case 338: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:1823 + { + yyVAL.str = 
BooleanModeStr + } + case 339: + yyDollar = yyS[yypt-4 : yypt+1] + //line sql.y:1827 + { + yyVAL.str = NaturalLanguageModeStr + } + case 340: + yyDollar = yyS[yypt-7 : yypt+1] + //line sql.y:1831 + { + yyVAL.str = NaturalLanguageModeWithQueryExpansionStr + } + case 341: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:1835 + { + yyVAL.str = QueryExpansionStr + } + case 342: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1841 + { + yyVAL.str = string(yyDollar[1].bytes) + } + case 343: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1845 + { + yyVAL.str = string(yyDollar[1].bytes) + } + case 344: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1851 + { + yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].optVal} + } + case 345: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:1855 + { + yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].optVal, Charset: yyDollar[3].str, Operator: CharacterSetStr} + } + case 346: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:1859 + { + yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].optVal, Charset: string(yyDollar[3].bytes)} + } + case 347: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1863 + { + yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes)} + } + case 348: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1867 + { + yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].optVal} + } + case 349: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1871 + { + yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes)} + yyVAL.convertType.Length = yyDollar[2].LengthScaleOption.Length + yyVAL.convertType.Scale = yyDollar[2].LengthScaleOption.Scale + } + case 350: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1877 + { + yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes)} + } + case 351: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1881 + { + 
yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].optVal} + } + case 352: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1885 + { + yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes)} + } + case 353: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1889 + { + yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes)} + } + case 354: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1893 + { + yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].optVal} + } + case 355: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1897 + { + yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes)} + } + case 356: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1901 + { + yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes)} + } + case 357: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:1906 + { + yyVAL.expr = nil + } + case 358: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1910 + { + yyVAL.expr = yyDollar[1].expr + } + case 359: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:1915 + { + yyVAL.str = string("") + } + case 360: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1919 + { + yyVAL.str = " separator '" + string(yyDollar[2].bytes) + "'" + } + case 361: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1925 + { + yyVAL.whens = []*When{yyDollar[1].when} + } + case 362: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1929 + { + yyVAL.whens = append(yyDollar[1].whens, yyDollar[2].when) + } + case 363: + yyDollar = yyS[yypt-4 : yypt+1] + //line sql.y:1935 + { + yyVAL.when = &When{Cond: yyDollar[2].expr, Val: yyDollar[4].expr} + } + case 364: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:1940 + { + yyVAL.expr = nil + } + case 365: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1944 + { + yyVAL.expr = yyDollar[2].expr + } + case 366: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1950 + { + yyVAL.colName = &ColName{Name: yyDollar[1].colIdent} + } + case 
367: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:1954 + { + yyVAL.colName = &ColName{Qualifier: TableName{Name: yyDollar[1].tableIdent}, Name: yyDollar[3].colIdent} + } + case 368: + yyDollar = yyS[yypt-5 : yypt+1] + //line sql.y:1958 + { + yyVAL.colName = &ColName{Qualifier: TableName{Qualifier: yyDollar[1].tableIdent, Name: yyDollar[3].tableIdent}, Name: yyDollar[5].colIdent} + } + case 369: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1964 + { + yyVAL.expr = NewStrVal(yyDollar[1].bytes) + } + case 370: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1968 + { + yyVAL.expr = NewHexVal(yyDollar[1].bytes) + } + case 371: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1972 + { + yyVAL.expr = NewIntVal(yyDollar[1].bytes) + } + case 372: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1976 + { + yyVAL.expr = NewFloatVal(yyDollar[1].bytes) + } + case 373: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1980 + { + yyVAL.expr = NewHexNum(yyDollar[1].bytes) + } + case 374: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1984 + { + yyVAL.expr = NewValArg(yyDollar[1].bytes) + } + case 375: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1988 + { + yyVAL.expr = &NullVal{} + } + case 376: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1994 + { + // TODO(sougou): Deprecate this construct. 
+ if yyDollar[1].colIdent.Lowered() != "value" { + yylex.Error("expecting value after next") + return 1 + } + yyVAL.expr = NewIntVal([]byte("1")) + } + case 377: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:2003 + { + yyVAL.expr = NewIntVal(yyDollar[1].bytes) + } + case 378: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:2007 + { + yyVAL.expr = NewValArg(yyDollar[1].bytes) + } + case 379: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:2012 + { + yyVAL.exprs = nil + } + case 380: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:2016 + { + yyVAL.exprs = yyDollar[3].exprs + } + case 381: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:2021 + { + yyVAL.expr = nil + } + case 382: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:2025 + { + yyVAL.expr = yyDollar[2].expr + } + case 383: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:2030 + { + yyVAL.orderBy = nil + } + case 384: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:2034 + { + yyVAL.orderBy = yyDollar[3].orderBy + } + case 385: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2040 + { + yyVAL.orderBy = OrderBy{yyDollar[1].order} + } + case 386: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:2044 + { + yyVAL.orderBy = append(yyDollar[1].orderBy, yyDollar[3].order) + } + case 387: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:2050 + { + yyVAL.order = &Order{Expr: yyDollar[1].expr, Direction: yyDollar[2].str} + } + case 388: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:2055 + { + yyVAL.str = AscScr + } + case 389: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2059 + { + yyVAL.str = AscScr + } + case 390: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2063 + { + yyVAL.str = DescScr + } + case 391: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:2068 + { + yyVAL.limit = nil + } + case 392: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:2072 + { + yyVAL.limit = &Limit{Rowcount: yyDollar[2].expr} + } + case 393: + yyDollar = yyS[yypt-4 : yypt+1] + //line sql.y:2076 + { + yyVAL.limit = 
&Limit{Offset: yyDollar[2].expr, Rowcount: yyDollar[4].expr} + } + case 394: + yyDollar = yyS[yypt-4 : yypt+1] + //line sql.y:2080 + { + yyVAL.limit = &Limit{Offset: yyDollar[4].expr, Rowcount: yyDollar[2].expr} + } + case 395: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:2085 + { + yyVAL.str = "" + } + case 396: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:2089 + { + yyVAL.str = ForUpdateStr + } + case 397: + yyDollar = yyS[yypt-4 : yypt+1] + //line sql.y:2093 + { + yyVAL.str = ShareModeStr + } + case 398: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:2106 + { + yyVAL.ins = &Insert{Rows: yyDollar[2].values} + } + case 399: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2110 + { + yyVAL.ins = &Insert{Rows: yyDollar[1].selStmt} + } + case 400: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:2114 + { + // Drop the redundant parenthesis. + yyVAL.ins = &Insert{Rows: yyDollar[2].selStmt} + } + case 401: + yyDollar = yyS[yypt-5 : yypt+1] + //line sql.y:2119 + { + yyVAL.ins = &Insert{Columns: yyDollar[2].columns, Rows: yyDollar[5].values} + } + case 402: + yyDollar = yyS[yypt-4 : yypt+1] + //line sql.y:2123 + { + yyVAL.ins = &Insert{Columns: yyDollar[2].columns, Rows: yyDollar[4].selStmt} + } + case 403: + yyDollar = yyS[yypt-6 : yypt+1] + //line sql.y:2127 + { + // Drop the redundant parenthesis. 
+ yyVAL.ins = &Insert{Columns: yyDollar[2].columns, Rows: yyDollar[5].selStmt} + } + case 404: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2134 + { + yyVAL.columns = Columns{yyDollar[1].colIdent} + } + case 405: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:2138 + { + yyVAL.columns = Columns{yyDollar[3].colIdent} + } + case 406: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:2142 + { + yyVAL.columns = append(yyVAL.columns, yyDollar[3].colIdent) + } + case 407: + yyDollar = yyS[yypt-5 : yypt+1] + //line sql.y:2146 + { + yyVAL.columns = append(yyVAL.columns, yyDollar[5].colIdent) + } + case 408: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:2151 + { + yyVAL.updateExprs = nil + } + case 409: + yyDollar = yyS[yypt-5 : yypt+1] + //line sql.y:2155 + { + yyVAL.updateExprs = yyDollar[5].updateExprs + } + case 410: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2161 + { + yyVAL.values = Values{yyDollar[1].valTuple} + } + case 411: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:2165 + { + yyVAL.values = append(yyDollar[1].values, yyDollar[3].valTuple) + } + case 412: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2171 + { + yyVAL.valTuple = yyDollar[1].valTuple + } + case 413: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:2175 + { + yyVAL.valTuple = ValTuple{} + } + case 414: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:2181 + { + yyVAL.valTuple = ValTuple(yyDollar[2].exprs) + } + case 415: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2187 + { + if len(yyDollar[1].valTuple) == 1 { + yyVAL.expr = &ParenExpr{yyDollar[1].valTuple[0]} + } else { + yyVAL.expr = yyDollar[1].valTuple + } + } + case 416: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2197 + { + yyVAL.updateExprs = UpdateExprs{yyDollar[1].updateExpr} + } + case 417: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:2201 + { + yyVAL.updateExprs = append(yyDollar[1].updateExprs, yyDollar[3].updateExpr) + } + case 418: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:2207 + { + 
yyVAL.updateExpr = &UpdateExpr{Name: yyDollar[1].colName, Expr: yyDollar[3].expr} + } + case 421: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:2216 + { + yyVAL.byt = 0 + } + case 422: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:2218 + { + yyVAL.byt = 1 + } + case 423: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:2221 + { + yyVAL.byt = 0 + } + case 424: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:2223 + { + yyVAL.byt = 1 + } + case 425: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:2226 + { + yyVAL.str = "" + } + case 426: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2228 + { + yyVAL.str = IgnoreStr + } + case 427: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2232 + { + yyVAL.empty = struct{}{} + } + case 428: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2234 + { + yyVAL.empty = struct{}{} + } + case 429: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2236 + { + yyVAL.empty = struct{}{} + } + case 430: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2238 + { + yyVAL.empty = struct{}{} + } + case 431: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2240 + { + yyVAL.empty = struct{}{} + } + case 432: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2242 + { + yyVAL.empty = struct{}{} + } + case 433: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2244 + { + yyVAL.empty = struct{}{} + } + case 434: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2246 + { + yyVAL.empty = struct{}{} + } + case 435: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2248 + { + yyVAL.empty = struct{}{} + } + case 436: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2250 + { + yyVAL.empty = struct{}{} + } + case 437: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:2253 + { + yyVAL.empty = struct{}{} + } + case 438: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2255 + { + yyVAL.empty = struct{}{} + } + case 439: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2257 + { + yyVAL.empty = struct{}{} + } + case 440: + yyDollar = yyS[yypt-1 : yypt+1] + 
//line sql.y:2261 + { + yyVAL.empty = struct{}{} + } + case 441: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2263 + { + yyVAL.empty = struct{}{} + } + case 442: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2267 + { + yyVAL.colIdent = NewColIdent(string(yyDollar[1].bytes)) + } + case 443: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2271 + { + yyVAL.colIdent = NewColIdent(string(yyDollar[1].bytes)) + } + case 445: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2278 + { + yyVAL.colIdent = NewColIdent(string(yyDollar[1].bytes)) + } + case 446: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2284 + { + yyVAL.tableIdent = NewTableIdent(string(yyDollar[1].bytes)) + } + case 447: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2288 + { + yyVAL.tableIdent = NewTableIdent(string(yyDollar[1].bytes)) + } + case 449: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2295 + { + yyVAL.tableIdent = NewTableIdent(string(yyDollar[1].bytes)) + } + case 601: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2472 + { + if incNesting(yylex) { + yylex.Error("max nesting level reached") + return 1 + } + } + case 602: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2481 + { + decNesting(yylex) + } + case 603: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:2486 + { + forceEOF(yylex) + } + case 604: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:2491 + { + forceEOF(yylex) + } + case 605: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2495 + { + forceEOF(yylex) + } + case 606: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2499 + { + forceEOF(yylex) + } + } + goto yystack /* stack new state and value */ +} diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/sql.y b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/sql.y new file mode 100644 index 00000000..4b6a9df9 --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/sql.y @@ -0,0 +1,2501 @@ +/* +Copyright 2017 Google Inc. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +%{ +package sqlparser + +func setParseTree(yylex interface{}, stmt Statement) { + yylex.(*Tokenizer).ParseTree = stmt +} + +func setAllowComments(yylex interface{}, allow bool) { + yylex.(*Tokenizer).AllowComments = allow +} + +func setDDL(yylex interface{}, ddl *DDL) { + yylex.(*Tokenizer).partialDDL = ddl +} + +func incNesting(yylex interface{}) bool { + yylex.(*Tokenizer).nesting++ + if yylex.(*Tokenizer).nesting == 200 { + return true + } + return false +} + +func decNesting(yylex interface{}) { + yylex.(*Tokenizer).nesting-- +} + +func forceEOF(yylex interface{}) { + yylex.(*Tokenizer).ForceEOF = true +} + +%} + +%union { + empty struct{} + statement Statement + selStmt SelectStatement + ddl *DDL + ins *Insert + byt byte + bytes []byte + bytes2 [][]byte + str string + strs []string + selectExprs SelectExprs + selectExpr SelectExpr + columns Columns + colName *ColName + tableExprs TableExprs + tableExpr TableExpr + tableName TableName + indexHints *IndexHints + expr Expr + exprs Exprs + boolVal BoolVal + colTuple ColTuple + values Values + valTuple ValTuple + subquery *Subquery + whens []*When + when *When + orderBy OrderBy + order *Order + limit *Limit + updateExprs UpdateExprs + updateExpr *UpdateExpr + colIdent ColIdent + colIdents []ColIdent + tableIdent TableIdent + convertType *ConvertType + aliasedTableName *AliasedTableExpr + TableSpec *TableSpec + TableOptions TableOptions + columnType ColumnType + colKeyOpt 
ColumnKeyOption + optVal *SQLVal + LengthScaleOption LengthScaleOption + columnDefinition *ColumnDefinition + indexDefinition *IndexDefinition + indexInfo *IndexInfo + indexColumn *IndexColumn + indexColumns []*IndexColumn +} + +%token LEX_ERROR +%left UNION +%token SELECT INSERT UPDATE DELETE FROM WHERE GROUP HAVING ORDER BY LIMIT OFFSET FOR +%token ALL DISTINCT AS EXISTS ASC DESC INTO DUPLICATE KEY DEFAULT SET LOCK +%token VALUES LAST_INSERT_ID +%token NEXT VALUE SHARE MODE +%token SQL_NO_CACHE SQL_CACHE +%left JOIN STRAIGHT_JOIN LEFT RIGHT INNER OUTER CROSS NATURAL USE FORCE +%left ON +%token '(' ',' ')' +%token ID HEX STRING INTEGRAL FLOAT HEXNUM VALUE_ARG LIST_ARG COMMENT COMMENT_KEYWORD +%token NULL TRUE FALSE + +// Precedence dictated by mysql. But the vitess grammar is simplified. +// Some of these operators don't conflict in our situation. Nevertheless, +// it's better to have these listed in the correct order. Also, we don't +// support all operators yet. +%left OR +%left AND +%right NOT '!' +%left BETWEEN CASE WHEN THEN ELSE END +%left '=' '<' '>' LE GE NE NULL_SAFE_EQUAL IS LIKE REGEXP IN +%left '|' +%left '&' +%left SHIFT_LEFT SHIFT_RIGHT +%left '+' '-' +%left '*' '/' DIV '%' MOD +%left '^' +%right '~' UNARY +%left COLLATE +%right BINARY +%right INTERVAL +%nonassoc '.' + +// There is no need to define precedence for the JSON +// operators because the syntax is restricted enough that +// they don't cause conflicts. 
+%token JSON_EXTRACT_OP JSON_UNQUOTE_EXTRACT_OP + +// DDL Tokens +%token CREATE ALTER DROP RENAME ANALYZE ADD MODIFY +%token TABLE INDEX VIEW TO IGNORE IF UNIQUE USING PRIMARY COLUMN +%token SHOW DESCRIBE EXPLAIN DATE ESCAPE REPAIR OPTIMIZE TRUNCATE + +// Type Tokens +%token BIT TINYINT SMALLINT MEDIUMINT INT INTEGER BIGINT INTNUM +%token REAL DOUBLE FLOAT_TYPE DECIMAL NUMERIC +%token TIME TIMESTAMP DATETIME YEAR +%token CHAR VARCHAR BOOL CHARACTER VARBINARY NCHAR CHARSET +%token TEXT TINYTEXT MEDIUMTEXT LONGTEXT +%token BLOB TINYBLOB MEDIUMBLOB LONGBLOB JSON ENUM + +// Type Modifiers +%token NULLX AUTO_INCREMENT APPROXNUM SIGNED UNSIGNED ZEROFILL + +// Supported SHOW tokens +%token DATABASES TABLES VITESS_KEYSPACES VITESS_SHARDS VSCHEMA_TABLES WARNINGS VARIABLES EVENTS BINLOG GTID + +// Functions +%token CURRENT_TIMESTAMP DATABASE CURRENT_DATE +%token CURRENT_TIME LOCALTIME LOCALTIMESTAMP +%token UTC_DATE UTC_TIME UTC_TIMESTAMP +%token REPLACE +%token CONVERT CAST +%token GROUP_CONCAT SEPARATOR + +// Match +%token MATCH AGAINST BOOLEAN LANGUAGE WITH QUERY EXPANSION + +// MySQL reserved words that are unused by this grammar will map to this token. 
+%token UNUSED + +// RadonDB +%token PARTITION PARTITIONS HASH XA +%type truncate_statement xa_statement explain_statement kill_statement transaction_statement +%token ENGINES STATUS VERSIONS PROCESSLIST QUERYZ TXNZ KILL START TRANSACTION COMMIT SESSION ENGINE + +%type command +%type select_statement base_select union_lhs union_rhs +%type insert_statement update_statement delete_statement set_statement +%type create_statement alter_statement drop_statement +%type create_table_prefix +%type analyze_statement show_statement use_statement other_statement +%type comment_opt comment_list +%type union_op insert_or_replace +%type distinct_opt straight_join_opt cache_opt match_option separator_opt binlog_from_opt +%type like_escape_opt +%type select_expression_list select_expression_list_opt +%type select_expression +%type expression +%type from_opt table_references +%type table_reference table_factor join_table +%type inner_join outer_join natural_join +%type table_name into_table_name +%type aliased_table_name +%type index_hint_list +%type index_list +%type where_expression_opt +%type condition +%type boolean_value +%type compare +%type insert_data +%type value value_expression num_val +%type function_call_keyword function_call_nonkeyword function_call_generic function_call_conflict +%type is_suffix +%type col_tuple +%type expression_list +%type tuple_list +%type row_tuple tuple_or_empty +%type tuple_expression +%type subquery +%type column_name +%type when_expression_list +%type when_expression +%type expression_opt else_expression_opt +%type group_by_opt +%type having_opt +%type order_by_opt order_list +%type order +%type asc_desc_opt +%type limit_opt +%type lock_opt +%type ins_column_list +%type on_dup_opt +%type update_list +%type update_expression +%type for_from +%type ignore_opt default_opt +%type exists_opt not_exists_opt +%type non_rename_operation to_opt index_opt +%type reserved_keyword non_reserved_keyword +%type sql_id reserved_sql_id col_alias as_ci_opt 
+%type table_id reserved_table_id table_alias as_opt_id +%type as_opt +%type force_eof ddl_force_eof +%type charset +%type convert_type +%type show_statement_type +%type column_type +%type int_type decimal_type numeric_type time_type char_type +%type length_opt column_default_opt column_comment_opt +%type charset_opt collate_opt charset_option engine_option autoincrement_option +%type unsigned_opt zero_fill_opt +%type float_length_opt decimal_length_opt +%type null_opt auto_increment_opt +%type column_key_opt +%type enum_values +%type column_definition +%type index_definition +%type index_or_key +%type table_spec table_column_list +%type table_option_list +%type index_info +%type index_column +%type index_column_list + +%start any_command + +%% + +any_command: + command semicolon_opt + { + setParseTree(yylex, $1) + } + +semicolon_opt: +/*empty*/ {} +| ';' {} + +command: + select_statement + { + $$ = $1 + } +| insert_statement +| update_statement +| delete_statement +| set_statement +| create_statement +| alter_statement +| drop_statement +| truncate_statement +| analyze_statement +| show_statement +| use_statement +| xa_statement +| explain_statement +| kill_statement +| transaction_statement +| other_statement + +select_statement: + base_select order_by_opt limit_opt lock_opt + { + sel := $1.(*Select) + sel.OrderBy = $2 + sel.Limit = $3 + sel.Lock = $4 + $$ = sel + } +| union_lhs union_op union_rhs order_by_opt limit_opt lock_opt + { + $$ = &Union{Type: $2, Left: $1, Right: $3, OrderBy: $4, Limit: $5, Lock: $6} + } +| SELECT comment_opt cache_opt NEXT num_val for_from table_name + { + $$ = &Select{Comments: Comments($2), Cache: $3, SelectExprs: SelectExprs{Nextval{Expr: $5}}, From: TableExprs{&AliasedTableExpr{Expr: $7}}} + } + +// base_select is an unparenthesized SELECT with no order by clause or beyond. 
+base_select: + SELECT comment_opt cache_opt distinct_opt straight_join_opt select_expression_list from_opt where_expression_opt group_by_opt having_opt + { + $$ = &Select{Comments: Comments($2), Cache: $3, Distinct: $4, Hints: $5, SelectExprs: $6, From: $7, Where: NewWhere(WhereStr, $8), GroupBy: GroupBy($9), Having: NewWhere(HavingStr, $10)} + } + +union_lhs: + select_statement + { + $$ = $1 + } +| openb select_statement closeb + { + $$ = &ParenSelect{Select: $2} + } + +union_rhs: + base_select + { + $$ = $1 + } +| openb select_statement closeb + { + $$ = &ParenSelect{Select: $2} + } + + +insert_statement: + insert_or_replace comment_opt ignore_opt into_table_name insert_data on_dup_opt + { + // insert_data returns a *Insert pre-filled with Columns & Values + ins := $5 + ins.Action = $1 + ins.Comments = $2 + ins.Ignore = $3 + ins.Table = $4 + ins.OnDup = OnDup($6) + $$ = ins + } +| insert_or_replace comment_opt ignore_opt into_table_name SET update_list on_dup_opt + { + cols := make(Columns, 0, len($6)) + vals := make(ValTuple, 0, len($7)) + for _, updateList := range $6 { + cols = append(cols, updateList.Name.Name) + vals = append(vals, updateList.Expr) + } + $$ = &Insert{Action: $1, Comments: Comments($2), Ignore: $3, Table: $4, Columns: cols, Rows: Values{vals}, OnDup: OnDup($7)} + } + +insert_or_replace: + INSERT + { + $$ = InsertStr + } +| REPLACE + { + $$ = ReplaceStr + } + +update_statement: + UPDATE comment_opt table_name SET update_list where_expression_opt order_by_opt limit_opt + { + $$ = &Update{Comments: Comments($2), Table: $3, Exprs: $5, Where: NewWhere(WhereStr, $6), OrderBy: $7, Limit: $8} + } + +delete_statement: + DELETE comment_opt FROM table_name where_expression_opt order_by_opt limit_opt + { + $$ = &Delete{Comments: Comments($2), Table: $4, Where: NewWhere(WhereStr, $5), OrderBy: $6, Limit: $7} + } + +set_statement: + SET force_eof + { + $$ = &Set{} + } + +create_statement: + create_table_prefix table_spec + { + $1.Action = CreateTableStr + 
$1.TableSpec = $2 + $$ = $1 + } +| create_table_prefix table_spec PARTITION BY HASH openb ID closeb ddl_force_eof + { + $1.Action = CreateTableStr + $1.TableSpec = $2 + $1.PartitionName = string($7) + $$ = $1 + } +| CREATE DATABASE not_exists_opt table_id + { + var ifnotexists bool + if $3 != 0 { + ifnotexists= true + } + $$ = &DDL{Action: CreateDBStr, IfNotExists:ifnotexists, Database: $4} + } +| CREATE INDEX ID ON table_name ddl_force_eof + { + // Change this to an alter statement + $$ = &DDL{Action: CreateIndexStr, IndexName:string($3), Table: $5, NewName:$5} + } + +create_table_prefix: + CREATE TABLE not_exists_opt table_name + { + var ifnotexists bool + if $3 != 0 { + ifnotexists= true + } + $$ = &DDL{Action: CreateTableStr, IfNotExists:ifnotexists, Table: $4, NewName: $4} + setDDL(yylex, $$) + } + +table_spec: + '(' table_column_list ')' table_option_list + { + $$ = $2 + $$.Options = $4 + } + +table_option_list: + engine_option autoincrement_option charset_option + { + $$.Engine = $1 + $$.Charset = $3 + } + +engine_option: + { + $$="" + } +| ENGINE '=' ID + { + $$ = string($3) + } + +charset_option: + { + $$="" + } +| DEFAULT CHARSET '=' ID + { + $$ = string($4) + } + +autoincrement_option: + { + $$="" + } +| AUTO_INCREMENT '=' INTEGRAL + { + } + + +table_column_list: + column_definition + { + $$ = &TableSpec{} + $$.AddColumn($1) + } +| table_column_list ',' column_definition + { + $$.AddColumn($3) + } +| table_column_list ',' index_definition + { + $$.AddIndex($3) + } + +column_definition: + ID column_type null_opt column_default_opt auto_increment_opt column_key_opt column_comment_opt + { + $2.NotNull = $3 + $2.Default = $4 + $2.Autoincrement = $5 + $2.KeyOpt = $6 + $2.Comment = $7 + $$ = &ColumnDefinition{Name: NewColIdent(string($1)), Type: $2} + } +column_type: + numeric_type unsigned_opt zero_fill_opt + { + $$ = $1 + $$.Unsigned = $2 + $$.Zerofill = $3 + } +| char_type +| time_type + +numeric_type: + int_type length_opt + { + $$ = $1 + $$.Length = $2 + 
} +| decimal_type + { + $$ = $1 + } + +int_type: + BIT + { + $$ = ColumnType{Type: string($1)} + } +| TINYINT + { + $$ = ColumnType{Type: string($1)} + } +| SMALLINT + { + $$ = ColumnType{Type: string($1)} + } +| MEDIUMINT + { + $$ = ColumnType{Type: string($1)} + } +| INT + { + $$ = ColumnType{Type: string($1)} + } +| INTEGER + { + $$ = ColumnType{Type: string($1)} + } +| BIGINT + { + $$ = ColumnType{Type: string($1)} + } + +decimal_type: +REAL float_length_opt + { + $$ = ColumnType{Type: string($1)} + $$.Length = $2.Length + $$.Scale = $2.Scale + } +| DOUBLE float_length_opt + { + $$ = ColumnType{Type: string($1)} + $$.Length = $2.Length + $$.Scale = $2.Scale + } +| FLOAT_TYPE float_length_opt + { + $$ = ColumnType{Type: string($1)} + $$.Length = $2.Length + $$.Scale = $2.Scale + } +| DECIMAL decimal_length_opt + { + $$ = ColumnType{Type: string($1)} + $$.Length = $2.Length + $$.Scale = $2.Scale + } +| NUMERIC decimal_length_opt + { + $$ = ColumnType{Type: string($1)} + $$.Length = $2.Length + $$.Scale = $2.Scale + } + +time_type: + DATE + { + $$ = ColumnType{Type: string($1)} + } +| TIME length_opt + { + $$ = ColumnType{Type: string($1), Length: $2} + } +| TIMESTAMP length_opt + { + $$ = ColumnType{Type: string($1), Length: $2} + } +| DATETIME length_opt + { + $$ = ColumnType{Type: string($1), Length: $2} + } +| YEAR + { + $$ = ColumnType{Type: string($1)} + } + +char_type: + CHAR length_opt charset_opt collate_opt + { + $$ = ColumnType{Type: string($1), Length: $2, Charset: $3, Collate: $4} + } +| VARCHAR length_opt charset_opt collate_opt + { + $$ = ColumnType{Type: string($1), Length: $2, Charset: $3, Collate: $4} + } +| BINARY length_opt + { + $$ = ColumnType{Type: string($1), Length: $2} + } +| VARBINARY length_opt + { + $$ = ColumnType{Type: string($1), Length: $2} + } +| TEXT charset_opt collate_opt + { + $$ = ColumnType{Type: string($1), Charset: $2, Collate: $3} + } +| TINYTEXT charset_opt collate_opt + { + $$ = ColumnType{Type: string($1), Charset: $2, 
Collate: $3} + } +| MEDIUMTEXT charset_opt collate_opt + { + $$ = ColumnType{Type: string($1), Charset: $2, Collate: $3} + } +| LONGTEXT charset_opt collate_opt + { + $$ = ColumnType{Type: string($1), Charset: $2, Collate: $3} + } +| BLOB + { + $$ = ColumnType{Type: string($1)} + } +| TINYBLOB + { + $$ = ColumnType{Type: string($1)} + } +| MEDIUMBLOB + { + $$ = ColumnType{Type: string($1)} + } +| LONGBLOB + { + $$ = ColumnType{Type: string($1)} + } +| JSON + { + $$ = ColumnType{Type: string($1)} + } +| ENUM '(' enum_values ')' + { + $$ = ColumnType{Type: string($1), EnumValues: $3} + } + +enum_values: + STRING + { + $$ = make([]string, 0, 4) + $$ = append($$, "'" + string($1) + "'") + } +| enum_values ',' STRING + { + $$ = append($1, "'" + string($3) + "'") + } + +length_opt: + { + $$ = nil + } +| '(' INTEGRAL ')' + { + $$ = NewIntVal($2) + } + +float_length_opt: + { + $$ = LengthScaleOption{} + } +| '(' INTEGRAL ',' INTEGRAL ')' + { + $$ = LengthScaleOption{ + Length: NewIntVal($2), + Scale: NewIntVal($4), + } + } + +decimal_length_opt: + { + $$ = LengthScaleOption{} + } +| '(' INTEGRAL ')' + { + $$ = LengthScaleOption{ + Length: NewIntVal($2), + } + } +| '(' INTEGRAL ',' INTEGRAL ')' + { + $$ = LengthScaleOption{ + Length: NewIntVal($2), + Scale: NewIntVal($4), + } + } + +unsigned_opt: + { + $$ = BoolVal(false) + } +| UNSIGNED + { + $$ = BoolVal(true) + } + +zero_fill_opt: + { + $$ = BoolVal(false) + } +| ZEROFILL + { + $$ = BoolVal(true) + } + +// Null opt returns false to mean NULL (i.e. 
the default) and true for NOT NULL +null_opt: + { + $$ = BoolVal(false) + } +| NULL + { + $$ = BoolVal(false) + } +| NOT NULL + { + $$ = BoolVal(true) + } + +column_default_opt: + { + $$ = nil + } +| DEFAULT STRING + { + $$ = NewStrVal($2) + } +| DEFAULT INTEGRAL + { + $$ = NewIntVal($2) + } +| DEFAULT FLOAT + { + $$ = NewFloatVal($2) + } +| DEFAULT NULL + { + $$ = NewValArg($2) + } + +auto_increment_opt: + { + $$ = BoolVal(false) + } +| AUTO_INCREMENT + { + $$ = BoolVal(true) + } + +charset_opt: + { + $$ = "" + } +| CHARACTER SET ID + { + $$ = string($3) + } +| CHARACTER SET BINARY + { + $$ = string($3) + } + +collate_opt: + { + $$ = "" + } +| COLLATE ID + { + $$ = string($2) + } + +column_key_opt: + { + $$ = ColKeyNone + } +| PRIMARY KEY + { + $$ = ColKeyPrimary + } +| KEY + { + $$ = ColKey + } +| UNIQUE KEY + { + $$ = ColKeyUniqueKey + } +| UNIQUE + { + $$ = ColKeyUnique + } + +column_comment_opt: + { + $$ = nil + } +| COMMENT_KEYWORD STRING + { + $$ = NewStrVal($2) + } + +index_definition: + index_info '(' index_column_list ')' + { + $$ = &IndexDefinition{Info: $1, Columns: $3} + } + +index_info: + PRIMARY KEY + { + $$ = &IndexInfo{Type: string($1) + " " + string($2), Name: NewColIdent("PRIMARY"), Primary: true, Unique: true} + } +| UNIQUE index_or_key ID + { + $$ = &IndexInfo{Type: string($1) + " " + string($2), Name: NewColIdent(string($3)), Unique: true} + } +| UNIQUE ID + { + $$ = &IndexInfo{Type: string($1), Name: NewColIdent(string($2)), Unique: true} + } +| index_or_key ID + { + $$ = &IndexInfo{Type: string($1), Name: NewColIdent(string($2)), Unique: false} + } + +index_or_key: + INDEX + { + $$ = string($1) + } + | KEY + { + $$ = string($1) + } + +index_column_list: + index_column + { + $$ = []*IndexColumn{$1} + } +| index_column_list ',' index_column + { + $$ = append($$, $3) + } + +index_column: + sql_id length_opt + { + $$ = &IndexColumn{Column: $1, Length: $2} + } + +alter_statement: + ALTER ignore_opt TABLE table_name non_rename_operation force_eof 
+ { + $$ = &DDL{Action: AlterStr, Table: $4, NewName: $4} + } +| ALTER ignore_opt TABLE table_name RENAME to_opt table_name + { + // Change this to a rename statement + $$ = &DDL{Action: RenameStr, Table: $4, NewName: $7} + } +| ALTER ignore_opt TABLE table_name RENAME index_opt force_eof + { + // Rename an index can just be an alter + $$ = &DDL{Action: AlterStr, Table: $4, NewName: $4} + } +| ALTER ignore_opt TABLE table_name ENGINE '=' ID + { + $$ = &DDL{Action: AlterEngineStr, Table: $4, NewName:$4, Engine: string($7)} + } +| ALTER ignore_opt TABLE table_name CONVERT TO CHARACTER SET ID + { + $$ = &DDL{Action: AlterCharsetStr, Table: $4, NewName:$4, Charset: string($9)} + } +| ALTER ignore_opt TABLE table_name ADD COLUMN table_spec + { + $$ = &DDL{Action: AlterAddColumnStr, Table: $4, NewName:$4, TableSpec:$7} + } +| ALTER ignore_opt TABLE table_name DROP COLUMN ID + { + $$ = &DDL{Action: AlterDropColumnStr, Table: $4, NewName:$4, DropColumnName:string($7)} + } +| ALTER ignore_opt TABLE table_name MODIFY COLUMN column_definition + { + $$ = &DDL{Action: AlterModifyColumnStr, Table: $4, NewName:$4, ModifyColumnDef:$7} + } + + +drop_statement: + DROP TABLE exists_opt table_name + { + var exists bool + if $3 != 0 { + exists = true + } + $$ = &DDL{Action: DropTableStr, Table: $4, IfExists: exists} + } +| DROP INDEX ID ON table_name + { + // Change this to an alter statement + $$ = &DDL{Action: DropIndexStr, IndexName:string($3), Table: $5, NewName: $5} + } +| DROP DATABASE exists_opt table_id + { + var exists bool + if $3 != 0 { + exists = true + } + $$ = &DDL{Action: DropDBStr, Database: $4, IfExists: exists} + } + +truncate_statement: + TRUNCATE TABLE table_name + { + $$ = &DDL{Action: TruncateTableStr, Table: $3, NewName: $3} + } + +analyze_statement: + ANALYZE TABLE table_name + { + $$ = &DDL{Action: AlterStr, Table: $3, NewName: $3} + } + +xa_statement: + XA force_eof + { + $$ = &Xa{} + } + +explain_statement: + EXPLAIN force_eof + { + $$ = &Explain{} + } + 
+kill_statement: + KILL INTEGRAL force_eof + { + $$ = &Kill{ QueryID: &NumVal{raw: string($2)}} + } + +transaction_statement: + START TRANSACTION force_eof + { + $$ = &Transaction{ Action: StartTxnStr} + } +| COMMIT force_eof + { + $$ = &Transaction{ Action: CommitTxnStr} + } + +show_statement_type: + ID + { + $$ = ShowUnsupportedStr + } +| reserved_keyword + { + switch v := string($1); v { + case ShowDatabasesStr, ShowTablesStr, ShowEnginesStr, ShowVersionsStr, ShowProcesslistStr, ShowQueryzStr, ShowTxnzStr, ShowStatusStr: + $$ = v + default: + $$ = ShowUnsupportedStr + } + } +| non_reserved_keyword +{ + $$ = ShowUnsupportedStr +} + +show_statement: + SHOW show_statement_type force_eof + { + $$ = &Show{Type: $2} + } +| SHOW TABLES FROM table_name force_eof + { + $$ = &Show{Type: ShowTablesStr, Database: $4} + } +| SHOW CREATE TABLE table_name force_eof + { + $$ = &Show{Type: ShowCreateTableStr, Table: $4} + } +| SHOW CREATE DATABASE table_name force_eof + { + $$ = &Show{Type: ShowCreateDatabaseStr, Database: $4} + } +| SHOW WARNINGS force_eof + { + $$ = &Show{Type: ShowWarningsStr} + } +| SHOW VARIABLES force_eof + { + $$ = &Show{Type: ShowVariablesStr} + } +| SHOW BINLOG EVENTS binlog_from_opt limit_opt force_eof + { + $$ = &Show{Type: ShowBinlogEventsStr, From: $4, Limit: $5 } + } + +binlog_from_opt: + { + $$ = "" + } +| FROM GTID STRING + { + $$ = string($3) + } + +use_statement: + USE table_id + { + $$ = &Use{DBName: $2} + } + +other_statement: + DESC force_eof + { + $$ = &OtherRead{} + } +| DESCRIBE force_eof + { + $$ = &OtherRead{} + } +| REPAIR force_eof + { + $$ = &OtherAdmin{} + } +| OPTIMIZE force_eof + { + $$ = &OtherAdmin{} + } + +comment_opt: + { + setAllowComments(yylex, true) + } + comment_list + { + $$ = $2 + setAllowComments(yylex, false) + } + +comment_list: + { + $$ = nil + } +| comment_list COMMENT + { + $$ = append($1, $2) + } + +union_op: + UNION + { + $$ = UnionStr + } +| UNION ALL + { + $$ = UnionAllStr + } +| UNION DISTINCT + { + $$ = 
UnionDistinctStr + } + +cache_opt: +{ + $$ = "" +} +| SQL_NO_CACHE +{ + $$ = SQLNoCacheStr +} +| SQL_CACHE +{ + $$ = SQLCacheStr +} + +distinct_opt: + { + $$ = "" + } +| DISTINCT + { + $$ = DistinctStr + } + +straight_join_opt: + { + $$ = "" + } +| STRAIGHT_JOIN + { + $$ = StraightJoinHint + } + +select_expression_list_opt: + { + $$ = nil + } +| select_expression_list + { + $$ = $1 + } + +select_expression_list: + select_expression + { + $$ = SelectExprs{$1} + } +| select_expression_list ',' select_expression + { + $$ = append($$, $3) + } + +select_expression: + '*' + { + $$ = &StarExpr{} + } +| expression as_ci_opt + { + $$ = &AliasedExpr{Expr: $1, As: $2} + } +| table_id '.' '*' + { + $$ = &StarExpr{TableName: TableName{Name: $1}} + } +| table_id '.' reserved_table_id '.' '*' + { + $$ = &StarExpr{TableName: TableName{Qualifier: $1, Name: $3}} + } + +as_ci_opt: + { + $$ = ColIdent{} + } +| col_alias + { + $$ = $1 + } +| AS col_alias + { + $$ = $2 + } + +col_alias: + sql_id +| STRING + { + $$ = NewColIdent(string($1)) + } + +from_opt: + { + $$ = TableExprs{&AliasedTableExpr{Expr:TableName{Name: NewTableIdent("dual")}}} + } +| FROM table_references + { + $$ = $2 + } + +table_references: + table_reference + { + $$ = TableExprs{$1} + } +| table_references ',' table_reference + { + $$ = append($$, $3) + } + +table_reference: + table_factor +| join_table + +table_factor: + aliased_table_name + { + $$ = $1 + } +| subquery as_opt table_id + { + $$ = &AliasedTableExpr{Expr:$1, As: $3} + } +| openb table_references closeb + { + $$ = &ParenTableExpr{Exprs: $2} + } + +aliased_table_name: +table_name as_opt_id index_hint_list + { + $$ = &AliasedTableExpr{Expr:$1, As: $2, Hints: $3} + } + +// There is a grammar conflict here: +// 1: INSERT INTO a SELECT * FROM b JOIN c ON b.i = c.i +// 2: INSERT INTO a SELECT * FROM b JOIN c ON DUPLICATE KEY UPDATE a.i = 1 +// When yacc encounters the ON clause, it cannot determine which way to +// resolve. 
The %prec override below makes the parser choose the +// first construct, which automatically makes the second construct a +// syntax error. This is the same behavior as MySQL. +join_table: + table_reference inner_join table_factor %prec JOIN + { + $$ = &JoinTableExpr{LeftExpr: $1, Join: $2, RightExpr: $3} + } +| table_reference inner_join table_factor ON expression + { + $$ = &JoinTableExpr{LeftExpr: $1, Join: $2, RightExpr: $3, On: $5} + } +| table_reference outer_join table_reference ON expression + { + $$ = &JoinTableExpr{LeftExpr: $1, Join: $2, RightExpr: $3, On: $5} + } +| table_reference natural_join table_factor + { + $$ = &JoinTableExpr{LeftExpr: $1, Join: $2, RightExpr: $3} + } + +as_opt: + { $$ = struct{}{} } +| AS + { $$ = struct{}{} } + +as_opt_id: + { + $$ = NewTableIdent("") + } +| table_alias + { + $$ = $1 + } +| AS table_alias + { + $$ = $2 + } + +table_alias: + table_id +| STRING + { + $$ = NewTableIdent(string($1)) + } + +inner_join: + JOIN + { + $$ = JoinStr + } +| INNER JOIN + { + $$ = JoinStr + } +| CROSS JOIN + { + $$ = JoinStr + } +| STRAIGHT_JOIN + { + $$ = StraightJoinStr + } + +outer_join: + LEFT JOIN + { + $$ = LeftJoinStr + } +| LEFT OUTER JOIN + { + $$ = LeftJoinStr + } +| RIGHT JOIN + { + $$ = RightJoinStr + } +| RIGHT OUTER JOIN + { + $$ = RightJoinStr + } + +natural_join: + NATURAL JOIN + { + $$ = NaturalJoinStr + } +| NATURAL outer_join + { + if $2 == LeftJoinStr { + $$ = NaturalLeftJoinStr + } else { + $$ = NaturalRightJoinStr + } + } + +into_table_name: + INTO table_name + { + $$ = $2 + } +| table_name + { + $$ = $1 + } + +table_name: + table_id + { + $$ = TableName{Name: $1} + } +| table_id '.' 
reserved_table_id + { + $$ = TableName{Qualifier: $1, Name: $3} + } + +index_hint_list: + { + $$ = nil + } +| USE INDEX openb index_list closeb + { + $$ = &IndexHints{Type: UseStr, Indexes: $4} + } +| IGNORE INDEX openb index_list closeb + { + $$ = &IndexHints{Type: IgnoreStr, Indexes: $4} + } +| FORCE INDEX openb index_list closeb + { + $$ = &IndexHints{Type: ForceStr, Indexes: $4} + } + +index_list: + sql_id + { + $$ = []ColIdent{$1} + } +| index_list ',' sql_id + { + $$ = append($1, $3) + } + +where_expression_opt: + { + $$ = nil + } +| WHERE expression + { + $$ = $2 + } + +expression: + condition + { + $$ = $1 + } +| expression AND expression + { + $$ = &AndExpr{Left: $1, Right: $3} + } +| expression OR expression + { + $$ = &OrExpr{Left: $1, Right: $3} + } +| NOT expression + { + $$ = &NotExpr{Expr: $2} + } +| expression IS is_suffix + { + $$ = &IsExpr{Operator: $3, Expr: $1} + } +| value_expression + { + $$ = $1 + } +| DEFAULT default_opt + { + $$ = &Default{ColName: $2} + } + +default_opt: + /* empty */ + { + $$ = "" + } +| openb ID closeb + { + $$ = string($2) + } + +boolean_value: + TRUE + { + $$ = BoolVal(true) + } +| FALSE + { + $$ = BoolVal(false) + } + +condition: + value_expression compare value_expression + { + $$ = &ComparisonExpr{Left: $1, Operator: $2, Right: $3} + } +| value_expression IN col_tuple + { + $$ = &ComparisonExpr{Left: $1, Operator: InStr, Right: $3} + } +| value_expression NOT IN col_tuple + { + $$ = &ComparisonExpr{Left: $1, Operator: NotInStr, Right: $4} + } +| value_expression LIKE value_expression like_escape_opt + { + $$ = &ComparisonExpr{Left: $1, Operator: LikeStr, Right: $3, Escape: $4} + } +| value_expression NOT LIKE value_expression like_escape_opt + { + $$ = &ComparisonExpr{Left: $1, Operator: NotLikeStr, Right: $4, Escape: $5} + } +| value_expression REGEXP value_expression + { + $$ = &ComparisonExpr{Left: $1, Operator: RegexpStr, Right: $3} + } +| value_expression NOT REGEXP value_expression + { + $$ = 
&ComparisonExpr{Left: $1, Operator: NotRegexpStr, Right: $4} + } +| value_expression BETWEEN value_expression AND value_expression + { + $$ = &RangeCond{Left: $1, Operator: BetweenStr, From: $3, To: $5} + } +| value_expression NOT BETWEEN value_expression AND value_expression + { + $$ = &RangeCond{Left: $1, Operator: NotBetweenStr, From: $4, To: $6} + } +| EXISTS subquery + { + $$ = &ExistsExpr{Subquery: $2} + } + +is_suffix: + NULL + { + $$ = IsNullStr + } +| NOT NULL + { + $$ = IsNotNullStr + } +| TRUE + { + $$ = IsTrueStr + } +| NOT TRUE + { + $$ = IsNotTrueStr + } +| FALSE + { + $$ = IsFalseStr + } +| NOT FALSE + { + $$ = IsNotFalseStr + } + +compare: + '=' + { + $$ = EqualStr + } +| '<' + { + $$ = LessThanStr + } +| '>' + { + $$ = GreaterThanStr + } +| LE + { + $$ = LessEqualStr + } +| GE + { + $$ = GreaterEqualStr + } +| NE + { + $$ = NotEqualStr + } +| NULL_SAFE_EQUAL + { + $$ = NullSafeEqualStr + } + +like_escape_opt: + { + $$ = nil + } +| ESCAPE value_expression + { + $$ = $2 + } + +col_tuple: + row_tuple + { + $$ = $1 + } +| subquery + { + $$ = $1 + } +| LIST_ARG + { + $$ = ListArg($1) + } + +subquery: + openb select_statement closeb + { + $$ = &Subquery{$2} + } + +expression_list: + expression + { + $$ = Exprs{$1} + } +| expression_list ',' expression + { + $$ = append($1, $3) + } + +value_expression: + value + { + $$ = $1 + } +| boolean_value + { + $$ = $1 + } +| column_name + { + $$ = $1 + } +| tuple_expression + { + $$ = $1 + } +| subquery + { + $$ = $1 + } +| value_expression '&' value_expression + { + $$ = &BinaryExpr{Left: $1, Operator: BitAndStr, Right: $3} + } +| value_expression '|' value_expression + { + $$ = &BinaryExpr{Left: $1, Operator: BitOrStr, Right: $3} + } +| value_expression '^' value_expression + { + $$ = &BinaryExpr{Left: $1, Operator: BitXorStr, Right: $3} + } +| value_expression '+' value_expression + { + $$ = &BinaryExpr{Left: $1, Operator: PlusStr, Right: $3} + } +| value_expression '-' value_expression + { + $$ = 
&BinaryExpr{Left: $1, Operator: MinusStr, Right: $3} + } +| value_expression '*' value_expression + { + $$ = &BinaryExpr{Left: $1, Operator: MultStr, Right: $3} + } +| value_expression '/' value_expression + { + $$ = &BinaryExpr{Left: $1, Operator: DivStr, Right: $3} + } +| value_expression DIV value_expression + { + $$ = &BinaryExpr{Left: $1, Operator: IntDivStr, Right: $3} + } +| value_expression '%' value_expression + { + $$ = &BinaryExpr{Left: $1, Operator: ModStr, Right: $3} + } +| value_expression MOD value_expression + { + $$ = &BinaryExpr{Left: $1, Operator: ModStr, Right: $3} + } +| value_expression SHIFT_LEFT value_expression + { + $$ = &BinaryExpr{Left: $1, Operator: ShiftLeftStr, Right: $3} + } +| value_expression SHIFT_RIGHT value_expression + { + $$ = &BinaryExpr{Left: $1, Operator: ShiftRightStr, Right: $3} + } +| column_name JSON_EXTRACT_OP value + { + $$ = &BinaryExpr{Left: $1, Operator: JSONExtractOp, Right: $3} + } +| column_name JSON_UNQUOTE_EXTRACT_OP value + { + $$ = &BinaryExpr{Left: $1, Operator: JSONUnquoteExtractOp, Right: $3} + } +| value_expression COLLATE charset + { + $$ = &CollateExpr{Expr: $1, Charset: $3} + } +| BINARY value_expression %prec UNARY + { + $$ = &UnaryExpr{Operator: BinaryStr, Expr: $2} + } +| '+' value_expression %prec UNARY + { + if num, ok := $2.(*SQLVal); ok && num.Type == IntVal { + $$ = num + } else { + $$ = &UnaryExpr{Operator: UPlusStr, Expr: $2} + } + } +| '-' value_expression %prec UNARY + { + if num, ok := $2.(*SQLVal); ok && num.Type == IntVal { + // Handle double negative + if num.Val[0] == '-' { + num.Val = num.Val[1:] + $$ = num + } else { + $$ = NewIntVal(append([]byte("-"), num.Val...)) + } + } else { + $$ = &UnaryExpr{Operator: UMinusStr, Expr: $2} + } + } +| '~' value_expression + { + $$ = &UnaryExpr{Operator: TildaStr, Expr: $2} + } +| '!' 
value_expression %prec UNARY + { + $$ = &UnaryExpr{Operator: BangStr, Expr: $2} + } +| INTERVAL value_expression sql_id + { + // This rule prevents the usage of INTERVAL + // as a function. If support is needed for that, + // we'll need to revisit this. The solution + // will be non-trivial because of grammar conflicts. + $$ = &IntervalExpr{Expr: $2, Unit: $3} + } +| function_call_generic +| function_call_keyword +| function_call_nonkeyword +| function_call_conflict + +/* + Regular function calls without special token or syntax, guaranteed to not + introduce side effects due to being a simple identifier +*/ +function_call_generic: + sql_id openb select_expression_list_opt closeb + { + $$ = &FuncExpr{Name: $1, Exprs: $3} + } +| sql_id openb DISTINCT select_expression_list closeb + { + $$ = &FuncExpr{Name: $1, Distinct: true, Exprs: $4} + } +| table_id '.' reserved_sql_id openb select_expression_list_opt closeb + { + $$ = &FuncExpr{Qualifier: $1, Name: $3, Exprs: $5} + } + +/* + Function calls using reserved keywords, with dedicated grammar rules + as a result +*/ +function_call_keyword: + LEFT openb select_expression_list closeb + { + $$ = &FuncExpr{Name: NewColIdent("left"), Exprs: $3} + } +| RIGHT openb select_expression_list closeb + { + $$ = &FuncExpr{Name: NewColIdent("right"), Exprs: $3} + } +| CONVERT openb expression ',' convert_type closeb + { + $$ = &ConvertExpr{Expr: $3, Type: $5} + } +| CAST openb expression AS convert_type closeb + { + $$ = &ConvertExpr{Expr: $3, Type: $5} + } +| CONVERT openb expression USING charset closeb + { + $$ = &ConvertUsingExpr{Expr: $3, Type: $5} + } +| MATCH openb select_expression_list closeb AGAINST openb value_expression match_option closeb + { + $$ = &MatchExpr{Columns: $3, Expr: $7, Option: $8} + } +| GROUP_CONCAT openb distinct_opt select_expression_list order_by_opt separator_opt closeb + { + $$ = &GroupConcatExpr{Distinct: $3, Exprs: $4, OrderBy: $5, Separator: $6} + } +| CASE expression_opt when_expression_list 
else_expression_opt END + { + $$ = &CaseExpr{Expr: $2, Whens: $3, Else: $4} + } +| VALUES openb sql_id closeb + { + $$ = &ValuesFuncExpr{Name: $3} + } + +/* + Function calls using non reserved keywords but with special syntax forms. + Dedicated grammar rules are needed because of the special syntax +*/ +function_call_nonkeyword: + CURRENT_TIMESTAMP func_datetime_precision_opt + { + $$ = &FuncExpr{Name:NewColIdent("current_timestamp")} + } +| UTC_TIMESTAMP func_datetime_precision_opt + { + $$ = &FuncExpr{Name:NewColIdent("utc_timestamp")} + } +| UTC_TIME func_datetime_precision_opt + { + $$ = &FuncExpr{Name:NewColIdent("utc_time")} + } +| UTC_DATE func_datetime_precision_opt + { + $$ = &FuncExpr{Name:NewColIdent("utc_date")} + } + // now +| LOCALTIME func_datetime_precision_opt + { + $$ = &FuncExpr{Name:NewColIdent("localtime")} + } + // now +| LOCALTIMESTAMP func_datetime_precision_opt + { + $$ = &FuncExpr{Name:NewColIdent("localtimestamp")} + } + // curdate +| CURRENT_DATE func_datetime_precision_opt + { + $$ = &FuncExpr{Name:NewColIdent("current_date")} + } + // curtime +| CURRENT_TIME func_datetime_precision_opt + { + $$ = &FuncExpr{Name:NewColIdent("current_time")} + } + +func_datetime_precision_opt: + /* empty */ +| openb closeb + +/* + Function calls using non reserved keywords with *normal* syntax forms. 
Because + the names are non-reserved, they need a dedicated rule so as not to conflict +*/ +function_call_conflict: + IF openb select_expression_list closeb + { + $$ = &FuncExpr{Name: NewColIdent("if"), Exprs: $3} + } +| DATABASE openb select_expression_list_opt closeb + { + $$ = &FuncExpr{Name: NewColIdent("database"), Exprs: $3} + } +| MOD openb select_expression_list closeb + { + $$ = &FuncExpr{Name: NewColIdent("mod"), Exprs: $3} + } +| REPLACE openb select_expression_list closeb + { + $$ = &FuncExpr{Name: NewColIdent("replace"), Exprs: $3} + } + +match_option: +/*empty*/ + { + $$ = "" + } +| IN BOOLEAN MODE + { + $$ = BooleanModeStr + } +| IN NATURAL LANGUAGE MODE + { + $$ = NaturalLanguageModeStr + } +| IN NATURAL LANGUAGE MODE WITH QUERY EXPANSION + { + $$ = NaturalLanguageModeWithQueryExpansionStr + } +| WITH QUERY EXPANSION + { + $$ = QueryExpansionStr + } + +charset: + ID +{ + $$ = string($1) +} +| STRING +{ + $$ = string($1) +} + +convert_type: + BINARY length_opt + { + $$ = &ConvertType{Type: string($1), Length: $2} + } +| CHAR length_opt charset_opt + { + $$ = &ConvertType{Type: string($1), Length: $2, Charset: $3, Operator: CharacterSetStr} + } +| CHAR length_opt ID + { + $$ = &ConvertType{Type: string($1), Length: $2, Charset: string($3)} + } +| DATE + { + $$ = &ConvertType{Type: string($1)} + } +| DATETIME length_opt + { + $$ = &ConvertType{Type: string($1), Length: $2} + } +| DECIMAL decimal_length_opt + { + $$ = &ConvertType{Type: string($1)} + $$.Length = $2.Length + $$.Scale = $2.Scale + } +| JSON + { + $$ = &ConvertType{Type: string($1)} + } +| NCHAR length_opt + { + $$ = &ConvertType{Type: string($1), Length: $2} + } +| SIGNED + { + $$ = &ConvertType{Type: string($1)} + } +| SIGNED INTEGER + { + $$ = &ConvertType{Type: string($1)} + } +| TIME length_opt + { + $$ = &ConvertType{Type: string($1), Length: $2} + } +| UNSIGNED + { + $$ = &ConvertType{Type: string($1)} + } +| UNSIGNED INTEGER + { + $$ = &ConvertType{Type: string($1)} + } + 
+expression_opt: + { + $$ = nil + } +| expression + { + $$ = $1 + } + +separator_opt: + { + $$ = string("") + } +| SEPARATOR STRING + { + $$ = " separator '"+string($2)+"'" + } + +when_expression_list: + when_expression + { + $$ = []*When{$1} + } +| when_expression_list when_expression + { + $$ = append($1, $2) + } + +when_expression: + WHEN expression THEN expression + { + $$ = &When{Cond: $2, Val: $4} + } + +else_expression_opt: + { + $$ = nil + } +| ELSE expression + { + $$ = $2 + } + +column_name: + sql_id + { + $$ = &ColName{Name: $1} + } +| table_id '.' reserved_sql_id + { + $$ = &ColName{Qualifier: TableName{Name: $1}, Name: $3} + } +| table_id '.' reserved_table_id '.' reserved_sql_id + { + $$ = &ColName{Qualifier: TableName{Qualifier: $1, Name: $3}, Name: $5} + } + +value: + STRING + { + $$ = NewStrVal($1) + } +| HEX + { + $$ = NewHexVal($1) + } +| INTEGRAL + { + $$ = NewIntVal($1) + } +| FLOAT + { + $$ = NewFloatVal($1) + } +| HEXNUM + { + $$ = NewHexNum($1) + } +| VALUE_ARG + { + $$ = NewValArg($1) + } +| NULL + { + $$ = &NullVal{} + } + +num_val: + sql_id + { + // TODO(sougou): Deprecate this construct. 
+ if $1.Lowered() != "value" { + yylex.Error("expecting value after next") + return 1 + } + $$ = NewIntVal([]byte("1")) + } +| INTEGRAL VALUES + { + $$ = NewIntVal($1) + } +| VALUE_ARG VALUES + { + $$ = NewValArg($1) + } + +group_by_opt: + { + $$ = nil + } +| GROUP BY expression_list + { + $$ = $3 + } + +having_opt: + { + $$ = nil + } +| HAVING expression + { + $$ = $2 + } + +order_by_opt: + { + $$ = nil + } +| ORDER BY order_list + { + $$ = $3 + } + +order_list: + order + { + $$ = OrderBy{$1} + } +| order_list ',' order + { + $$ = append($1, $3) + } + +order: + expression asc_desc_opt + { + $$ = &Order{Expr: $1, Direction: $2} + } + +asc_desc_opt: + { + $$ = AscScr + } +| ASC + { + $$ = AscScr + } +| DESC + { + $$ = DescScr + } + +limit_opt: + { + $$ = nil + } +| LIMIT expression + { + $$ = &Limit{Rowcount: $2} + } +| LIMIT expression ',' expression + { + $$ = &Limit{Offset: $2, Rowcount: $4} + } +| LIMIT expression OFFSET expression + { + $$ = &Limit{Offset: $4, Rowcount: $2} + } + +lock_opt: + { + $$ = "" + } +| FOR UPDATE + { + $$ = ForUpdateStr + } +| LOCK IN SHARE MODE + { + $$ = ShareModeStr + } + +// insert_data expands all combinations into a single rule. +// This avoids a shift/reduce conflict while encountering the +// following two possible constructs: +// insert into t1(a, b) (select * from t2) +// insert into t1(select * from t2) +// Because the rules are together, the parser can keep shifting +// the tokens until it disambiguates a as sql_id and select as keyword. +insert_data: + VALUES tuple_list + { + $$ = &Insert{Rows: $2} + } +| select_statement + { + $$ = &Insert{Rows: $1} + } +| openb select_statement closeb + { + // Drop the redundant parenthesis. 
+ $$ = &Insert{Rows: $2} + } +| openb ins_column_list closeb VALUES tuple_list + { + $$ = &Insert{Columns: $2, Rows: $5} + } +| openb ins_column_list closeb select_statement + { + $$ = &Insert{Columns: $2, Rows: $4} + } +| openb ins_column_list closeb openb select_statement closeb + { + // Drop the redundant parenthesis. + $$ = &Insert{Columns: $2, Rows: $5} + } + +ins_column_list: + sql_id + { + $$ = Columns{$1} + } +| sql_id '.' sql_id + { + $$ = Columns{$3} + } +| ins_column_list ',' sql_id + { + $$ = append($$, $3) + } +| ins_column_list ',' sql_id '.' sql_id + { + $$ = append($$, $5) + } + +on_dup_opt: + { + $$ = nil + } +| ON DUPLICATE KEY UPDATE update_list + { + $$ = $5 + } + +tuple_list: + tuple_or_empty + { + $$ = Values{$1} + } +| tuple_list ',' tuple_or_empty + { + $$ = append($1, $3) + } + +tuple_or_empty: + row_tuple + { + $$ = $1 + } +| openb closeb + { + $$ = ValTuple{} + } + +row_tuple: + openb expression_list closeb + { + $$ = ValTuple($2) + } + +tuple_expression: + row_tuple + { + if len($1) == 1 { + $$ = &ParenExpr{$1[0]} + } else { + $$ = $1 + } + } + +update_list: + update_expression + { + $$ = UpdateExprs{$1} + } +| update_list ',' update_expression + { + $$ = append($1, $3) + } + +update_expression: + column_name '=' expression + { + $$ = &UpdateExpr{Name: $1, Expr: $3} + } + +for_from: + FOR +| FROM + +exists_opt: + { $$ = 0 } +| IF EXISTS + { $$ = 1 } + +not_exists_opt: + { $$ = 0 } +| IF NOT EXISTS + { $$ = 1 } + +ignore_opt: + { $$ = "" } +| IGNORE + { $$ = IgnoreStr } + +non_rename_operation: + ALTER + { $$ = struct{}{} } +| AUTO_INCREMENT + { $$ = struct{}{} } +| CHARACTER + { $$ = struct{}{} } +| COMMENT_KEYWORD + { $$ = struct{}{} } +| DEFAULT + { $$ = struct{}{} } +| DROP + { $$ = struct{}{} } +| ORDER + { $$ = struct{}{} } +| CONVERT + { $$ = struct{}{} } +| UNUSED + { $$ = struct{}{} } +| ID + { $$ = struct{}{} } + +to_opt: + { $$ = struct{}{} } +| TO + { $$ = struct{}{} } +| AS + { $$ = struct{}{} } + +index_opt: + INDEX + { $$ = 
struct{}{} } +| KEY + { $$ = struct{}{} } + +sql_id: + ID + { + $$ = NewColIdent(string($1)) + } +| non_reserved_keyword + { + $$ = NewColIdent(string($1)) + } + +reserved_sql_id: + sql_id +| reserved_keyword + { + $$ = NewColIdent(string($1)) + } + +table_id: + ID + { + $$ = NewTableIdent(string($1)) + } +| non_reserved_keyword + { + $$ = NewTableIdent(string($1)) + } + +reserved_table_id: + table_id +| reserved_keyword + { + $$ = NewTableIdent(string($1)) + } + +/* + These are not all necessarily reserved in MySQL, but some are. + + These are more importantly reserved because they may conflict with our grammar. + If you want to move one that is not reserved in MySQL (i.e. ESCAPE) to the + non_reserved_keywords, you'll need to deal with any conflicts. + + Sorted alphabetically +*/ +reserved_keyword: + AND +| AS +| ASC +| AUTO_INCREMENT +| BETWEEN +| BINARY +| BY +| CASE +| CHARACTER +| CHARSET +| COLLATE +| CONVERT +| CREATE +| CROSS +| CURRENT_DATE +| CURRENT_TIME +| CURRENT_TIMESTAMP +| DATABASE +| DATABASES +| DEFAULT +| DELETE +| DESC +| DESCRIBE +| DISTINCT +| DIV +| DROP +| ELSE +| END +| ENGINES +| ESCAPE +| EXISTS +| EXPLAIN +| FALSE +| FOR +| FORCE +| FROM +| GROUP +| HAVING +| IF +| IGNORE +| IN +| INDEX +| INNER +| INSERT +| INTERVAL +| INTO +| IS +| JOIN +| KEY +| LEFT +| LIKE +| LIMIT +| LOCALTIME +| LOCALTIMESTAMP +| LOCK +| MATCH +| MOD +| NATURAL +| NEXT // next should be doable as non-reserved, but is not due to the special `select next num_val` query that vitess supports +| NOT +| NULL +| ON +| OR +| ORDER +| OUTER +| QUERYZ +| PROCESSLIST +| REGEXP +| RENAME +| REPLACE +| RIGHT +| SELECT +| SEPARATOR +| SET +| SHOW +| STATUS +| STRAIGHT_JOIN +| TABLE +| TABLES +| THEN +| TO +| TRUE +| TXNZ +| UNION +| UNIQUE +| UPDATE +| USE +| USING +| UTC_DATE +| UTC_TIME +| UTC_TIMESTAMP +| VALUES +| VERSIONS +| WHEN +| WHERE + +/* + These are non-reserved Vitess, because they don't cause conflicts in the grammar. + Some of them may be reserved in MySQL. 
The good news is we backtick quote them + when we rewrite the query, so no issue should arise. + + Sorted alphabetically +*/ +non_reserved_keyword: + AGAINST +| BIGINT +| BIT +| BLOB +| BOOL +| CHAR +| COMMENT_KEYWORD +| DATE +| DATETIME +| DECIMAL +| DOUBLE +| DUPLICATE +| ENUM +| ENGINE +| EXPANSION +| FLOAT_TYPE +| INT +| INTEGER +| JSON +| LANGUAGE +| LAST_INSERT_ID +| LONGBLOB +| LONGTEXT +| MEDIUMBLOB +| MEDIUMINT +| MEDIUMTEXT +| MODE +| NCHAR +| NUMERIC +| OFFSET +| OPTIMIZE +| PRIMARY +| QUERY +| REAL +| REPAIR +| SHARE +| SIGNED +| SMALLINT +| TEXT +| TIME +| TIMESTAMP +| TINYBLOB +| TINYINT +| TINYTEXT +| TRUNCATE +| UNSIGNED +| UNUSED +| VARBINARY +| VARCHAR +| VIEW +| VITESS_KEYSPACES +| VITESS_SHARDS +| VSCHEMA_TABLES +| WITH +| YEAR +| ZEROFILL + +openb: + '(' + { + if incNesting(yylex) { + yylex.Error("max nesting level reached") + return 1 + } + } + +closeb: + ')' + { + decNesting(yylex) + } + +force_eof: +{ + forceEOF(yylex) +} + +ddl_force_eof: + { + forceEOF(yylex) + } +| openb + { + forceEOF(yylex) + } +| reserved_sql_id + { + forceEOF(yylex) + } diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/token.go b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/token.go new file mode 100644 index 00000000..888dc7cb --- /dev/null +++ b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/token.go @@ -0,0 +1,766 @@ +/* +Copyright 2017 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/

package sqlparser

import (
	"bytes"
	"fmt"
	"strings"

	"github.com/xelabs/go-mysqlstack/sqlparser/depends/bytes2"
	"github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes"
)

// eofChar is a sentinel one past the maximum byte value (0xFF), so it can
// never collide with a real input character.
const eofChar = 0x100

// Tokenizer is the struct used to generate SQL
// tokens for the parser.
type Tokenizer struct {
	InStream      *strings.Reader // source being lexed, consumed one byte at a time
	AllowComments bool            // when true, COMMENT tokens are surfaced to the parser instead of skipped
	ForceEOF      bool            // when true, Scan immediately reports EOF (set via forceEOF in sql.y)
	lastChar      uint16          // one-character lookahead; eofChar once input is exhausted
	Position      int             // number of bytes consumed so far (used in error messages)
	lastToken     []byte          // most recent token value, reported by Error
	LastError     string          // last parse error rendered by Error
	posVarIndex   int             // counter used to rewrite '?' placeholders as :v1, :v2, ...
	ParseTree     Statement       // result slot filled in by the parser
	partialDDL    *DDL            // partially parsed DDL kept when the tail of a DDL is force-EOF'd
	nesting       int             // current parenthesis nesting depth (bounded by incNesting)
}

// NewStringTokenizer creates a new Tokenizer for the
// sql string.
func NewStringTokenizer(sql string) *Tokenizer {
	return &Tokenizer{InStream: strings.NewReader(sql)}
}

// keywords is a map of mysql keywords that fall into two categories:
// 1) keywords considered reserved by MySQL
// 2) keywords for us to handle specially in sql.y
//
// Those marked as UNUSED are likely reserved keywords. We add them here so that
// when rewriting queries we can properly backtick quote them so they don't cause issues
//
// NOTE: If you add new keywords, add them also to the reserved_keywords or
// non_reserved_keywords grammar in sql.y -- this will allow the keyword to be used
// in identifiers. See the docs for each grammar to determine which one to put it into.
+var keywords = map[string]int{ + "accessible": UNUSED, + "add": ADD, + "against": AGAINST, + "all": ALL, + "alter": ALTER, + "analyze": ANALYZE, + "and": AND, + "as": AS, + "asc": ASC, + "asensitive": UNUSED, + "auto_increment": AUTO_INCREMENT, + "before": UNUSED, + "between": BETWEEN, + "bigint": BIGINT, + "binary": BINARY, + "binlog": BINLOG, + "bit": BIT, + "blob": BLOB, + "bool": BOOL, + "boolean": BOOLEAN, + "both": UNUSED, + "by": BY, + "call": UNUSED, + "cascade": UNUSED, + "case": CASE, + "cast": CAST, + "change": UNUSED, + "char": CHAR, + "character": CHARACTER, + "charset": CHARSET, + "check": UNUSED, + "collate": COLLATE, + "column": COLUMN, + "comment": COMMENT_KEYWORD, + "commit": COMMIT, + "condition": UNUSED, + "constraint": UNUSED, + "continue": UNUSED, + "convert": CONVERT, + "create": CREATE, + "cross": CROSS, + "current_date": CURRENT_DATE, + "current_time": CURRENT_TIME, + "current_timestamp": CURRENT_TIMESTAMP, + "current_user": UNUSED, + "cursor": UNUSED, + "database": DATABASE, + "databases": DATABASES, + "day_hour": UNUSED, + "day_microsecond": UNUSED, + "day_minute": UNUSED, + "day_second": UNUSED, + "date": DATE, + "datetime": DATETIME, + "dec": UNUSED, + "decimal": DECIMAL, + "declare": UNUSED, + "default": DEFAULT, + "delayed": UNUSED, + "delete": DELETE, + "desc": DESC, + "describe": DESCRIBE, + "deterministic": UNUSED, + "distinct": DISTINCT, + "distinctrow": UNUSED, + "div": DIV, + "double": DOUBLE, + "drop": DROP, + "duplicate": DUPLICATE, + "each": UNUSED, + "else": ELSE, + "elseif": UNUSED, + "enclosed": UNUSED, + "end": END, + "engine": ENGINE, + "engines": ENGINES, + "enum": ENUM, + "escape": ESCAPE, + "escaped": UNUSED, + "events": EVENTS, + "exists": EXISTS, + "exit": UNUSED, + "explain": EXPLAIN, + "expansion": EXPANSION, + "false": FALSE, + "fetch": UNUSED, + "float": FLOAT_TYPE, + "float4": UNUSED, + "float8": UNUSED, + "for": FOR, + "force": FORCE, + "foreign": UNUSED, + "from": FROM, + "fulltext": UNUSED, + "generated": 
UNUSED, + "get": UNUSED, + "grant": UNUSED, + "group": GROUP, + "group_concat": GROUP_CONCAT, + "gtid": GTID, + "hash": HASH, + "having": HAVING, + "high_priority": UNUSED, + "hour_microsecond": UNUSED, + "hour_minute": UNUSED, + "hour_second": UNUSED, + "if": IF, + "ignore": IGNORE, + "in": IN, + "index": INDEX, + "infile": UNUSED, + "inout": UNUSED, + "inner": INNER, + "insensitive": UNUSED, + "insert": INSERT, + "int": INT, + "int1": UNUSED, + "int2": UNUSED, + "int3": UNUSED, + "int4": UNUSED, + "int8": UNUSED, + "integer": INTEGER, + "interval": INTERVAL, + "into": INTO, + "io_after_gtids": UNUSED, + "is": IS, + "iterate": UNUSED, + "join": JOIN, + "json": JSON, + "key": KEY, + "keys": UNUSED, + "kill": KILL, + "language": LANGUAGE, + "last_insert_id": LAST_INSERT_ID, + "leading": UNUSED, + "leave": UNUSED, + "left": LEFT, + "like": LIKE, + "limit": LIMIT, + "linear": UNUSED, + "lines": UNUSED, + "load": UNUSED, + "localtime": LOCALTIME, + "localtimestamp": LOCALTIMESTAMP, + "lock": LOCK, + "long": UNUSED, + "longblob": LONGBLOB, + "longtext": LONGTEXT, + "loop": UNUSED, + "low_priority": UNUSED, + "master_bind": UNUSED, + "match": MATCH, + "maxvalue": UNUSED, + "mediumblob": MEDIUMBLOB, + "mediumint": MEDIUMINT, + "mediumtext": MEDIUMTEXT, + "middleint": UNUSED, + "minute_microsecond": UNUSED, + "minute_second": UNUSED, + "mod": MOD, + "mode": MODE, + "modify": MODIFY, + "modifies": UNUSED, + "natural": NATURAL, + "nchar": NCHAR, + "next": NEXT, + "not": NOT, + "no_write_to_binlog": UNUSED, + "null": NULL, + "numeric": NUMERIC, + "offset": OFFSET, + "on": ON, + "optimize": OPTIMIZE, + "optimizer_costs": UNUSED, + "option": UNUSED, + "optionally": UNUSED, + "or": OR, + "order": ORDER, + "out": UNUSED, + "outer": OUTER, + "outfile": UNUSED, + "partition": PARTITION, + "precision": UNUSED, + "primary": PRIMARY, + "procedure": UNUSED, + "processlist": PROCESSLIST, + "query": QUERY, + "queryz": QUERYZ, + "range": UNUSED, + "read": UNUSED, + "reads": UNUSED, + 
"read_write": UNUSED, + "real": REAL, + "references": UNUSED, + "regexp": REGEXP, + "release": UNUSED, + "rename": RENAME, + "repair": REPAIR, + "repeat": UNUSED, + "replace": REPLACE, + "require": UNUSED, + "resignal": UNUSED, + "restrict": UNUSED, + "return": UNUSED, + "revoke": UNUSED, + "right": RIGHT, + "rlike": REGEXP, + "schema": UNUSED, + "schemas": UNUSED, + "second_microsecond": UNUSED, + "select": SELECT, + "sensitive": UNUSED, + "separator": SEPARATOR, + "set": SET, + "session": SESSION, + "share": SHARE, + "show": SHOW, + "signal": UNUSED, + "signed": SIGNED, + "smallint": SMALLINT, + "spatial": UNUSED, + "specific": UNUSED, + "sql": UNUSED, + "sqlexception": UNUSED, + "sqlstate": UNUSED, + "sqlwarning": UNUSED, + "sql_big_result": UNUSED, + "sql_cache": SQL_CACHE, + "sql_calc_found_rows": UNUSED, + "sql_no_cache": SQL_NO_CACHE, + "sql_small_result": UNUSED, + "ssl": UNUSED, + "status": STATUS, + "start": START, + "starting": UNUSED, + "stored": UNUSED, + "straight_join": STRAIGHT_JOIN, + "table": TABLE, + "tables": TABLES, + "terminated": UNUSED, + "text": TEXT, + "then": THEN, + "time": TIME, + "timestamp": TIMESTAMP, + "tinyblob": TINYBLOB, + "tinyint": TINYINT, + "tinytext": TINYTEXT, + "to": TO, + "trailing": UNUSED, + "trigger": UNUSED, + "true": TRUE, + "truncate": TRUNCATE, + "transaction": TRANSACTION, + "txnz": TXNZ, + "undo": UNUSED, + "union": UNION, + "unique": UNIQUE, + "unlock": UNUSED, + "unsigned": UNSIGNED, + "update": UPDATE, + "usage": UNUSED, + "use": USE, + "using": USING, + "utc_date": UTC_DATE, + "utc_time": UTC_TIME, + "utc_timestamp": UTC_TIMESTAMP, + "values": VALUES, + "varbinary": VARBINARY, + "varchar": VARCHAR, + "varcharacter": UNUSED, + "variables": VARIABLES, + "varying": UNUSED, + "versions": VERSIONS, + "virtual": UNUSED, + "view": VIEW, + "vitess_keyspaces": VITESS_KEYSPACES, + "vitess_shards": VITESS_SHARDS, + "vschema_tables": VSCHEMA_TABLES, + "warnings": WARNINGS, + "when": WHEN, + "where": WHERE, + "while": 
UNUSED, + "with": WITH, + "write": UNUSED, + "xa": XA, + "xor": UNUSED, + "year": YEAR, + "year_month": UNUSED, + "zerofill": ZEROFILL, +} + +// keywordStrings contains the reverse mapping of token to keyword strings +var keywordStrings = map[int]string{} + +func init() { + for str, id := range keywords { + if id == UNUSED { + continue + } + keywordStrings[id] = str + } +} + +// Lex returns the next token form the Tokenizer. +// This function is used by go yacc. +func (tkn *Tokenizer) Lex(lval *yySymType) int { + typ, val := tkn.Scan() + for typ == COMMENT { + if tkn.AllowComments { + break + } + typ, val = tkn.Scan() + } + lval.bytes = val + tkn.lastToken = val + return typ +} + +// Error is called by go yacc if there's a parsing error. +func (tkn *Tokenizer) Error(err string) { + buf := &bytes2.Buffer{} + if tkn.lastToken != nil { + fmt.Fprintf(buf, "%s at position %v near '%s'", err, tkn.Position, tkn.lastToken) + } else { + fmt.Fprintf(buf, "%s at position %v", err, tkn.Position) + } + tkn.LastError = buf.String() +} + +// Scan scans the tokenizer for the next token and returns +// the token type and an optional value. 
func (tkn *Tokenizer) Scan() (int, []byte) {
	if tkn.ForceEOF {
		// The grammar asked us to stop early (see forceEOF in sql.y).
		return 0, nil
	}

	if tkn.lastChar == 0 {
		// First call: prime the one-character lookahead.
		tkn.next()
	}
	tkn.skipBlank()
	switch ch := tkn.lastChar; {
	case isLetter(ch):
		tkn.next()
		if ch == 'X' || ch == 'x' {
			// x'0A' style hex literal.
			if tkn.lastChar == '\'' {
				tkn.next()
				return tkn.scanHex()
			}
		}
		return tkn.scanIdentifier(byte(ch))
	case isDigit(ch):
		return tkn.scanNumber(false)
	case ch == ':':
		return tkn.scanBindVar()
	default:
		tkn.next()
		switch ch {
		case eofChar:
			return 0, nil
		case '=', ',', ';', '(', ')', '+', '*', '%', '^', '~':
			// Single-character tokens are returned as their own character codes.
			return int(ch), nil
		case '&':
			if tkn.lastChar == '&' {
				tkn.next()
				return AND, nil
			}
			return int(ch), nil
		case '|':
			if tkn.lastChar == '|' {
				tkn.next()
				return OR, nil
			}
			return int(ch), nil
		case '?':
			// MySQL positional placeholder: rewritten to a named
			// bind variable :v1, :v2, ...
			tkn.posVarIndex++
			buf := new(bytes2.Buffer)
			fmt.Fprintf(buf, ":v%d", tkn.posVarIndex)
			return VALUE_ARG, buf.Bytes()
		case '.':
			if isDigit(tkn.lastChar) {
				// ".5" style float literal.
				return tkn.scanNumber(true)
			}
			return int(ch), nil
		case '/':
			switch tkn.lastChar {
			case '/':
				tkn.next()
				return tkn.scanCommentType1("//")
			case '*':
				tkn.next()
				return tkn.scanCommentType2()
			default:
				return int(ch), nil
			}
		case '#':
			tkn.next()
			return tkn.scanCommentType1("#")
		case '-':
			switch tkn.lastChar {
			case '-':
				tkn.next()
				return tkn.scanCommentType1("--")
			case '>':
				// JSON operators -> and ->>.
				tkn.next()
				if tkn.lastChar == '>' {
					tkn.next()
					return JSON_UNQUOTE_EXTRACT_OP, nil
				}
				return JSON_EXTRACT_OP, nil
			}
			return int(ch), nil
		case '<':
			switch tkn.lastChar {
			case '>':
				tkn.next()
				return NE, nil
			case '<':
				tkn.next()
				return SHIFT_LEFT, nil
			case '=':
				tkn.next()
				switch tkn.lastChar {
				case '>':
					// <=> null-safe equality operator.
					tkn.next()
					return NULL_SAFE_EQUAL, nil
				default:
					return LE, nil
				}
			default:
				return int(ch), nil
			}
		case '>':
			switch tkn.lastChar {
			case '=':
				tkn.next()
				return GE, nil
			case '>':
				tkn.next()
				return SHIFT_RIGHT, nil
			default:
				return int(ch), nil
			}
		case '!':
			if tkn.lastChar == '=' {
				tkn.next()
				return NE, nil
			}
			return int(ch), nil
		case '\'', '"':
			return tkn.scanString(ch, STRING)
		case '`':
			return tkn.scanLiteralIdentifier()
		default:
			return LEX_ERROR, []byte{byte(ch)}
		}
	}
}

// skipBlank advances the lookahead past whitespace (space, LF, CR, tab).
func (tkn *Tokenizer) skipBlank() {
	ch := tkn.lastChar
	for ch == ' ' || ch == '\n' || ch == '\r' || ch == '\t' {
		tkn.next()
		ch = tkn.lastChar
	}
}

// scanIdentifier reads an identifier or keyword starting with firstByte.
// Keywords are matched case-insensitively against the keywords map and
// returned lowercased; plain identifiers keep their original case.
func (tkn *Tokenizer) scanIdentifier(firstByte byte) (int, []byte) {
	buffer := &bytes2.Buffer{}
	buffer.WriteByte(firstByte)
	for isLetter(tkn.lastChar) || isDigit(tkn.lastChar) {
		buffer.WriteByte(byte(tkn.lastChar))
		tkn.next()
	}
	lowered := bytes.ToLower(buffer.Bytes())
	loweredStr := string(lowered)
	if keywordID, found := keywords[loweredStr]; found {
		return keywordID, lowered
	}
	// dual must always be case-insensitive
	if loweredStr == "dual" {
		return ID, lowered
	}
	return ID, buffer.Bytes()
}

// scanHex reads the digits of an x'...' literal (the opening quote has
// already been consumed). The closing quote is required and the digit
// count must be even, i.e. whole bytes.
func (tkn *Tokenizer) scanHex() (int, []byte) {
	buffer := &bytes2.Buffer{}
	tkn.scanMantissa(16, buffer)
	if tkn.lastChar != '\'' {
		return LEX_ERROR, buffer.Bytes()
	}
	tkn.next()
	if buffer.Len()%2 != 0 {
		return LEX_ERROR, buffer.Bytes()
	}
	return HEX, buffer.Bytes()
}

// scanLiteralIdentifier reads a `backtick quoted` identifier. A doubled
// backtick inside the quotes is an escaped literal backtick. Empty
// identifiers and unterminated quotes are lex errors.
func (tkn *Tokenizer) scanLiteralIdentifier() (int, []byte) {
	buffer := &bytes2.Buffer{}
	backTickSeen := false
	for {
		if backTickSeen {
			if tkn.lastChar != '`' {
				// Single backtick closed the identifier.
				break
			}
			backTickSeen = false
			buffer.WriteByte('`')
			tkn.next()
			continue
		}
		// The previous char was not a backtick.
		switch tkn.lastChar {
		case '`':
			backTickSeen = true
		case eofChar:
			// Premature EOF.
			return LEX_ERROR, buffer.Bytes()
		default:
			buffer.WriteByte(byte(tkn.lastChar))
		}
		tkn.next()
	}
	if buffer.Len() == 0 {
		return LEX_ERROR, buffer.Bytes()
	}
	return ID, buffer.Bytes()
}

// scanBindVar reads a :name (VALUE_ARG) or ::name (LIST_ARG) bind
// variable; the name may contain letters, digits, and dots.
func (tkn *Tokenizer) scanBindVar() (int, []byte) {
	buffer := &bytes2.Buffer{}
	buffer.WriteByte(byte(tkn.lastChar))
	token := VALUE_ARG
	tkn.next()
	if tkn.lastChar == ':' {
		token = LIST_ARG
		buffer.WriteByte(byte(tkn.lastChar))
		tkn.next()
	}
	if !isLetter(tkn.lastChar) {
		return LEX_ERROR, buffer.Bytes()
	}
	for isLetter(tkn.lastChar) || isDigit(tkn.lastChar) || tkn.lastChar == '.' {
		buffer.WriteByte(byte(tkn.lastChar))
		tkn.next()
	}
	return token, buffer.Bytes()
}

// scanMantissa consumes a run of digits valid in the given base into buffer.
func (tkn *Tokenizer) scanMantissa(base int, buffer *bytes2.Buffer) {
	for digitVal(tkn.lastChar) < base {
		tkn.consumeNext(buffer)
	}
}

// scanNumber reads an INTEGRAL, FLOAT, or HEXNUM literal. seenDecimalPoint
// is true when Scan already consumed a leading '.' (".5" style floats).
func (tkn *Tokenizer) scanNumber(seenDecimalPoint bool) (int, []byte) {
	token := INTEGRAL
	buffer := &bytes2.Buffer{}
	if seenDecimalPoint {
		token = FLOAT
		buffer.WriteByte('.')
		tkn.scanMantissa(10, buffer)
		goto exponent
	}

	// 0x construct.
	if tkn.lastChar == '0' {
		tkn.consumeNext(buffer)
		if tkn.lastChar == 'x' || tkn.lastChar == 'X' {
			token = HEXNUM
			tkn.consumeNext(buffer)
			tkn.scanMantissa(16, buffer)
			goto exit
		}
	}

	tkn.scanMantissa(10, buffer)

	if tkn.lastChar == '.' {
		token = FLOAT
		tkn.consumeNext(buffer)
		tkn.scanMantissa(10, buffer)
	}

exponent:
	if tkn.lastChar == 'e' || tkn.lastChar == 'E' {
		token = FLOAT
		tkn.consumeNext(buffer)
		if tkn.lastChar == '+' || tkn.lastChar == '-' {
			tkn.consumeNext(buffer)
		}
		tkn.scanMantissa(10, buffer)
	}

exit:
	// A letter cannot immediately follow a number.
	if isLetter(tkn.lastChar) {
		return LEX_ERROR, buffer.Bytes()
	}

	return token, buffer.Bytes()
}

// scanString reads a quoted string (delim is ' or "). A doubled delimiter
// is an escaped delimiter; backslash escapes are decoded via
// sqltypes.SQLDecodeMap. EOF before the closing delimiter is a lex error.
func (tkn *Tokenizer) scanString(delim uint16, typ int) (int, []byte) {
	buffer := &bytes2.Buffer{}
	for {
		ch := tkn.lastChar
		tkn.next()
		if ch == delim {
			if tkn.lastChar == delim {
				// Doubled delimiter: literal quote, keep going.
				tkn.next()
			} else {
				break
			}
		} else if ch == '\\' {
			if tkn.lastChar == eofChar {
				return LEX_ERROR, buffer.Bytes()
			}
			if decodedChar := sqltypes.SQLDecodeMap[byte(tkn.lastChar)]; decodedChar == sqltypes.DontEscape {
				ch = tkn.lastChar
			} else {
				ch = uint16(decodedChar)
			}
			tkn.next()
		}
		if ch == eofChar {
			return LEX_ERROR, buffer.Bytes()
		}
		buffer.WriteByte(byte(ch))
	}
	return typ, buffer.Bytes()
}

// scanCommentType1 reads a line comment introduced by prefix
// ("//", "#", or "--") up to and including the newline.
func (tkn *Tokenizer) scanCommentType1(prefix string) (int, []byte) {
	buffer := &bytes2.Buffer{}
	buffer.WriteString(prefix)
	for tkn.lastChar != eofChar {
		if tkn.lastChar == '\n' {
			tkn.consumeNext(buffer)
			break
		}
		tkn.consumeNext(buffer)
	}
	return COMMENT, buffer.Bytes()
}

// scanCommentType2 reads a /* ... */ block comment; EOF before the
// closing */ is a lex error.
func (tkn *Tokenizer) scanCommentType2() (int, []byte) {
	buffer := &bytes2.Buffer{}
	buffer.WriteString("/*")
	for {
		if tkn.lastChar == '*' {
			tkn.consumeNext(buffer)
			if tkn.lastChar == '/' {
				tkn.consumeNext(buffer)
				break
			}
			continue
		}
		if tkn.lastChar == eofChar {
			return LEX_ERROR, buffer.Bytes()
		}
		tkn.consumeNext(buffer)
	}
	return COMMENT, buffer.Bytes()
}

// consumeNext appends the lookahead character to buffer and advances.
// Callers must have ruled out EOF first.
func (tkn *Tokenizer) consumeNext(buffer *bytes2.Buffer) {
	if tkn.lastChar == eofChar {
		// This should never happen.
		panic("unexpected EOF")
	}
	buffer.WriteByte(byte(tkn.lastChar))
	tkn.next()
}

// next advances the one-character lookahead, substituting the eofChar
// sentinel once the input is exhausted.
func (tkn *Tokenizer) next() {
	if ch, err := tkn.InStream.ReadByte(); err != nil {
		// Only EOF is possible.
		tkn.lastChar = eofChar
	} else {
		tkn.lastChar = uint16(ch)
	}
	tkn.Position++
}

// isLetter reports whether ch may start/continue an identifier
// (letters, underscore, and '@' for user/session variables).
func isLetter(ch uint16) bool {
	return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch == '@'
}

// digitVal returns the numeric value of a hex digit, or 16 for non-digits
// so that scanMantissa's "< base" test fails for any base <= 16.
func digitVal(ch uint16) int {
	switch {
	case '0' <= ch && ch <= '9':
		return int(ch) - '0'
	case 'a' <= ch && ch <= 'f':
		return int(ch) - 'a' + 10
	case 'A' <= ch && ch <= 'F':
		return int(ch) - 'A' + 10
	}
	return 16 // larger than any legal digit val
}

// isDigit reports whether ch is a decimal digit.
func isDigit(ch uint16) bool {
	return '0' <= ch && ch <= '9'
}
diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/token_test.go b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/token_test.go
new file mode 100644
index 00000000..9a55ab4b
--- /dev/null
+++ b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/token_test.go
@@ -0,0 +1,63 @@
/*
Copyright 2017 Google Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package sqlparser

import "testing"

// TestLiteralID exercises scanLiteralIdentifier: backtick-quoted
// identifiers, doubled-backtick escapes, and unterminated/empty errors.
func TestLiteralID(t *testing.T) {
	testcases := []struct {
		in  string
		id  int
		out string
	}{{
		in:  "`aa`",
		id:  ID,
		out: "aa",
	}, {
		in:  "```a```",
		id:  ID,
		out: "`a`",
	}, {
		in:  "`a``b`",
		id:  ID,
		out: "a`b",
	}, {
		// Trailing "c" is a separate token; only the first Scan is checked.
		in:  "`a``b`c",
		id:  ID,
		out: "a`b",
	}, {
		in:  "`a``b",
		id:  LEX_ERROR,
		out: "a`b",
	}, {
		in:  "`a``b``",
		id:  LEX_ERROR,
		out: "a`b`",
	}, {
		in:  "``",
		id:  LEX_ERROR,
		out: "",
	}}

	for _, tcase := range testcases {
		tkn := NewStringTokenizer(tcase.in)
		id, out := tkn.Scan()
		if tcase.id != id || string(out) != tcase.out {
			t.Errorf("Scan(%s): %d, %s, want %d, %s", tcase.in, id, out, tcase.id, tcase.out)
		}
	}
}
diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/tracked_buffer.go b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/tracked_buffer.go
new file mode 100644
index 00000000..a35efe6f
--- /dev/null
+++ b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/tracked_buffer.go
@@ -0,0 +1,115 @@
/*
Copyright 2017 Google Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package sqlparser

import (
	"bytes"
	"fmt"
)

// bindLocation records the byte offset and length of a bind variable
// written into the buffer, for later substitution.
type bindLocation struct {
	offset, length int
}

// TrackedBuffer is used to rebuild a query from the ast.
// bindLocations keeps track of locations in the buffer that
// use bind variables for efficient future substitutions.
// nodeFormatter is the formatting function the buffer will
// use to format a node.
By default (nil), it's FormatNode.
// But you can supply a different formatting function if you
// want to generate a query that's different from the default.
type TrackedBuffer struct {
	*bytes.Buffer
	bindLocations []bindLocation
	nodeFormatter func(buf *TrackedBuffer, node SQLNode)
}

// NewTrackedBuffer creates a new TrackedBuffer.
func NewTrackedBuffer(nodeFormatter func(buf *TrackedBuffer, node SQLNode)) *TrackedBuffer {
	return &TrackedBuffer{
		Buffer:        new(bytes.Buffer),
		nodeFormatter: nodeFormatter,
	}
}

// Myprintf mimics fmt.Fprintf(buf, ...), but limited to Node(%v),
// Node.Value(%s) and string(%s). It also allows a %a for a value argument, in
// which case it adds tracking info for future substitutions.
//
// The name must be something other than the usual Printf() to avoid "go vet"
// warnings due to our custom format specifiers.
// Any other verb, or an argument of an unexpected type, panics.
func (buf *TrackedBuffer) Myprintf(format string, values ...interface{}) {
	end := len(format)
	fieldnum := 0
	for i := 0; i < end; {
		// Copy the literal run up to the next '%'.
		lasti := i
		for i < end && format[i] != '%' {
			i++
		}
		if i > lasti {
			buf.WriteString(format[lasti:i])
		}
		if i >= end {
			break
		}
		i++ // '%'
		switch format[i] {
		case 'c':
			switch v := values[fieldnum].(type) {
			case byte:
				buf.WriteByte(v)
			case rune:
				buf.WriteRune(v)
			default:
				panic(fmt.Sprintf("unexpected TrackedBuffer type %T", v))
			}
		case 's':
			switch v := values[fieldnum].(type) {
			case []byte:
				buf.Write(v)
			case string:
				buf.WriteString(v)
			default:
				panic(fmt.Sprintf("unexpected TrackedBuffer type %T", v))
			}
		case 'v':
			node := values[fieldnum].(SQLNode)
			if buf.nodeFormatter == nil {
				node.Format(buf)
			} else {
				buf.nodeFormatter(buf, node)
			}
		case 'a':
			buf.WriteArg(values[fieldnum].(string))
		default:
			panic("unexpected")
		}
		fieldnum++
		i++
	}
}

// WriteArg writes a value argument into the buffer along with
// tracking information for future substitutions. arg must contain
// the ":" or "::" prefix.
func (buf *TrackedBuffer) WriteArg(arg string) {
	buf.bindLocations = append(buf.bindLocations, bindLocation{
		offset: buf.Len(),
		length: len(arg),
	})
	buf.WriteString(arg)
}
diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/tracked_buffer_test.go b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/tracked_buffer_test.go
new file mode 100644
index 00000000..ae2cc0e1
--- /dev/null
+++ b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/tracked_buffer_test.go
@@ -0,0 +1,24 @@
/*
Copyright 2017 Google Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package sqlparser

import "testing"

// TestTrackedBuffer smoke-tests the three simple verbs; it only checks
// that Myprintf does not panic, not the produced output.
func TestTrackedBuffer(t *testing.T) {
	buf := NewTrackedBuffer(nil)
	buf.Myprintf("%c,%s,%a", 'a', "a", "a")
}
diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/txn.go b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/txn.go
new file mode 100644
index 00000000..111ab19a
--- /dev/null
+++ b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/txn.go
@@ -0,0 +1,37 @@
// Copyright 2012, Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package sqlparser

// NOTE(review): empty import clause below is dead; gofmt would remove it.
import ()

const (
	// StartTxnStr represents the txn start.
	StartTxnStr = "start transaction"

	// CommitTxnStr represents the txn commit.
	CommitTxnStr = "commit"
)

// Transaction represents the transaction tuple.
type Transaction struct {
	Action string // one of StartTxnStr or CommitTxnStr
}

func (*Transaction) iStatement() {}

// Format formats the node. Actions other than the two known constants
// produce no output.
func (node *Transaction) Format(buf *TrackedBuffer) {
	switch node.Action {
	case StartTxnStr:
		buf.WriteString(StartTxnStr)
	case CommitTxnStr:
		buf.WriteString(CommitTxnStr)
	}
}

// WalkSubtree walks the nodes of the subtree. Transaction has no
// children, so this is a no-op.
func (node *Transaction) WalkSubtree(visit Visit) error {
	return nil
}
diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/txn_test.go b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/txn_test.go
new file mode 100644
index 00000000..3871a1fc
--- /dev/null
+++ b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/txn_test.go
@@ -0,0 +1,56 @@
/*
Copyright 2017 Google Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package sqlparser

import "strings"
import "testing"

// TestTxn checks that transaction statements round-trip through
// Parse -> String unchanged.
func TestTxn(t *testing.T) {
	validSQL := []struct {
		input  string
		output string
	}{
		{
			input:  "start transaction",
			output: "start transaction",
		},

		{
			input:  "commit",
			output: "commit",
		},
	}

	for _, exp := range validSQL {
		sql := strings.TrimSpace(exp.input)
		tree, err := Parse(sql)
		if err != nil {
			t.Errorf("input: %s, err: %v", sql, err)
			continue
		}

		// Walk.
		Walk(func(node SQLNode) (bool, error) {
			return true, nil
		}, tree)

		got := String(tree.(*Transaction))
		if exp.output != got {
			t.Errorf("want:\n%s\ngot:\n%s", exp.output, got)
		}
	}
}
diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/xa.go b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/xa.go
new file mode 100644
index 00000000..f8bad597
--- /dev/null
+++ b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/xa.go
@@ -0,0 +1,23 @@
// Copyright 2012, Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package sqlparser

// NOTE(review): empty import clause below is dead; gofmt would remove it.
import ()

func (*Xa) iStatement() {}

// Xa represents a XA statement. The node carries no fields, so the
// original statement text (xa begin/prepare/end/commit/rollback ...)
// is not preserved.
type Xa struct {
}

// Format formats the node; all XA statements render as the fixed
// string "XA" (see xa_test.go).
func (node *Xa) Format(buf *TrackedBuffer) {
	buf.WriteString("XA")
}

// WalkSubtree walks the nodes of the subtree. Xa has no children,
// so this is a no-op.
func (node *Xa) WalkSubtree(visit Visit) error {
	return nil
}
diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/xa_test.go b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/xa_test.go
new file mode 100644
index 00000000..b656d54c
--- /dev/null
+++ b/src/vendor/github.com/xelabs/go-mysqlstack/sqlparser/xa_test.go
@@ -0,0 +1,76 @@
/*
Copyright 2017 Google Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package sqlparser

import (
	"strings"
	"testing"
)

// TestXA checks that every XA statement variant parses and formats to
// the fixed string "XA".
func TestXA(t *testing.T) {
	validSQL := []struct {
		input  string
		output string
	}{
		{
			input:  "xa begin 'x1'",
			output: "XA",
		},

		{
			input:  "xa prepare 'x1'",
			output: "XA",
		},

		{
			input:  "xa end 'x1'",
			output: "XA",
		},

		{
			input:  "xa commit 'x1'",
			output: "XA",
		},

		{
			input:  "xa rollback 'x1'",
			output: "XA",
		},
	}

	for _, exp := range validSQL {
		sql := strings.TrimSpace(exp.input)
		tree, err := Parse(sql)
		if err != nil {
			t.Errorf("input: %s, err: %v", sql, err)
			continue
		}

		// Walk.
		Walk(func(node SQLNode) (bool, error) {
			return true, nil
		}, tree)

		node := tree.(*Xa)

		// Format.
		got := String(node)
		if exp.output != got {
			t.Errorf("want:\n%s\ngot:\n%s", exp.output, got)
		}
	}
}
diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/xlog/options.go b/src/vendor/github.com/xelabs/go-mysqlstack/xlog/options.go
new file mode 100644
index 00000000..0a5def3d
--- /dev/null
+++ b/src/vendor/github.com/xelabs/go-mysqlstack/xlog/options.go
@@ -0,0 +1,54 @@
/*
 * go-mysqlstack
 * xelabs.org
 *
 * Copyright (c) XeLabs
 * GPL License
 *
 */

package xlog

var (
	defaultName  = " "   // default logger prefix (a single space)
	defaultLevel = DEBUG // default minimum level when none is supplied
)

// Options used for the options of the xlog.
type Options struct {
	Name  string   // prefix passed to the underlying log.Logger
	Level LogLevel // minimum level that will be emitted
}

// Option func.
type Option func(*Options)

// newOptions applies the given Option funcs and fills in defaults for
// any field left at its zero value.
func newOptions(opts ...Option) *Options {
	opt := &Options{}
	for _, o := range opts {
		o(opt)
	}

	if len(opt.Name) == 0 {
		opt.Name = defaultName
	}

	if opt.Level == 0 {
		opt.Level = defaultLevel
	}
	return opt
}

// Name used to set the name.
func Name(v string) Option {
	return func(o *Options) {
		o.Name = v
	}
}

// Level used to set the log level.
func Level(v LogLevel) Option {
	return func(o *Options) {
		o.Level = v
	}
}
diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/xlog/xlog.go b/src/vendor/github.com/xelabs/go-mysqlstack/xlog/xlog.go
new file mode 100644
index 00000000..b864a468
--- /dev/null
+++ b/src/vendor/github.com/xelabs/go-mysqlstack/xlog/xlog.go
@@ -0,0 +1,174 @@
/*
 * go-mysqlstack
 * xelabs.org
 *
 * Copyright (c) XeLabs
 * GPL License
 *
 */

package xlog

var (
	// defaultlog is the process-wide logger returned by GetLog; it is
	// overwritten by every NewXLog call (unsynchronized).
	defaultlog *Log
)

import (
	"fmt"
	"io"
	"log"
	"log/syslog"
	"os"
	"strings"
)

// LogLevel used for log level.
type LogLevel int

const (
	// DEBUG enum.
	DEBUG LogLevel = 1 << iota
	// INFO enum.
	INFO
	// WARNING enum.
	WARNING
	// ERROR enum.
	ERROR
	// FATAL enum.
	FATAL
	// PANIC enum.
	PANIC
)

// LevelNames represents the string name of all levels.
// Note the levels are powers of two, so this array is sparse: only
// indices 1, 2, 4, 8, 16, 32 hold names; the rest are "".
var LevelNames = [...]string{
	DEBUG:   "DEBUG",
	INFO:    "INFO",
	WARNING: "WARNING",
	ERROR:   "ERROR",
	FATAL:   "FATAL",
	PANIC:   "PANIC",
}

const (
	// D_LOG_FLAGS is the default log flags.
	D_LOG_FLAGS int = log.LstdFlags | log.Lmicroseconds | log.Lshortfile
)

// Log struct.
type Log struct {
	opts *Options // level/name settings; nil for Logs built via NewLog
	*log.Logger
}

// NewSysLog creates a new sys log. It panics if the syslog daemon
// cannot be reached.
func NewSysLog(opts ...Option) *Log {
	w, err := syslog.New(syslog.LOG_DEBUG, "")
	if err != nil {
		panic(err)
	}
	return NewXLog(w, opts...)
}

// NewStdLog creates a new std log writing to os.Stdout.
func NewStdLog(opts ...Option) *Log {
	return NewXLog(os.Stdout, opts...)
}

// NewXLog creates a new xlog writing to w. As a side effect it replaces
// the package-wide default logger returned by GetLog.
func NewXLog(w io.Writer, opts ...Option) *Log {
	options := newOptions(opts...)

	l := &Log{
		opts: options,
	}
	l.Logger = log.New(w, l.opts.Name, D_LOG_FLAGS)
	defaultlog = l
	return l
}

// NewLog creates the new log. opts is left nil here, so only the
// embedded *log.Logger methods are usable on the result; the leveled
// methods (Debug/Info/...) would dereference the nil opts.
func NewLog(w io.Writer, prefix string, flag int) *Log {
	l := &Log{}
	l.Logger = log.New(w, prefix, flag)
	return l
}

// GetLog returns Log.
// GetLog lazily creates a stdout logger at INFO level on first use.
// NOTE(review): the nil-check/assign is not synchronized — confirm all
// callers touch it from a single goroutine.
func GetLog() *Log {
	if defaultlog == nil {
		log := NewStdLog(Level(INFO))
		defaultlog = log
	}
	return defaultlog
}

// SetLevel used to set the log level by name ("DEBUG", "INFO", ...).
// The array index equals the level's numeric value because LevelNames
// is sparse; unknown names are silently ignored.
func (t *Log) SetLevel(level string) {
	for i, v := range LevelNames {
		if level == v {
			t.opts.Level = LogLevel(i)
			return
		}
	}
}

// Debug used to log debug msg.
func (t *Log) Debug(format string, v ...interface{}) {
	if DEBUG < t.opts.Level {
		return
	}
	t.log("\t [DEBUG] \t%s", fmt.Sprintf(format, v...))
}

// Info used to log info msg.
func (t *Log) Info(format string, v ...interface{}) {
	if INFO < t.opts.Level {
		return
	}
	t.log("\t [INFO] \t%s", fmt.Sprintf(format, v...))
}

// Warning used to log warning msg.
func (t *Log) Warning(format string, v ...interface{}) {
	if WARNING < t.opts.Level {
		return
	}
	t.log("\t [WARNING] \t%s", fmt.Sprintf(format, v...))
}

// Error used to log error msg.
func (t *Log) Error(format string, v ...interface{}) {
	if ERROR < t.opts.Level {
		return
	}
	t.log("\t [ERROR] \t%s", fmt.Sprintf(format, v...))
}

// Fatal used to log fatal msg, then exits the process with status 1.
func (t *Log) Fatal(format string, v ...interface{}) {
	if FATAL < t.opts.Level {
		return
	}
	t.log("\t [FATAL+EXIT] \t%s", fmt.Sprintf(format, v...))
	os.Exit(1)
}

// Panic used to log panic msg, then panics with the formatted message.
// NOTE(review): msg is passed to t.log as the format string, so a '%'
// in the user message would be re-interpreted — confirm intended.
func (t *Log) Panic(format string, v ...interface{}) {
	if PANIC < t.opts.Level {
		return
	}
	msg := fmt.Sprintf("\t [PANIC] \t%s", fmt.Sprintf(format, v...))
	t.log(msg)
	panic(msg)
}

// Close used to close the log.
func (t *Log) Close() {
	// nothing
}

// log writes through the embedded Logger; calldepth 3 makes Lshortfile
// report the caller of Debug/Info/... rather than this helper.
func (t *Log) log(format string, v ...interface{}) {
	t.Output(3, strings.Repeat(" ", 3)+fmt.Sprintf(format, v...)+"\n")
}
diff --git a/src/vendor/github.com/xelabs/go-mysqlstack/xlog/xlog_test.go b/src/vendor/github.com/xelabs/go-mysqlstack/xlog/xlog_test.go
new file mode 100644
index 00000000..a4a6685c
--- /dev/null
+++ b/src/vendor/github.com/xelabs/go-mysqlstack/xlog/xlog_test.go
@@ -0,0 +1,131 @@
/*
 * go-mysqlstack
 * xelabs.org
 *
 * Copyright (c) XeLabs
 * GPL License
 *
 */

package xlog

import (
	"testing"
)

// assert fails the test if the condition is false.
func Assert(tb testing.TB, condition bool, msg string, v ...interface{}) {
	if !condition {
		tb.FailNow()
	}
}

// TestGetLog exercises the lazy default logger and its replacement by a
// later NewStdLog call.
func TestGetLog(t *testing.T) {
	GetLog().Debug("DEBUG")
	log := NewStdLog()
	log.SetLevel("INFO")
	GetLog().Debug("DEBUG")
	GetLog().Info("INFO")
}

// TestSysLog smoke-tests the syslog backend at every level; output is
// not asserted.
func TestSysLog(t *testing.T) {
	log := NewSysLog()

	log.Debug("DEBUG")
	log.Info("INFO")
	log.Warning("WARNING")
	log.Error("ERROR")

	log.SetLevel("DEBUG")
	log.Debug("DEBUG")
	log.Info("INFO")
	log.Warning("WARNING")
	log.Error("ERROR")

	log.SetLevel("INFO")
	log.Debug("DEBUG")
	log.Info("INFO")
	log.Warning("WARNING")
	log.Error("ERROR")

	log.SetLevel("WARNING")
	log.Debug("DEBUG")
	log.Info("INFO")
	log.Warning("WARNING")
	log.Error("ERROR")

	log.SetLevel("ERROR")
	log.Debug("DEBUG")
	log.Info("INFO")
	log.Warning("WARNING")
	log.Error("ERROR")
}

// TestStdLog smoke-tests the stdout backend at every level; output is
// not asserted.
func TestStdLog(t *testing.T) {
	log := NewStdLog()

	log.Println("........DEFAULT........")
	log.Debug("DEBUG")
	log.Info("INFO")
	log.Warning("WARNING")
	log.Error("ERROR")

	log.Println("........DEBUG........")
	log.SetLevel("DEBUG")
	log.Debug("DEBUG")
	log.Info("INFO")
	log.Warning("WARNING")
	log.Error("ERROR")

	log.Println("........INFO........")
	log.SetLevel("INFO")
	log.Debug("DEBUG")
	log.Info("INFO")
	log.Warning("WARNING")
	log.Error("ERROR")

log.Println("........WARNING........") + log.SetLevel("WARNING") + log.Debug("DEBUG") + log.Info("INFO") + log.Warning("WARNING") + log.Error("ERROR") + + log.Println("........ERROR........") + log.SetLevel("ERROR") + log.Debug("DEBUG") + log.Info("INFO") + log.Warning("WARNING") + log.Error("ERROR") +} + +func TestLogLevel(t *testing.T) { + log := NewStdLog() + { + log.SetLevel("DEBUG") + want := DEBUG + got := log.opts.Level + Assert(t, want == got, "want[%v]!=got[%v]", want, got) + } + + { + log.SetLevel("DEBUGX") + want := DEBUG + got := log.opts.Level + Assert(t, want == got, "want[%v]!=got[%v]", want, got) + } + + { + log.SetLevel("PANIC") + want := PANIC + got := log.opts.Level + Assert(t, want == got, "want[%v]!=got[%v]", want, got) + } + + { + log.SetLevel("WARNING") + want := WARNING + got := log.opts.Level + Assert(t, want == got, "want[%v]!=got[%v]", want, got) + } +} diff --git a/src/xbase/base.go b/src/xbase/base.go new file mode 100644 index 00000000..d960539e --- /dev/null +++ b/src/xbase/base.go @@ -0,0 +1,46 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package xbase + +import ( + "io" + "os" + + "github.com/pkg/errors" +) + +// WriteFile used to write data to file. +func WriteFile(file string, data []byte) error { + flag := os.O_RDWR | os.O_TRUNC + if _, err := os.Stat(file); os.IsNotExist(err) { + flag |= os.O_CREATE + } + f, err := os.OpenFile(file, flag, 0644) + if err != nil { + return errors.WithStack(err) + } + defer f.Close() + + n, err := f.Write(data) + if err != nil { + return errors.WithStack(err) + } + if n != len(data) { + return errors.WithStack(io.ErrShortWrite) + } + return f.Sync() +} + +// TruncateQuery used to truncate the query with max length. 
+func TruncateQuery(query string, max int) string { + if max == 0 || len(query) <= max { + return query + } + return query[:max] + " [TRUNCATED]" +} diff --git a/src/xbase/base_test.go b/src/xbase/base_test.go new file mode 100644 index 00000000..2e020dd1 --- /dev/null +++ b/src/xbase/base_test.go @@ -0,0 +1,53 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package xbase + +import ( + "os" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestXbaseWriteFile(t *testing.T) { + file := "/tmp/xbase.test" + defer os.RemoveAll(file) + + // Write OK. + { + err := WriteFile(file, []byte{0xfd}) + assert.Nil(t, err) + } + + // Write Error. + { + badFile := "/xx/xbase.test" + err := WriteFile(badFile, []byte{0xfd}) + assert.NotNil(t, err) + } +} + +func TestXbaseTruncateQuery(t *testing.T) { + var testCases = []struct { + in, out string + }{{ + in: "", + out: "", + }, { + in: "12345", + out: "12345", + }, { + in: "123456", + out: "12345 [TRUNCATED]", + }} + for _, testCase := range testCases { + got := TruncateQuery(testCase.in, 5) + assert.Equal(t, testCase.out, got) + } +} diff --git a/src/xbase/disk.go b/src/xbase/disk.go new file mode 100644 index 00000000..c8c5244e --- /dev/null +++ b/src/xbase/disk.go @@ -0,0 +1,34 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package xbase + +import ( + "syscall" +) + +// DiskStatus tuple. +type DiskStatus struct { + All uint64 + Used uint64 + Free uint64 +} + +// DiskUsage returns the disk info of the path. 
+func DiskUsage(path string) (*DiskStatus, error) { + disk := &DiskStatus{} + fs := syscall.Statfs_t{} + err := syscall.Statfs(path, &fs) + if err != nil { + return nil, err + } + disk.All = fs.Blocks * uint64(fs.Bsize) + disk.Free = fs.Bavail * uint64(fs.Bsize) + disk.Used = disk.All - disk.Free + return disk, nil +} diff --git a/src/xbase/disk_test.go b/src/xbase/disk_test.go new file mode 100644 index 00000000..ce3e6a6a --- /dev/null +++ b/src/xbase/disk_test.go @@ -0,0 +1,24 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package xbase + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestDiskUsage(t *testing.T) { + disk, err := DiskUsage("/") + assert.Nil(t, err) + + assert.True(t, disk.All > 0) + assert.True(t, disk.Used > 0) + assert.True(t, disk.Free > 0) +} diff --git a/src/xbase/http.go b/src/xbase/http.go new file mode 100644 index 00000000..913365ee --- /dev/null +++ b/src/xbase/http.go @@ -0,0 +1,91 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package xbase + +import ( + "context" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "time" +) + +// makeSimpleRequest used to make a simple http request. 
+func makeSimpleRequest(ctx context.Context, method string, url string, payload interface{}) (*http.Request, error) { + var data string + + if payload != nil { + b, err := json.Marshal(payload) + if err != nil { + return nil, err + } + data = fmt.Sprintf("%s", b) + } + + req, err := http.NewRequest(method, url, strings.NewReader(data)) + if err != nil { + return nil, err + } + //r.Header.Set("Accept-Encoding", "gzip") + if payload != nil { + req.Header.Set("Content-Type", "application/json") + } + req = req.WithContext(ctx) + return req, nil +} + +func httpDo(method string, url string, payload interface{}) (*http.Response, func(), error) { + ctx, _ := context.WithTimeout(context.Background(), 5*time.Second) + req, err := makeSimpleRequest(ctx, method, url, payload) + if err != nil { + return nil, nil, err + } + + client := &http.Client{} + resp, err := client.Do(req) + return resp, func() { + if resp != nil && resp.Body != nil { + resp.Body.Close() + } + }, err +} + +// HTTPPost used to do restful post request. +func HTTPPost(url string, payload interface{}) (*http.Response, func(), error) { + return httpDo("POST", url, payload) +} + +// HTTPPut used to do restful put request. +func HTTPPut(url string, payload interface{}) (*http.Response, func(), error) { + return httpDo("PUT", url, payload) +} + +// HTTPGet used to do restful get request. +func HTTPGet(url string) (string, error) { + resp, cleanup, err := httpDo("GET", url, nil) + if err != nil { + return "", err + } + defer cleanup() + return HTTPReadBody(resp), nil +} + +// HTTPReadBody returns the body of the response. 
+func HTTPReadBody(resp *http.Response) string { + if resp != nil && resp.Body != nil { + bodyBytes, err := ioutil.ReadAll(resp.Body) + if err != nil { + return err.Error() + } + return string(bodyBytes) + } + return "" +} diff --git a/src/xbase/http_test.go b/src/xbase/http_test.go new file mode 100644 index 00000000..59f5db0a --- /dev/null +++ b/src/xbase/http_test.go @@ -0,0 +1,126 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package xbase + +import ( + "context" + "net/http" + "testing" + "time" + + "github.com/ant0ine/go-json-rest/rest" + "github.com/stretchr/testify/assert" + "github.com/xelabs/go-mysqlstack/xlog" +) + +func TestHttpGet(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + httpSvr := mockHTTP(log, ":8888") + defer func() { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + httpSvr.Shutdown(ctx) + }() + + url := "http://127.0.0.1:8888/test/getok" + body, err := HTTPGet(url) + assert.Nil(t, err) + log.Debug("%#v", body) +} + +func TestHttpPost(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + httpSvr := mockHTTP(log, ":7888") + defer func() { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + httpSvr.Shutdown(ctx) + }() + + url := "http://127.0.0.1:7888/test/ok" + type request struct { + } + resp, cleanup, err := HTTPPost(url, &request{}) + assert.Nil(t, err) + defer cleanup() + log.Debug("%#v", resp) +} + +func TestHttpPostTimeout(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + httpSvr := mockHTTP(log, ":8889") + defer func() { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + httpSvr.Shutdown(ctx) + }() + + url := "http://127.0.0.1:8889/test/timeout" + want := "Get http://127.0.0.1:8889/test/timeout: context deadline exceeded" + _, err := HTTPGet(url) + got := err.Error() + assert.Equal(t, want, got) +} + +func 
TestHttpPut(t *testing.T) { + log := xlog.NewStdLog(xlog.Level(xlog.PANIC)) + httpSvr := mockHTTP(log, ":8888") + defer func() { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + httpSvr.Shutdown(ctx) + }() + + url := "http://127.0.0.1:8888/test/putok" + type request struct { + } + resp, cleanup, err := HTTPPut(url, &request{}) + assert.Nil(t, err) + defer cleanup() + log.Debug("%#v", resp) +} + +func mockHTTP(log *xlog.Log, addr string) *http.Server { + api := rest.NewApi() + api.Use(rest.DefaultDevStack...) + + router, err := rest.MakeRouter( + rest.Get("/test/getok", mockOKHandler(log)), + rest.Get("/test/timeout", mockTimeoutHandler(log)), + rest.Post("/test/ok", mockOKHandler(log)), + rest.Put("/test/putok", mockOKHandler(log)), + ) + if err != nil { + log.Panicf("mock.rest.make.router.error:%+v", err) + } + api.SetApp(router) + handlers := api.MakeHandler() + h := &http.Server{Addr: addr, Handler: handlers} + go func() { + if err := h.ListenAndServe(); err != nil { + log.Error("mock.rest.error:%+v", err) + return + } + }() + time.Sleep(time.Millisecond * 100) + return h +} + +func mockOKHandler(log *xlog.Log) rest.HandlerFunc { + f := func(w rest.ResponseWriter, r *rest.Request) { + } + return f +} + +func mockTimeoutHandler(log *xlog.Log) rest.HandlerFunc { + f := func(w rest.ResponseWriter, r *rest.Request) { + time.Sleep(time.Second * 20) + } + return f +} diff --git a/src/xbase/querytypes.go b/src/xbase/querytypes.go new file mode 100644 index 00000000..4f390b9d --- /dev/null +++ b/src/xbase/querytypes.go @@ -0,0 +1,42 @@ +package xbase + +const ( + // UNSUPPORT type. + UNSUPPORT = "UNSUPPORT" + + // DDL type. + DDL = "DDL" + + // USEDB type. + USEDB = "USEDB" + + // SHOW type. + SHOW = "SHOW" + + // SELECT type. + SELECT = "SELECT" + + // INSERT type. + INSERT = "INSERT" + + // UPDATE type. + UPDATE = "UPDATE" + + // DELETE type. + DELETE = "DELETE" + + // REPLACE type. + REPLACE = "REPLACE" + + // EXPLAIN type. 
+ EXPLAIN = "EXPLAIN" + + // KILL type. + KILL = "KILL" + + // SET type. + SET = "SET" + + // TRANSACTION type. + TRANSACTION = "TRANSACTION" +) diff --git a/src/xbase/rfile.go b/src/xbase/rfile.go new file mode 100644 index 00000000..6ddc1a7e --- /dev/null +++ b/src/xbase/rfile.go @@ -0,0 +1,243 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package xbase + +import ( + "fmt" + "io/ioutil" + "os" + "path" + "path/filepath" + "sort" + "strings" + "time" + "xbase/sync2" +) + +const ( + fileFormat = "20060102150405.000" +) + +var ( + _ RotateFile = &rotateFile{} +) + +// RotateFile interface. +type RotateFile interface { + Write(b []byte) (int, error) + Sync() error + Close() + Name() string + GetOldLogInfos() ([]LogInfo, error) + GetNextLogInfo(logName string) (LogInfo, error) + GetCurrLogInfo(ts int64) (LogInfo, error) +} + +type rotateFile struct { + size int + max int + file *os.File + name sync2.AtomicString + dir string + prefix string + extension string +} + +// NewRotateFile creates a new rotateFile. +func NewRotateFile(dir string, prefix string, extension string, maxSize int) RotateFile { + return &rotateFile{ + max: maxSize, + dir: dir, + prefix: prefix, + extension: extension, + } +} + +func (f *rotateFile) openNew() error { + t := time.Now().UTC() + timestamp := t.Format(fileFormat) + next := filepath.Join(f.dir, fmt.Sprintf("%s%s%s", f.prefix, timestamp, f.extension)) + f.name.Set(next) + + cur, err := os.OpenFile(next, os.O_CREATE|os.O_WRONLY, os.FileMode(0644)) + if err != nil { + return err + } + f.file = cur + f.size = 0 + return nil +} + +func (f *rotateFile) rotate() error { + if err := f.file.Sync(); err != nil { + return err + } + if err := f.file.Close(); err != nil { + return err + } + return f.openNew() +} + +// Name returns the current writing file base name. +func (f *rotateFile) Name() string { + return path.Base(f.name.Get()) +} + +// Write used to writes datas to file. 
+func (f *rotateFile) Write(b []byte) (int, error) { + if f.file == nil { + f.openNew() + } + n, err := f.file.Write(b) + if err != nil { + return n, err + } + f.size += n + + if f.size > f.max { + if err := f.rotate(); err != nil { + return n, err + } + } + return n, nil +} + +// Sync used to sync the file. +func (f *rotateFile) Sync() error { + return f.file.Sync() +} + +// Close used to close the file. +func (f *rotateFile) Close() { + if f.file != nil { + f.file.Close() + f.file = nil + } +} + +// LogInfo tuple. +type LogInfo struct { + Name string + // Ts is the timestamp with UTC().UnixNano. + Ts int64 +} + +func (f *rotateFile) logInfos() ([]LogInfo, error) { + infos := make([]LogInfo, 0, 64) + files, err := ioutil.ReadDir(f.dir) + if err != nil { + return infos, err + } + + for _, file := range files { + if file.IsDir() { + continue + } + if filepath.Ext(file.Name()) == f.extension { + name := strings.TrimSuffix(strings.TrimPrefix(file.Name(), f.prefix), f.extension) + t, err := time.Parse(fileFormat, name) + if err != nil { + continue + } + infos = append(infos, LogInfo{ + Name: file.Name(), + Ts: t.UnixNano(), + }) + } + } + return infos, nil +} + +// GetOldLogInfos returns all the files except the current writing file. +func (f *rotateFile) GetOldLogInfos() ([]LogInfo, error) { + infos, err := f.logInfos() + if err != nil { + return nil, err + } + + // sort by ts asc. + sort.Slice(infos, func(i, j int) bool { + return infos[i].Ts < infos[j].Ts + }) + + if len(infos) > 0 { + return infos[:len(infos)-1], nil + } + return infos, nil +} + +// GetCurrLogInfo returns the last log file which ts >= log.ts_name. +// If we ts < log.Ts returns the last LogInfo. +// ts is the UTC().UnixNano() tiemstamp. +func (f *rotateFile) GetCurrLogInfo(ts int64) (LogInfo, error) { + info := LogInfo{} + infos, err := f.logInfos() + if err != nil { + return info, err + } + + // sort by ts desc. 
+ sort.Slice(infos, func(i, j int) bool { + return infos[i].Ts > infos[j].Ts + }) + + i := sort.Search(len(infos), func(i int) bool { return infos[i].Ts <= ts }) + if len(infos) != i { + info = infos[i] + } + + // Return the last log if ts < last.Ts. + if info.Name == "" && len(infos) > 0 { + lastIdx := len(infos) - 1 + last := infos[lastIdx] + if last.Ts > ts { + return last, nil + } + } + return info, nil +} + +// GetNextLogInfo return the first log file which log.ts_name > ts. +func (f *rotateFile) GetNextLogInfo(logName string) (LogInfo, error) { + info := LogInfo{} + + infos, err := f.logInfos() + if err != nil { + return info, err + } + + // sort by ts asc. + sort.Slice(infos, func(i, j int) bool { + return infos[i].Ts < infos[j].Ts + }) + + // logName is "". + if logName == "" && len(infos) > 0 { + return infos[0], nil + } + + // no logs. + logName = path.Base(logName) + if logName == "." { + return info, nil + } + + // Get the ts from the logname. + name := strings.TrimSuffix(strings.TrimPrefix(logName, f.prefix), f.extension) + t, err := time.Parse(fileFormat, name) + if err != nil { + return info, err + } + ts := t.UnixNano() + i := sort.Search(len(infos), func(i int) bool { return infos[i].Ts > ts }) + if len(infos) != i { + info = infos[i] + } + return info, nil +} diff --git a/src/xbase/rfile_test.go b/src/xbase/rfile_test.go new file mode 100644 index 00000000..56853cdc --- /dev/null +++ b/src/xbase/rfile_test.go @@ -0,0 +1,228 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. 
+ * + */ + +package xbase + +import ( + "os" + "path" + "path/filepath" + "sort" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +const ( + mockDir = "/tmp/test/" + mockPrefix = "xfiletest-" + mockExtension = ".testlog" +) + +func TestFileGetOldLogInfos4(t *testing.T) { + os.RemoveAll(mockDir) + os.MkdirAll(mockDir, 0744) + + xfile := NewRotateFile(mockDir, mockPrefix, mockExtension, 1024*512) + defer xfile.Close() + + for i := 0; i < 1024*64; i++ { + datas := []byte("rotate.me....rotate.me....please...") + n, err := xfile.Write(datas) + assert.Nil(t, err) + assert.Equal(t, len(datas), n) + } + err := xfile.Sync() + assert.Nil(t, err) + + logInfos, err := xfile.GetOldLogInfos() + assert.Nil(t, err) + assert.Equal(t, 4, len(logInfos)) + + // check the old files not contains the current file. + xfile1 := xfile.(*rotateFile) + curName := xfile1.name.Get() + for _, info := range logInfos { + assert.False(t, strings.Contains(curName, info.Name)) + } + + list := make([]string, 0, 10) + filepath.Walk(mockDir, func(path string, info os.FileInfo, err error) error { + if info.IsDir() { + return nil + } + if filepath.Ext(path) == ".testlog" { + list = append(list, path) + os.Remove(path) + } + return nil + }) + + want := 5 + got := len(list) + assert.Equal(t, want, got) +} + +func TestFileGetOldLogInfos0(t *testing.T) { + os.RemoveAll(mockDir) + os.MkdirAll(mockDir, 0744) + + xfile := NewRotateFile(mockDir, mockPrefix, mockExtension, 1024*512) + defer xfile.Close() + + for i := 0; i < 1024; i++ { + datas := []byte("rotate.me....rotate.me....please...") + n, err := xfile.Write(datas) + assert.Nil(t, err) + assert.Equal(t, len(datas), n) + } + + logInfos, err := xfile.GetOldLogInfos() + assert.Nil(t, err) + assert.Equal(t, 0, len(logInfos)) + + list := make([]string, 0, 10) + filepath.Walk(mockDir, func(path string, info os.FileInfo, err error) error { + if info.IsDir() { + return nil + } + if filepath.Ext(path) == ".testlog" { + list = 
append(list, path) + os.Remove(path) + } + return nil + }) + + want := 1 + got := len(list) + assert.Equal(t, want, got) +} + +func TestFileGetCurrLogInfo(t *testing.T) { + dir := mockDir + os.RemoveAll(dir) + os.MkdirAll(dir, os.ModePerm) + + files := []string{ + "xfiletest-20171226140847.773.testlog", + "xfiletest-20171226140846.772.testlog", + "xfiletest-20171226140848.773.testlog", + "xfiletest-20171226140846.770.testlog", + } + + defer os.RemoveAll(dir) + for _, file := range files { + name := path.Join(dir, file) + f, err := os.OpenFile(name, os.O_RDONLY|os.O_CREATE, 0666) + assert.Nil(t, err) + f.Close() + } + + xfile := NewRotateFile(dir, mockPrefix, mockExtension, 1024*512) + defer xfile.Close() + + info, err := xfile.GetCurrLogInfo(time.Now().UnixNano()) + assert.Nil(t, err) + assert.Equal(t, files[2], info.Name) + + xfile1 := xfile.(*rotateFile) + infos, err := xfile1.logInfos() + assert.Nil(t, err) + + // sort by ts desc. + sort.Slice(infos, func(i, j int) bool { + return infos[i].Ts > infos[j].Ts + }) + + // timestamp == info.Ts = "20171226140846.772", + ts := infos[2].Ts + info, err = xfile.GetCurrLogInfo(ts) + assert.Nil(t, err) + assert.Equal(t, infos[2].Name, info.Name) + + // timestamp == (info.Ts + 2seconds) = "20171226140848.772" + ts = time.Unix(0, infos[2].Ts).Add(time.Second * 2).UnixNano() + info, err = xfile.GetCurrLogInfo(ts) + assert.Nil(t, err) + assert.Equal(t, files[0], info.Name) + + // timestamp == (info.Ts - 2seconds) = "20171226140844.772" + ts = time.Unix(0, infos[2].Ts).Add(time.Second * -2).UnixNano() + info, err = xfile.GetCurrLogInfo(ts) + assert.Nil(t, err) + assert.Equal(t, files[3], info.Name) +} + +func TestFileGetNextLogInfo(t *testing.T) { + dir := mockDir + os.RemoveAll(dir) + os.MkdirAll(dir, os.ModePerm) + + files := []string{ + "xfiletest-20171226140847.773.testlog", + "xfiletest-20171226140846.772.testlog", + "xfiletest-20171226140848.773.testlog", + "xfiletest-20171226140846.770.testlog", + } + + for _, file := 
range files { + name := path.Join(dir, file) + f, err := os.OpenFile(name, os.O_RDONLY|os.O_CREATE, 0666) + assert.Nil(t, err) + f.Close() + } + + xfile := NewRotateFile(dir, mockPrefix, mockExtension, 1024*512) + defer xfile.Close() + + // Next should be "xfiletest-20171226140846.772.testlog". + info, err := xfile.GetNextLogInfo("xfiletest-20171226140846.770.testlog") + assert.Nil(t, err) + assert.Equal(t, files[1], info.Name) + + // Next should be "xfiletest-20171226140846.770.testlog". + info, err = xfile.GetNextLogInfo("xfiletest-20171226140844.770.testlog") + assert.Nil(t, err) + assert.Equal(t, files[3], info.Name) + + // Next should be "xfiletest-20171226140846.770.testlog". + info, err = xfile.GetNextLogInfo("/tmp/logtest/xfiletest-20171226140844.770.testlog") + assert.Nil(t, err) + assert.Equal(t, files[3], info.Name) + + // Next should be "". + info, err = xfile.GetNextLogInfo("xfiletest-20171226140848.775.testlog") + assert.Nil(t, err) + assert.Equal(t, "", info.Name) + + // Next should be the first. + info, err = xfile.GetNextLogInfo("") + assert.Nil(t, err) + assert.Equal(t, files[3], info.Name) + + // Parse the time error. + info, err = xfile.GetNextLogInfo("xfiletest-20171226140848.775.test") + assert.NotNil(t, err) +} + +func TestFileGetNextLogInfoWithEmpty(t *testing.T) { + dir := mockDir + os.RemoveAll(dir) + os.MkdirAll(dir, os.ModePerm) + + xfile := NewRotateFile(dir, mockPrefix, mockExtension, 1024*512) + defer xfile.Close() + + // Next should be "". + info, err := xfile.GetNextLogInfo("") + assert.Nil(t, err) + assert.Equal(t, "", info.Name) + xfile.Name() +} diff --git a/src/xbase/stats/counters.go b/src/xbase/stats/counters.go new file mode 100644 index 00000000..00ba8c3e --- /dev/null +++ b/src/xbase/stats/counters.go @@ -0,0 +1,236 @@ +// Copyright 2012, Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package stats + +import ( + "bytes" + "fmt" + "sort" + "strings" + "sync" + "sync/atomic" +) + +// Counters is similar to expvar.Map, except that +// it doesn't allow floats. In addition, it provides +// a Counts method which can be used for tracking rates. +type Counters struct { + // mu only protects adding and retrieving the value (*int64) from the map, + // modification to the actual number (int64) should be done with atomic funcs. + mu sync.RWMutex + counts map[string]*int64 +} + +// NewCounters create a new Counters instance. If name is set, the variable +// gets published. The functional also accepts an optional list of tags that +// pre-creates them initialized to 0. +func NewCounters(name string, tags ...string) *Counters { + c := &Counters{counts: make(map[string]*int64)} + for _, tag := range tags { + c.counts[tag] = new(int64) + } + if name != "" { + publish(name, c) + } + return c +} + +// Sort by name. +type orderByName struct { + name string + count int64 +} +type orderByNames []orderByName + +func (q orderByNames) Len() int { return len(q) } +func (q orderByNames) Swap(i, j int) { q[i], q[j] = q[j], q[i] } +func (q orderByNames) Less(i, j int) bool { return q[i].name < q[j].name } + +// String is used by expvar. +func (c *Counters) String() string { + b := bytes.NewBuffer(make([]byte, 0, 4096)) + var counters orderByNames + + c.mu.RLock() + for k, a := range c.counts { + counter := orderByName{ + name: k, + count: atomic.LoadInt64(a), + } + counters = append(counters, counter) + } + c.mu.RUnlock() + + // sorts. 
+ sort.Sort(counters) + fmt.Fprintf(b, "{") + firstValue := true + for _, v := range counters { + if firstValue { + firstValue = false + } else { + fmt.Fprintf(b, ", ") + } + fmt.Fprintf(b, "\"%v\": %v", v.name, v.count) + } + fmt.Fprintf(b, "}") + return b.String() +} + +func (c *Counters) getValueAddr(name string) *int64 { + c.mu.RLock() + a, ok := c.counts[name] + c.mu.RUnlock() + + if ok { + return a + } + + c.mu.Lock() + defer c.mu.Unlock() + // we need to check the existence again + // as it may be created by other goroutine. + a, ok = c.counts[name] + if ok { + return a + } + a = new(int64) + c.counts[name] = a + return a +} + +// Add adds a value to a named counter. +func (c *Counters) Add(name string, value int64) { + a := c.getValueAddr(name) + atomic.AddInt64(a, value) +} + +// Set sets the value of a named counter. +func (c *Counters) Set(name string, value int64) { + a := c.getValueAddr(name) + atomic.StoreInt64(a, value) +} + +// Reset resets all counter values +func (c *Counters) Reset() { + c.mu.Lock() + defer c.mu.Unlock() + c.counts = make(map[string]*int64) +} + +// Counts returns a copy of the Counters' map. +func (c *Counters) Counts() map[string]int64 { + c.mu.RLock() + defer c.mu.RUnlock() + + counts := make(map[string]int64, len(c.counts)) + for k, a := range c.counts { + counts[k] = atomic.LoadInt64(a) + } + return counts +} + +// CountersFunc converts a function that returns +// a map of int64 as an expvar. +type CountersFunc func() map[string]int64 + +// Counts returns a copy of the Counters' map. +func (f CountersFunc) Counts() map[string]int64 { + return f() +} + +// String is used by expvar. 
+func (f CountersFunc) String() string { + m := f() + if m == nil { + return "{}" + } + b := bytes.NewBuffer(make([]byte, 0, 1024)) + fmt.Fprintf(b, "{") + firstValue := true + for k, v := range m { + if firstValue { + firstValue = false + } else { + fmt.Fprintf(b, ", ") + } + fmt.Fprintf(b, "\"%v\": %v", k, v) + } + fmt.Fprintf(b, "}") + return b.String() +} + +// MultiCounters is a multidimensional Counters implementation where +// names of categories are compound names made with joining multiple +// strings with '.'. +type MultiCounters struct { + Counters + labels []string +} + +// NewMultiCounters creates a new MultiCounters instance, and publishes it +// if name is set. +func NewMultiCounters(name string, labels []string) *MultiCounters { + t := &MultiCounters{ + Counters: Counters{counts: make(map[string]*int64)}, + labels: labels, + } + if name != "" { + publish(name, t) + } + return t +} + +// Labels returns the list of labels. +func (mc *MultiCounters) Labels() []string { + return mc.labels +} + +// Add adds a value to a named counter. len(names) must be equal to +// len(Labels) +func (mc *MultiCounters) Add(names []string, value int64) { + if len(names) != len(mc.labels) { + panic("MultiCounters: wrong number of values in Add") + } + mc.Counters.Add(strings.Join(names, "."), value) +} + +// Set sets the value of a named counter. len(names) must be equal to +// len(Labels) +func (mc *MultiCounters) Set(names []string, value int64) { + if len(names) != len(mc.labels) { + panic("MultiCounters: wrong number of values in Set") + } + mc.Counters.Set(strings.Join(names, "."), value) +} + +// MultiCountersFunc is a multidimensional CountersFunc implementation +// where names of categories are compound names made with joining +// multiple strings with '.'. Since the map is returned by the +// function, we assume it's in the right format (meaning each key is +// of the form 'aaa.bbb.ccc' with as many elements as there are in +// Labels). 
+type MultiCountersFunc struct { + CountersFunc + labels []string +} + +// Labels returns the list of labels. +func (mcf *MultiCountersFunc) Labels() []string { + return mcf.labels +} + +// NewMultiCountersFunc creates a new MultiCountersFunc mapping to the provided +// function. +func NewMultiCountersFunc(name string, labels []string, f CountersFunc) *MultiCountersFunc { + t := &MultiCountersFunc{ + CountersFunc: f, + labels: labels, + } + if name != "" { + publish(name, t) + } + return t +} diff --git a/src/xbase/stats/counters_test.go b/src/xbase/stats/counters_test.go new file mode 100644 index 00000000..bbff4a72 --- /dev/null +++ b/src/xbase/stats/counters_test.go @@ -0,0 +1,163 @@ +// Copyright 2012, Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package stats + +import ( + "expvar" + "math/rand" + "reflect" + "sort" + "testing" + "time" +) + +func TestCounters(t *testing.T) { + clear() + c := NewCounters("counter1") + c.Add("c1", 1) + c.Add("c2", 1) + c.Add("c2", 1) + want1 := `{"c1": 1, "c2": 2}` + want2 := `{"c2": 2, "c1": 1}` + if s := c.String(); s != want1 && s != want2 { + t.Errorf("want %s or %s, got %s", want1, want2, s) + } + counts := c.Counts() + if counts["c1"] != 1 { + t.Errorf("want 1, got %d", counts["c1"]) + } + if counts["c2"] != 2 { + t.Errorf("want 2, got %d", counts["c2"]) + } + f := CountersFunc(func() map[string]int64 { + return map[string]int64{ + "c1": 1, + "c2": 2, + } + }) + if s := f.String(); s != want1 && s != want2 { + t.Errorf("want %s or %s, got %s", want1, want2, s) + } +} + +func TestCountersTags(t *testing.T) { + clear() + c := NewCounters("counterTag1") + want := map[string]int64{} + got := c.Counts() + if !reflect.DeepEqual(got, want) { + t.Errorf("want %v, got %v", want, got) + } + + c = NewCounters("counterTag2", "tag1", "tag2") + want = map[string]int64{"tag1": 0, "tag2": 0} + got = c.Counts() + if !reflect.DeepEqual(got, 
want) { + t.Errorf("want %v, got %v", want, got) + } +} + +func TestMultiCounters(t *testing.T) { + clear() + c := NewMultiCounters("mapCounter1", []string{"aaa", "bbb"}) + c.Add([]string{"c1a", "c1b"}, 1) + c.Add([]string{"c2a", "c2b"}, 1) + c.Add([]string{"c2a", "c2b"}, 1) + want1 := `{"c1a.c1b": 1, "c2a.c2b": 2}` + want2 := `{"c2a.c2b": 2, "c1a.c1b": 1}` + if s := c.String(); s != want1 && s != want2 { + t.Errorf("want %s or %s, got %s", want1, want2, s) + } + counts := c.Counts() + if counts["c1a.c1b"] != 1 { + t.Errorf("want 1, got %d", counts["c1a.c1b"]) + } + if counts["c2a.c2b"] != 2 { + t.Errorf("want 2, got %d", counts["c2a.c2b"]) + } + f := NewMultiCountersFunc("", []string{"aaa", "bbb"}, func() map[string]int64 { + return map[string]int64{ + "c1a.c1b": 1, + "c2a.c2b": 2, + } + }) + if s := f.String(); s != want1 && s != want2 { + t.Errorf("want %s or %s, got %s", want1, want2, s) + } +} + +func TestCountersHook(t *testing.T) { + var gotname string + var gotv *Counters + clear() + Register(func(name string, v expvar.Var) { + gotname = name + gotv = v.(*Counters) + }) + + v := NewCounters("counter2") + if gotname != "counter2" { + t.Errorf("want counter2, got %s", gotname) + } + if gotv != v { + t.Errorf("want %#v, got %#v", v, gotv) + } +} + +var benchCounter = NewCounters("bench") + +func BenchmarkCounters(b *testing.B) { + clear() + benchCounter.Add("c1", 1) + b.ResetTimer() + + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + benchCounter.Add("c1", 1) + } + }) +} + +func BenchmarkCountersTailLatency(b *testing.B) { + // For this one, ignore the time reported by 'go test'. + // The 99th Percentile log line is all that matters. 
+ // (Cmd: go test -bench=BenchmarkCountersTailLatency -benchtime=30s -cpu=10) + clear() + benchCounter.Add("c1", 1) + c := make(chan time.Duration, 100) + done := make(chan struct{}) + go func() { + all := make([]int, b.N) + i := 0 + for dur := range c { + all[i] = int(dur) + i++ + } + sort.Ints(all) + p99 := time.Duration(all[b.N*99/100]) + b.Logf("99th Percentile (for N=%v): %v", b.N, p99) + close(done) + }() + + b.ResetTimer() + b.SetParallelism(100) // The actual number of goroutines is 100*GOMAXPROCS + b.RunParallel(func(pb *testing.PB) { + r := rand.New(rand.NewSource(time.Now().UnixNano())) + + var start time.Time + + for pb.Next() { + // sleep between 0~200ms to simulate 10 QPS per goroutine. + time.Sleep(time.Duration(r.Int63n(200)) * time.Millisecond) + start = time.Now() + benchCounter.Add("c1", 1) + c <- time.Since(start) + } + }) + b.StopTimer() + + close(c) + <-done +} diff --git a/src/xbase/stats/export.go b/src/xbase/stats/export.go new file mode 100644 index 00000000..04664a9e --- /dev/null +++ b/src/xbase/stats/export.go @@ -0,0 +1,389 @@ +// Copyright 2012, Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package stats is a wrapper for expvar. It addtionally +// exports new types that can be used to track performance. +// It also provides a callback hook that allows a program +// to export the variables using methods other than /debug/vars. +// All variables support a String function that +// is expected to return a JSON representation +// of the variable. +// Any function named Add will add the specified +// number to the variable. +// Any function named Counts returns a map of counts +// that can be used by Rates to track rates over time. 
+package stats + +import ( + "bytes" + "expvar" + "flag" + "fmt" + "strconv" + "sync" + "time" + + "xbase/sync2" +) + +var emitStats = flag.Bool("emit_stats", false, "true iff we should emit stats to push-based monitoring/stats backends") +var statsEmitPeriod = flag.Duration("stats_emit_period", time.Duration(60*time.Second), "Interval between emitting stats to all registered backends") +var statsBackend = flag.String("stats_backend", "influxdb", "The name of the registered push-based monitoring/stats backend to use") + +// NewVarHook is the type of a hook to export variables in a different way +type NewVarHook func(name string, v expvar.Var) + +type varGroup struct { + sync.Mutex + vars map[string]expvar.Var + newVarHook NewVarHook +} + +func (vg *varGroup) register(nvh NewVarHook) { + vg.Lock() + defer vg.Unlock() + if vg.newVarHook != nil { + panic("You've already registered a function") + } + if nvh == nil { + panic("nil not allowed") + } + vg.newVarHook = nvh + // Call hook on existing vars because some might have been + // created before the call to register + for k, v := range vg.vars { + nvh(k, v) + } + vg.vars = nil +} + +func (vg *varGroup) publish(name string, v expvar.Var) { + vg.Lock() + defer vg.Unlock() + if expvar.Get(name) == nil { + expvar.Publish(name, v) + if vg.newVarHook != nil { + vg.newVarHook(name, v) + } else { + vg.vars[name] = v + } + } +} + +var defaultVarGroup = varGroup{vars: make(map[string]expvar.Var)} + +// Register allows you to register a callback function +// that will be called whenever a new stats variable gets +// created. This can be used to build alternate methods +// of exporting stats variables. 
+func Register(nvh NewVarHook) { + defaultVarGroup.register(nvh) +} + +// Publish is expvar.Publish+hook +func Publish(name string, v expvar.Var) { + publish(name, v) +} + +func publish(name string, v expvar.Var) { + defaultVarGroup.publish(name, v) +} + +// PushBackend is an interface for any stats/metrics backend that requires data +// to be pushed to it. It's used to support push-based metrics backends, as expvar +// by default only supports pull-based ones. +type PushBackend interface { + // PushAll pushes all stats from expvar to the backend + PushAll() error +} + +var pushBackends = make(map[string]PushBackend) +var once sync.Once + +// RegisterPushBackend allows modules to register PushBackend implementations. +// Should be called on init(). +func RegisterPushBackend(name string, backend PushBackend) { + if _, ok := pushBackends[name]; ok { + //log.Fatalf("PushBackend %s already exists; can't register the same name multiple times", name) + } + pushBackends[name] = backend + if *emitStats { + // Start a single goroutine to emit stats periodically + once.Do(func() { + go emitToBackend(statsEmitPeriod) + }) + } +} + +// emitToBackend does a periodic emit to the selected PushBackend. If a push fails, +// it will be logged as a warning (but things will otherwise proceed as normal). +func emitToBackend(emitPeriod *time.Duration) { + ticker := time.NewTicker(*emitPeriod) + defer ticker.Stop() + for range ticker.C { + backend, ok := pushBackends[*statsBackend] + if !ok { + //log.Errorf("No PushBackend registered with name %s", *statsBackend) + return + } + err := backend.PushAll() + if err != nil { + // TODO(aaijazi): This might cause log spam... + //log.Warningf("Pushing stats to backend %v failed: %v", *statsBackend, err) + } + } +} + +// Float is expvar.Float+Get+hook +type Float struct { + mu sync.Mutex + f float64 +} + +// NewFloat creates a new Float and exports it. 
+func NewFloat(name string) *Float { + v := new(Float) + publish(name, v) + return v +} + +// Add adds the provided value to the Float +func (v *Float) Add(delta float64) { + v.mu.Lock() + v.f += delta + v.mu.Unlock() +} + +// Set sets the value +func (v *Float) Set(value float64) { + v.mu.Lock() + v.f = value + v.mu.Unlock() +} + +// Get returns the value +func (v *Float) Get() float64 { + v.mu.Lock() + f := v.f + v.mu.Unlock() + return f +} + +// String is the implementation of expvar.var +func (v *Float) String() string { + return strconv.FormatFloat(v.Get(), 'g', -1, 64) +} + +// FloatFunc converts a function that returns +// a float64 as an expvar. +type FloatFunc func() float64 + +// String is the implementation of expvar.var +func (f FloatFunc) String() string { + return strconv.FormatFloat(f(), 'g', -1, 64) +} + +// Int is expvar.Int+Get+hook +type Int struct { + i sync2.AtomicInt64 +} + +// NewInt returns a new Int +func NewInt(name string) *Int { + v := new(Int) + if name != "" { + publish(name, v) + } + return v +} + +// Add adds the provided value to the Int +func (v *Int) Add(delta int64) { + v.i.Add(delta) +} + +// Set sets the value +func (v *Int) Set(value int64) { + v.i.Set(value) +} + +// Get returns the value +func (v *Int) Get() int64 { + return v.i.Get() +} + +// String is the implementation of expvar.var +func (v *Int) String() string { + return strconv.FormatInt(v.i.Get(), 10) +} + +// Duration exports a time.Duration +type Duration struct { + i sync2.AtomicDuration +} + +// NewDuration returns a new Duration +func NewDuration(name string) *Duration { + v := new(Duration) + publish(name, v) + return v +} + +// Add adds the provided value to the Duration +func (v *Duration) Add(delta time.Duration) { + v.i.Add(delta) +} + +// Set sets the value +func (v *Duration) Set(value time.Duration) { + v.i.Set(value) +} + +// Get returns the value +func (v *Duration) Get() time.Duration { + return v.i.Get() +} + +// String is the implementation of 
expvar.var +func (v *Duration) String() string { + return strconv.FormatInt(int64(v.i.Get()), 10) +} + +// IntFunc converts a function that returns +// an int64 as an expvar. +type IntFunc func() int64 + +// String is the implementation of expvar.var +func (f IntFunc) String() string { + return strconv.FormatInt(f(), 10) +} + +// DurationFunc converts a function that returns +// an time.Duration as an expvar. +type DurationFunc func() time.Duration + +// String is the implementation of expvar.var +func (f DurationFunc) String() string { + return strconv.FormatInt(int64(f()), 10) +} + +// String is expvar.String+Get+hook +type String struct { + mu sync.Mutex + s string +} + +// NewString returns a new String +func NewString(name string) *String { + v := new(String) + publish(name, v) + return v +} + +// Set sets the value +func (v *String) Set(value string) { + v.mu.Lock() + v.s = value + v.mu.Unlock() +} + +// Get returns the value +func (v *String) Get() string { + v.mu.Lock() + s := v.s + v.mu.Unlock() + return s +} + +// String is the implementation of expvar.var +func (v *String) String() string { + return strconv.Quote(v.Get()) +} + +// StringFunc converts a function that returns +// an string as an expvar. +type StringFunc func() string + +// String is the implementation of expvar.var +func (f StringFunc) String() string { + return strconv.Quote(f()) +} + +// JSONFunc is the public type for a single function that returns json directly. +type JSONFunc func() string + +// String is the implementation of expvar.var +func (f JSONFunc) String() string { + return f() +} + +// PublishJSONFunc publishes any function that returns +// a JSON string as a variable. The string is sent to +// expvar as is. 
+// Get will return the value, or "" if not set.
+ +package stats + +import ( + "expvar" + "testing" + "time" +) + +func clear() { + defaultVarGroup.vars = make(map[string]expvar.Var) + defaultVarGroup.newVarHook = nil +} + +func TestNoHook(t *testing.T) { + clear() + v := NewInt("plainint") + v.Set(1) + if v.String() != "1" { + t.Errorf("want 1, got %s", v.String()) + } +} + +func TestFloat(t *testing.T) { + var gotname string + var gotv *Float + clear() + Register(func(name string, v expvar.Var) { + gotname = name + gotv = v.(*Float) + }) + v := NewFloat("Float") + if gotname != "Float" { + t.Errorf("want Float, got %s", gotname) + } + if gotv != v { + t.Errorf("want %#v, got %#v", v, gotv) + } + v.Set(5.1) + if v.Get() != 5.1 { + t.Errorf("want 5.1, got %v", v.Get()) + } + v.Add(1.0) + if v.Get() != 6.1 { + t.Errorf("want 6.1, got %v", v.Get()) + } + if v.String() != "6.1" { + t.Errorf("want 6.1, got %v", v.Get()) + } + + f := FloatFunc(func() float64 { + return 1.234 + }) + if f.String() != "1.234" { + t.Errorf("want 1.234, got %v", f.String()) + } +} + +func TestInt(t *testing.T) { + var gotname string + var gotv *Int + clear() + Register(func(name string, v expvar.Var) { + gotname = name + gotv = v.(*Int) + }) + v := NewInt("Int") + if gotname != "Int" { + t.Errorf("want Int, got %s", gotname) + } + if gotv != v { + t.Errorf("want %#v, got %#v", v, gotv) + } + v.Set(5) + if v.Get() != 5 { + t.Errorf("want 5, got %v", v.Get()) + } + v.Add(1) + if v.Get() != 6 { + t.Errorf("want 6, got %v", v.Get()) + } + if v.String() != "6" { + t.Errorf("want 6, got %v", v.Get()) + } + + f := IntFunc(func() int64 { + return 1 + }) + if f.String() != "1" { + t.Errorf("want 1, got %v", f.String()) + } +} + +func TestDuration(t *testing.T) { + var gotname string + var gotv *Duration + clear() + Register(func(name string, v expvar.Var) { + gotname = name + gotv = v.(*Duration) + }) + v := NewDuration("Duration") + if gotname != "Duration" { + t.Errorf("want Duration, got %s", gotname) + } + if gotv != v { + t.Errorf("want %#v, 
got %#v", v, gotv) + } + v.Set(time.Duration(5)) + if v.Get() != 5 { + t.Errorf("want 5, got %v", v.Get()) + } + v.Add(time.Duration(1)) + if v.Get() != 6 { + t.Errorf("want 6, got %v", v.Get()) + } + if v.String() != "6" { + t.Errorf("want 6, got %v", v.Get()) + } + + f := DurationFunc(func() time.Duration { + return time.Duration(1) + }) + if f.String() != "1" { + t.Errorf("want 1, got %v", f.String()) + } +} + +func TestString(t *testing.T) { + var gotname string + var gotv *String + clear() + Register(func(name string, v expvar.Var) { + gotname = name + gotv = v.(*String) + }) + v := NewString("String") + if gotname != "String" { + t.Errorf("want String, got %s", gotname) + } + if gotv != v { + t.Errorf("want %#v, got %#v", v, gotv) + } + v.Set("a\"b") + if v.Get() != "a\"b" { + t.Errorf("want \"a\"b\", got %#v", gotv) + } + if v.String() != "\"a\\\"b\"" { + t.Errorf("want \"\"a\\\"b\"\", got %#v", gotv) + } + + f := StringFunc(func() string { + return "a" + }) + if f.String() != "\"a\"" { + t.Errorf("want \"a\", got %v", f.String()) + } +} + +type Mystr string + +func (m *Mystr) String() string { + return string(*m) +} + +func TestPublish(t *testing.T) { + var gotname string + var gotv expvar.Var + clear() + Register(func(name string, v expvar.Var) { + gotname = name + gotv = v.(*Mystr) + }) + v := Mystr("abcd") + Publish("Mystr", &v) + if gotname != "Mystr" { + t.Errorf("want Mystr, got %s", gotname) + } + if gotv != &v { + t.Errorf("want %#v, got %#v", &v, gotv) + } +} + +func f() string { + return "abcd" +} + +func TestPublishFunc(t *testing.T) { + var gotname string + var gotv JSONFunc + clear() + Register(func(name string, v expvar.Var) { + gotname = name + gotv = v.(JSONFunc) + }) + PublishJSONFunc("Myfunc", f) + if gotname != "Myfunc" { + t.Errorf("want Myfunc, got %s", gotname) + } + if gotv.String() != f() { + t.Errorf("want %v, got %#v", f(), gotv()) + } +} + +func TestStringMap(t *testing.T) { + clear() + c := NewStringMap("stringmap1") + 
c.Set("c1", "val1") + c.Set("c2", "val2") + c.Set("c2", "val3") + want1 := `{"c1": "val1", "c2": "val3"}` + want2 := `{"c2": "val3", "c1": "val1"}` + if s := c.String(); s != want1 && s != want2 { + t.Errorf("want %s or %s, got %s", want1, want2, s) + } + + f := StringMapFunc(func() map[string]string { + return map[string]string{ + "c1": "val1", + "c2": "val3", + } + }) + if s := f.String(); s != want1 && s != want2 { + t.Errorf("want %s or %s, got %s", want1, want2, s) + } +} diff --git a/src/xbase/stats/histogram.go b/src/xbase/stats/histogram.go new file mode 100644 index 00000000..36f0c8dd --- /dev/null +++ b/src/xbase/stats/histogram.go @@ -0,0 +1,150 @@ +// Copyright 2012, Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package stats + +import ( + "bytes" + "fmt" + + "xbase/sync2" +) + +// Histogram tracks counts and totals while +// splitting the counts under different buckets +// using specified cutoffs. +type Histogram struct { + cutoffs []int64 + labels []string + countLabel string + totalLabel string + hook func(int64) + + buckets []sync2.AtomicInt64 + total sync2.AtomicInt64 +} + +// NewHistogram creates a histogram with auto-generated labels +// based on the cutoffs. The buckets are categorized using the +// following criterion: cutoff[i-1] < value <= cutoff[i]. Anything +// higher than the highest cutoff is labeled as "inf". +func NewHistogram(name string, cutoffs []int64) *Histogram { + labels := make([]string, len(cutoffs)+1) + for i, v := range cutoffs { + labels[i] = fmt.Sprintf("%d", v) + } + labels[len(labels)-1] = "inf" + return NewGenericHistogram(name, cutoffs, labels, "Count", "Total") +} + +// NewGenericHistogram creates a histogram where all the labels are +// supplied by the caller. The number of labels has to be one more than +// the number of cutoffs because the last label captures everything that +// exceeds the highest cutoff. 
+func NewGenericHistogram(name string, cutoffs []int64, labels []string, countLabel, totalLabel string) *Histogram { + if len(cutoffs) != len(labels)-1 { + panic("mismatched cutoff and label lengths") + } + h := &Histogram{ + cutoffs: cutoffs, + labels: labels, + countLabel: countLabel, + totalLabel: totalLabel, + buckets: make([]sync2.AtomicInt64, len(labels)), + } + if name != "" { + publish(name, h) + } + return h +} + +// Add adds a new measurement to the Histogram. +func (h *Histogram) Add(value int64) { + for i := range h.labels { + if i == len(h.labels)-1 || value <= h.cutoffs[i] { + h.buckets[i].Add(1) + h.total.Add(value) + break + } + } + if h.hook != nil { + h.hook(value) + } +} + +// String returns a string representation of the Histogram. +// Note that sum of all buckets may not be equal to the total temporarily, +// because Add() increments bucket and total with two atomic operations. +func (h *Histogram) String() string { + b, _ := h.MarshalJSON() + return string(b) +} + +// MarshalJSON returns a JSON representation of the Histogram. +// Note that sum of all buckets may not be equal to the total temporarily, +// because Add() increments bucket and total with two atomic operations. +func (h *Histogram) MarshalJSON() ([]byte, error) { + b := bytes.NewBuffer(make([]byte, 0, 4096)) + fmt.Fprintf(b, "{") + totalCount := int64(0) + for i, label := range h.labels { + totalCount += h.buckets[i].Get() + fmt.Fprintf(b, "\"%v\": %v, ", label, totalCount) + } + fmt.Fprintf(b, "\"%s\": %v, ", h.countLabel, totalCount) + fmt.Fprintf(b, "\"%s\": %v", h.totalLabel, h.total.Get()) + fmt.Fprintf(b, "}") + return b.Bytes(), nil +} + +// Counts returns a map from labels to the current count in the Histogram for that label. 
+func (h *Histogram) Counts() map[string]int64 { + counts := make(map[string]int64, len(h.labels)) + for i, label := range h.labels { + counts[label] = h.buckets[i].Get() + } + return counts +} + +// CountLabel returns the count label that was set when this Histogram was created. +func (h *Histogram) CountLabel() string { + return h.countLabel +} + +// Count returns the number of times Add has been called. +func (h *Histogram) Count() (count int64) { + for i := range h.buckets { + count += h.buckets[i].Get() + } + return +} + +// TotalLabel returns the total label that was set when this Histogram was created. +func (h *Histogram) TotalLabel() string { + return h.totalLabel +} + +// Total returns the sum of all values that have been added to this Histogram. +func (h *Histogram) Total() (total int64) { + return h.total.Get() +} + +// Labels returns the labels that were set when this Histogram was created. +func (h *Histogram) Labels() []string { + return h.labels +} + +// Cutoffs returns the cutoffs that were set when this Histogram was created. +func (h *Histogram) Cutoffs() []int64 { + return h.cutoffs +} + +// Buckets returns a snapshot of the current values in all buckets. +func (h *Histogram) Buckets() []int64 { + buckets := make([]int64, len(h.buckets)) + for i := range h.buckets { + buckets[i] = h.buckets[i].Get() + } + return buckets +} diff --git a/src/xbase/stats/histogram_test.go b/src/xbase/stats/histogram_test.go new file mode 100644 index 00000000..64dc0c0d --- /dev/null +++ b/src/xbase/stats/histogram_test.go @@ -0,0 +1,76 @@ +// Copyright 2012, Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package stats + +import ( + "expvar" + "testing" +) + +func TestHistogram(t *testing.T) { + clear() + h := NewHistogram("hist1", []int64{1, 5}) + for i := 0; i < 10; i++ { + h.Add(int64(i)) + } + want := `{"1": 2, "5": 6, "inf": 10, "Count": 10, "Total": 45}` + if h.String() != want { + t.Errorf("got %v, want %v", h.String(), want) + } + counts := h.Counts() + counts["Count"] = h.Count() + counts["Total"] = h.Total() + for k, want := range map[string]int64{ + "1": 2, + "5": 4, + "inf": 4, + "Count": 10, + "Total": 45, + } { + if got := counts[k]; got != want { + t.Errorf("histogram counts [%v]: got %d, want %d", k, got, want) + } + } + if got, want := h.CountLabel(), "Count"; got != want { + t.Errorf("got %v, want %v", got, want) + } + if got, want := h.TotalLabel(), "Total"; got != want { + t.Errorf("got %v, want %v", got, want) + } +} + +func TestGenericHistogram(t *testing.T) { + clear() + h := NewGenericHistogram( + "histgen", + []int64{1, 5}, + []string{"one", "five", "max"}, + "count", + "total", + ) + want := `{"one": 0, "five": 0, "max": 0, "count": 0, "total": 0}` + if got := h.String(); got != want { + t.Errorf("got %v, want %v", got, want) + } +} + +func TestHistogramHook(t *testing.T) { + var gotname string + var gotv *Histogram + clear() + Register(func(name string, v expvar.Var) { + gotname = name + gotv = v.(*Histogram) + }) + + name := "hist2" + v := NewHistogram(name, []int64{1}) + if gotname != name { + t.Errorf("got %v; want %v", gotname, name) + } + if gotv != v { + t.Errorf("got %#v, want %#v", gotv, v) + } +} diff --git a/src/xbase/stats/multidimensional.go b/src/xbase/stats/multidimensional.go new file mode 100644 index 00000000..ee215d96 --- /dev/null +++ b/src/xbase/stats/multidimensional.go @@ -0,0 +1,35 @@ +package stats + +import ( + "fmt" + "strings" +) + +// MultiTracker is a CountTracker that tracks counts grouping them by +// more than one dimension. 
+type MultiTracker interface { + CountTracker + Labels() []string +} + +// CounterForDimension returns a CountTracker for the provided +// dimension. It will panic if the dimension isn't a legal label for +// mt. +func CounterForDimension(mt MultiTracker, dimension string) CountTracker { + for i, lab := range mt.Labels() { + if lab == dimension { + return CountersFunc(func() map[string]int64 { + result := make(map[string]int64) + for k, v := range mt.Counts() { + if k == "All" { + result[k] = v + continue + } + result[strings.Split(k, ".")[i]] += v + } + return result + }) + } + } + panic(fmt.Sprintf("label %v is not one of %v", dimension, mt.Labels())) +} diff --git a/src/xbase/stats/multidimensional_test.go b/src/xbase/stats/multidimensional_test.go new file mode 100644 index 00000000..6cdb7240 --- /dev/null +++ b/src/xbase/stats/multidimensional_test.go @@ -0,0 +1,30 @@ +package stats + +import ( + "reflect" + "testing" + "time" +) + +func TestMultiTimingsCounterFor(t *testing.T) { + clear() + mtm := NewMultiTimings("multitimings3", []string{"dim1", "dim2"}) + + mtm.Add([]string{"tag1a", "tag1b"}, 500*time.Microsecond) + mtm.Add([]string{"tag1a", "tag2b"}, 500*time.Millisecond) + mtm.Add([]string{"tag2a", "tag2b"}, 500*time.Millisecond) + + cases := []struct { + dim string + want map[string]int64 + }{ + {"dim1", map[string]int64{"tag1a": 2, "tag2a": 1, "All": 3}}, + {"dim2", map[string]int64{"tag1b": 1, "tag2b": 2, "All": 3}}, + } + for _, c := range cases { + counts := CounterForDimension(mtm, c.dim).Counts() + if !reflect.DeepEqual(c.want, counts) { + t.Errorf("mtm.CounterFor(%q).Counts()=%v, want %v", c.dim, counts, c.want) + } + } +} diff --git a/src/xbase/stats/rates.go b/src/xbase/stats/rates.go new file mode 100644 index 00000000..befee870 --- /dev/null +++ b/src/xbase/stats/rates.go @@ -0,0 +1,177 @@ +// Copyright 2012, Google Inc. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package stats + +import ( + "encoding/json" + "math" + "sync" + "time" +) + +var timeNow = time.Now + +// CountTracker defines the interface that needs to +// be supported by a variable for being tracked by +// Rates. +type CountTracker interface { + // Counts returns a map which maps each category to a count. + // Subsequent calls must return a monotonously increasing count for the same + // category. + // Optionally, an implementation may include the "All" category which has + // the total count across all categories (e.g. timing.go does this). + Counts() map[string]int64 +} + +// Rates is capable of reporting the rate (typically QPS) +// for any variable that satisfies the CountTracker interface. +type Rates struct { + // mu guards all fields. + mu sync.Mutex + timeStamps *RingInt64 + counts map[string]*RingInt64 + countTracker CountTracker + samples int + interval time.Duration + // previousTotalCount is the total number of counts (across all categories) + // seen in the last sampling interval. + // It's used to calculate the latest total rate. + previousTotalCount int64 + // timestampLastSampling is the time the periodic sampling was run last. + timestampLastSampling time.Time + // totalRate is the rate of total counts per second seen in the latest + // sampling interval e.g. 100 queries / 5 seconds sampling interval = 20 QPS. + totalRate float64 + closed chan bool + wg sync.WaitGroup +} + +// NewRates reports rolling rate information for countTracker. samples specifies +// the number of samples to report, and interval specifies the time interval +// between samples. The minimum interval is 1 second. +// If passing the special value of -1s as interval, we don't snapshot. +// (use this for tests). 
+func NewRates(name string, countTracker CountTracker, samples int, interval time.Duration) *Rates { + if interval < 1*time.Second && interval != -1*time.Second { + panic("interval too small") + } + rt := &Rates{ + timeStamps: NewRingInt64(samples + 1), + counts: make(map[string]*RingInt64), + countTracker: countTracker, + samples: samples + 1, + interval: interval, + timestampLastSampling: timeNow(), + closed: make(chan bool), + } + if name != "" { + publish(name, rt) + } + if interval > 0 { + rt.wg.Add(1) + go rt.track() + } + return rt +} + +func (rt *Rates) Close() { + close(rt.closed) + rt.wg.Wait() +} + +func (rt *Rates) track() { + defer rt.wg.Done() + for { + rt.snapshot() + select { + case <-rt.closed: + return + case <-time.After(rt.interval): + } + } +} + +func (rt *Rates) snapshot() { + rt.mu.Lock() + defer rt.mu.Unlock() + + now := timeNow() + rt.timeStamps.Add(now.UnixNano()) + + // Record current count for each category. + var totalCount int64 + for k, v := range rt.countTracker.Counts() { + if k != "All" { + // Include call categories except "All" (which is returned by the + // "Timer.Counts()" implementation) to avoid double counting. + totalCount += v + } + if values, ok := rt.counts[k]; ok { + values.Add(v) + } else { + rt.counts[k] = NewRingInt64(rt.samples) + rt.counts[k].Add(0) + rt.counts[k].Add(v) + } + } + + // Calculate current total rate. + // NOTE: We assume that every category with a non-zero value, which was + // tracked in "rt.previousTotalCount" in a previous sampling interval, is + // tracked in the current sampling interval in "totalCount" as well. + // (I.e. categories and their count must not "disappear" in + // "rt.countTracker.Counts()".) + durationSeconds := now.Sub(rt.timestampLastSampling).Seconds() + rate := float64(totalCount-rt.previousTotalCount) / durationSeconds + // Round rate with a precision of 0.1. 
+ rt.totalRate = math.Floor(rate*10+0.5) / 10 + rt.previousTotalCount = totalCount + rt.timestampLastSampling = now +} + +// Get returns for each category (string) its latest rates (up to X values +// where X is the configured number of samples of the Rates struct). +// Rates are ordered from least recent (index 0) to most recent (end of slice). +func (rt *Rates) Get() (rateMap map[string][]float64) { + rt.mu.Lock() + defer rt.mu.Unlock() + + rateMap = make(map[string][]float64) + timeStamps := rt.timeStamps.Values() + if len(timeStamps) <= 1 { + return + } + for k, v := range rt.counts { + rateMap[k] = make([]float64, len(timeStamps)-1) + values := v.Values() + valueIndex := len(values) - 1 + for i := len(timeStamps) - 1; i > 0; i-- { + if valueIndex <= 0 { + rateMap[k][i-1] = 0 + continue + } + elapsed := float64((timeStamps[i] - timeStamps[i-1]) / 1e9) + rateMap[k][i-1] = float64(values[valueIndex]-values[valueIndex-1]) / elapsed + valueIndex-- + } + } + return +} + +// TotalRate returns the current total rate (counted across categories). +func (rt *Rates) TotalRate() float64 { + rt.mu.Lock() + defer rt.mu.Unlock() + + return rt.totalRate +} + +func (rt *Rates) String() string { + data, err := json.Marshal(rt.Get()) + if err != nil { + data, _ = json.Marshal(err.Error()) + } + return string(data) +} diff --git a/src/xbase/stats/rates_test.go b/src/xbase/stats/rates_test.go new file mode 100644 index 00000000..0fdf0dec --- /dev/null +++ b/src/xbase/stats/rates_test.go @@ -0,0 +1,130 @@ +// Copyright 2012, Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package stats + +import ( + "expvar" + "testing" + "time" +) + +// For tests, we want to control exactly the time used by Rates. +// The way Rates works is: +// - at creation, do a snapshot. +// - every interval, do a snapshot. +// So in these tests, we make sure to always call snapshot() every interval. 
+// We do other actions after epsilon, but then wait for intervalMinusEpsilon +// and call snapshot(). +const ( + interval = 1 * time.Second + epsilon = 50 * time.Millisecond + intervalMinusEpsilon = interval - epsilon +) + +func TestRates(t *testing.T) { + now := time.Now() + timeNow = func() time.Time { + return now + } + + clear() + c := NewCounters("rcounter1") + r := NewRates("rates1", c, 3, -1*time.Second) + r.snapshot() + now = now.Add(epsilon) + c.Add("tag1", 0) + c.Add("tag2", 0) + now = now.Add(intervalMinusEpsilon) + r.snapshot() + now = now.Add(epsilon) + checkRates(t, r, "after 1s", 0.0, `{"tag1":[0],"tag2":[0]}`) + + c.Add("tag1", 10) + c.Add("tag2", 20) + now = now.Add(intervalMinusEpsilon) + r.snapshot() + now = now.Add(epsilon) + checkRates(t, r, "after 2s", 30.0, `{"tag1":[0,10],"tag2":[0,20]}`) + + now = now.Add(intervalMinusEpsilon) + r.snapshot() + now = now.Add(epsilon) + checkRates(t, r, "after 3s", 0.0, `{"tag1":[0,10,0],"tag2":[0,20,0]}`) + + now = now.Add(intervalMinusEpsilon) + r.snapshot() + now = now.Add(epsilon) + checkRates(t, r, "after 4s", 0.0, `{"tag1":[10,0,0],"tag2":[20,0,0]}`) +} + +func checkRates(t *testing.T, r *Rates, desc string, wantRate float64, wantRateMap string) { + if got := r.String(); got != wantRateMap { + t.Errorf("%v: want %s, got %s", desc, wantRateMap, got) + } + if got := r.TotalRate(); got != wantRate { + t.Errorf("%v: want rate %v, got rate %v", desc, wantRate, got) + } +} + +func TestRatesConsistency(t *testing.T) { + now := time.Now() + timeNow = func() time.Time { + return now + } + + // This tests the following invariant: in the time window + // covered by rates, the sum of the rates reported must be + // equal to the count reported by the counter. 
+ clear() + c := NewCounters("rcounter4") + r := NewRates("rates4", c, 100, -1*time.Second) + r.snapshot() + + now = now.Add(epsilon) + c.Add("a", 1000) + now = now.Add(intervalMinusEpsilon) + r.snapshot() + now = now.Add(epsilon) + c.Add("a", 1) + now = now.Add(intervalMinusEpsilon) + r.snapshot() + now = now.Add(epsilon) + + result := r.Get() + counts := c.Counts() + t.Logf("r.Get(): %v", result) + t.Logf("c.Counts(): %v", counts) + + rate, count := result["a"], counts["a"] + + var sum float64 + for _, v := range rate { + sum += v + } + if sum != float64(counts["a"]) { + t.Errorf("rate inconsistent with count: sum of %v != %v", rate, count) + } + +} + +func TestRatesHook(t *testing.T) { + clear() + c := NewCounters("rcounter2") + var gotname string + var gotv *Rates + clear() + Register(func(name string, v expvar.Var) { + gotname = name + gotv = v.(*Rates) + }) + + v := NewRates("rates2", c, 2, 10*time.Second) + if gotname != "rates2" { + t.Errorf("want rates2, got %s", gotname) + } + if gotv != v { + t.Errorf("want %#v, got %#v", v, gotv) + } +} diff --git a/src/xbase/stats/ring.go b/src/xbase/stats/ring.go new file mode 100644 index 00000000..8be547f0 --- /dev/null +++ b/src/xbase/stats/ring.go @@ -0,0 +1,37 @@ +// Copyright 2012, Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package stats + +// RingInt64 tuple. +// Ring of int64 values +// Not thread safe +type RingInt64 struct { + position int + values []int64 +} + +// NewRingInt64 creates new ring int64. +func NewRingInt64(capacity int) *RingInt64 { + return &RingInt64{values: make([]int64, 0, capacity)} +} + +// Add used to add new val. +func (ri *RingInt64) Add(val int64) { + if len(ri.values) == cap(ri.values) { + ri.values[ri.position] = val + ri.position = (ri.position + 1) % cap(ri.values) + } else { + ri.values = append(ri.values, val) + } +} + +// Values returns the values of the ring. 
+func (ri *RingInt64) Values() (values []int64) { + values = make([]int64, len(ri.values)) + for i := 0; i < len(ri.values); i++ { + values[i] = ri.values[(ri.position+i)%cap(ri.values)] + } + return values +} diff --git a/src/xbase/stats/timings.go b/src/xbase/stats/timings.go new file mode 100644 index 00000000..6f2ce277 --- /dev/null +++ b/src/xbase/stats/timings.go @@ -0,0 +1,199 @@ +// Copyright 2012, Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package stats + +import ( + "encoding/json" + "fmt" + "strings" + "sync" + "time" + + "xbase/sync2" +) + +// Timings is meant to tracks timing data +// by named categories as well as histograms. +type Timings struct { + totalCount sync2.AtomicInt64 + totalTime sync2.AtomicInt64 + + // mu protects get and set of hook and the map. + // Modification to the value in the map is not protected. + mu sync.RWMutex + histograms map[string]*Histogram + hook func(string, time.Duration) +} + +// NewTimings creates a new Timings object, and publishes it if name is set. +// categories is an optional list of categories to initialize to 0. +// Categories that aren't initialized will be missing from the map until the +// first time they are updated. +func NewTimings(name string, categories ...string) *Timings { + t := &Timings{histograms: make(map[string]*Histogram)} + for _, cat := range categories { + t.histograms[cat] = NewGenericHistogram("", bucketCutoffs, bucketLabels, "Count", "Time") + } + if name != "" { + publish(name, t) + } + return t +} + +// Add will add a new value to the named histogram. +func (t *Timings) Add(name string, elapsed time.Duration) { + // Get existing Histogram. + t.mu.RLock() + hist, ok := t.histograms[name] + hook := t.hook + t.mu.RUnlock() + + // Create Histogram if it does not exist. 
+ if !ok { + t.mu.Lock() + hist, ok = t.histograms[name] + if !ok { + hist = NewGenericHistogram("", bucketCutoffs, bucketLabels, "Count", "Time") + t.histograms[name] = hist + } + t.mu.Unlock() + } + + elapsedNs := int64(elapsed) + hist.Add(elapsedNs) + t.totalCount.Add(1) + t.totalTime.Add(elapsedNs) + if hook != nil { + hook(name, elapsed) + } +} + +// Record is a convenience function that records completion +// timing data based on the provided start time of an event. +func (t *Timings) Record(name string, startTime time.Time) { + t.Add(name, time.Since(startTime)) +} + +// String is for expvar. +func (t *Timings) String() string { + t.mu.RLock() + defer t.mu.RUnlock() + + tm := struct { + TotalCount int64 + TotalTime int64 + Histograms map[string]*Histogram + }{ + t.totalCount.Get(), + t.totalTime.Get(), + t.histograms, + } + + data, err := json.Marshal(tm) + if err != nil { + data, _ = json.Marshal(err.Error()) + } + return string(data) +} + +// Histograms returns a map pointing at the histograms. +func (t *Timings) Histograms() (h map[string]*Histogram) { + t.mu.RLock() + defer t.mu.RUnlock() + h = make(map[string]*Histogram, len(t.histograms)) + for k, v := range t.histograms { + h[k] = v + } + return +} + +// Count returns the total count for all values. +func (t *Timings) Count() int64 { + return t.totalCount.Get() +} + +// Time returns the total time elapsed for all values. +func (t *Timings) Time() int64 { + return t.totalTime.Get() +} + +// Counts returns the total count for each value. +func (t *Timings) Counts() map[string]int64 { + t.mu.RLock() + defer t.mu.RUnlock() + + counts := make(map[string]int64, len(t.histograms)+1) + for k, v := range t.histograms { + counts[k] = v.Count() + } + counts["All"] = t.totalCount.Get() + return counts +} + +// Cutoffs returns the cutoffs used in the component histograms. +// Do not change the returned slice. 
+func (t *Timings) Cutoffs() []int64 { + return bucketCutoffs +} + +var bucketCutoffs = []int64{5e5, 1e6, 5e6, 1e7, 5e7, 1e8, 5e8, 1e9, 5e9, 1e10} + +var bucketLabels []string + +func init() { + bucketLabels = make([]string, len(bucketCutoffs)+1) + for i, v := range bucketCutoffs { + bucketLabels[i] = fmt.Sprintf("%d", v) + } + bucketLabels[len(bucketLabels)-1] = "inf" +} + +// MultiTimings is meant to tracks timing data by categories as well +// as histograms. The names of the categories are compound names made +// with joining multiple strings with '.'. +type MultiTimings struct { + Timings + labels []string +} + +// NewMultiTimings creates a new MultiTimings object. +func NewMultiTimings(name string, labels []string) *MultiTimings { + t := &MultiTimings{ + Timings: Timings{histograms: make(map[string]*Histogram)}, + labels: labels, + } + if name != "" { + publish(name, t) + } + return t +} + +// Labels returns descriptions of the parts of each compound category name. +func (mt *MultiTimings) Labels() []string { + return mt.labels +} + +// Add will add a new value to the named histogram. +func (mt *MultiTimings) Add(names []string, elapsed time.Duration) { + if len(names) != len(mt.labels) { + panic("MultiTimings: wrong number of values in Add") + } + mt.Timings.Add(strings.Join(names, "."), elapsed) +} + +// Record is a convenience function that records completion +// timing data based on the provided start time of an event. +func (mt *MultiTimings) Record(names []string, startTime time.Time) { + if len(names) != len(mt.labels) { + panic("MultiTimings: wrong number of values in Record") + } + mt.Timings.Record(strings.Join(names, "."), startTime) +} + +// Cutoffs returns the cutoffs used in the component histograms. +// Do not change the returned slice. 
+func (mt *MultiTimings) Cutoffs() []int64 { + return bucketCutoffs +} diff --git a/src/xbase/stats/timings_test.go b/src/xbase/stats/timings_test.go new file mode 100644 index 00000000..c7485f45 --- /dev/null +++ b/src/xbase/stats/timings_test.go @@ -0,0 +1,54 @@ +// Copyright 2012, Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package stats + +import ( + "expvar" + "testing" + "time" +) + +func TestTimings(t *testing.T) { + clear() + tm := NewTimings("timings1") + tm.Add("tag1", 500*time.Microsecond) + tm.Add("tag1", 1*time.Millisecond) + tm.Add("tag2", 1*time.Millisecond) + want := `{"TotalCount":3,"TotalTime":2500000,"Histograms":{"tag1":{"500000":1,"1000000":2,"5000000":2,"10000000":2,"50000000":2,"100000000":2,"500000000":2,"1000000000":2,"5000000000":2,"10000000000":2,"inf":2,"Count":2,"Time":1500000},"tag2":{"500000":0,"1000000":1,"5000000":1,"10000000":1,"50000000":1,"100000000":1,"500000000":1,"1000000000":1,"5000000000":1,"10000000000":1,"inf":1,"Count":1,"Time":1000000}}}` + if got := tm.String(); got != want { + t.Errorf("got %s, want %s", got, want) + } +} + +func TestMultiTimings(t *testing.T) { + clear() + mtm := NewMultiTimings("maptimings1", []string{"dim1", "dim2"}) + mtm.Add([]string{"tag1a", "tag1b"}, 500*time.Microsecond) + mtm.Add([]string{"tag1a", "tag1b"}, 1*time.Millisecond) + mtm.Add([]string{"tag2a", "tag2b"}, 1*time.Millisecond) + want := `{"TotalCount":3,"TotalTime":2500000,"Histograms":{"tag1a.tag1b":{"500000":1,"1000000":2,"5000000":2,"10000000":2,"50000000":2,"100000000":2,"500000000":2,"1000000000":2,"5000000000":2,"10000000000":2,"inf":2,"Count":2,"Time":1500000},"tag2a.tag2b":{"500000":0,"1000000":1,"5000000":1,"10000000":1,"50000000":1,"100000000":1,"500000000":1,"1000000000":1,"5000000000":1,"10000000000":1,"inf":1,"Count":1,"Time":1000000}}}` + if got := mtm.String(); got != want { + t.Errorf("got %s, want %s", got, want) + } +} + 
+func TestTimingsHook(t *testing.T) { + var gotname string + var gotv *Timings + clear() + Register(func(name string, v expvar.Var) { + gotname = name + gotv = v.(*Timings) + }) + + name := "timings2" + v := NewTimings(name) + if gotname != name { + t.Errorf("got %q, want %q", gotname, name) + } + if gotv != v { + t.Errorf("got %#v, want %#v", gotv, v) + } +} diff --git a/src/xbase/sync2/atomic.go b/src/xbase/sync2/atomic.go new file mode 100644 index 00000000..ee99932e --- /dev/null +++ b/src/xbase/sync2/atomic.go @@ -0,0 +1,166 @@ +// Copyright 2013, Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sync2 + +import ( + "sync" + "sync/atomic" + "time" +) + +// AtomicInt32 is a wrapper with a simpler interface around atomic.(Add|Store|Load|CompareAndSwap)Int32 functions. +type AtomicInt32 struct { + int32 +} + +// NewAtomicInt32 initializes a new AtomicInt32 with a given value. +func NewAtomicInt32(n int32) AtomicInt32 { + return AtomicInt32{n} +} + +// Add atomically adds n to the value. +func (i *AtomicInt32) Add(n int32) int32 { + return atomic.AddInt32(&i.int32, n) +} + +// Set atomically sets n as new value. +func (i *AtomicInt32) Set(n int32) { + atomic.StoreInt32(&i.int32, n) +} + +// Get atomically returns the current value. +func (i *AtomicInt32) Get() int32 { + return atomic.LoadInt32(&i.int32) +} + +// CompareAndSwap atomatically swaps the old with the new value. +func (i *AtomicInt32) CompareAndSwap(oldval, newval int32) (swapped bool) { + return atomic.CompareAndSwapInt32(&i.int32, oldval, newval) +} + +// AtomicInt64 is a wrapper with a simpler interface around atomic.(Add|Store|Load|CompareAndSwap)Int64 functions. +type AtomicInt64 struct { + int64 +} + +// NewAtomicInt64 initializes a new AtomicInt64 with a given value. +func NewAtomicInt64(n int64) AtomicInt64 { + return AtomicInt64{n} +} + +// Add atomically adds n to the value. 
+func (i *AtomicInt64) Add(n int64) int64 { + return atomic.AddInt64(&i.int64, n) +} + +// Set atomically sets n as new value. +func (i *AtomicInt64) Set(n int64) { + atomic.StoreInt64(&i.int64, n) +} + +// Get atomically returns the current value. +func (i *AtomicInt64) Get() int64 { + return atomic.LoadInt64(&i.int64) +} + +// CompareAndSwap atomatically swaps the old with the new value. +func (i *AtomicInt64) CompareAndSwap(oldval, newval int64) (swapped bool) { + return atomic.CompareAndSwapInt64(&i.int64, oldval, newval) +} + +// AtomicDuration is a wrapper with a simpler interface around atomic.(Add|Store|Load|CompareAndSwap)Int64 functions. +type AtomicDuration struct { + int64 +} + +// NewAtomicDuration initializes a new AtomicDuration with a given value. +func NewAtomicDuration(duration time.Duration) AtomicDuration { + return AtomicDuration{int64(duration)} +} + +// Add atomically adds duration to the value. +func (d *AtomicDuration) Add(duration time.Duration) time.Duration { + return time.Duration(atomic.AddInt64(&d.int64, int64(duration))) +} + +// Set atomically sets duration as new value. +func (d *AtomicDuration) Set(duration time.Duration) { + atomic.StoreInt64(&d.int64, int64(duration)) +} + +// Get atomically returns the current value. +func (d *AtomicDuration) Get() time.Duration { + return time.Duration(atomic.LoadInt64(&d.int64)) +} + +// CompareAndSwap atomatically swaps the old with the new value. +func (d *AtomicDuration) CompareAndSwap(oldval, newval time.Duration) (swapped bool) { + return atomic.CompareAndSwapInt64(&d.int64, int64(oldval), int64(newval)) +} + +// AtomicBool gives an atomic boolean variable. +type AtomicBool struct { + int32 +} + +// NewAtomicBool initializes a new AtomicBool with a given value. +func NewAtomicBool(n bool) AtomicBool { + if n { + return AtomicBool{1} + } + return AtomicBool{0} +} + +// Set atomically sets n as new value. 
+func (i *AtomicBool) Set(n bool) { + if n { + atomic.StoreInt32(&i.int32, 1) + } else { + atomic.StoreInt32(&i.int32, 0) + } +} + +// Get atomically returns the current value. +func (i *AtomicBool) Get() bool { + return atomic.LoadInt32(&i.int32) != 0 +} + +// AtomicString gives you atomic-style APIs for string, but +// it's only a convenience wrapper that uses a mutex. So, it's +// not as efficient as the rest of the atomic types. +type AtomicString struct { + mu sync.Mutex + str string +} + +func NewAtomicString(s string) AtomicString { + return AtomicString{str: s} +} + +// Set atomically sets str as new value. +func (s *AtomicString) Set(str string) { + s.mu.Lock() + s.str = str + s.mu.Unlock() +} + +// Get atomically returns the current value. +func (s *AtomicString) Get() string { + s.mu.Lock() + str := s.str + s.mu.Unlock() + return str +} + +// CompareAndSwap atomatically swaps the old with the new value. +func (s *AtomicString) CompareAndSwap(oldval, newval string) (swqpped bool) { + s.mu.Lock() + defer s.mu.Unlock() + if s.str == oldval { + s.str = newval + return true + } + return false +} diff --git a/src/xbase/sync2/atomic_test.go b/src/xbase/sync2/atomic_test.go new file mode 100644 index 00000000..2fcf18d4 --- /dev/null +++ b/src/xbase/sync2/atomic_test.go @@ -0,0 +1,47 @@ +// Copyright 2013, Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package sync2 + +import ( + "testing" +) + +func TestAtomicString(t *testing.T) { + var s AtomicString + if s.Get() != "" { + t.Errorf("want empty, got %s", s.Get()) + } + s.Set("a") + if s.Get() != "a" { + t.Errorf("want a, got %s", s.Get()) + } + if s.CompareAndSwap("b", "c") { + t.Errorf("want false, got true") + } + if s.Get() != "a" { + t.Errorf("want a, got %s", s.Get()) + } + if !s.CompareAndSwap("a", "c") { + t.Errorf("want true, got false") + } + if s.Get() != "c" { + t.Errorf("want c, got %s", s.Get()) + } +} + +func TestAtomicBool(t *testing.T) { + b := NewAtomicBool(true) + if !b.Get() { + t.Error("b.Get: false, want true") + } + b.Set(false) + if b.Get() { + t.Error("b.Get: true, want false") + } + b.Set(true) + if !b.Get() { + t.Error("b.Get: false, want true") + } +} diff --git a/src/xbase/sync2/doc.go b/src/xbase/sync2/doc.go new file mode 100644 index 00000000..2580e414 --- /dev/null +++ b/src/xbase/sync2/doc.go @@ -0,0 +1,6 @@ +// Copyright 2014, Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package sync2 provides extra functionality along the same lines as sync. +package sync2 diff --git a/src/xbase/sync2/semaphore.go b/src/xbase/sync2/semaphore.go new file mode 100644 index 00000000..6629042e --- /dev/null +++ b/src/xbase/sync2/semaphore.go @@ -0,0 +1,68 @@ +// Copyright 2012, Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sync2 + +// What's in a name? Channels have all you need to emulate a counting +// semaphore with a boatload of extra functionality. However, in some +// cases, you just want a familiar API. + +import ( + "time" +) + +// Semaphore is a counting semaphore with the option to +// specify a timeout. +type Semaphore struct { + slots chan struct{} + timeout time.Duration +} + +// NewSemaphore creates a Semaphore. 
The count parameter must be a positive +// number. A timeout of zero means that there is no timeout. +func NewSemaphore(count int, timeout time.Duration) *Semaphore { + sem := &Semaphore{ + slots: make(chan struct{}, count), + timeout: timeout, + } + for i := 0; i < count; i++ { + sem.slots <- struct{}{} + } + return sem +} + +// Acquire returns true on successful acquisition, and +// false on a timeout. +func (sem *Semaphore) Acquire() bool { + if sem.timeout == 0 { + <-sem.slots + return true + } + tm := time.NewTimer(sem.timeout) + defer tm.Stop() + select { + case <-sem.slots: + return true + case <-tm.C: + return false + } +} + +// TryAcquire acquires a semaphore if it's immediately available. +// It returns false otherwise. +func (sem *Semaphore) TryAcquire() bool { + select { + case <-sem.slots: + return true + default: + return false + } +} + +// Release releases the acquired semaphore. You must +// not release more than the number of semaphores you've +// acquired. +func (sem *Semaphore) Release() { + sem.slots <- struct{}{} +} diff --git a/src/xbase/sync2/semaphore_flaky_test.go b/src/xbase/sync2/semaphore_flaky_test.go new file mode 100644 index 00000000..753137b4 --- /dev/null +++ b/src/xbase/sync2/semaphore_flaky_test.go @@ -0,0 +1,55 @@ +// Copyright 2012, Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package sync2 + +import ( + "testing" + "time" +) + +func TestSemaNoTimeout(t *testing.T) { + s := NewSemaphore(1, 0) + s.Acquire() + released := false + go func() { + time.Sleep(10 * time.Millisecond) + released = true + s.Release() + }() + s.Acquire() + if !released { + t.Errorf("release: false, want true") + } +} + +func TestSemaTimeout(t *testing.T) { + s := NewSemaphore(1, 5*time.Millisecond) + s.Acquire() + go func() { + time.Sleep(10 * time.Millisecond) + s.Release() + }() + if s.Acquire() { + t.Errorf("Acquire: true, want false") + } + time.Sleep(10 * time.Millisecond) + if !s.Acquire() { + t.Errorf("Acquire: false, want true") + } +} + +func TestSemaTryAcquire(t *testing.T) { + s := NewSemaphore(1, 0) + if !s.TryAcquire() { + t.Errorf("TryAcquire: false, want true") + } + if s.TryAcquire() { + t.Errorf("TryAcquire: true, want false") + } + s.Release() + if !s.TryAcquire() { + t.Errorf("TryAcquire: false, want true") + } +} diff --git a/src/xbase/throttle.go b/src/xbase/throttle.go new file mode 100644 index 00000000..824ed2f3 --- /dev/null +++ b/src/xbase/throttle.go @@ -0,0 +1,63 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package xbase + +import ( + "sync" + "time" + "xbase/sync2" + + "github.com/beefsack/go-rate" +) + +// Throttle tuple. +type Throttle struct { + limit sync2.AtomicInt32 + rate *rate.RateLimiter + mu sync.Mutex +} + +// NewThrottle creates the new throttle. +func NewThrottle(l int) *Throttle { + return &Throttle{ + limit: sync2.NewAtomicInt32(int32(l)), + rate: rate.New(l, time.Second), + } +} + +// Acquire used to acquire the lock of throttle. +func (throttle *Throttle) Acquire() { + if throttle.limit.Get() <= 0 { + return + } + + throttle.mu.Lock() + defer throttle.mu.Unlock() + throttle.rate.Wait() +} + +// Release used to do nothing. +func (throttle *Throttle) Release() { +} + +// Set used to set the quota for the throttle. 
+func (throttle *Throttle) Set(l int) { + throttle.mu.Lock() + defer throttle.mu.Unlock() + + throttle.limit.Set(int32(l)) + throttle.rate = rate.New(l, time.Second) +} + +// Limits returns the limits of the throttle. +func (throttle *Throttle) Limits() int { + throttle.mu.Lock() + defer throttle.mu.Unlock() + return int(throttle.limit.Get()) +} diff --git a/src/xbase/throttle_test.go b/src/xbase/throttle_test.go new file mode 100644 index 00000000..1f0337a6 --- /dev/null +++ b/src/xbase/throttle_test.go @@ -0,0 +1,39 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package xbase + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestThrottleNoLimits(t *testing.T) { + throttle := NewThrottle(100) + for i := 0; i < 10; i++ { + go func() { + throttle.Acquire() + time.Sleep(1000) + defer throttle.Release() + }() + } + time.Sleep(time.Second * 2) + + for i := 0; i < 10; i++ { + go func() { + throttle.Acquire() + time.Sleep(1000) + defer throttle.Release() + }() + } + throttle.Set(0) + time.Sleep(time.Second * 2) + assert.True(t, throttle.limit.Get() == 0) +} diff --git a/src/xcontext/xcontext.go b/src/xcontext/xcontext.go new file mode 100644 index 00000000..d80794f0 --- /dev/null +++ b/src/xcontext/xcontext.go @@ -0,0 +1,88 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package xcontext + +import ( + "github.com/xelabs/go-mysqlstack/sqlparser/depends/sqltypes" +) + +// RequestMode type. +type RequestMode int + +const ( + // ReqNormal mode will send the query to the backend which computed by the planner. + // This is the default mode. + ReqNormal RequestMode = iota + + // ReqScatter mode will send the RawQuery to all backends. + ReqScatter + + // ReqSingle mode will send the RawQuery to the first backend which computed by the scatter. + ReqSingle +) + +// TxnMode type. +type TxnMode int + +const ( + // TxnNone enum. 
+ TxnNone TxnMode = iota + // TxnRead enum. + TxnRead + // TxnWrite enum. + TxnWrite +) + +// ResultContext tuple. +type ResultContext struct { + Results *sqltypes.Result +} + +// NewResultContext returns the result context. +func NewResultContext() *ResultContext { + return &ResultContext{} +} + +// RequestContext tuple. +type RequestContext struct { + RawQuery string + Mode RequestMode + TxnMode TxnMode + Querys []QueryTuple +} + +// NewRequestContext creates RequestContext +// The default Mode is ReqNormal +func NewRequestContext() *RequestContext { + return &RequestContext{} +} + +// QueryTuple tuple. +type QueryTuple struct { + // Query string. + Query string + + // Backend name. + Backend string + + // Range info. + Range string +} + +// QueryTuples represents the query tuple slice. +type QueryTuples []QueryTuple + +// Len impl. +func (q QueryTuples) Len() int { return len(q) } + +// Swap impl. +func (q QueryTuples) Swap(i, j int) { q[i], q[j] = q[j], q[i] } + +// Less impl. +func (q QueryTuples) Less(i, j int) bool { return q[i].Backend < q[j].Backend } diff --git a/src/xcontext/xcontext_test.go b/src/xcontext/xcontext_test.go new file mode 100644 index 00000000..6ebeb04e --- /dev/null +++ b/src/xcontext/xcontext_test.go @@ -0,0 +1,28 @@ +/* + * Radon + * + * Copyright 2018 The Radon Authors. + * Code is licensed under the GPLv3. + * + */ + +package xcontext + +import ( + "sort" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestXContext(t *testing.T) { + q1 := QueryTuple{Query: "select b1", Backend: "b1"} + q2 := QueryTuple{Query: "select a2", Backend: "a2"} + q3 := QueryTuple{Query: "select 00", Backend: "00"} + querys := []QueryTuple{q1, q2, q3} + + sort.Sort(QueryTuples(querys)) + assert.Equal(t, querys[0], q3) + assert.Equal(t, querys[1], q2) + assert.Equal(t, querys[2], q1) +}